hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c36133b0966ccd18e0da9ba6b34a0be86bea39d | 5,095 | py | Python | cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py | cloudification-io/cinder | 23d76e01f2b4f3771b57fb287084a4884238b827 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py | cloudification-io/cinder | 23d76e01f2b4f3771b57fb287084a4884238b827 | [
"Apache-2.0"
] | 1 | 2020-12-22T20:40:20.000Z | 2020-12-23T18:34:42.000Z | cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py | cloudification-io/cinder | 23d76e01f2b4f3771b57fb287084a4884238b827 | [
"Apache-2.0"
] | 1 | 2019-06-24T20:21:33.000Z | 2019-06-24T20:21:33.000Z | # Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc import powerstore
class TestVolumeCreateFromSource(powerstore.TestPowerStoreDriver):
    """Tests for PowerStore create_cloned_volume/create_volume_from_snapshot."""
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.get_chap_config")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.get_appliance_id_by_name")
    def setUp(self, mock_appliance, mock_chap):
        """Prepare fake destination/source volumes and a source snapshot."""
        super(TestVolumeCreateFromSource, self).setUp()
        mock_appliance.return_value = "A1"
        self.driver.check_for_setup_error()
        # Destination volume (8 GiB) on the test appliance.
        self.volume = fake_volume.fake_volume_obj(
            {},
            host="host@backend#test-appliance",
            provider_id="fake_id",
            size=8
        )
        # Source volume for clone tests (same size as the destination).
        self.source_volume = fake_volume.fake_volume_obj(
            {},
            host="host@backend#test-appliance",
            provider_id="fake_id_1",
            size=8
        )
        # Source snapshot for create-from-snapshot tests.
        self.source_snapshot = fake_snapshot.fake_snapshot_obj(
            {},
            provider_id="fake_id_2",
            volume_size=8
        )
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_cloned_volume(self, mock_create_cloned):
        """Cloning an equal-size source volume succeeds without extending."""
        mock_create_cloned.return_value = self.volume.provider_id
        self.driver.create_cloned_volume(self.volume, self.source_volume)
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.extend_volume")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_cloned_volume_extended(self,
                                           mock_create_cloned,
                                           mock_extend):
        """Cloning into a larger volume must call extend_volume exactly once."""
        mock_create_cloned.return_value = self.volume.provider_id
        self.volume.size = 16
        self.driver.create_cloned_volume(self.volume, self.source_volume)
        mock_extend.assert_called_once()
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_volume_from_snapshot(self, mock_create_from_snap):
        """Creating from an equal-size snapshot succeeds without extending."""
        mock_create_from_snap.return_value = self.volume.provider_id
        self.driver.create_volume_from_snapshot(self.volume,
                                                self.source_snapshot)
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.extend_volume")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_volume_from_snapshot_extended(self,
                                                  mock_create_from_snap,
                                                  mock_extend):
        """Creating a larger volume from a snapshot calls extend_volume once."""
        mock_create_from_snap.return_value = self.volume.provider_id
        self.volume.size = 16
        self.driver.create_volume_from_snapshot(self.volume,
                                                self.source_snapshot)
        mock_extend.assert_called_once()
    @mock.patch("requests.request")
    def test_create_volume_from_source_bad_status(self, mock_create_request):
        """A REST 400 on clone surfaces as VolumeBackendAPIException."""
        mock_create_request.return_value = powerstore.MockResponse(rc=400)
        error = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.adapter._create_volume_from_source,
            self.volume,
            self.source_volume
        )
        self.assertIn("Failed to create clone", error.msg)
    @mock.patch("requests.request")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_volume_from_source_extende_bad_status(
            self,
            mock_create_from_source,
            mock_extend_request
    ):
        """A REST 400 on the follow-up extend raises with an extend message."""
        mock_extend_request.return_value = powerstore.MockResponse(rc=400)
        self.volume.size = 16
        error = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.adapter._create_volume_from_source,
            self.volume,
            self.source_volume
        )
        self.assertIn("Failed to extend PowerStore volume", error.msg)
| 43.547009 | 77 | 0.666143 |
from unittest import mock
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc import powerstore
class TestVolumeCreateFromSource(powerstore.TestPowerStoreDriver):
    """Tests for PowerStore create_cloned_volume/create_volume_from_snapshot."""
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.get_chap_config")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.get_appliance_id_by_name")
    def setUp(self, mock_appliance, mock_chap):
        """Prepare fake destination/source volumes and a source snapshot."""
        super(TestVolumeCreateFromSource, self).setUp()
        mock_appliance.return_value = "A1"
        self.driver.check_for_setup_error()
        # Destination volume (8 GiB) on the test appliance.
        self.volume = fake_volume.fake_volume_obj(
            {},
            host="host@backend#test-appliance",
            provider_id="fake_id",
            size=8
        )
        # Source volume for clone tests (same size as the destination).
        self.source_volume = fake_volume.fake_volume_obj(
            {},
            host="host@backend#test-appliance",
            provider_id="fake_id_1",
            size=8
        )
        # Source snapshot for create-from-snapshot tests.
        self.source_snapshot = fake_snapshot.fake_snapshot_obj(
            {},
            provider_id="fake_id_2",
            volume_size=8
        )
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_cloned_volume(self, mock_create_cloned):
        """Cloning an equal-size source volume succeeds without extending."""
        mock_create_cloned.return_value = self.volume.provider_id
        self.driver.create_cloned_volume(self.volume, self.source_volume)
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.extend_volume")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_cloned_volume_extended(self,
                                           mock_create_cloned,
                                           mock_extend):
        """Cloning into a larger volume must call extend_volume exactly once."""
        mock_create_cloned.return_value = self.volume.provider_id
        self.volume.size = 16
        self.driver.create_cloned_volume(self.volume, self.source_volume)
        mock_extend.assert_called_once()
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_volume_from_snapshot(self, mock_create_from_snap):
        """Creating from an equal-size snapshot succeeds without extending."""
        mock_create_from_snap.return_value = self.volume.provider_id
        self.driver.create_volume_from_snapshot(self.volume,
                                                self.source_snapshot)
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.extend_volume")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_volume_from_snapshot_extended(self,
                                                  mock_create_from_snap,
                                                  mock_extend):
        """Creating a larger volume from a snapshot calls extend_volume once."""
        mock_create_from_snap.return_value = self.volume.provider_id
        self.volume.size = 16
        self.driver.create_volume_from_snapshot(self.volume,
                                                self.source_snapshot)
        mock_extend.assert_called_once()
    @mock.patch("requests.request")
    def test_create_volume_from_source_bad_status(self, mock_create_request):
        """A REST 400 on clone surfaces as VolumeBackendAPIException."""
        mock_create_request.return_value = powerstore.MockResponse(rc=400)
        error = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.adapter._create_volume_from_source,
            self.volume,
            self.source_volume
        )
        self.assertIn("Failed to create clone", error.msg)
    @mock.patch("requests.request")
    @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client."
                "PowerStoreClient.clone_volume_or_snapshot")
    def test_create_volume_from_source_extende_bad_status(
            self,
            mock_create_from_source,
            mock_extend_request
    ):
        """A REST 400 on the follow-up extend raises with an extend message."""
        mock_extend_request.return_value = powerstore.MockResponse(rc=400)
        self.volume.size = 16
        error = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.adapter._create_volume_from_source,
            self.volume,
            self.source_volume
        )
        self.assertIn("Failed to extend PowerStore volume", error.msg)
| true | true |
1c3613d8c60187cb67e66c9ddbb7856a216206f8 | 988 | py | Python | cbmc_parser/gate_helper.py | fabian-hk/Secure-Two-Party-Computation | f7e10a0a5c1b0361dd700391d81cdcc75612666d | [
"BSD-2-Clause"
] | 6 | 2019-05-21T18:40:50.000Z | 2021-10-19T10:27:50.000Z | cbmc_parser/gate_helper.py | fabian-hk/Secure-Two-Party-Computation | f7e10a0a5c1b0361dd700391d81cdcc75612666d | [
"BSD-2-Clause"
] | null | null | null | cbmc_parser/gate_helper.py | fabian-hk/Secure-Two-Party-Computation | f7e10a0a5c1b0361dd700391d81cdcc75612666d | [
"BSD-2-Clause"
] | null | null | null | class GateHelper:
    def __init__(self, id, type, num_of_inputs, output_to, is_circuit_output, output_number_list):
        """Describe one gate of a boolean circuit, including its wiring
        and the mutable state used while evaluating the circuit."""
        # self.id, together with the self.type forms a unique identifier (since Input Gates have ids 1 to size-of-input)
        self.id = id
        # types used are strings: INPUT, AND, OR, XOR, NOT, NAND
        self.type = type
        # number of inputs that a gate has - all except NOT have 2
        self.num_of_inputs = num_of_inputs
        # ids of gates which this gate outputs to as a tuple with the input on the gate
        self.output_to = output_to
        # boolean flag set if gate output is circuit output
        self.is_circuit_output = is_circuit_output
        # output of this gate used for evaluation of circuit
        self.output_value = None
        # the id of the output bits whose value is the same as this gates output value
        # should be empty if is_circuit_output = False
        self.output_number_list = output_number_list
class GateHelper:
    """One gate of a boolean circuit: type, wiring, and evaluation state."""
    def __init__(self, id, type, num_of_inputs, output_to, is_circuit_output, output_number_list):
        # (id, type) uniquely identifies a gate; type is one of the strings
        # INPUT, AND, OR, XOR, NOT, NAND.
        self.id = id
        self.type = type
        # NOT gates have one input; every other gate type has two.
        self.num_of_inputs = num_of_inputs
        # Tuples of (gate id, input slot) that this gate feeds into.
        self.output_to = output_to
        # True when this gate's output is also a circuit output bit.
        self.is_circuit_output = is_circuit_output
        # Filled in during circuit evaluation.
        self.output_value = None
        # Ids of the output bits sharing this gate's value; empty unless
        # is_circuit_output is True.
        self.output_number_list = output_number_list
| true | true |
1c36146626076745b572e10e1350c4b4cd80b4f8 | 27 | py | Python | wepppy/rhem/out/__init__.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | wepppy/rhem/out/__init__.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | wepppy/rhem/out/__init__.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | from .full_output import *
| 13.5 | 26 | 0.777778 | from .full_output import *
| true | true |
1c36149a8e107ddd2113e699cfc4dcdca607f882 | 137 | py | Python | Lists/extend-operation.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 3 | 2022-03-28T09:10:08.000Z | 2022-03-29T10:47:56.000Z | Lists/extend-operation.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 1 | 2022-03-27T11:52:58.000Z | 2022-03-27T11:52:58.000Z | Lists/extend-operation.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | null | null | null | # extend = Adding two lists together
# Demonstrates list.extend(): every element of the second list is
# appended, in order, onto the end of the first list (in place).
my_list = [3, 2, 4, "python", 5.6]
my_new_list = ["hello", 7]
my_list.extend(my_new_list)
print(my_list)
| 13.7 | 36 | 0.722628 |
# list.extend() mutates my_list in place, appending each element of
# my_new_list onto the end.
my_list=[3,2,4,"python",5.6]
my_new_list=["hello",7]
my_list.extend(my_new_list)
print(my_list)
| true | true |
1c3614e0b753d2dea0e0d735e88de2457cb4e21e | 1,622 | py | Python | modules/networkx/readwrite/tests/test_leda.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
] | 12 | 2015-05-14T17:38:41.000Z | 2021-04-25T17:28:28.000Z | modules/networkx/readwrite/tests/test_leda.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
] | 71 | 2015-01-05T16:50:55.000Z | 2020-09-30T19:17:47.000Z | modules/networkx/readwrite/tests/test_leda.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
] | 14 | 2015-02-15T22:19:18.000Z | 2020-09-30T18:54:54.000Z | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
import os,tempfile
class TestLEDA(object):
def test_parse_leda(self):
data="""#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
G=nx.parse_leda(data)
G=nx.parse_leda(data.split('\n'))
assert_equal(sorted(G.nodes()),
['v1', 'v2', 'v3', 'v4', 'v5'])
assert_equal([e for e in sorted(G.edges(data=True))],
[('v1', 'v2', {'label': '4'}),
('v1', 'v3', {'label': '3'}),
('v2', 'v3', {'label': '2'}),
('v3', 'v4', {'label': '3'}),
('v3', 'v5', {'label': '7'}),
('v4', 'v5', {'label': '6'}),
('v5', 'v1', {'label': 'foo'})])
def test_read_LEDA(self):
data="""#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
G=nx.parse_leda(data)
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
b=fh.write(data)
fh.close()
Gin=nx.read_leda(fname)
assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
assert_equal(sorted(G.edges()),sorted(Gin.edges()))
os.close(fd)
os.unlink(fname)
| 45.055556 | 253 | 0.461159 |
from nose.tools import *
import networkx as nx
import os,tempfile
class TestLEDA(object):
def test_parse_leda(self):
data="""#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
G=nx.parse_leda(data)
G=nx.parse_leda(data.split('\n'))
assert_equal(sorted(G.nodes()),
['v1', 'v2', 'v3', 'v4', 'v5'])
assert_equal([e for e in sorted(G.edges(data=True))],
[('v1', 'v2', {'label': '4'}),
('v1', 'v3', {'label': '3'}),
('v2', 'v3', {'label': '2'}),
('v3', 'v4', {'label': '3'}),
('v3', 'v5', {'label': '7'}),
('v4', 'v5', {'label': '6'}),
('v5', 'v1', {'label': 'foo'})])
def test_read_LEDA(self):
data="""#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
G=nx.parse_leda(data)
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
b=fh.write(data)
fh.close()
Gin=nx.read_leda(fname)
assert_equal(sorted(G.nodes()),sorted(Gin.nodes()))
assert_equal(sorted(G.edges()),sorted(Gin.edges()))
os.close(fd)
os.unlink(fname)
| true | true |
1c3615627142e743da18e285932d7e2758d44ec6 | 400 | py | Python | docs/server.py | rohanbari/2048.wasm | 9e9bd6a2a47e16f51a7f0aef641079d4307b6491 | [
"MIT"
] | 3 | 2017-04-17T19:00:38.000Z | 2017-04-18T02:24:28.000Z | docs/server.py | rohanbari/2048.wasm | 9e9bd6a2a47e16f51a7f0aef641079d4307b6491 | [
"MIT"
] | 6 | 2017-03-19T20:48:36.000Z | 2020-08-31T22:22:45.000Z | docs/server.py | rohanbari/2048.wasm | 9e9bd6a2a47e16f51a7f0aef641079d4307b6491 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import http.server
import socketserver

PORT = 8000

# SimpleHTTPRequestHandler serves files from the current directory.
Handler = http.server.SimpleHTTPRequestHandler
# .wasm must be served as application/wasm for browsers to allow
# WebAssembly streaming compilation.
Handler.extensions_map.update({
    '.wasm': 'application/wasm',
})

# Must be set on the class *before* the server is constructed, so the
# listening socket is created with SO_REUSEADDR and quick restarts work.
socketserver.TCPServer.allow_reuse_address = True

with socketserver.TCPServer(("", PORT), Handler) as httpd:
    # (Removed the post-construction allow_reuse_address assignment: the
    # socket is already bound here, so setting it had no effect.)
    print("serving at port", PORT)
    httpd.serve_forever()
| 22.222222 | 58 | 0.75 |
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
# Serve .wasm with the MIME type browsers require for WebAssembly
# streaming compilation.
Handler.extensions_map.update({
    '.wasm': 'application/wasm',
})
# SO_REUSEADDR must be enabled on the class before the socket binds.
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer(("", PORT), Handler) as httpd:
    # NOTE(review): this assignment happens after the socket is already
    # bound, so it has no effect; the class-level setting above is the
    # one that matters.
    httpd.allow_reuse_address = True
    print("serving at port", PORT)
    httpd.serve_forever()
| true | true |
1c3616143c9e4a5ae1644977cd66a796e97fd758 | 1,070 | py | Python | app/bin/dltk/core/deployment/status.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 11 | 2020-10-13T05:27:59.000Z | 2021-09-23T02:56:32.000Z | app/bin/dltk/core/deployment/status.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 48 | 2020-10-15T09:53:36.000Z | 2021-07-05T15:33:24.000Z | app/bin/dltk/core/deployment/status.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 4 | 2020-12-04T08:51:35.000Z | 2022-03-27T09:42:20.000Z |
# Deployment lifecycle state identifiers reported by the exceptions below.
STATUS_DEPLOYING = "deploying"
STATUS_DEPLOYED = "deployed"
STATUS_UNDEPLOYING = "undeploying"
STATUS_DISABLING = "disabling"
STATUS_DISABLED = "disabled"
STATUS_ERROR = "error"
class DeploymentStatus(Exception):
    """Base exception used to signal a deployment state plus a message.
    NOTE(review): Exception.__init__ is never called, so str(e) is empty;
    callers appear to read .status/.message directly -- confirm before
    changing.
    """
    # Class-level defaults, overwritten per instance in __init__.
    status = None
    message = None
    def __init__(self, status, message=""):
        self.status = status
        self.message = message
class StillDeploying(DeploymentStatus):
    """Deployment is still in progress."""
    def __init__(self, message):
        super().__init__(STATUS_DEPLOYING, message)
class Deployed(DeploymentStatus):
    """Deployment completed successfully."""
    def __init__(self):
        super().__init__(STATUS_DEPLOYED)
class StillUndeploying(DeploymentStatus):
    """Undeploy is still in progress."""
    def __init__(self, message):
        super().__init__(STATUS_UNDEPLOYING, message)
class StillStopping(DeploymentStatus):
    """Disable is still in progress."""
    def __init__(self, message):
        super().__init__(STATUS_DISABLING, message)
class Disabled(DeploymentStatus):
    """Deployment has been disabled."""
    def __init__(self):
        super().__init__(STATUS_DISABLED)
class DeploymentError(DeploymentStatus):
    """Deployment failed with an error message."""
    def __init__(self, message):
        super().__init__(STATUS_ERROR, message)
| 22.765957 | 53 | 0.720561 |
# Deployment lifecycle state identifiers.
STATUS_DEPLOYING = "deploying"
STATUS_DEPLOYED = "deployed"
STATUS_UNDEPLOYING = "undeploying"
STATUS_DISABLING = "disabling"
STATUS_DISABLED = "disabled"
STATUS_ERROR = "error"
class DeploymentStatus(Exception):
    """Base exception carrying a deployment status and a message.
    NOTE(review): Exception.__init__ is never invoked, so str(e) is empty;
    callers appear to read .status/.message instead -- confirm.
    """
    status = None
    message = None
    def __init__(self, status, message=""):
        self.status = status
        self.message = message
class StillDeploying(DeploymentStatus):
    """Deployment still in progress."""
    def __init__(self, message):
        super().__init__(STATUS_DEPLOYING, message)
class Deployed(DeploymentStatus):
    """Deployment completed."""
    def __init__(self):
        super().__init__(STATUS_DEPLOYED)
class StillUndeploying(DeploymentStatus):
    """Undeploy still in progress."""
    def __init__(self, message):
        super().__init__(STATUS_UNDEPLOYING, message)
class StillStopping(DeploymentStatus):
    """Disable still in progress."""
    def __init__(self, message):
        super().__init__(STATUS_DISABLING, message)
class Disabled(DeploymentStatus):
    """Deployment disabled."""
    def __init__(self):
        super().__init__(STATUS_DISABLED)
class DeploymentError(DeploymentStatus):
    """Deployment failed."""
    def __init__(self, message):
        super().__init__(STATUS_ERROR, message)
| true | true |
1c36165dd976d6ca6db8448b1b180bc4cc6dca99 | 689 | py | Python | numpy_nn/optimizers/RMSProp.py | rahuldshetty/numpy_nn | 0d61b7fc47cecfd9701972de322c08ed799271b4 | [
"MIT"
] | null | null | null | numpy_nn/optimizers/RMSProp.py | rahuldshetty/numpy_nn | 0d61b7fc47cecfd9701972de322c08ed799271b4 | [
"MIT"
] | 5 | 2020-06-09T03:24:46.000Z | 2020-06-30T07:06:01.000Z | numpy_nn/optimizers/RMSProp.py | rahuldshetty/numpy_nn | 0d61b7fc47cecfd9701972de322c08ed799271b4 | [
"MIT"
] | null | null | null | import numpy as np
class RMSProp():
def __init__(self, lr = 0.09, beta = 0.85, epsilon=0.0005):
self.lr= lr
self.beta = beta
self.epsilon = epsilon
def set_parameters(self, params):
self.parmeters = params
run_values = {}
for key in params.keys():
run_values[key] = 0
self.run_values = run_values
def __call__(self, param, dL, param_name):
value = self.run_values[param_name]
beta = self.beta
new_value = beta * value + (1-beta)*(dL**2)
self.run_values[param_name] = new_value
result = param - self.lr * dL/(np.sqrt(new_value) + self.epsilon)
return result
| 28.708333 | 73 | 0.587808 | import numpy as np
class RMSProp():
def __init__(self, lr = 0.09, beta = 0.85, epsilon=0.0005):
self.lr= lr
self.beta = beta
self.epsilon = epsilon
def set_parameters(self, params):
self.parmeters = params
run_values = {}
for key in params.keys():
run_values[key] = 0
self.run_values = run_values
def __call__(self, param, dL, param_name):
value = self.run_values[param_name]
beta = self.beta
new_value = beta * value + (1-beta)*(dL**2)
self.run_values[param_name] = new_value
result = param - self.lr * dL/(np.sqrt(new_value) + self.epsilon)
return result
| true | true |
1c3617b6500f1bd1ba50dbb168bfa57fd6d2067f | 4,792 | py | Python | woldrnaseq/transcript_types.py | detrout/woldlab-rna-seq | 02099219ff783503e8b6acce94d96b2b374b72da | [
"BSD-3-Clause"
] | 2 | 2016-08-31T18:49:47.000Z | 2016-11-07T18:08:17.000Z | woldrnaseq/transcript_types.py | detrout/woldlab-rna-seq | 02099219ff783503e8b6acce94d96b2b374b72da | [
"BSD-3-Clause"
] | 15 | 2016-02-29T23:13:55.000Z | 2019-04-29T18:09:05.000Z | woldrnaseq/transcript_types.py | detrout/woldlab-rna-seq | 02099219ff783503e8b6acce94d96b2b374b72da | [
"BSD-3-Clause"
] | 2 | 2015-10-22T23:23:51.000Z | 2016-08-09T18:53:49.000Z | #!/usr/bin/python3
"""Read annotation bam and count reads per gene_type
"""
from argparse import ArgumentParser
from collections import Counter
import logging
from pathlib import Path
import pandas
import pysam
import time
from .common import (
add_debug_arguments,
add_metadata_arguments,
add_separator_argument,
add_version_argument,
configure_logging,
get_seperator,
)
from .models import (
load_library_tables,
load_experiments,
find_library_bam_file,
)
logger = logging.getLogger(__name__)
def main(cmdline=None):
    """Command-line entry point: count reads per gene_type.

    Bam files may be given directly (which requires --output), or
    discovered from libraries/experiments metadata tables, producing one
    "{experiment}_gene_type.tsv" score table per experiment.
    """
    parser = make_parser()
    args = parser.parse_args(cmdline)
    configure_logging(args)
    transcript_type_map = make_transcript_type_map(args.gtf_cache)
    if len(args.filenames) > 0:
        if args.output is None:
            # Bug fix: argparse's error helper is .error(); .perror() does
            # not exist and raised AttributeError instead of a usage message.
            parser.error("Output filename is required when listing bam files directly")
        scores = make_transcript_type_scores(args.filenames, transcript_type_map)
        scores.to_csv(args.output, sep="\t")
    if not (args.libraries is None or args.experiments is None):
        sep = get_seperator(args.sep)
        libraries = load_library_tables(args.libraries, sep=sep)
        experiments = load_experiments(args.experiments, sep=sep)
        for i, experiment in experiments.iterrows():
            # Use the module logger for consistency with the rest of the file.
            logger.info("Processing: %s", experiment.name)
            scores = make_experiment_transcript_type_scores(
                experiment, libraries, transcript_type_map
            )
            name = "{}_gene_type.tsv".format(experiment.name)
            scores.to_csv(name, sep="\t")
    elif args.libraries is None and args.experiments is None:
        # Neither metadata table provided: nothing more to do.
        pass
    else:
        # Only one of --libraries/--experiments was provided.
        parser.error(
            "You need to provide both a libraries and experiment table to use this mode")
def make_parser():
    """Build the argparse parser for the transcript_types command line."""
    parser = ArgumentParser()
    parser.add_argument(
        "filenames", nargs="*", help="Names of transcriptome bam files to score"
    )
    parser.add_argument(
        "-o", "--output", help="Name of output score table for directly read bam files"
    )
    # Shared woldrnaseq options: --libraries/--experiments metadata tables.
    add_metadata_arguments(parser)
    parser.add_argument(
        "--gtf-cache", required=True, help="name of gtf-cache file to read"
    )
    add_separator_argument(parser)
    add_version_argument(parser)
    add_debug_arguments(parser)
    return parser
def make_transcript_type_map(cache_filename):
    """Build a transcript_id -> gene_type mapping from the GTF HDF5 cache.

    Spike-in transcripts are labeled "spikein"; tRNA rows are applied last
    and so override a generic transcript annotation with the same id.

    :param cache_filename: path of the HDFStore holding the "gtf" table
    :return: dict mapping transcript_id to gene_type string
    """
    tstart = time.monotonic()
    logger.info("Loading GTF Cache")
    type_name = "gene_type"
    store = pandas.HDFStore(cache_filename)
    try:
        trna = store.select(
            "gtf", columns=["transcript_id", type_name], where=["type==tRNA"]
        )
        transcripts = store.select(
            "gtf", columns=["transcript_id", type_name], where=["type==transcript"]
        )
        spikes = store.select("gtf", columns=["transcript_id"], where=["source==spikein"])
    finally:
        # Close the store even when a select fails, so the HDF5 file
        # handle is not leaked.
        store.close()
    transcript_type_map = {k: "spikein" for k in spikes["transcript_id"]}
    transcript_series = transcripts.set_index("transcript_id")[type_name]
    transcript_type_map.update(transcript_series.to_dict())
    trna_series = trna.set_index("transcript_id")[type_name]
    transcript_type_map.update(trna_series.to_dict())
    # Sanity check: the three sources must be disjoint on transcript_id.
    assert len(transcript_type_map) == (transcripts.shape[0] + spikes.shape[0] + trna.shape[0])
    # Use the module logger (was logging.debug) for consistency.
    logger.debug("Loading finished {:.3} sec".format(time.monotonic() - tstart))
    return transcript_type_map
def make_experiment_transcript_type_scores(experiment, libraries, transcript_type_map):
    """Score every replicate library of one experiment.

    :return: DataFrame with one column of gene_type counts per library_id
    """
    scores = {}
    for library_id in experiment.replicates:
        library = libraries.loc[library_id]
        # Score the transcriptome-space alignment for this library.
        anno = find_library_bam_file(library, "transcriptome")
        scores[library_id] = score_bam_transcript_type(anno, transcript_type_map)
    return pandas.DataFrame(scores)
def make_transcript_type_scores(urls, transcript_type_map):
    """Score each listed bam file; columns are keyed by file basename."""
    per_file = {
        Path(url).name: score_bam_transcript_type(url, transcript_type_map)
        for url in urls
    }
    return pandas.DataFrame(per_file)
def score_bam_transcript_type(alignment_filename, transcript_type_map):
    """Count primary, mapped, QC-pass, non-duplicate reads per gene_type.

    :param alignment_filename: transcriptome-aligned bam/sam file
    :param transcript_type_map: transcript_id -> gene_type mapping
    :return: Counter of gene_type -> read count ("no id" for unknown ids)
    """
    tstart = time.monotonic()
    logger.info("Counting {}".format(alignment_filename))
    counts = Counter()
    with pysam.AlignmentFile(alignment_filename, "r") as aligned:
        for read in aligned.fetch(until_eof=True):
            # Skip secondary, unmapped, QC-fail and duplicate alignments so
            # each fragment is counted at most once.
            if not (read.is_secondary or read.is_unmapped or read.is_qcfail or read.is_duplicate):
                transcript_type = transcript_type_map.get(read.reference_name, "no id")
                counts[transcript_type] += 1
    logging.debug(
        "Finished counting {} {:.3f}".format(
            alignment_filename, time.monotonic() - tstart
        )
    )
    return counts
# Script entry point.
if __name__ == "__main__":
    main()
| 32.598639 | 98 | 0.692613 |
from argparse import ArgumentParser
from collections import Counter
import logging
from pathlib import Path
import pandas
import pysam
import time
from .common import (
add_debug_arguments,
add_metadata_arguments,
add_separator_argument,
add_version_argument,
configure_logging,
get_seperator,
)
from .models import (
load_library_tables,
load_experiments,
find_library_bam_file,
)
logger = logging.getLogger(__name__)
def main(cmdline=None):
    """Entry point: score bam files given directly or via metadata tables."""
    parser = make_parser()
    args = parser.parse_args(cmdline)
    configure_logging(args)
    transcript_type_map = make_transcript_type_map(args.gtf_cache)
    if len(args.filenames) > 0:
        if args.output is None:
            # NOTE(review): argparse has no .perror(); this line would raise
            # AttributeError. The intended call is parser.error().
            parser.perror("Output filename is required when listing bam files directly")
        scores = make_transcript_type_scores(args.filenames, transcript_type_map)
        scores.to_csv(args.output, sep="\t")
    if not (args.libraries is None or args.experiments is None):
        sep = get_seperator(args.sep)
        libraries = load_library_tables(args.libraries, sep=sep)
        experiments = load_experiments(args.experiments, sep=sep)
        for i, experiment in experiments.iterrows():
            logging.info("Processing: %s", experiment.name)
            scores = make_experiment_transcript_type_scores(
                experiment, libraries, transcript_type_map
            )
            name = "{}_gene_type.tsv".format(experiment.name)
            scores.to_csv(name, sep="\t")
    elif args.libraries is None and args.experiments is None:
        pass
    else:
        # NOTE(review): .perror() does not exist on ArgumentParser.
        parser.perror(
            "You need to provide both a libraries and experiment table to use this mode")
def make_parser():
    """Build the command line argument parser."""
    parser = ArgumentParser()
    parser.add_argument(
        "filenames", nargs="*", help="Names of transcriptome bam files to score"
    )
    parser.add_argument(
        "-o", "--output", help="Name of output score table for directly read bam files"
    )
    add_metadata_arguments(parser)
    parser.add_argument(
        "--gtf-cache", required=True, help="name of gtf-cache file to read"
    )
    add_separator_argument(parser)
    add_version_argument(parser)
    add_debug_arguments(parser)
    return parser
def make_transcript_type_map(cache_filename):
    """Map transcript_id -> gene_type from the GTF HDF5 cache.

    Spike-ins are labeled "spikein"; tRNA rows are applied last.
    """
    tstart = time.monotonic()
    logger.info("Loading GTF Cache")
    type_name = "gene_type"
    store = pandas.HDFStore(cache_filename)
    trna = store.select(
        "gtf", columns=["transcript_id", type_name], where=["type==tRNA"]
    )
    transcripts = store.select(
        "gtf", columns=["transcript_id", type_name], where=["type==transcript"]
    )
    spikes = store.select("gtf", columns=["transcript_id"], where=["source==spikein"])
    store.close()
    transcript_type_map = {k: "spikein" for k in spikes["transcript_id"]}
    transcript_series = transcripts.set_index("transcript_id")[type_name]
    transcript_type_map.update(transcript_series.to_dict())
    trna_series = trna.set_index("transcript_id")[type_name]
    transcript_type_map.update(trna_series.to_dict())
    # The three sources are expected to be disjoint on transcript_id.
    assert len(transcript_type_map) == (transcripts.shape[0] + spikes.shape[0] + trna.shape[0])
    logging.debug("Loading finished {:.3} sec".format(time.monotonic() - tstart))
    return transcript_type_map
def make_experiment_transcript_type_scores(experiment, libraries, transcript_type_map):
    """Score every replicate library of one experiment into a DataFrame."""
    scores = {}
    for library_id in experiment.replicates:
        library = libraries.loc[library_id]
        anno = find_library_bam_file(library, "transcriptome")
        scores[library_id] = score_bam_transcript_type(anno, transcript_type_map)
    return pandas.DataFrame(scores)
def make_transcript_type_scores(urls, transcript_type_map):
    """Score each listed bam file, keyed by its basename."""
    scores = {}
    for url in urls:
        basename = Path(url).name
        scores[basename] = score_bam_transcript_type(url, transcript_type_map)
    return pandas.DataFrame(scores)
def score_bam_transcript_type(alignment_filename, transcript_type_map):
    """Count primary, mapped, QC-pass, non-duplicate reads per gene_type."""
    tstart = time.monotonic()
    logger.info("Counting {}".format(alignment_filename))
    counts = Counter()
    with pysam.AlignmentFile(alignment_filename, "r") as aligned:
        for read in aligned.fetch(until_eof=True):
            if not (read.is_secondary or read.is_unmapped or read.is_qcfail or read.is_duplicate):
                transcript_type = transcript_type_map.get(read.reference_name, "no id")
                counts[transcript_type] += 1
    logging.debug(
        "Finished counting {} {:.3f}".format(
            alignment_filename, time.monotonic() - tstart
        )
    )
    return counts
# Script entry point.
if __name__ == "__main__":
    main()
| true | true |
1c361a34076f3e0c90451cf912d877849c9e4c39 | 737 | py | Python | src/genie/libs/parser/bigip/get_ltm_persistenceuniversal.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/bigip/get_ltm_persistenceuniversal.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/bigip/get_ltm_persistenceuniversal.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | # Global Imports
import json
from collections import defaultdict
# Metaparser
from genie.metaparser import MetaParser
# =============================================
# Collection for '/mgmt/tm/ltm/persistence/universal' resources
# =============================================
class LtmPersistenceUniversalSchema(MetaParser):
    """Schema for /mgmt/tm/ltm/persistence/universal output."""
    # Empty schema: the raw REST JSON is returned without validation.
    schema = {}
class LtmPersistenceUniversal(LtmPersistenceUniversalSchema):
    """Retrieve the F5 /mgmt/tm/ltm/persistence/universal collection."""

    cli_command = "/mgmt/tm/ltm/persistence/universal"

    def rest(self):
        # GET the resource and normalize an empty body to an empty dict.
        payload = self.device.get(self.cli_command).json()
        return payload if payload else {}
| 21.676471 | 63 | 0.620081 |
import json
from collections import defaultdict
from genie.metaparser import MetaParser
class LtmPersistenceUniversalSchema(MetaParser):
schema = {}
class LtmPersistenceUniversal(LtmPersistenceUniversalSchema):
cli_command = "/mgmt/tm/ltm/persistence/universal"
def rest(self):
response = self.device.get(self.cli_command)
response_json = response.json()
if not response_json:
return {}
return response_json
| true | true |
1c361c41097f02af037446129523c1e8185eb54f | 2,528 | py | Python | pwn/crypto/RSA.py | Haabb/pwnfork | c2530ea2fd2f9d4e65df234afeb8f7def93afe49 | [
"MIT"
] | 1 | 2016-08-29T03:38:42.000Z | 2016-08-29T03:38:42.000Z | pwn/crypto/RSA.py | Haabb/pwnfork | c2530ea2fd2f9d4e65df234afeb8f7def93afe49 | [
"MIT"
] | null | null | null | pwn/crypto/RSA.py | Haabb/pwnfork | c2530ea2fd2f9d4e65df234afeb8f7def93afe49 | [
"MIT"
] | null | null | null | import pwn
from util import *
def int2bytes(n):
    """PKCS#1 integer to bytes conversion, as used by RSA.

    Returns the big-endian byte string of a non-negative integer
    (empty string for 0).
    """
    chunks = []
    while n > 0:
        chunks.append(chr(n & 0xFF))
        # Floor division keeps n an int; the original ``n /= 256`` yields a
        # float under Python 3 and makes ``n & 0xFF`` raise TypeError.
        n //= 256
    # Low bytes were appended first, so reverse for big-endian order.
    chunks.reverse()
    return "".join(chunks)
def bytes2int(bytes):
    """PKCS#1 bytes to integer conversion, as used by RSA.

    Folds the input big-endian into a single integer.  Elements may be
    one-character strings (converted via ord) or plain integers.
    """
    acc = 0
    for item in bytes:
        value = ord(item) if isinstance(item, str) else item
        acc = acc * 256 + value
    return acc
def calculate_private_key(p,q,e):
    """Calculate the private key, d, based on p, q, and e."""
    # d = e^-1 mod phi(n); modinv and totient come from the local util module.
    return modinv(e,totient(p,q))
def decrypt(c,d,n):
    """
    Given an encrypted number, c, and the private key, n and d,
    returns plaintext number m.
    """
    # fast_exponentiation comes from the local util module; presumably
    # modular exponentiation, i.e. c**d mod n -- TODO confirm against util.
    return fast_exponentiation(c,d,n)
def encrypt(m,e,n):
    """
    Given a plaintext number, m, and the public key, e and n,
    returns the encrypted number c.
    """
    # fast_exponentiation comes from the local util module; presumably
    # modular exponentiation, i.e. m**e mod n -- TODO confirm against util.
    return fast_exponentiation(m,e,n)
def wieners_attack(n, e):
    """
    Implements wieners attack on RSA.
    Based on http://wwwusers.di.uniroma1.it/~parisi/Risorse/Wiener_Attack.pdf

    Returns a tuple (p, q) of factors of n on success, or None.
    """
    # sympy is only needed by this attack, so import it lazily.
    from sympy.solvers import solve
    from sympy.core import numbers
    from sympy import Symbol
    # continued_fractions / calculate_fraction come from the local util
    # module; presumably the continued-fraction expansion of e/n and its
    # convergents -- TODO confirm against util.
    fractions = continued_fractions(n, e)
    for i in range(2, len(fractions)):
        # Each convergent t/a is a candidate for k/d in Wiener's attack.
        frac = calculate_fraction(fractions[:i]).limit_denominator()
        t, a = frac._numerator, frac._denominator
        x = Symbol('x')
        # Integer roots of this polynomial are candidate prime factors of n.
        (f1, f2) = solve(a*e - t*(x-1)*((n/x)-1) - 1, x)
        if isinstance(f1, numbers.Integer) and isinstance(f2, numbers.Integer):
            return (f1, f2)
    return None
def crack_rsa(n, e=None, c=None):
    """Try every implemented attack against an RSA modulus.

    Runs Wiener's attack when the public exponent is known, then falls
    back to factoring n.  Results are reported through pwn.log; nothing
    is returned.
    """
    pwn.log.info("Cracking RSA key")

    if e is not None:
        # Wiener's attack needs the public exponent.
        pwn.log.waitfor("Trying Wiener's attack")
        factors = wieners_attack(n, e)
        if factors is not None:
            pwn.log.succeeded("success!")
            pwn.log.success("Factors: %d %d" % factors)
            return
        pwn.log.failed()

    # Fall back to straight factorization of the modulus.
    pwn.log.waitfor("Trying to factor...")
    factors = factor(n)
    if factors is None:
        pwn.log.failed("failed")
        return
    p, q = factors
    pwn.log.succeeded("success!")
    pwn.log.success("Factors: %d %d" % (p, q))
    if e is not None:
        d = calculate_private_key(p, q, e)
        pwn.log.success("d = %d" % d)
        if c is not None:
            pwn.log.info("Possible message: %s" % int2bytes(decrypt(c, d, n)))
| 28.088889 | 80 | 0.578323 | import pwn
from util import *
def int2bytes(n):
string = ""
while n > 0:
string = "%s%s" % (chr(n & 0xFF), string)
n /= 256
return string
def bytes2int(bytes):
integer = 0
for byte in bytes:
integer *= 256
if isinstance(byte,str): byte = ord(byte)
integer += byte
return integer
def calculate_private_key(p,q,e):
return modinv(e,totient(p,q))
def decrypt(c,d,n):
return fast_exponentiation(c,d,n)
def encrypt(m,e,n):
return fast_exponentiation(m,e,n)
def wieners_attack(n, e):
from sympy.solvers import solve
from sympy.core import numbers
from sympy import Symbol
fractions = continued_fractions(n, e)
for i in range(2, len(fractions)):
frac = calculate_fraction(fractions[:i]).limit_denominator()
t, a = frac._numerator, frac._denominator
x = Symbol('x')
(f1, f2) = solve(a*e - t*(x-1)*((n/x)-1) - 1, x)
if isinstance(f1, numbers.Integer) and isinstance(f2, numbers.Integer):
return (f1, f2)
return None
def crack_rsa(n,e = None,c = None):
pwn.log.info("Cracking RSA key")
if e != None:
pwn.log.waitfor("Trying Wiener's attack")
res = wieners_attack(n,e)
if res != None:
pwn.log.succeeded("success!")
pwn.log.success("Factors: %d %d" % res)
return
else:
pwn.log.failed()
# Factor
pwn.log.waitfor("Trying to factor...")
res = factor(n)
if res != None:
p, q = res
pwn.log.succeeded("success!")
pwn.log.success("Factors: %d %d" % (p, q))
if e != None:
d = calculate_private_key(p,q,e)
pwn.log.success("d = %d" % d)
if c != None:
pwn.log.info("Possible message: %s" % int2bytes(decrypt(c,d,n)))
return
else:
pwn.log.failed("failed")
| true | true |
1c361c888a0ba57c935a8615bcc29c5859ff43b9 | 677 | py | Python | Careers/migrations/0002_auto_20201011_0044.py | CiganOliviu/cigan_enterprize | 8a4f4c06197655622ca08f92ec793add7d0be0cd | [
"Apache-2.0"
] | null | null | null | Careers/migrations/0002_auto_20201011_0044.py | CiganOliviu/cigan_enterprize | 8a4f4c06197655622ca08f92ec793add7d0be0cd | [
"Apache-2.0"
] | null | null | null | Careers/migrations/0002_auto_20201011_0044.py | CiganOliviu/cigan_enterprize | 8a4f4c06197655622ca08f92ec793add7d0be0cd | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.8 on 2020-10-10 21:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Careers', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='availableinternship',
name='representation_image',
field=models.ImageField(default='default.jpg', upload_to='internship_representation_images/'),
),
migrations.AddField(
model_name='availablejob',
name='representation_image',
field=models.ImageField(default='default.jpg', upload_to='jobs_representation_images/'),
),
]
| 28.208333 | 106 | 0.633678 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Careers', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='availableinternship',
name='representation_image',
field=models.ImageField(default='default.jpg', upload_to='internship_representation_images/'),
),
migrations.AddField(
model_name='availablejob',
name='representation_image',
field=models.ImageField(default='default.jpg', upload_to='jobs_representation_images/'),
),
]
| true | true |
1c361c8caf2b4b2ddae381e8797de479157bf22a | 2,163 | py | Python | pinyin/word_2.py | JohndeVostok/THU-Artificial-Intelligent | c721274f7176b1eab35dc467678551982bf43409 | [
"MIT"
] | null | null | null | pinyin/word_2.py | JohndeVostok/THU-Artificial-Intelligent | c721274f7176b1eab35dc467678551982bf43409 | [
"MIT"
] | null | null | null | pinyin/word_2.py | JohndeVostok/THU-Artificial-Intelligent | c721274f7176b1eab35dc467678551982bf43409 | [
"MIT"
] | null | null | null | import json
import math
import sys
# Interpolation weights for the mixture score in getP:
# a1 * unigram + a2 * bigram + b2 * word-pair; they sum to 1.0.
a1 = 0.001
a2 = 0.009
b2 = 0.99
def getP(term):
    """Smoothed score of a two-character string under the mixture model.

    Combines add-one-smoothed counts of the second character (term1Cnt),
    the character pair keyed as v0 * n + v1 (term2Cnt) and the literal
    two-character word (wordCnt), weighted by the module constants
    a1 / a2 / b2.
    """
    first = charDict[term[0]]
    second = charDict[term[1]]
    unigram = 1 + term1Cnt.get(str(second), 0)
    bigram = 1 + term2Cnt.get(str(first * n + second), 0)
    word = 1 + wordCnt.get(term, 0)
    return a1 * unigram + a2 * bigram + b2 * word
if __name__ == "__main__":
    # Load the pinyin -> candidate-characters table and the n-gram count
    # tables produced by the training step.
    with open("term_dict.json", "r") as f:
        termDict = json.load(f)
    with open("char_dict.json", "r") as f:
        charDict = json.load(f)
    with open("pair_cnt.json", "r") as f:
        pairCnt = json.load(f)
    with open("term1_cnt.json", "r") as f:
        term1Cnt = json.load(f)
    with open("term2_cnt.json", "r") as f:
        term2Cnt = json.load(f)
    with open("term2_word_cnt.json", "r") as f:
        wordCnt = json.load(f)
    # with open(sys.argv[1], "r") as f:
    #     lines = f.readlines()
    # Every other line of the test file is used; presumably the skipped
    # lines hold the reference answers -- TODO confirm input format.
    with open("test_input.txt", "r") as f:
        lines = f.readlines()[::2]
    # "*" is a sentinel character used to pad the bigram context at the
    # start of a sentence (see dp0 initialization below).
    n = len(charDict)
    charDict["*"] = n
    n += 1
    for line in lines:
        terms = line.strip().split()
        # Skip whole lines containing any unknown pinyin syllable.
        flag = False
        for term in terms:
            if term not in termDict:
                flag = True
                print("invalid term.")
                break
        if flag:
            continue
        # Viterbi-style DP: dp maps the last two emitted characters to
        # [decoded prefix, accumulated probability].
        dp0 = {"**": ["", 1]}
        for i in range(len(terms)):
            pinyin = terms[i]
            dp1 = {}
            mx = -1e9
            for term in dp0:
                tmp = dp0[term]
                for char in termDict[pinyin]:
                    # Shift the two-character state window by one.
                    newTerm = term[1:] + char
                    if newTerm not in dp1:
                        dp1[newTerm] = ["", -1e9]
                    p = getP(newTerm)
                    # Keep the best-scoring prefix for this state.
                    if tmp[1] * p > dp1[newTerm][1]:
                        dp1[newTerm][0] = tmp[0] + char
                        dp1[newTerm][1] = tmp[1] * p
            dp0 = dp1
        # Emit the highest-probability decoding over all final states.
        mx = -1e9
        res = ""
        for term in dp1:
            if dp1[term][1] > mx:
                mx = dp1[term][1]
                res = dp1[term][0]
        print(res)
| 26.060241 | 55 | 0.443828 | import json
import math
import sys
a1 = 0.001
a2 = 0.009
b2 = 0.99
def getP(term):
v0 = charDict[term[0]]
v1 = charDict[term[1]]
p1 = 1
h = str(v1)
if h in term1Cnt:
p1 += term1Cnt[h]
p2 = 1
h = str(v0 * n + v1)
if h in term2Cnt:
p2 += term2Cnt[h]
p3 = 1
if term in wordCnt:
p3 += wordCnt[term]
return a1 * p1 + a2 * p2 + b2 * p3
if __name__ == "__main__":
with open("term_dict.json", "r") as f:
termDict = json.load(f)
with open("char_dict.json", "r") as f:
charDict = json.load(f)
with open("pair_cnt.json", "r") as f:
pairCnt = json.load(f)
with open("term1_cnt.json", "r") as f:
term1Cnt = json.load(f)
with open("term2_cnt.json", "r") as f:
term2Cnt = json.load(f)
with open("term2_word_cnt.json", "r") as f:
wordCnt = json.load(f)
with open("test_input.txt", "r") as f:
lines = f.readlines()[::2]
n = len(charDict)
charDict["*"] = n
n += 1
for line in lines:
terms = line.strip().split()
flag = False
for term in terms:
if term not in termDict:
flag = True
print("invalid term.")
break
if flag:
continue
dp0 = {"**": ["", 1]}
for i in range(len(terms)):
pinyin = terms[i]
dp1 = {}
mx = -1e9
for term in dp0:
tmp = dp0[term]
for char in termDict[pinyin]:
newTerm = term[1:] + char
if newTerm not in dp1:
dp1[newTerm] = ["", -1e9]
p = getP(newTerm)
if tmp[1] * p > dp1[newTerm][1]:
dp1[newTerm][0] = tmp[0] + char
dp1[newTerm][1] = tmp[1] * p
dp0 = dp1
mx = -1e9
res = ""
for term in dp1:
if dp1[term][1] > mx:
mx = dp1[term][1]
res = dp1[term][0]
print(res)
| true | true |
1c361d5cd19b6aed918426fa3fcaf54237bf462b | 415 | py | Python | server/executor/datasets/mnist.py | wkcn/NNBaby | f0816ffa6faa7deb1116495a8bcbfe8f57413c7d | [
"MIT"
] | 3 | 2018-04-03T09:27:36.000Z | 2021-04-01T13:22:48.000Z | server/executor/datasets/mnist.py | wkcn/NNBaby | f0816ffa6faa7deb1116495a8bcbfe8f57413c7d | [
"MIT"
] | null | null | null | server/executor/datasets/mnist.py | wkcn/NNBaby | f0816ffa6faa7deb1116495a8bcbfe8f57413c7d | [
"MIT"
] | 3 | 2018-04-03T10:05:39.000Z | 2021-08-15T04:58:46.000Z | import keras
from keras.datasets import mnist
# Fetch the MNIST digit dataset (28x28 grayscale images, 10 classes).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
rows, cols = 28, 28
num_classes = 10
# NHWC: add a trailing channel axis and scale pixels from [0, 255] to [0, 1].
x_train = x_train.reshape((x_train.shape[0], rows, cols, 1)) / 255.0
x_test = x_test.reshape((x_test.shape[0], rows, cols, 1)) / 255.0
# One-hot encode the integer class labels.
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.np_utils.to_categorical(y_test, num_classes) | 41.5 | 68 | 0.749398 | import keras
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
rows, cols = 28, 28
num_classes = 10
x_train = x_train.reshape((x_train.shape[0], rows, cols, 1)) / 255.0
x_test = x_test.reshape((x_test.shape[0], rows, cols, 1)) / 255.0
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes)
y_test = keras.utils.np_utils.to_categorical(y_test, num_classes) | true | true |
1c361de296a4c59d4fa243c558d65acd10f37dbc | 5,913 | py | Python | tests/api/utils/test_alembic_util.py | hayesgb/mlrun | 9a8b469b37d7d74f2d04dc956b2966f88fe4e890 | [
"Apache-2.0"
] | 1 | 2021-02-17T08:12:33.000Z | 2021-02-17T08:12:33.000Z | tests/api/utils/test_alembic_util.py | hayesgb/mlrun | 9a8b469b37d7d74f2d04dc956b2966f88fe4e890 | [
"Apache-2.0"
] | null | null | null | tests/api/utils/test_alembic_util.py | hayesgb/mlrun | 9a8b469b37d7d74f2d04dc956b2966f88fe4e890 | [
"Apache-2.0"
] | 1 | 2021-08-30T21:43:38.000Z | 2021-08-30T21:43:38.000Z | import os.path
import pathlib
import shutil
import typing
import unittest.mock
import alembic
import alembic.config
import pytest
import mlrun.api.utils.db.alembic
from mlrun import mlconf
class Constants(object):
    """Revision identifiers shared by the tests in this module."""
    # Ordered as the mocked `alembic history` output prints them
    # (see _revision_history in the mock_database fixture).
    revision_history = ["revision2", "revision1"]
    initial_revision = "revision1"
    latest_revision = "revision2"
    unknown_revision = "revision3"
def test_no_database_exists(mock_alembic, mock_database, mock_shutil_copy):
    """A fresh deployment (no db file) migrates straight to head, no backup."""
    mock_database(db_file_exists=False)
    util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
    util.init_alembic(use_backups=True)
    mock_shutil_copy.assert_not_called()
    assert mock_alembic.stamp_calls == []
    assert mock_alembic.upgrade_calls == ["head"]
def test_database_exists_no_revision(mock_alembic, mock_database, mock_shutil_copy):
    """A db file without a current revision is upgraded to head, no backup."""
    mock_database()
    util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
    util.init_alembic(use_backups=True)
    mock_shutil_copy.assert_not_called()
    assert mock_alembic.upgrade_calls == ["head"]
def test_database_exists_known_revision(
    mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name
):
    """A db at a known revision is backed up, then upgraded to head."""
    mock_database(current_revision=Constants.initial_revision)
    alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
    alembic_util.init_alembic(use_backups=True)
    assert mock_alembic.stamp_calls == []
    assert mock_alembic.upgrade_calls == ["head"]
    # The backup file is named after the pre-upgrade revision.
    mock_shutil_copy.assert_called_once_with(
        mock_db_file_name, pathlib.Path(f"{Constants.initial_revision}.db")
    )
def test_database_exists_unknown_revision_successful_downgrade(
    mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name
):
    """An unknown revision falls back to the latest revision's backup file."""
    mock_database(current_revision=Constants.unknown_revision)
    alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
    alembic_util.init_alembic(use_backups=True)
    assert mock_alembic.stamp_calls == []
    assert mock_alembic.upgrade_calls == ["head"]
    copy_calls = [
        # first copy - backup the current database before downgrading
        unittest.mock.call(
            mock_db_file_name, pathlib.Path(f"{Constants.unknown_revision}.db")
        ),
        # second copy - to downgrade to the old db file
        unittest.mock.call(
            pathlib.Path(f"{Constants.latest_revision}.db"), mock_db_file_name
        ),
        # third copy - to back up the db file. In a real scenario the backup would be {latest_revision}.db
        # as the revision should change during the last copy, but changing a mock during the init_alembic function
        # is cumbersome and might make the test unreadable - so the current revision stays unknown_revision.
        unittest.mock.call(
            mock_db_file_name, pathlib.Path(f"{Constants.unknown_revision}.db")
        ),
    ]
    mock_shutil_copy.assert_has_calls(copy_calls)
def test_database_exists_unknown_revision_failed_downgrade(
    mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name
):
    """Without a backup for the latest revision, init_alembic must raise."""
    mock_database(
        current_revision=Constants.unknown_revision, db_backup_exists=False,
    )
    alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
    with pytest.raises(
        RuntimeError,
        match=f"Cannot fall back to revision {Constants.latest_revision}, "
        f"no back up exists. Current revision: {Constants.unknown_revision}",
    ):
        alembic_util.init_alembic(use_backups=True)
    # Nothing may have been migrated or copied.
    assert mock_alembic.stamp_calls == []
    assert mock_alembic.upgrade_calls == []
    mock_shutil_copy.assert_not_called()
@pytest.fixture()
def mock_database(
    monkeypatch, mock_alembic, mock_db_file_name
) -> typing.Callable[[typing.List[str], str, bool, bool], None]:
    """Return a configurator that fakes db-file presence and alembic output."""
    def _mock_database(
        revision_history: typing.List[str] = None,
        current_revision: str = "",
        db_file_exists: bool = True,
        db_backup_exists: bool = True,
    ):
        revision_history = revision_history or Constants.revision_history
        # os.path.isfile: the main db file and any backup file answer
        # according to the two flags above.
        def _db_file_exists(file_name: str) -> bool:
            if file_name == mock_db_file_name:
                return db_file_exists
            else:
                return db_backup_exists
        monkeypatch.setattr(os.path, "isfile", _db_file_exists)
        # `alembic current`: print the configured revision, or fail like
        # alembic does when the revision is not in the history.
        def _current_revision(alembic_config: typing.Any):
            if current_revision != "" and current_revision not in revision_history:
                raise Exception(
                    f"Can't locate revision identified by '{current_revision}'"
                )
            alembic_config.print_stdout(current_revision)
        mock_alembic.current = _current_revision
        # `alembic history`: one "none -> <rev>, ..." line per revision.
        def _revision_history(alembic_config: typing.Any):
            for revision in revision_history:
                alembic_config.print_stdout(f"none -> {revision}, revision name")
        mock_alembic.history = _revision_history
    return _mock_database
@pytest.fixture()
def mock_db_file_name(monkeypatch) -> str:
    """Point mlrun's DB dsn at a fixed file name and return that name."""
    db_file_name = "test.db"
    monkeypatch.setattr(mlconf.httpdb, "dsn", db_file_name)
    return db_file_name
@pytest.fixture()
def mock_shutil_copy(monkeypatch) -> unittest.mock.Mock:
    """Replace shutil.copy2 with a Mock so backup copies can be asserted."""
    copy = unittest.mock.Mock()
    monkeypatch.setattr(shutil, "copy2", copy)
    return copy
class MockAlembicCommand(object):
    """Stand-in for alembic.command that records stamp/upgrade revisions."""

    def __init__(self):
        # Revisions passed to each command, in call order.
        self.stamp_calls: typing.List[str] = []
        self.upgrade_calls: typing.List[str] = []

    def stamp(self, alembic_config: typing.Any, revision: str):
        self.stamp_calls.append(revision)

    def upgrade(self, alembic_config: typing.Any, revision: str):
        self.upgrade_calls.append(revision)
@pytest.fixture()
def mock_alembic(monkeypatch) -> MockAlembicCommand:
    """Swap alembic.command for a recording stub and stub out Config."""
    recorder = MockAlembicCommand()
    monkeypatch.setattr(alembic, "command", recorder)
    monkeypatch.setattr(alembic.config, "Config", unittest.mock.Mock())
    return recorder
| 34.782353 | 114 | 0.718079 | import os.path
import pathlib
import shutil
import typing
import unittest.mock
import alembic
import alembic.config
import pytest
import mlrun.api.utils.db.alembic
from mlrun import mlconf
class Constants(object):
revision_history = ["revision2", "revision1"]
initial_revision = "revision1"
latest_revision = "revision2"
unknown_revision = "revision3"
def test_no_database_exists(mock_alembic, mock_database, mock_shutil_copy):
mock_database(db_file_exists=False)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(use_backups=True)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
mock_shutil_copy.assert_not_called()
def test_database_exists_no_revision(mock_alembic, mock_database, mock_shutil_copy):
mock_database()
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(use_backups=True)
assert mock_alembic.upgrade_calls == ["head"]
mock_shutil_copy.assert_not_called()
def test_database_exists_known_revision(
mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name
):
mock_database(current_revision=Constants.initial_revision)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(use_backups=True)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
mock_shutil_copy.assert_called_once_with(
mock_db_file_name, pathlib.Path(f"{Constants.initial_revision}.db")
)
def test_database_exists_unknown_revision_successful_downgrade(
mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name
):
mock_database(current_revision=Constants.unknown_revision)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
alembic_util.init_alembic(use_backups=True)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == ["head"]
copy_calls = [
unittest.mock.call(
mock_db_file_name, pathlib.Path(f"{Constants.unknown_revision}.db")
),
unittest.mock.call(
pathlib.Path(f"{Constants.latest_revision}.db"), mock_db_file_name
),
unittest.mock.call(
mock_db_file_name, pathlib.Path(f"{Constants.unknown_revision}.db")
),
]
mock_shutil_copy.assert_has_calls(copy_calls)
def test_database_exists_unknown_revision_failed_downgrade(
mock_alembic, mock_database, mock_shutil_copy, mock_db_file_name
):
mock_database(
current_revision=Constants.unknown_revision, db_backup_exists=False,
)
alembic_util = mlrun.api.utils.db.alembic.AlembicUtil(pathlib.Path(""))
with pytest.raises(
RuntimeError,
match=f"Cannot fall back to revision {Constants.latest_revision}, "
f"no back up exists. Current revision: {Constants.unknown_revision}",
):
alembic_util.init_alembic(use_backups=True)
assert mock_alembic.stamp_calls == []
assert mock_alembic.upgrade_calls == []
mock_shutil_copy.assert_not_called()
@pytest.fixture()
def mock_database(
monkeypatch, mock_alembic, mock_db_file_name
) -> typing.Callable[[typing.List[str], str, bool, bool], None]:
def _mock_database(
revision_history: typing.List[str] = None,
current_revision: str = "",
db_file_exists: bool = True,
db_backup_exists: bool = True,
):
revision_history = revision_history or Constants.revision_history
def _db_file_exists(file_name: str) -> bool:
if file_name == mock_db_file_name:
return db_file_exists
else:
return db_backup_exists
monkeypatch.setattr(os.path, "isfile", _db_file_exists)
def _current_revision(alembic_config: typing.Any):
if current_revision != "" and current_revision not in revision_history:
raise Exception(
f"Can't locate revision identified by '{current_revision}'"
)
alembic_config.print_stdout(current_revision)
mock_alembic.current = _current_revision
def _revision_history(alembic_config: typing.Any):
for revision in revision_history:
alembic_config.print_stdout(f"none -> {revision}, revision name")
mock_alembic.history = _revision_history
return _mock_database
@pytest.fixture()
def mock_db_file_name(monkeypatch) -> str:
db_file_name = "test.db"
monkeypatch.setattr(mlconf.httpdb, "dsn", db_file_name)
return db_file_name
@pytest.fixture()
def mock_shutil_copy(monkeypatch) -> unittest.mock.Mock:
copy = unittest.mock.Mock()
monkeypatch.setattr(shutil, "copy2", copy)
return copy
class MockAlembicCommand(object):
def __init__(self):
self.stamp_calls = []
self.upgrade_calls = []
def stamp(self, alembic_config: typing.Any, revision: str):
self.stamp_calls.append(revision)
def upgrade(self, alembic_config: typing.Any, revision: str):
self.upgrade_calls.append(revision)
@pytest.fixture()
def mock_alembic(monkeypatch) -> MockAlembicCommand:
mocked_alembic_command = MockAlembicCommand()
monkeypatch.setattr(alembic, "command", mocked_alembic_command)
monkeypatch.setattr(alembic.config, "Config", unittest.mock.Mock())
return mocked_alembic_command
| true | true |
1c3620597ea5f219fef8429209a315a374c1297b | 44,777 | py | Python | python/pyarrow/tests/test_convert_builtin.py | stspyder/arrow | 16b2a44be2b71bc1a7c95df70795664b4d450b6d | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_convert_builtin.py | stspyder/arrow | 16b2a44be2b71bc1a7c95df70795664b4d450b6d | [
"Apache-2.0"
] | 6 | 2020-07-01T20:18:37.000Z | 2021-01-07T16:22:13.000Z | python/pyarrow/tests/test_convert_builtin.py | stspyder/arrow | 16b2a44be2b71bc1a7c95df70795664b4d450b6d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from pyarrow.pandas_compat import _pandas_api # noqa
import pyarrow as pa
import collections
import datetime
import decimal
import itertools
import math
import traceback
import numpy as np
import pytz
# Pairs of (numpy scalar type, matching pyarrow type) covering every
# fixed-width integer width and signedness.
int_type_pairs = [
    (np.int8, pa.int8()),
    (np.int16, pa.int16()),
    (np.int32, pa.int32()),
    (np.int64, pa.int64()),
    (np.uint8, pa.uint8()),
    (np.uint16, pa.uint16()),
    (np.uint32, pa.uint32()),
    (np.uint64, pa.uint64())]

# Just the numpy scalar types, for tests that do not need the arrow side.
np_int_types, _ = zip(*int_type_pairs)
class StrangeIterable:
    """An iterable that exposes only __iter__ (no len() or indexing)."""

    def __init__(self, lst):
        self.lst = lst

    def __iter__(self):
        return iter(self.lst)
class MyInt:
    """Wrapper whose only integer protocol is __int__."""

    def __init__(self, value):
        self._value = value

    def __int__(self):
        return self._value
class MyBrokenInt:
    """__int__ raises ZeroDivisionError, to test traceback propagation."""
    def __int__(self):
        # The "# MARKER" comment is load-bearing: test_broken_integers greps
        # the formatted traceback for it, so it must stay on this line.
        1/0 # MARKER
def check_struct_type(ty, expected):
    """
    Check a struct type is as expected, but not taking order into account.
    """
    assert pa.types.is_struct(ty)
    # Compare fields as sets so declaration order is ignored.
    assert set(ty) == set(expected)
def test_iterable_types():
    # A generic iterable converts the same as a plain sequence.
    arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
    arr2 = pa.array((0, 1, 2, 3))
    assert arr1.equals(arr2)


def test_empty_iterable():
    # An empty iterable yields an empty null-typed array.
    arr = pa.array(StrangeIterable([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_limited_iterator_types():
    # An iterator plus an explicit size converts like the equivalent tuple.
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_limited_iterator_size_overflow():
    # size smaller than the iterator length truncates the input.
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
    arr2 = pa.array((0, 1))
    assert arr1.equals(arr2)


def test_limited_iterator_size_underflow():
    # size larger than the iterator length stops at exhaustion.
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_iterator_without_size():
    # Without size, the iterator is consumed fully.
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(iter(range(3)))
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(iter(range(3)), type=pa.int64())
    assert arr1.equals(expected)


def test_infinite_iterator():
    # size is mandatory here: it bounds consumption of the endless iterator.
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(itertools.count(0), size=3)
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
    assert arr1.equals(expected)
def _as_list(xs):
    """Identity: the list itself."""
    return xs


def _as_tuple(xs):
    """Same elements as a tuple."""
    return tuple(xs)


def _as_deque(xs):
    """Same elements as a deque (a sequence that is neither tuple nor list)."""
    return collections.deque(xs)


def _as_dict_values(xs):
    """Same elements as a dict values view (iterable, not a sequence)."""
    return dict(enumerate(xs)).values()


def _as_numpy_array(xs):
    """Same elements as a 1-D object-dtype numpy array."""
    out = np.empty(len(xs), dtype=object)
    out[:] = xs
    return out


# Run a decorated test once per container/iterable flavor above.
parametrize_with_iterable_types = pytest.mark.parametrize(
    "seq", [_as_list, _as_tuple, _as_deque, _as_dict_values, _as_numpy_array])
@parametrize_with_iterable_types
def test_sequence_types(seq):
    # All supported container flavors convert identically.
    arr1 = pa.array(seq([1, 2, 3]))
    arr2 = pa.array([1, 2, 3])
    assert arr1.equals(arr2)


@parametrize_with_iterable_types
def test_sequence_boolean(seq):
    # None becomes a null entry; the type is inferred as bool.
    expected = [True, None, False, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_numpy_boolean(seq):
    """Numpy boolean scalars convert to an arrow bool array with nulls."""
    # np.bool_ is the numpy boolean scalar type; the former np.bool alias
    # was deprecated in NumPy 1.20 and removed in 1.24, so using it here
    # raises AttributeError on modern numpy.
    expected = [np.bool_(True), None, np.bool_(False), None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_empty_list(seq):
    # Empty input infers the null type.
    arr = pa.array(seq([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


@parametrize_with_iterable_types
def test_nested_lists(seq):
    # Lists of ints infer list<int64>; None becomes a null list entry.
    data = [[], [1, 2], None]
    arr = pa.array(seq(data))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    # With explicit type
    arr = pa.array(seq(data), type=pa.list_(pa.int32()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int32())
    assert arr.to_pylist() == data


@parametrize_with_iterable_types
def test_nested_large_lists(seq):
    # Same as above but with the 64-bit-offset large_list type.
    data = [[], [1, 2], None]
    arr = pa.array(seq(data), type=pa.large_list(pa.int16()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.large_list(pa.int16())
    assert arr.to_pylist() == data


@parametrize_with_iterable_types
def test_list_with_non_list(seq):
    # List types don't accept non-sequences
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64()))


@parametrize_with_iterable_types
def test_nested_arrays(seq):
    # Numpy arrays nested in a sequence convert like nested lists.
    arr = pa.array(seq([np.array([], dtype=np.int64),
                        np.array([1, 2], dtype=np.int64), None]))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == [[], [1, 2], None]
@parametrize_with_iterable_types
def test_nested_fixed_size_list(seq):
    """Conversion to fixed_size_list from lists and from numpy arrays."""
    # sequence of lists
    data = [[1, 2], [3, None], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == data

    # sequence of numpy arrays
    data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'),
            None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == [[1, 2], [3, 4], None]

    # incorrect length of the lists or arrays: each item must match the
    # declared size exactly.  (A dead `data = ...` assignment that was
    # immediately shadowed by this loop has been removed.)
    for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]:
        with pytest.raises(
                ValueError, match="Length of item not correct: expected 2"):
            pa.array(seq(data), type=pa.list_(pa.int64(), 2))

    # with list size of 0
    data = [[], [], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 0)
    assert arr.to_pylist() == [[], [], None]
@parametrize_with_iterable_types
def test_sequence_all_none(seq):
    # All-None input infers the null type.
    arr = pa.array(seq([None, None]))
    assert len(arr) == 2
    assert arr.null_count == 2
    assert arr.type == pa.null()
    assert arr.to_pylist() == [None, None]


@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
    # Python ints, including the exact type bounds, round-trip for every
    # integer type.
    np_scalar, pa_type = np_scalar_pa_type
    expected = [1, None, 3, None,
                np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([np.nan]), type=pa_type, from_pandas=False)
    arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
    expected = [None]
    assert len(arr) == 1
    assert arr.null_count == 1
    assert arr.type == pa_type
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)
    arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
    expected = [[None]]
    assert len(arr) == 1
    assert arr.null_count == 0
    assert arr.type == pa.list_(pa_type)
    assert arr.to_pylist() == expected


@parametrize_with_iterable_types
def test_sequence_integer_inferred(seq):
    # Bare Python ints infer int64.
    expected = [1, None, 3, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.int64()
    assert arr.to_pylist() == expected


@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
    # Numpy integer scalars convert under an explicit arrow type.
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None,
                np_scalar(np.iinfo(np_scalar).min),
                np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
    # The arrow type is inferred from the numpy scalar dtype.
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None]
    expected += [np_scalar(np.iinfo(np_scalar).min),
                 np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected))
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_custom_integers(seq):
    # Objects implementing __int__ (MyInt) convert like plain ints.
    expected = [0, 42, 2**33 + 1, -2**63]
    data = list(map(MyInt, expected))
    arr = pa.array(seq(data), type=pa.int64())
    assert arr.to_pylist() == expected


@parametrize_with_iterable_types
def test_broken_integers(seq):
    # An exception raised inside __int__ must propagate unchanged.
    data = [MyBrokenInt()]
    with pytest.raises(ZeroDivisionError) as exc_info:
        pa.array(seq(data), type=pa.int64())
    # Original traceback is kept
    tb_lines = traceback.format_tb(exc_info.tb)
    assert "# MARKER" in tb_lines[-1]
def test_numpy_scalars_mixed_type():
    """Mixed int/float NumPy scalars are promoted to float64 (ARROW-4324)."""
    # ARROW-4324
    data = [np.int32(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type='float64')
    assert arr.equals(expected)
@pytest.mark.xfail(reason="Type inference for uint64 not implemented",
                   raises=OverflowError)
def test_uint64_max_convert():
    """uint64 max converts with an explicit type; inference is expected to
    fail with OverflowError until uint64 inference is implemented."""
    data = [0, np.iinfo(np.uint64).max]
    arr = pa.array(data, type=pa.uint64())
    expected = pa.array(np.array(data, dtype='uint64'))
    assert arr.equals(expected)
    # This part triggers the xfail: inference overflows on uint64 max.
    arr_inferred = pa.array(data)
    assert arr_inferred.equals(expected)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
    """Values just outside the signed range of each width are rejected."""
    ty = getattr(pa, "int%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** (bits - 1)], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-2 ** (bits - 1) - 1], ty)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
    """Values outside the unsigned range (too large or negative) raise."""
    ty = getattr(pa, "uint%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** bits], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-1], ty)
def test_convert_with_mask():
    """A boolean mask nulls out the flagged entries; a wrong-length mask
    is rejected with ValueError."""
    data = [1, 2, 3, 4, 5]
    mask = np.array([False, True, False, False, True])
    result = pa.array(data, mask=mask)
    expected = pa.array([1, None, 3, 4, None])
    assert result.equals(expected)
    # Mask wrong length
    with pytest.raises(ValueError):
        pa.array(data, mask=mask[1:])
def test_garbage_collection():
    """An unreferenced Arrow array releases its memory after GC."""
    import gc
    # Force the cyclic garbage collector to run
    gc.collect()
    bytes_before = pa.total_allocated_bytes()
    # Deliberately unbound: the array must become garbage immediately.
    pa.array([1, None, 3, None])
    gc.collect()
    assert pa.total_allocated_bytes() == bytes_before
def test_sequence_double():
    """Python floats with None gaps infer float64 with correct null count."""
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    assert len(arr) == 6
    assert arr.null_count == 3
    assert arr.type == pa.float64()
    assert arr.to_pylist() == data
def test_double_auto_coerce_from_integer():
    """Ints mixed into a float sequence coerce to double, regardless of
    whether the first element is int or float (ARROW-2814)."""
    # Done as part of ARROW-2814
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    data2 = [1.5, 1, None, 2.5, None, None]
    arr2 = pa.array(data2)
    assert arr.equals(arr2)
    data3 = [1, 1.5, None, 2.5, None, None]
    arr3 = pa.array(data3)
    data4 = [1., 1.5, None, 2.5, None, None]
    arr4 = pa.array(data4)
    assert arr3.equals(arr4)
def test_double_integer_coerce_representable_range():
    """Int-to-double coercion accepts ints up to ±2**53 (the largest
    integers exactly representable in a float64) and rejects beyond."""
    valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)]
    invalid_values = [1.5, 1, 2, None, (1 << 53) + 1]
    invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)]
    # it works
    pa.array(valid_values)
    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values)
    with pytest.raises(ValueError):
        pa.array(invalid_values2)
def test_float32_integer_coerce_representable_range():
    """Int-to-float32 coercion accepts ints up to ±2**24 (exactly
    representable in a float32) and rejects beyond."""
    f32 = np.float32
    valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
    invalid_values = [f32(1.5), (1 << 24) + 1]
    invalid_values2 = [f32(1.5), -((1 << 24) + 1)]
    # it works
    pa.array(valid_values, type=pa.float32())
    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values, type=pa.float32())
    with pytest.raises(ValueError):
        pa.array(invalid_values2, type=pa.float32())
def test_mixed_sequence_errors():
    """Unconvertible mixed sequences fail with a type-specific message."""
    with pytest.raises(ValueError, match="tried to convert to boolean"):
        pa.array([True, 'foo'], type=pa.bool_())
    with pytest.raises(ValueError, match="tried to convert to float32"):
        pa.array([1.5, 'foo'], type=pa.float32())
    with pytest.raises(ValueError, match="tried to convert to double"):
        pa.array([1.5, 'foo'])
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar,pa_type", [
    (np.float16, pa.float16()),
    (np.float32, pa.float32()),
    (np.float64, pa.float64())
])
@pytest.mark.parametrize("from_pandas", [True, False])
def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas):
    """NaN handling depends on from_pandas: treated as null (and skipped
    during inference) when True, kept as a float value when False."""
    data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
    arr = pa.array(seq(data), from_pandas=from_pandas)
    assert len(arr) == 6
    if from_pandas:
        assert arr.null_count == 3
    else:
        assert arr.null_count == 2
    if from_pandas:
        # The NaN is skipped in type inference, otherwise it forces a
        # float64 promotion
        assert arr.type == pa_type
    else:
        assert arr.type == pa.float64()
    assert arr.to_pylist()[:4] == data[:4]
    if from_pandas:
        assert arr.to_pylist()[5] is None
    else:
        assert np.isnan(arr.to_pylist()[5])
@pytest.mark.parametrize("from_pandas", [True, False])
@pytest.mark.parametrize("inner_seq", [np.array, list])
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
    """Object ndarrays of float sublists convert to list<double>;
    nested NaN becomes null only with from_pandas=True (ARROW-2806)."""
    # ARROW-2806
    data = np.array([
        inner_seq([1., 2.]),
        inner_seq([1., 2., 3.]),
        inner_seq([np.nan]),
        None
    ])
    arr = pa.array(data, from_pandas=from_pandas)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.float64())
    if from_pandas:
        assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
    else:
        np.testing.assert_equal(arr.to_pylist(),
                                [[1., 2.], [1., 2., 3.], [np.nan], None])
def test_nested_ndarray_in_object_array():
    """ndarrays nested inside object arrays infer list<list<int64>>;
    length-1 inner arrays are sublists, not scalars (ARROW-4350)."""
    # ARROW-4350
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1, 2], dtype=np.int64),
              np.array([2, 3], dtype=np.int64)]
    arr2 = np.empty(2, dtype=object)
    arr2[0] = [3, 4]
    arr2[1] = [5, 6]
    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type
    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)
    assert result.equals(expected)
    # test case for len-1 arrays to ensure they are interpreted as
    # sublists and not scalars
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1]), np.array([2])]
    result = pa.array([arr, arr])
    assert result.to_pylist() == [[[1], [2]], [[1], [2]]]
@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray "
                           "not yet implemented"),
                   raises=AssertionError)
def test_multidimensional_ndarray_as_nested_list():
    """2-D ndarrays should become nested lists; currently unimplemented,
    so this is expected to fail (ARROW-5645)."""
    # TODO(wesm): see ARROW-5645
    arr = np.array([[1, 2], [2, 3]], dtype=np.int64)
    arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64)
    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type
    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)
    assert result.equals(expected)
def test_array_ignore_nan_from_pandas():
    """NaN mixed with strings only converts (as null) with from_pandas=True
    (ARROW-4324, reverting ARROW-2240 behavior)."""
    # See ARROW-4324, this reverts logic that was introduced in
    # ARROW-2240
    with pytest.raises(ValueError):
        pa.array([np.nan, 'str'])
    arr = pa.array([np.nan, 'str'], from_pandas=True)
    expected = pa.array([None, 'str'])
    assert arr.equals(expected)
def test_nested_ndarray_different_dtypes():
    """Nested ndarrays of differing dtypes infer a common list type and
    may be cast to an explicit element type."""
    data = [
        np.array([1, 2, 3], dtype='int64'),
        None,
        np.array([4, 5, 6], dtype='uint32')
    ]
    arr = pa.array(data)
    expected = pa.array([[1, 2, 3], None, [4, 5, 6]],
                        type=pa.list_(pa.int64()))
    assert arr.equals(expected)
    t2 = pa.list_(pa.uint32())
    arr2 = pa.array(data, type=t2)
    expected2 = expected.cast(t2)
    assert arr2.equals(expected2)
def test_sequence_unicode():
    """Unicode strings with None gaps infer pa.string()."""
    data = ['foo', 'bar', None, 'mañana']
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.string()
    assert arr.to_pylist() == data
def check_array_mixed_unicode_bytes(binary_type, string_type):
    """Shared checks for mixed str/bytes/bytearray input.

    Inference picks binary; an explicit binary or string type coerces
    everything to bytes or unicode respectively.  Parametrized over the
    regular and large binary/string type pairs by the caller.
    """
    values = ['qux', b'foo', bytearray(b'barz')]
    b_values = [b'qux', b'foo', b'barz']
    u_values = ['qux', 'foo', 'barz']
    arr = pa.array(values)
    expected = pa.array(b_values, type=pa.binary())
    assert arr.type == pa.binary()
    assert arr.equals(expected)
    arr = pa.array(values, type=binary_type)
    expected = pa.array(b_values, type=binary_type)
    assert arr.type == binary_type
    assert arr.equals(expected)
    arr = pa.array(values, type=string_type)
    expected = pa.array(u_values, type=string_type)
    assert arr.type == string_type
    assert arr.equals(expected)
def test_array_mixed_unicode_bytes():
    """Run the mixed str/bytes checks for regular and large types."""
    check_array_mixed_unicode_bytes(pa.binary(), pa.string())
    check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string())
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_array(ty):
    """Large binary/string arrays can hold more than 4GB of total data."""
    # Construct a large binary array with more than 4GB of data
    s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10
    nrepeats = math.ceil((2**32 + 5) / len(s))
    data = [s] * nrepeats
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == nrepeats
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_value(ty):
    """A single value larger than 4GB fits in a large binary/string array."""
    # Construct a large binary array with a single value larger than 4GB
    s = b"0123456789abcdefghijklmnopqrstuvwxyz"
    nrepeats = math.ceil((2**32 + 5) / len(s))
    arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == 4
    buf = arr[1].as_buffer()
    assert len(buf) == len(s) * nrepeats
def test_sequence_bytes():
    """bytes, memoryview (including non-contiguous), str and bytearray
    inputs all convert to binary arrays."""
    u1 = b'ma\xc3\xb1ana'
    data = [b'foo',
            memoryview(b'dada'),
            memoryview(b'd-a-t-a')[::2],  # non-contiguous is made contiguous
            u1.decode('utf-8'),  # unicode gets encoded,
            bytearray(b'bar'),
            None]
    for ty in [None, pa.binary(), pa.large_binary()]:
        arr = pa.array(data, type=ty)
        assert len(arr) == 6
        assert arr.null_count == 1
        assert arr.type == ty or pa.binary()
        assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None]
@pytest.mark.parametrize("ty", [pa.string(), pa.large_string()])
def test_sequence_utf8_to_unicode(ty):
    """UTF-8 bytes decode to unicode for string arrays; invalid UTF-8
    is rejected (ARROW-1225)."""
    # ARROW-1225
    data = [b'foo', None, b'bar']
    arr = pa.array(data, type=ty)
    assert arr.type == ty
    assert arr[0].as_py() == 'foo'
    # test a non-utf8 unicode string
    val = ('mañana').encode('utf-16-le')
    with pytest.raises(pa.ArrowInvalid):
        pa.array([val], type=ty)
def test_sequence_fixed_size_bytes():
    """bytes and bytearray of the declared length fit a binary(4) array."""
    data = [b'foof', None, bytearray(b'barb'), b'2346']
    arr = pa.array(data, type=pa.binary(4))
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.binary(4)
    assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']
def test_fixed_size_bytes_does_not_accept_varying_lengths():
    """Values of the wrong length are rejected for fixed-size binary."""
    data = [b'foo', None, b'barb', b'2346']
    with pytest.raises(pa.ArrowInvalid):
        pa.array(data, type=pa.binary(4))
def test_sequence_date():
    """datetime.date values infer date32 and round-trip via as_py()."""
    data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
            datetime.date(2040, 2, 26)]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.date32()
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.date(2000, 1, 1)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.date(1970, 1, 1)
    assert arr[3].as_py() == datetime.date(2040, 2, 26)
@pytest.mark.parametrize('case',
                         [(pa.date32(), [10957, None]),
                          (pa.date64(), [10957 * 86400000, None])])
def test_sequence_explicit_types(case):
    """Explicit date32/date64 arrays built from date objects or from raw
    integer representations (days / milliseconds since epoch) agree.

    The parameter was renamed from ``input`` to ``case`` to avoid
    shadowing the ``input`` builtin.
    """
    t, ex_values = case
    data = [datetime.date(2000, 1, 1), None]
    arr = pa.array(data, type=t)
    arr2 = pa.array(ex_values, type=t)
    for x in [arr, arr2]:
        assert len(x) == 2
        assert x.type == t
        assert x.null_count == 1
        assert x[0].as_py() == datetime.date(2000, 1, 1)
        assert x[1] is pa.NA
def test_date32_overflow():
    """Integers outside the 32-bit range overflow the date32 type."""
    # Overflow
    data3 = [2**32, None]
    with pytest.raises((OverflowError, pa.ArrowException)):
        pa.array(data3, type=pa.date32())
def test_sequence_timestamp():
    """datetime.datetime values infer timestamp('us') and round-trip
    with microsecond precision."""
    data = [
        datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
        datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)
def test_sequence_numpy_timestamp():
    """np.datetime64 values infer timestamp('us') like datetime inputs."""
    data = [
        np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
        None,
        np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
        np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)
def test_sequence_timestamp_with_unit():
    """Explicit timestamp units truncate sub-unit precision:
    's' drops to whole seconds, 'ms' to milliseconds, 'us' keeps all."""
    data = [
        datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
    ]
    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)
class MyDate(datetime.date):
    """date subclass used to verify subclass instances are accepted."""
    pass
class MyDatetime(datetime.datetime):
    """datetime subclass used to verify subclass instances are accepted."""
    pass
class MyTimedelta(datetime.timedelta):
    """timedelta subclass used to verify subclass instances are accepted."""
    pass
def test_datetime_subclassing():
    """Subclasses of date/datetime/timedelta convert like the base types,
    with the same per-unit truncation behavior."""
    data = [
        MyDate(2007, 7, 13),
    ]
    date_type = pa.date32()
    arr_date = pa.array(data, type=date_type)
    assert len(arr_date) == 1
    assert arr_date.type == date_type
    assert arr_date[0].as_py() == datetime.date(2007, 7, 13)
    data = [
        MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
    ]
    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)
    data = [
        MyTimedelta(123, 456, 1002),
    ]
    s = pa.duration('s')
    ms = pa.duration('ms')
    us = pa.duration('us')
    # Inference picks microsecond duration for timedelta input.
    arr_s = pa.array(data)
    assert len(arr_s) == 1
    assert arr_s.type == us
    assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002)
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.timedelta(123, 456)
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000)
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002)
@pytest.mark.xfail(not _pandas_api.have_pandas,
                   reason="pandas required for nanosecond conversion")
def test_sequence_timestamp_nanoseconds():
    """Nanosecond timestamp arrays accept datetime and subclass inputs;
    conversion back to Python needs pandas."""
    inputs = [
        [datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)],
        [MyDatetime(2007, 7, 13, 1, 23, 34, 123456)]
    ]
    for data in inputs:
        ns = pa.timestamp('ns')
        arr_ns = pa.array(data, type=ns)
        assert len(arr_ns) == 1
        assert arr_ns.type == ns
        assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                      23, 34, 123456)
@pytest.mark.pandas
def test_sequence_timestamp_from_int_with_unit():
    """Integers with explicit timestamp units are interpreted as counts
    of that unit since the epoch; ns scalars repr as pandas Timestamp.
    Mixing ints with arbitrary objects raises TypeError."""
    # TODO(wesm): This test might be rewritten to assert the actual behavior
    # when pandas is not installed
    data = [1]
    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    ns = pa.timestamp('ns')
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert repr(arr_s[0]) == "datetime.datetime(1970, 1, 1, 0, 0, 1)"
    assert str(arr_s[0]) == "1970-01-01 00:00:01"
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert repr(arr_ms[0]) == "datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)"
    assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert repr(arr_us[0]) == "datetime.datetime(1970, 1, 1, 0, 0, 0, 1)"
    assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"
    arr_ns = pa.array(data, type=ns)
    assert len(arr_ns) == 1
    assert arr_ns.type == ns
    assert repr(arr_ns[0]) == "Timestamp('1970-01-01 00:00:00.000000001')"
    assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"
    expected_exc = TypeError
    class CustomClass():
        pass
    for ty in [ns, pa.date32(), pa.date64()]:
        with pytest.raises(expected_exc):
            pa.array([1, CustomClass()], type=ty)
@pytest.mark.parametrize('np_scalar', [True, False])
def test_sequence_duration(np_scalar):
    """timedelta and np.timedelta64 values infer duration('us')."""
    td1 = datetime.timedelta(2, 3601, 1)
    td2 = datetime.timedelta(1, 100, 1000)
    if np_scalar:
        data = [np.timedelta64(td1), None, np.timedelta64(td2)]
    else:
        data = [td1, None, td2]
    arr = pa.array(data)
    assert len(arr) == 3
    assert arr.type == pa.duration('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == td1
    assert arr[1].as_py() is None
    assert arr[2].as_py() == td2
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_with_unit(unit):
    """Explicit duration units truncate sub-unit precision accordingly."""
    data = [
        datetime.timedelta(3, 22, 1001),
    ]
    expected = {'s': datetime.timedelta(3, 22),
                'ms': datetime.timedelta(3, 22, 1000),
                'us': datetime.timedelta(3, 22, 1001),
                'ns': datetime.timedelta(3, 22, 1001)}
    ty = pa.duration(unit)
    arr_s = pa.array(data, type=ty)
    assert len(arr_s) == 1
    assert arr_s.type == ty
    assert arr_s[0].as_py() == expected[unit]
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_from_int_with_unit(unit):
    """Integers with an explicit duration type keep their raw value."""
    data = [5]
    ty = pa.duration(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 1
    assert arr.type == ty
    assert arr[0].value == 5
def test_sequence_duration_nested_lists():
    """Nested lists of timedeltas infer list<duration('us')> and accept
    an explicit coarser unit."""
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)
    data = [[td1, None], [td1, td2]]
    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == data
    arr = pa.array(data, type=pa.list_(pa.duration('ms')))
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('ms'))
    assert arr.to_pylist() == data
def test_sequence_duration_nested_lists_numpy():
    """Nested np.timedelta64 values — in plain lists or timedelta64
    ndarrays — infer list<duration('us')> and round-trip as timedelta."""
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)
    data = [[np.timedelta64(td1), None],
            [np.timedelta64(td1), np.timedelta64(td2)]]
    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]
    data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'),
            np.array([np.timedelta64(td1), np.timedelta64(td2)])]
    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]
def test_sequence_nesting_levels():
    """Uniform nesting depth infers the corresponding list type;
    mixing nesting levels in one sequence is rejected."""
    data = [1, 2, None]
    arr = pa.array(data)
    assert arr.type == pa.int64()
    assert arr.to_pylist() == data
    data = [[1], [2], None]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    data = [[1], [2, 3, 4], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.list_(pa.int64()))
    assert arr.to_pylist() == data
    exceptions = (pa.ArrowInvalid, pa.ArrowTypeError)
    # Mixed nesting levels are rejected
    with pytest.raises(exceptions):
        pa.array([1, 2, [1]])
    with pytest.raises(exceptions):
        pa.array([1, 2, []])
    with pytest.raises(exceptions):
        pa.array([[1], [2], [None, [1]]])
def test_sequence_mixed_types_fails():
    """Inference rejects a sequence mixing str, int and float."""
    data = ['a', 1, 2.0]
    with pytest.raises(pa.ArrowTypeError):
        pa.array(data)
def test_sequence_mixed_types_with_specified_type_fails():
    """A non-string element fails conversion to an explicit string type.

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = ['-10', '-5', {'a': 1}, '0', '5', '10']
    ty = pa.string()
    with pytest.raises(TypeError):
        pa.array(data, type=ty)
def test_sequence_decimal():
    """Decimal values round-trip through a decimal128(7, 3) array.

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
    ty = pa.decimal128(precision=7, scale=3)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
def test_sequence_decimal_different_precisions():
    """Decimals of differing magnitude fit one decimal128(13, 3) type.

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [
        decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
    ]
    ty = pa.decimal128(precision=13, scale=3)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
def test_sequence_decimal_no_scale():
    """Integral decimals round-trip through a zero-scale decimal128.

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
    ty = pa.decimal128(precision=10)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
def test_sequence_decimal_negative():
    """Negative decimals round-trip through decimal128(10, 6).

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
    ty = pa.decimal128(precision=10, scale=6)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
def test_sequence_decimal_no_whole_part():
    """Purely fractional decimals round-trip when scale == precision.

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
    ty = pa.decimal128(precision=7, scale=7)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
def test_sequence_decimal_large_integer():
    """Decimals with very large integral parts fit decimal128(23, 5).

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [decimal.Decimal('-394029506937548693.42983'),
            decimal.Decimal('32358695912932.01033')]
    ty = pa.decimal128(precision=23, scale=5)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
def test_sequence_decimal_from_integers():
    """Plain ints convert to an explicit decimal128 type.

    Local renamed from ``type`` to ``ty`` to stop shadowing the builtin.
    """
    data = [0, 1, -39402950693754869342983]
    expected = [decimal.Decimal(x) for x in data]
    ty = pa.decimal128(precision=28, scale=5)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected
def test_sequence_decimal_too_high_precision():
    """A Decimal built from a float carries excess precision and is
    rejected (ARROW-6989)."""
    # ARROW-6989 python decimal created from float has too high precision
    with pytest.raises(ValueError, match="precision out of range"):
        pa.array([decimal.Decimal(123.234)])
def test_range_types():
    """range objects convert like the equivalent tuple of ints."""
    arr1 = pa.array(range(3))
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)
def test_empty_range():
    """An empty range yields an empty null-typed array."""
    arr = pa.array(range(0))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []
def test_structarray():
    """StructArray.from_arrays handles empty input, converts rows to
    dicts, and rejects mismatched names/arrays lengths."""
    arr = pa.StructArray.from_arrays([], names=[])
    assert arr.type == pa.struct([])
    assert len(arr) == 0
    assert arr.to_pylist() == []
    ints = pa.array([None, 2, 3], type=pa.int64())
    strs = pa.array(['a', None, 'c'], type=pa.string())
    bools = pa.array([True, False, None], type=pa.bool_())
    arr = pa.StructArray.from_arrays(
        [ints, strs, bools],
        ['ints', 'strs', 'bools'])
    expected = [
        {'ints': None, 'strs': 'a', 'bools': True},
        {'ints': 2, 'strs': None, 'bools': False},
        {'ints': 3, 'strs': 'c', 'bools': None},
    ]
    pylist = arr.to_pylist()
    assert pylist == expected, (pylist, expected)
    # len(names) != len(arrays)
    with pytest.raises(ValueError):
        pa.StructArray.from_arrays([ints], ['ints', 'strs'])
def test_struct_from_dicts():
    """Dicts convert to an explicit struct type; missing keys, empty
    dicts and None rows become nulls in the right places."""
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data, type=ty)
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    assert arr.to_pylist() == expected
def test_struct_from_dicts_bytes_keys():
    """Dict keys given as bytes match struct field names (ARROW-6878)."""
    # ARROW-6878
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []
    data = [{b'a': 5, b'b': 'foo'},
            {b'a': 6, b'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': None},
        {'a': 6, 'b': None, 'c': False},
    ]
def test_struct_from_tuples():
    """Tuples map positionally onto struct fields; wrong tuple sizes
    raise ValueError."""
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            (6, 'bar', False)]
    expected = [{'a': 5, 'b': 'foo', 'c': True},
                {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    # Object ndarray input must behave like the list input.
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray, type=ty)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)
    # With omitted values
    data = [(5, 'foo', None),
            None,
            (6, None, False)]
    expected = [{'a': 5, 'b': 'foo', 'c': None},
                None,
                {'a': 6, 'b': None, 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected
    # Invalid tuple size
    for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([tup], type=ty)
def test_struct_from_mixed_sequence():
    """Mixing tuples and dicts in one struct sequence raises TypeError."""
    # It is forbidden to mix dicts and tuples when initializing a struct array
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            {'a': 6, 'b': 'bar', 'c': False}]
    with pytest.raises(TypeError):
        pa.array(data, type=ty)
def test_struct_from_dicts_inference():
    """Struct type inference from dicts: field types, omitted keys,
    nesting, the empty-struct edge case, and struct/scalar mixing.

    Fix: ``arr2`` previously ignored the prepared ``data_as_ndarray`` and
    was built from ``data`` again; it now exercises the object-ndarray
    input path, mirroring test_struct_from_tuples.
    """
    expected_type = pa.struct([pa.field('a', pa.int64()),
                               pa.field('b', pa.string()),
                               pa.field('c', pa.bool_())])
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == data
    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    arr = pa.array(data)
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)
    # Nested
    expected_type = pa.struct([
        pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
                                 pa.field('ab', pa.bool_())])),
        pa.field('b', pa.string())])
    data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
            {'a': {'aa': None, 'ab': False}, 'b': None},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data)
    assert arr.to_pylist() == data
    # Edge cases
    arr = pa.array([{}])
    assert arr.type == pa.struct([])
    assert arr.to_pylist() == [{}]
    # Mixing structs and scalars is rejected
    with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)):
        pa.array([1, {'a': 2}])
def test_structarray_from_arrays_coerce():
    """Plain Python sequences are coerced to arrays by from_arrays when
    names are given; omitting names raises (ARROW-1706)."""
    # ARROW-1706
    ints = [None, 2, 3]
    strs = ['a', None, 'c']
    bools = [True, False, None]
    ints_nonnull = [1, 2, 3]
    arrays = [ints, strs, bools, ints_nonnull]
    result = pa.StructArray.from_arrays(arrays,
                                        ['ints', 'strs', 'bools',
                                         'int_nonnull'])
    expected = pa.StructArray.from_arrays(
        [pa.array(ints, type='int64'),
         pa.array(strs, type='utf8'),
         pa.array(bools),
         pa.array(ints_nonnull, type='int64')],
        ['ints', 'strs', 'bools', 'int_nonnull'])
    with pytest.raises(ValueError):
        pa.StructArray.from_arrays(arrays)
    assert result.equals(expected)
def test_decimal_array_with_none_and_nan():
    """NaN among Decimals converts to null only with from_pandas=True.

    Fix: the conversion inside ``pytest.raises`` was pointlessly assigned
    to ``array`` (the binding can never be reached, and was immediately
    reassigned below); the dead assignment is dropped.
    """
    values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]
    with pytest.raises(TypeError):
        # ARROW-6227: Without from_pandas=True, NaN is considered a float
        pa.array(values)
    array = pa.array(values, from_pandas=True)
    assert array.type == pa.decimal128(4, 3)
    assert array.to_pylist() == values[:2] + [None, None]
    array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True)
    assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]
@pytest.mark.parametrize('tz,name', [
    (pytz.FixedOffset(90), '+01:30'),
    (pytz.FixedOffset(-90), '-01:30'),
    (pytz.utc, 'UTC'),
    (pytz.timezone('America/New_York'), 'America/New_York')
])
def test_timezone_string(tz, name):
    """tzinfo objects round-trip through Arrow's timezone strings."""
    assert pa.lib.tzinfo_to_string(tz) == name
    assert pa.lib.string_to_tzinfo(name) == tz
def test_map_from_dicts():
    """{'key': k, 'value': v} entry dicts build map arrays; malformed
    entries or mistyped keys are rejected."""
    data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}],
            [{'key': b'c', 'value': 3}],
            [{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5},
             {'key': b'f', 'value': None}],
            [{'key': b'g', 'value': 7}]]
    expected = [[(d['key'], d['value']) for d in entry] for entry in data]
    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
    assert arr.to_pylist() == expected
    # With omitted values
    data[1] = None
    expected[1] = None
    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
    assert arr.to_pylist() == expected
    # Invalid dictionary
    for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]:
        with pytest.raises(ValueError, match="Invalid Map"):
            pa.array([entry], type=pa.map_('i4', 'i4'))
    # Invalid dictionary types
    for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]:
        with pytest.raises(TypeError, match="integer is required"):
            pa.array([entry], type=pa.map_('i4', 'i4'))
def test_map_from_tuples():
    """(key, value) tuples build map arrays; wrong tuple sizes raise."""
    expected = [[(b'a', 1), (b'b', 2)],
                [(b'c', 3)],
                [(b'd', 4), (b'e', 5), (b'f', None)],
                [(b'g', 7)]]
    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
    assert arr.to_pylist() == expected
    # With omitted values
    expected[1] = None
    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
    assert arr.to_pylist() == expected
    # Invalid tuple size
    for entry in [[(5,)], [()], [('5', 'foo', True)]]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([entry], type=pa.map_('i4', 'i4'))
| 30.398506 | 78 | 0.599415 |
import pytest
from pyarrow.pandas_compat import _pandas_api
import pyarrow as pa
import collections
import datetime
import decimal
import itertools
import math
import traceback
import numpy as np
import pytz
# (NumPy scalar type, corresponding Arrow type) pairs covering every
# fixed-width integer, signed and unsigned, used by the parametrized
# integer-conversion tests below.
int_type_pairs = [
    (np.int8, pa.int8()),
    (np.int16, pa.int16()),
    (np.int32, pa.int32()),
    (np.int64, pa.int64()),
    (np.uint8, pa.uint8()),
    (np.uint16, pa.uint16()),
    (np.uint32, pa.uint32()),
    (np.uint64, pa.uint64())]
# Just the NumPy scalar types, in the same order.
np_int_types, _ = zip(*int_type_pairs)
class StrangeIterable:
    """Iterable-only wrapper over a list.

    Exposes __iter__ but deliberately nothing else (no len(), no
    indexing), to exercise conversion from a bare iterable.
    """

    def __init__(self, lst):
        self.lst = lst

    def __iter__(self):
        return iter(self.lst)
class MyInt:
    """Object coercible to int via __int__, for conversion tests."""
    def __init__(self, value):
        self.value = value
    def __int__(self):
        return self.value
class MyBrokenInt:
    """Object whose __int__ always raises ZeroDivisionError."""

    def __int__(self):
        # The trailing "# MARKER" comment is load-bearing:
        # test_broken_integers asserts it appears in the formatted
        # traceback of the raising source line.  (It had been stripped,
        # which silently broke that assertion.)
        1/0  # MARKER
def check_struct_type(ty, expected):
    """Assert ty is a struct type with the same fields as expected,
    ignoring field order."""
    assert pa.types.is_struct(ty)
    assert set(ty) == set(expected)
def test_iterable_types():
    """A bare iterable converts the same as an equivalent tuple."""
    arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
    arr2 = pa.array((0, 1, 2, 3))
    assert arr1.equals(arr2)
def test_empty_iterable():
    """An empty iterable yields an empty null-typed array."""
    arr = pa.array(StrangeIterable([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []
def test_limited_iterator_types():
    """An iterator with an exact size hint converts fully."""
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)
def test_limited_iterator_size_overflow():
    """A size hint smaller than the iterator truncates the result."""
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
    arr2 = pa.array((0, 1))
    assert arr1.equals(arr2)
def test_limited_iterator_size_underflow():
    """A size hint larger than the iterator keeps only actual elements."""
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)
def test_iterator_without_size():
    """Iterators convert fully without a size hint, inferred or typed."""
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(iter(range(3)))
    assert arr1.equals(expected)
    arr1 = pa.array(iter(range(3)), type=pa.int64())
    assert arr1.equals(expected)
def test_infinite_iterator():
    """An explicit size bounds consumption of an infinite iterator."""
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(itertools.count(0), size=3)
    assert arr1.equals(expected)
    arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
    assert arr1.equals(expected)
def _as_list(xs):
return xs
def _as_tuple(xs):
return tuple(xs)
def _as_deque(xs):
return collections.deque(xs)
def _as_dict_values(xs):
dct = {k: v for k, v in enumerate(xs)}
return dct.values()
def _as_numpy_array(xs):
arr = np.empty(len(xs), dtype=object)
arr[:] = xs
return arr
# Reusable decorator: runs a test once per container type, passing the
# converter as the ``seq`` fixture argument.
parametrize_with_iterable_types = pytest.mark.parametrize(
    "seq", [_as_list, _as_tuple, _as_deque, _as_dict_values, _as_numpy_array])
@parametrize_with_iterable_types
def test_sequence_types(seq):
    """Every supported container type converts like a plain list."""
    arr1 = pa.array(seq([1, 2, 3]))
    arr2 = pa.array([1, 2, 3])
    assert arr1.equals(arr2)
@parametrize_with_iterable_types
def test_sequence_boolean(seq):
    """Python bools with None gaps infer pa.bool_()."""
    expected = [True, None, False, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_numpy_boolean(seq):
    """NumPy bool scalars with None gaps infer pa.bool_().

    Fix: ``np.bool`` (a deprecated alias of the builtin ``bool``,
    removed in NumPy 1.24) is replaced by the canonical ``np.bool_``
    scalar type, which is what this test is meant to exercise.
    """
    expected = [np.bool_(True), None, np.bool_(False), None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_empty_list(seq):
    """Empty containers yield an empty null-typed array."""
    arr = pa.array(seq([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []
@parametrize_with_iterable_types
def test_nested_lists(seq):
    """Nested lists infer list<int64> and accept an explicit element
    type."""
    data = [[], [1, 2], None]
    arr = pa.array(seq(data))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    arr = pa.array(seq(data), type=pa.list_(pa.int32()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int32())
    assert arr.to_pylist() == data
@parametrize_with_iterable_types
def test_nested_large_lists(seq):
data = [[], [1, 2], None]
arr = pa.array(seq(data), type=pa.large_list(pa.int16()))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.large_list(pa.int16())
assert arr.to_pylist() == data
@parametrize_with_iterable_types
def test_list_with_non_list(seq):
with pytest.raises(TypeError):
pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
with pytest.raises(TypeError):
pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64()))
@parametrize_with_iterable_types
def test_nested_arrays(seq):
arr = pa.array(seq([np.array([], dtype=np.int64),
np.array([1, 2], dtype=np.int64), None]))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == [[], [1, 2], None]
@parametrize_with_iterable_types
def test_nested_fixed_size_list(seq):
# sequence of lists
data = [[1, 2], [3, None], None]
arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64(), 2)
assert arr.to_pylist() == data
# sequence of numpy arrays
data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'),
None]
arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64(), 2)
assert arr.to_pylist() == [[1, 2], [3, 4], None]
# incorrect length of the lists or arrays
data = [[1, 2, 4], [3, None], None]
for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]:
with pytest.raises(
ValueError, match="Length of item not correct: expected 2"):
pa.array(seq(data), type=pa.list_(pa.int64(), 2))
# with list size of 0
data = [[], [], None]
arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64(), 0)
assert arr.to_pylist() == [[], [], None]
@parametrize_with_iterable_types
def test_sequence_all_none(seq):
    """An all-None sequence yields a null-typed array of the same length."""
    arr = pa.array(seq([None, None]))
    assert len(arr) == 2
    assert arr.null_count == 2
    assert arr.type == pa.null()
    assert arr.to_pylist() == [None, None]
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
    """Python ints (incl. dtype min/max) convert to each explicit int type."""
    np_scalar, pa_type = np_scalar_pa_type
    expected = [1, None, 3, None,
                np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([np.nan]), type=pa_type, from_pandas=False)
    arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
    expected = [None]
    assert len(arr) == 1
    assert arr.null_count == 1
    assert arr.type == pa_type
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)
    arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
    expected = [[None]]
    assert len(arr) == 1
    assert arr.null_count == 0
    assert arr.type == pa.list_(pa_type)
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_integer_inferred(seq):
    """Python ints with nulls infer int64 by default."""
    expected = [1, None, 3, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.int64()
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
    """NumPy int scalars (incl. dtype min/max) convert to an explicit type."""
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None,
                np_scalar(np.iinfo(np_scalar).min),
                np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
    """NumPy int scalars infer the matching Arrow integer type."""
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None]
    expected += [np_scalar(np.iinfo(np_scalar).min),
                 np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected))
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_custom_integers(seq):
    """Objects implementing __index__/__int__ (MyInt) convert to int64."""
    expected = [0, 42, 2**33 + 1, -2**63]
    data = list(map(MyInt, expected))
    arr = pa.array(seq(data), type=pa.int64())
    assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_broken_integers(seq):
    """An exception raised inside int conversion keeps its traceback."""
    data = [MyBrokenInt()]
    with pytest.raises(ZeroDivisionError) as exc_info:
        pa.array(seq(data), type=pa.int64())
    # Original traceback is kept
    tb_lines = traceback.format_tb(exc_info.tb)
    assert "# MARKER" in tb_lines[-1]
def test_numpy_scalars_mixed_type():
    # ARROW-4324
    # Mixed int32/float32 scalars promote to float64.
    data = [np.int32(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type='float64')
    assert arr.equals(expected)
@pytest.mark.xfail(reason="Type inference for uint64 not implemented",
                   raises=OverflowError)
def test_uint64_max_convert():
    """uint64 max converts explicitly; inference is expected to fail (xfail)."""
    data = [0, np.iinfo(np.uint64).max]
    arr = pa.array(data, type=pa.uint64())
    expected = pa.array(np.array(data, dtype='uint64'))
    assert arr.equals(expected)
    arr_inferred = pa.array(data)
    assert arr_inferred.equals(expected)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
    """Values just outside the signed range raise for each int width."""
    ty = getattr(pa, "int%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** (bits - 1)], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-2 ** (bits - 1) - 1], ty)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
    """Values outside the unsigned range raise for each uint width."""
    ty = getattr(pa, "uint%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** bits], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-1], ty)
def test_convert_with_mask():
    """An explicit boolean mask marks entries as null; wrong length raises."""
    data = [1, 2, 3, 4, 5]
    mask = np.array([False, True, False, False, True])
    result = pa.array(data, mask=mask)
    expected = pa.array([1, None, 3, 4, None])
    assert result.equals(expected)
    # Mask wrong length
    with pytest.raises(ValueError):
        pa.array(data, mask=mask[1:])
def test_garbage_collection():
    """Arrow memory is fully released once the array is collected."""
    import gc
    # Force the cyclic garbage collector to run
    gc.collect()
    bytes_before = pa.total_allocated_bytes()
    pa.array([1, None, 3, None])
    gc.collect()
    assert pa.total_allocated_bytes() == bytes_before
def test_sequence_double():
    """Python floats with nulls infer float64 and round-trip."""
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    assert len(arr) == 6
    assert arr.null_count == 3
    assert arr.type == pa.float64()
    assert arr.to_pylist() == data
def test_double_auto_coerce_from_integer():
    # Done as part of ARROW-2814
    # Ints mixed with floats coerce to double regardless of ordering.
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    data2 = [1.5, 1, None, 2.5, None, None]
    arr2 = pa.array(data2)
    assert arr.equals(arr2)
    data3 = [1, 1.5, None, 2.5, None, None]
    arr3 = pa.array(data3)
    data4 = [1., 1.5, None, 2.5, None, None]
    arr4 = pa.array(data4)
    assert arr3.equals(arr4)
def test_double_integer_coerce_representable_range():
    """Int→double coercion allows ±2**53 but rejects values beyond it."""
    valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)]
    invalid_values = [1.5, 1, 2, None, (1 << 53) + 1]
    invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)]
    # it works
    pa.array(valid_values)
    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values)
    with pytest.raises(ValueError):
        pa.array(invalid_values2)
def test_float32_integer_coerce_representable_range():
    """Int→float32 coercion allows ±2**24 but rejects values beyond it."""
    f32 = np.float32
    valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
    invalid_values = [f32(1.5), (1 << 24) + 1]
    invalid_values2 = [f32(1.5), -((1 << 24) + 1)]
    # it works
    pa.array(valid_values, type=pa.float32())
    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values, type=pa.float32())
    with pytest.raises(ValueError):
        pa.array(invalid_values2, type=pa.float32())
def test_mixed_sequence_errors():
    """Unconvertible mixed sequences raise with a type-specific message."""
    with pytest.raises(ValueError, match="tried to convert to boolean"):
        pa.array([True, 'foo'], type=pa.bool_())
    with pytest.raises(ValueError, match="tried to convert to float32"):
        pa.array([1.5, 'foo'], type=pa.float32())
    with pytest.raises(ValueError, match="tried to convert to double"):
        pa.array([1.5, 'foo'])
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar,pa_type", [
    (np.float16, pa.float16()),
    (np.float32, pa.float32()),
    (np.float64, pa.float64())
])
@pytest.mark.parametrize("from_pandas", [True, False])
def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas):
    """from_pandas controls whether NaN counts as null and affects inference."""
    data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
    arr = pa.array(seq(data), from_pandas=from_pandas)
    assert len(arr) == 6
    if from_pandas:
        assert arr.null_count == 3
    else:
        assert arr.null_count == 2
    if from_pandas:
        # The NaN is skipped in type inference, otherwise it forces a
        # float64 promotion
        assert arr.type == pa_type
    else:
        assert arr.type == pa.float64()
    assert arr.to_pylist()[:4] == data[:4]
    if from_pandas:
        assert arr.to_pylist()[5] is None
    else:
        assert np.isnan(arr.to_pylist()[5])
@pytest.mark.parametrize("from_pandas", [True, False])
@pytest.mark.parametrize("inner_seq", [np.array, list])
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
    # ARROW-2806
    # Object ndarray of float sub-sequences; NaN handling depends on
    # from_pandas.
    data = np.array([
        inner_seq([1., 2.]),
        inner_seq([1., 2., 3.]),
        inner_seq([np.nan]),
        None
    ])
    arr = pa.array(data, from_pandas=from_pandas)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.float64())
    if from_pandas:
        assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
    else:
        np.testing.assert_equal(arr.to_pylist(),
                                [[1., 2.], [1., 2., 3.], [np.nan], None])
def test_nested_ndarray_in_object_array():
    # ARROW-4350
    # ndarrays nested inside object arrays are treated as sublists.
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1, 2], dtype=np.int64),
              np.array([2, 3], dtype=np.int64)]
    arr2 = np.empty(2, dtype=object)
    arr2[0] = [3, 4]
    arr2[1] = [5, 6]
    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type
    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)
    assert result.equals(expected)
    # test case for len-1 arrays to ensure they are interpreted as
    # sublists and not scalars
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1]), np.array([2])]
    result = pa.array([arr, arr])
    assert result.to_pylist() == [[[1], [2]], [[1], [2]]]
@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray "
                           "not yet implemented"),
                   raises=AssertionError)
def test_multidimensional_ndarray_as_nested_list():
    # TODO(wesm): see ARROW-5645
    arr = np.array([[1, 2], [2, 3]], dtype=np.int64)
    arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64)
    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type
    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)
    assert result.equals(expected)
def test_array_ignore_nan_from_pandas():
    # See ARROW-4324, this reverts logic that was introduced in
    # ARROW-2240
    # NaN mixed with strings only converts when from_pandas=True.
    with pytest.raises(ValueError):
        pa.array([np.nan, 'str'])
    arr = pa.array([np.nan, 'str'], from_pandas=True)
    expected = pa.array([None, 'str'])
    assert arr.equals(expected)
def test_nested_ndarray_different_dtypes():
    """Nested ndarrays of differing int dtypes unify to one list type."""
    data = [
        np.array([1, 2, 3], dtype='int64'),
        None,
        np.array([4, 5, 6], dtype='uint32')
    ]
    arr = pa.array(data)
    expected = pa.array([[1, 2, 3], None, [4, 5, 6]],
                        type=pa.list_(pa.int64()))
    assert arr.equals(expected)
    t2 = pa.list_(pa.uint32())
    arr2 = pa.array(data, type=t2)
    expected2 = expected.cast(t2)
    assert arr2.equals(expected2)
def test_sequence_unicode():
    """Unicode strings with nulls infer pa.string() and round-trip."""
    data = ['foo', 'bar', None, 'mañana']
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.string()
    assert arr.to_pylist() == data
def check_array_mixed_unicode_bytes(binary_type, string_type):
    """Shared check: mixed str/bytes input under inferred and explicit types."""
    values = ['qux', b'foo', bytearray(b'barz')]
    b_values = [b'qux', b'foo', b'barz']
    u_values = ['qux', 'foo', 'barz']
    arr = pa.array(values)
    expected = pa.array(b_values, type=pa.binary())
    assert arr.type == pa.binary()
    assert arr.equals(expected)
    arr = pa.array(values, type=binary_type)
    expected = pa.array(b_values, type=binary_type)
    assert arr.type == binary_type
    assert arr.equals(expected)
    arr = pa.array(values, type=string_type)
    expected = pa.array(u_values, type=string_type)
    assert arr.type == string_type
    assert arr.equals(expected)
def test_array_mixed_unicode_bytes():
    """Run the mixed str/bytes check for both regular and large types."""
    check_array_mixed_unicode_bytes(pa.binary(), pa.string())
    check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string())
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_array(ty):
    # Construct a large binary array with more than 4GB of data
    s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10
    nrepeats = math.ceil((2**32 + 5) / len(s))
    data = [s] * nrepeats
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == nrepeats
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_value(ty):
    # Construct a large binary array with a single value larger than 4GB
    s = b"0123456789abcdefghijklmnopqrstuvwxyz"
    nrepeats = math.ceil((2**32 + 5) / len(s))
    arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == 4
    buf = arr[1].as_buffer()
    assert len(buf) == len(s) * nrepeats
def test_sequence_bytes():
    """Bytes-like inputs (bytes, memoryview, bytearray, str) convert to
    binary; non-contiguous views are made contiguous and str is encoded."""
    u1 = b'ma\xc3\xb1ana'
    data = [b'foo',
            memoryview(b'dada'),
            memoryview(b'd-a-t-a')[::2],  # non-contiguous is made contiguous
            u1.decode('utf-8'),  # unicode gets encoded,
            bytearray(b'bar'),
            None]
    for ty in [None, pa.binary(), pa.large_binary()]:
        arr = pa.array(data, type=ty)
        assert len(arr) == 6
        assert arr.null_count == 1
        # BUG FIX: this previously read `arr.type == ty or pa.binary()`,
        # which parses as `(arr.type == ty) or pa.binary()` and is always
        # truthy (pa.binary() is truthy), so the assertion could never fail.
        # The intent is: inferred type defaults to binary when ty is None.
        assert arr.type == (ty or pa.binary())
        assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None]
@pytest.mark.parametrize("ty", [pa.string(), pa.large_string()])
def test_sequence_utf8_to_unicode(ty):
    # ARROW-1225
    # Valid UTF-8 bytes decode to str; invalid UTF-8 raises.
    data = [b'foo', None, b'bar']
    arr = pa.array(data, type=ty)
    assert arr.type == ty
    assert arr[0].as_py() == 'foo'
    # test a non-utf8 unicode string
    val = ('mañana').encode('utf-16-le')
    with pytest.raises(pa.ArrowInvalid):
        pa.array([val], type=ty)
def test_sequence_fixed_size_bytes():
    """Equal-length bytes/bytearray values convert to fixed-size binary."""
    data = [b'foof', None, bytearray(b'barb'), b'2346']
    arr = pa.array(data, type=pa.binary(4))
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.binary(4)
    assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']
def test_fixed_size_bytes_does_not_accept_varying_lengths():
    """A value of the wrong length raises for fixed-size binary."""
    data = [b'foo', None, b'barb', b'2346']
    with pytest.raises(pa.ArrowInvalid):
        pa.array(data, type=pa.binary(4))
def test_sequence_date():
    """datetime.date values infer date32 and round-trip per element."""
    data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
            datetime.date(2040, 2, 26)]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.date32()
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.date(2000, 1, 1)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.date(1970, 1, 1)
    assert arr[3].as_py() == datetime.date(2040, 2, 26)
@pytest.mark.parametrize('input',
                         [(pa.date32(), [10957, None]),
                          (pa.date64(), [10957 * 86400000, None])])
def test_sequence_explicit_types(input):
    """date objects and raw int epoch values produce equivalent arrays."""
    t, ex_values = input
    data = [datetime.date(2000, 1, 1), None]
    arr = pa.array(data, type=t)
    arr2 = pa.array(ex_values, type=t)
    for x in [arr, arr2]:
        assert len(x) == 2
        assert x.type == t
        assert x.null_count == 1
        assert x[0].as_py() == datetime.date(2000, 1, 1)
        assert x[1] is pa.NA
def test_date32_overflow():
    # Overflow
    # Day count beyond int32 range must raise for date32.
    data3 = [2**32, None]
    with pytest.raises((OverflowError, pa.ArrowException)):
        pa.array(data3, type=pa.date32())
def test_sequence_timestamp():
    """datetime.datetime values infer timestamp('us') and round-trip."""
    data = [
        datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
        datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)
def test_sequence_numpy_timestamp():
    """np.datetime64 values infer timestamp('us') like datetime objects."""
    data = [
        np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
        None,
        np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
        np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)
def test_sequence_timestamp_with_unit():
    """Explicit s/ms/us units truncate sub-unit precision accordingly."""
    data = [
        datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
    ]
    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)
# Subclasses of the datetime types, used to verify that conversion accepts
# subclass instances (see test_datetime_subclassing below).
class MyDate(datetime.date):
    pass
class MyDatetime(datetime.datetime):
    pass
class MyTimedelta(datetime.timedelta):
    pass
def test_datetime_subclassing():
    """Subclasses of date/datetime/timedelta convert like the base types."""
    data = [
        MyDate(2007, 7, 13),
    ]
    date_type = pa.date32()
    arr_date = pa.array(data, type=date_type)
    assert len(arr_date) == 1
    assert arr_date.type == date_type
    assert arr_date[0].as_py() == datetime.date(2007, 7, 13)
    data = [
        MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
    ]
    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)
    data = [
        MyTimedelta(123, 456, 1002),
    ]
    s = pa.duration('s')
    ms = pa.duration('ms')
    us = pa.duration('us')
    arr_s = pa.array(data)
    assert len(arr_s) == 1
    assert arr_s.type == us
    assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002)
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.timedelta(123, 456)
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000)
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002)
@pytest.mark.xfail(not _pandas_api.have_pandas,
                   reason="pandas required for nanosecond conversion")
def test_sequence_timestamp_nanoseconds():
    """datetime values (incl. subclasses) convert to timestamp('ns')."""
    inputs = [
        [datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)],
        [MyDatetime(2007, 7, 13, 1, 23, 34, 123456)]
    ]
    for data in inputs:
        ns = pa.timestamp('ns')
        arr_ns = pa.array(data, type=ns)
        assert len(arr_ns) == 1
        assert arr_ns.type == ns
        assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                      23, 34, 123456)
@pytest.mark.pandas
def test_sequence_timestamp_from_int_with_unit():
    # TODO(wesm): This test might be rewritten to assert the actual behavior
    # when pandas is not installed
    # Raw ints are interpreted as counts of the timestamp unit; ns values
    # come back as pandas Timestamps.
    data = [1]
    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    ns = pa.timestamp('ns')
    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert repr(arr_s[0]) == "datetime.datetime(1970, 1, 1, 0, 0, 1)"
    assert str(arr_s[0]) == "1970-01-01 00:00:01"
    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert repr(arr_ms[0]) == "datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)"
    assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"
    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert repr(arr_us[0]) == "datetime.datetime(1970, 1, 1, 0, 0, 0, 1)"
    assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"
    arr_ns = pa.array(data, type=ns)
    assert len(arr_ns) == 1
    assert arr_ns.type == ns
    assert repr(arr_ns[0]) == "Timestamp('1970-01-01 00:00:00.000000001')"
    assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"
    expected_exc = TypeError
    # Non-convertible objects mixed in must raise TypeError.
    class CustomClass():
        pass
    for ty in [ns, pa.date32(), pa.date64()]:
        with pytest.raises(expected_exc):
            pa.array([1, CustomClass()], type=ty)
@pytest.mark.parametrize('np_scalar', [True, False])
def test_sequence_duration(np_scalar):
    """timedelta and np.timedelta64 values infer duration('us')."""
    td1 = datetime.timedelta(2, 3601, 1)
    td2 = datetime.timedelta(1, 100, 1000)
    if np_scalar:
        data = [np.timedelta64(td1), None, np.timedelta64(td2)]
    else:
        data = [td1, None, td2]
    arr = pa.array(data)
    assert len(arr) == 3
    assert arr.type == pa.duration('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == td1
    assert arr[1].as_py() is None
    assert arr[2].as_py() == td2
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_with_unit(unit):
    """Explicit duration units truncate sub-unit precision accordingly."""
    data = [
        datetime.timedelta(3, 22, 1001),
    ]
    expected = {'s': datetime.timedelta(3, 22),
                'ms': datetime.timedelta(3, 22, 1000),
                'us': datetime.timedelta(3, 22, 1001),
                'ns': datetime.timedelta(3, 22, 1001)}
    ty = pa.duration(unit)
    arr_s = pa.array(data, type=ty)
    assert len(arr_s) == 1
    assert arr_s.type == ty
    assert arr_s[0].as_py() == expected[unit]
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_from_int_with_unit(unit):
    """Raw ints are interpreted as counts of the duration unit."""
    data = [5]
    ty = pa.duration(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 1
    assert arr.type == ty
    assert arr[0].value == 5
def test_sequence_duration_nested_lists():
    """timedelta values nested in lists convert with inferred/explicit unit."""
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)
    data = [[td1, None], [td1, td2]]
    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == data
    arr = pa.array(data, type=pa.list_(pa.duration('ms')))
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('ms'))
    assert arr.to_pylist() == data
def test_sequence_duration_nested_lists_numpy():
    """np.timedelta64 values in lists and ndarrays convert like timedelta."""
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)
    data = [[np.timedelta64(td1), None],
            [np.timedelta64(td1), np.timedelta64(td2)]]
    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]
    data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'),
            np.array([np.timedelta64(td1), np.timedelta64(td2)])]
    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]
def test_sequence_nesting_levels():
    """Uniform nesting depths infer nested list types; mixed depths raise."""
    data = [1, 2, None]
    arr = pa.array(data)
    assert arr.type == pa.int64()
    assert arr.to_pylist() == data
    data = [[1], [2], None]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    data = [[1], [2, 3, 4], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.list_(pa.int64()))
    assert arr.to_pylist() == data
    exceptions = (pa.ArrowInvalid, pa.ArrowTypeError)
    # Mixed nesting levels are rejected
    with pytest.raises(exceptions):
        pa.array([1, 2, [1]])
    with pytest.raises(exceptions):
        pa.array([1, 2, []])
    with pytest.raises(exceptions):
        pa.array([[1], [2], [None, [1]]])
def test_sequence_mixed_types_fails():
    """Incompatible mixed scalar types raise ArrowTypeError on inference."""
    data = ['a', 1, 2.0]
    with pytest.raises(pa.ArrowTypeError):
        pa.array(data)
def test_sequence_mixed_types_with_specified_type_fails():
    """A non-string value raises when an explicit string type is given."""
    data = ['-10', '-5', {'a': 1}, '0', '5', '10']
    type = pa.string()
    with pytest.raises(TypeError):
        pa.array(data, type=type)
def test_sequence_decimal():
    """decimal.Decimal values round-trip through decimal128."""
    data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
    type = pa.decimal128(precision=7, scale=3)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == data
def test_sequence_decimal_different_precisions():
    """Decimals of differing digit counts fit one decimal128 type."""
    data = [
        decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
    ]
    type = pa.decimal128(precision=13, scale=3)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == data
def test_sequence_decimal_no_scale():
    """Whole-number decimals round-trip when no scale is given."""
    data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
    type = pa.decimal128(precision=10)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == data
def test_sequence_decimal_negative():
    """Negative decimals round-trip through decimal128."""
    data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
    type = pa.decimal128(precision=10, scale=6)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == data
def test_sequence_decimal_no_whole_part():
    """Purely fractional decimals (scale == precision) round-trip."""
    data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
    type = pa.decimal128(precision=7, scale=7)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == data
def test_sequence_decimal_large_integer():
    """Decimals with very large integral parts round-trip."""
    data = [decimal.Decimal('-394029506937548693.42983'),
            decimal.Decimal('32358695912932.01033')]
    type = pa.decimal128(precision=23, scale=5)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == data
def test_sequence_decimal_from_integers():
    """Plain ints convert to decimal128 and read back as Decimals."""
    data = [0, 1, -39402950693754869342983]
    expected = [decimal.Decimal(x) for x in data]
    type = pa.decimal128(precision=28, scale=5)
    arr = pa.array(data, type=type)
    assert arr.to_pylist() == expected
def test_sequence_decimal_too_high_precision():
    # ARROW-6989 python decimal created from float has too high precision
    with pytest.raises(ValueError, match="precision out of range"):
        pa.array([decimal.Decimal(123.234)])
def test_range_types():
    """range objects convert like equivalent tuples."""
    arr1 = pa.array(range(3))
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)
def test_empty_range():
    """An empty range yields a zero-length null-typed array."""
    arr = pa.array(range(0))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []
def test_structarray():
    """StructArray.from_arrays builds empty and populated structs; the
    child/name counts must match."""
    arr = pa.StructArray.from_arrays([], names=[])
    assert arr.type == pa.struct([])
    assert len(arr) == 0
    assert arr.to_pylist() == []
    ints = pa.array([None, 2, 3], type=pa.int64())
    strs = pa.array(['a', None, 'c'], type=pa.string())
    bools = pa.array([True, False, None], type=pa.bool_())
    arr = pa.StructArray.from_arrays(
        [ints, strs, bools],
        ['ints', 'strs', 'bools'])
    expected = [
        {'ints': None, 'strs': 'a', 'bools': True},
        {'ints': 2, 'strs': None, 'bools': False},
        {'ints': 3, 'strs': 'c', 'bools': None},
    ]
    pylist = arr.to_pylist()
    assert pylist == expected, (pylist, expected)
    # len(names) != len(arrays)
    with pytest.raises(ValueError):
        pa.StructArray.from_arrays([ints], ['ints', 'strs'])
def test_struct_from_dicts():
    """Dicts convert to structs; missing keys become nulls."""
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data
    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data, type=ty)
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    assert arr.to_pylist() == expected
def test_struct_from_dicts_bytes_keys():
    # ARROW-6878
    # bytes dict keys are matched against the (str) struct field names.
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []
    data = [{b'a': 5, b'b': 'foo'},
            {b'a': 6, b'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': None},
        {'a': 6, 'b': None, 'c': False},
    ]
def test_struct_from_tuples():
    """Tuples map positionally onto struct fields; wrong sizes raise."""
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            (6, 'bar', False)]
    expected = [{'a': 5, 'b': 'foo', 'c': True},
                {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray, type=ty)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)
    # With omitted values
    data = [(5, 'foo', None),
            None,
            (6, None, False)]
    expected = [{'a': 5, 'b': 'foo', 'c': None},
                None,
                {'a': 6, 'b': None, 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected
    # Invalid tuple size
    for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([tup], type=ty)
def test_struct_from_mixed_sequence():
    # It is forbidden to mix dicts and tuples when initializing a struct array
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            {'a': 6, 'b': 'bar', 'c': False}]
    with pytest.raises(TypeError):
        pa.array(data, type=ty)
def test_struct_from_dicts_inference():
    """Struct type inference from dicts: full, omitted, nested, edge cases."""
    expected_type = pa.struct([pa.field('a', pa.int64()),
                               pa.field('b', pa.string()),
                               pa.field('c', pa.bool_())])
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == data
    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    arr = pa.array(data)
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    # BUG FIX: this previously passed `data` again, so the object-ndarray
    # conversion path was never exercised (cf. test_struct_from_tuples).
    arr2 = pa.array(data_as_ndarray)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)
    # Nested
    expected_type = pa.struct([
        pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
                                 pa.field('ab', pa.bool_())])),
        pa.field('b', pa.string())])
    data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
            {'a': {'aa': None, 'ab': False}, 'b': None},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data)
    assert arr.to_pylist() == data
    # Edge cases
    arr = pa.array([{}])
    assert arr.type == pa.struct([])
    assert arr.to_pylist() == [{}]
    # Mixing structs and scalars is rejected
    with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)):
        pa.array([1, {'a': 2}])
def test_structarray_from_arrays_coerce():
# ARROW-1706
ints = [None, 2, 3]
strs = ['a', None, 'c']
bools = [True, False, None]
ints_nonnull = [1, 2, 3]
arrays = [ints, strs, bools, ints_nonnull]
result = pa.StructArray.from_arrays(arrays,
['ints', 'strs', 'bools',
'int_nonnull'])
expected = pa.StructArray.from_arrays(
[pa.array(ints, type='int64'),
pa.array(strs, type='utf8'),
pa.array(bools),
pa.array(ints_nonnull, type='int64')],
['ints', 'strs', 'bools', 'int_nonnull'])
with pytest.raises(ValueError):
pa.StructArray.from_arrays(arrays)
assert result.equals(expected)
def test_decimal_array_with_none_and_nan():
    values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]
    with pytest.raises(TypeError):
        # ARROW-6227: Without from_pandas=True, NaN is considered a float
        array = pa.array(values)
    # with from_pandas=True, both float NaN and Decimal('nan') become nulls
    array = pa.array(values, from_pandas=True)
    assert array.type == pa.decimal128(4, 3)
    assert array.to_pylist() == values[:2] + [None, None]
    # an explicit wider decimal type re-scales the valid value to 4 digits
    array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True)
    assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]
@pytest.mark.parametrize('tz,name', [
    (pytz.FixedOffset(90), '+01:30'),
    (pytz.FixedOffset(-90), '-01:30'),
    (pytz.utc, 'UTC'),
    (pytz.timezone('America/New_York'), 'America/New_York')
])
def test_timezone_string(tz, name):
    # tzinfo <-> string conversion must round-trip in both directions
    assert pa.lib.tzinfo_to_string(tz) == name
    assert pa.lib.string_to_tzinfo(name) == tz
def test_map_from_dicts():
    data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}],
            [{'key': b'c', 'value': 3}],
            [{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5},
             {'key': b'f', 'value': None}],
            [{'key': b'g', 'value': 7}]]
    # to_pylist() yields (key, value) tuples, not the input dict form
    expected = [[(d['key'], d['value']) for d in entry] for entry in data]
    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
    assert arr.to_pylist() == expected
    # With omitted values: a top-level None becomes a null map entry
    data[1] = None
    expected[1] = None
    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
    assert arr.to_pylist() == expected
    # Invalid dictionary: entries must have exactly 'key' and 'value'
    for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]:
        with pytest.raises(ValueError, match="Invalid Map"):
            pa.array([entry], type=pa.map_('i4', 'i4'))
    # Invalid dictionary types: keys/values must match the map type
    for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]:
        with pytest.raises(TypeError, match="integer is required"):
            pa.array([entry], type=pa.map_('i4', 'i4'))
def test_map_from_tuples():
    """Map arrays can be built from lists of (key, value) tuples."""
    map_type = pa.map_(pa.binary(), pa.int32())
    entries = [[(b'a', 1), (b'b', 2)],
               [(b'c', 3)],
               [(b'd', 4), (b'e', 5), (b'f', None)],
               [(b'g', 7)]]
    result = pa.array(entries, type=map_type)
    assert result.to_pylist() == entries
    # A top-level None becomes a null map entry.
    entries[1] = None
    result = pa.array(entries, type=map_type)
    assert result.to_pylist() == entries
    # Tuples must have exactly two elements.
    for bad_entry in [[(5,)], [()], [('5', 'foo', True)]]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([bad_entry], type=pa.map_('i4', 'i4'))
| true | true |
1c362088d03232252ddb9675140953581fcb031e | 4,300 | py | Python | Taller_06_ControlFor/main.py | BarinasJ/Algoritmos_Programacion | fe9fe222619e1c90639a6e365454e06e2621af06 | [
"MIT"
] | null | null | null | Taller_06_ControlFor/main.py | BarinasJ/Algoritmos_Programacion | fe9fe222619e1c90639a6e365454e06e2621af06 | [
"MIT"
] | null | null | null | Taller_06_ControlFor/main.py | BarinasJ/Algoritmos_Programacion | fe9fe222619e1c90639a6e365454e06e2621af06 | [
"MIT"
] | null | null | null | archivo = open('paises.txt', 'r')#imprima la posicion de colombia
"""
c=0
lista=[]
for i in archivo:
lista.append(i)
a=" ".join(lista)
c=c+1
if(a=="Colombia: Bogotá\n"):
break
lista=[]
print(c)
"""
# Print every country: the country name is the text before ":" on each line.
for linea in archivo:
    # slice up to the first ":" instead of appending character by character
    print(linea[:linea.index(":")])
"""
#Imprima todas las Capitales
lista=[]
for i in archivo:
a=i.index(":")
for r in range(a+2,len(i)):
lista.append(i[r])
a="".join(lista)
print(a)
"""
# Print every country whose name starts with the letter "C".
# BUG FIX: `archivo` was already exhausted by the earlier loop, so this
# section used to iterate over nothing; rewind the file first.
archivo.seek(0)
paises = [linea[:linea.index(":")] for linea in archivo]
for pais in paises:
    if pais.startswith("C"):
        print(pais)
"""
#imprima todas las capitales que inicien con la leta B
lista=[]
ciudad=[]
for i in archivo:
a=i.index(":")
for r in range(a+2,len(i)-1):
lista.append(i[r])
a="".join(lista)
ciudad.append(a)
lista=[]
for i in ciudad:
if(i[0]=="B"):
print(i)
"""
# Count and print how many capital cities start with the letter "M".
archivo.seek(0)  # BUG FIX: rewind; earlier loops consumed the file iterator
m = 0
for linea in archivo:
    sep = linea.index(":")
    ciudad = linea[sep + 2:]  # skip ": " to reach the capital name
    if ciudad.startswith("M"):
        m = m + 1
print(m)
"""
#Imprima todos los paises y capitale cuyo inicio sea con la letra U
lista=[]
lista2=[]
ciudadu=[]
paisu=[]
for i in archivo:
e=i.index(":")
for q in range(e+2, len(i)-1):
lista.append(i[q])
e="".join(lista)
ciudadu.append(e)
lista=[]
n=i.index(":")
for t in range(0, n):
lista2.append(i[t])
n="".join(lista2)
paisu.append(n)
lista2=[]
for i in paisu:
if(i[0]=="U"):
print(i)
for i in ciudadu:
if(i[0]=="U"):
print(i)
"""
"""
#Cuente e imprima cuantos paises que hay en el archivo
lista=[]
contador=0
for i in archivo:
a=i.index(":")
for r in range(0,a):
lista.append(i[r])
a="".join(lista)
print(a)
contador=contador+1
print(contador)
lista=[]
for i in archivo:
a=i.index(":")
for r in range(0,a):
lista.append(i[r])
a="".join(lista)
print(a)
lista=[]
""
#Busque e imprima la ciudad de Singapur
lista=[]
ciudades=[]
for i in archivo:
a=i.index(":")
for r in range(a+2,len(i)-1):
lista.append(i[r])
a="".join(lista)
ciudades.append(a)
lista=[]
for i in ciudades:
if(i=="Singapur"):
print(i)
""
#Busque e imprima el pais de Venezuela y su capital
lista=[]
pais=[]
for i in archivo:
lista.append(i)
a="".join(lista)
for r in lista:
pais.append(r)
for i in pais:
if i=="Venezuela: Caracas\n":
print(i)
break
lista=[]
""
#Cuente e imprima las ciudades que su pais inicie con la letra E
lista=[]
paises=[]
ciudades=[]
ciudad=[]
for i in archivo:
lista.append(i)
for i in lista:
if(i[0]=="E"):
ciudades.append(i)
c="".join(ciudades)
for i in ciudades:
a=i.index(":")
for r in range(a+2,len(i)-1):
ciudad.append(i[r])
b = "".join(ciudad)
for i in ciudad:
print(i,end="")
""
#Busque e imprima la Capiltal de Colombia
lista=[]
ciudades=[]
for i in archivo:
a=i.index(":")
for r in range(a+2,len(i)-1):
lista.append(i[r])
a="".join(lista)
ciudades.append(a)
lista=[]
for i in ciudades:
if(i=="Bogotá"):
print(i)
""
#imprima la posicion del pais de Uganda
c=0
lista=[]
for i in archivo:
lista.append(i)
a=" ".join(lista)
c=c+1
if(a=="Uganda: Kampala\n"):
break
lista=[]
print(c)
""
#imprima la posicion del pais de Mexico
c=0
lista=[]
for i in archivo:
lista.append(i)
a=" ".join(lista)
c=c+1
if(a=="México: Ciudad de México \n"):
break
lista=[]
print(c)
""
#El alcalde de Antananarivo contrato a algunos alumnos de la Universidad Ean para corregir el archivo de países.txt, ya que la capital de Madagascar NO es rey julien es Antananarivo, espero que el alcalde se vaya contento por su trabajo. Utilice un For para cambiar ese Dato
lista=[]
for i in archivo:
lista.append(i)
lista.remove("Madagascar: rey julien\n")
lista.insert(109,"Madagascar: Antananarivo\n")
print(lista)
""
#Agregue un país que no esté en la lista
lista=[]
for i in archivo:
lista.append(i)
lista.insert(136,"Palestina: Jerusalem\n")
print(lista)
""
archivo.close()
""" | 18.942731 | 274 | 0.616279 | archivo = open('paises.txt', 'r')
lista=[]
for i in archivo:
a=i.index(":")
for r in range(0,a):
lista.append(i[r])
a="".join(lista)
print(a)
lista=[]
lista=[]
paises=[]
for e in archivo:
a=e.index(":")
for r in range(0,a):
lista.append(e[r])
a="".join(lista)
paises.append(a)
lista=[]
for e in paises:
if(e[0]=="C"):
print(e)
m=0
lista=[]
ciudadm=[]
for i in archivo:
a=i.index(":")
for q in range(a+2, len(i)-1):
lista.append(i[q])
a="".join(lista)
ciudadm.append(a)
lista=[]
for i in ciudadm:
if(i[0]=="M"):
m=m+1
print(m)
| true | true |
1c362122de240151bc2c891b1ed8ffb1cf4dc369 | 12,603 | py | Python | bokeh/embed/util.py | masdao/bokeh | fc39b47387ae37ba5b06ebabf9bae0afb3eada3d | [
"BSD-3-Clause"
] | 1 | 2018-12-19T06:52:07.000Z | 2018-12-19T06:52:07.000Z | bokeh/embed/util.py | masdao/bokeh | fc39b47387ae37ba5b06ebabf9bae0afb3eada3d | [
"BSD-3-Clause"
] | null | null | null | bokeh/embed/util.py | masdao/bokeh | fc39b47387ae37ba5b06ebabf9bae0afb3eada3d | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import Sequence, OrderedDict
# External imports
from six import string_types
# Bokeh imports
from ..core.json_encoder import serialize_json
from ..core.templates import _env, DOC_JS, FILE, MACROS, PLOT_DIV, SCRIPT_TAG
from ..document.document import DEFAULT_TITLE, Document
from ..model import Model, collect_models
from ..settings import settings
from ..util.compiler import bundle_all_models
from ..util.serialization import make_id
from ..util.string import encode_utf8, indent
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class FromCurdoc(object):
    ''' This class merely provides a non-None default value for ``theme``
    arguments, since ``None`` itself is a meaningful value for users to pass.
    '''
    pass  # pure sentinel: carries no state or behavior
def check_models_or_docs(models, allow_dict=False):
    ''' Validate that *models* is a Model/Document, a sequence of them, or
    (when ``allow_dict`` is True) a string-keyed dict of them. A single
    Model/Document is normalized to a one-element list before returning.

    Raises:
        ValueError: if the input matches none of the accepted shapes.

    '''
    # normalize a lone Model/Document to a one-element list
    if isinstance(models, (Model, Document)):
        models = [models]
    valid = isinstance(models, Sequence) and all(
        isinstance(item, (Model, Document)) for item in models)
    if not valid and allow_dict and isinstance(models, dict):
        keys_ok = all(isinstance(k, string_types) for k in models.keys())
        vals_ok = all(isinstance(v, (Model, Document)) for v in models.values())
        valid = keys_ok and vals_ok
    if not valid:
        if allow_dict:
            raise ValueError(
                'Input must be a Model, a Document, a Sequence of Models and Document, or a dictionary from string to Model and Document'
            )
        else:
            raise ValueError('Input must be a Model, a Document, or a Sequence of Models and Document')
    return models
def check_one_model_or_doc(model):
    ''' Validate that *model* normalizes to exactly one Model or Document,
    and return that single item.

    '''
    normalized = check_models_or_docs(model)
    if len(normalized) != 1:
        raise ValueError("Input must be exactly one Model or Document")
    return normalized[0]
def submodel_has_python_callbacks(models):
    ''' Traverses submodels to check for Python (event) callbacks.

    Returns:
        bool: True if any collected model has at least one ``on_change``
        or event callback registered.

    '''
    # any() short-circuits on the first match, same as the original
    # flag-and-break loop, but without the mutable flag.
    return any(len(model._callbacks) > 0 or len(model._event_callbacks) > 0
               for model in collect_models(models))
def div_for_render_item(item):
    ''' Render the HTML ``<div>`` element for a single render item.

    Args:
        item (RenderItem): the item to render a div for

    Returns:
        str: HTML rendered from the PLOT_DIV template

    '''
    return PLOT_DIV.render(doc=item, macros=MACROS)
def find_existing_docs(models):
    ''' Return the single Document the given models belong to, or a fresh
    Document when none of them has one yet.

    Raises:
        RuntimeError: if the models span more than one Document.

    '''
    docs = {m if isinstance(m, Document) else m.document for m in models}
    docs.discard(None)
    if not docs:
        # no existing docs, use a brand new one
        return Document()
    if len(docs) == 1:
        # all existing docs are the same, use that one
        return docs.pop()
    # conflicting/multiple docs, raise an error
    raise RuntimeError('Multiple items in models contain documents or are '
                       'themselves documents. (Models must be owned by only a '
                       'single document). This may indicate a usage error.')
def html_page_for_render_items(bundle, docs_json, render_items, title,
                               template=None, template_variables={}):
    ''' Render a complete standalone HTML page embedding the given items.

    Args:
        bundle (tuple): (bokeh_js, bokeh_css) resource strings
        docs_json (dict): serialized documents keyed by document id
        render_items (list[RenderItem]): items to embed in the page
        title (str or None): page title; None falls back to DEFAULT_TITLE
        template: optional Jinja2 template, or template source string that
            extends the base FILE template
        template_variables (dict): extra variables passed to the template
            (the mutable default is safe here: it is only read via .copy())

    Returns:
        str: UTF-8 encoded HTML

    '''
    if title is None:
        title = DEFAULT_TITLE
    bokeh_js, bokeh_css = bundle
    json_id = make_id()
    # documents are embedded as an HTML-escaped JSON <script> block that the
    # loader script looks up by id
    json = escape(serialize_json(docs_json), quote=False)
    json = wrap_in_script_tag(json, "application/json", json_id)
    script = bundle_all_models()
    script += script_for_render_items(json_id, render_items)
    script = wrap_in_script_tag(script)
    context = template_variables.copy()
    context.update(dict(
        title = title,
        bokeh_js = bokeh_js,
        bokeh_css = bokeh_css,
        plot_script = json + script,
        docs = render_items,
        base = FILE,
        macros = MACROS,
    ))
    # single-item pages expose convenience aliases to the template
    if len(render_items) == 1:
        context["doc"] = context["docs"][0]
        context["roots"] = context["doc"].roots
    # XXX: backwards compatibility, remove for 1.0
    context["plot_div"] = "\n".join([ div_for_render_item(item) for item in render_items ])
    if template is None:
        template = FILE
    elif isinstance(template, string_types):
        template = _env.from_string("{% extends base %}\n" + template)
    html = template.render(context)
    return encode_utf8(html)
def script_for_render_items(docs_json_or_id, render_items, app_path=None, absolute_url=None):
    ''' Render the JavaScript that loads the documents and renders the items.

    Args:
        docs_json_or_id: either the DOM id (str) of an embedded JSON
            ``<script>`` block, or the docs JSON object itself
        render_items (list[RenderItem]): items the script should render
        app_path (str, optional): app path for server session items
        absolute_url (str, optional): absolute server URL, if any

    Returns:
        str: JS wrapped in an onload handler (and, outside dev mode, in
        a ``Bokeh.safely`` guard)

    '''
    if isinstance(docs_json_or_id, string_types):
        # reference the embedded <script type="application/json"> block
        docs_json = "document.getElementById('%s').textContent" % docs_json_or_id
    else:
        # XXX: encodes &, <, > and ', but not ". This is because " is used a lot in JSON,
        # and encoding it would significantly increase size of generated files. Doing so
        # is safe, because " in strings was already encoded by JSON, and the semi-encoded
        # JSON string is included in JavaScript in single quotes.
        docs_json = serialize_json(docs_json_or_id, pretty=False) # JSON string
        docs_json = escape(docs_json, quote=("'",)) # make HTML-safe
        docs_json = docs_json.replace("\\", "\\\\") # double encode escapes
        docs_json = "'" + docs_json + "'" # JS string
    js = DOC_JS.render(
        docs_json=docs_json,
        render_items=serialize_json([ item.to_json() for item in render_items ], pretty=False),
        app_path=app_path,
        absolute_url=absolute_url,
    )
    if not settings.dev:
        js = wrap_in_safely(js)
    return wrap_in_onload(js)
class RenderRoot(object):
    ''' A lightweight record pairing a DOM element id with a root model id.

    Two roots compare equal when they target the same DOM element.

    '''

    def __init__(self, elementid, id, name=None, tags=None):
        self.elementid = elementid
        self.id = id
        # fall back to an empty name / tag list when none are supplied
        self.name = name or ""
        self.tags = tags or []

    def __eq__(self, other):
        # equality is defined purely by the target DOM element
        return isinstance(other, self.__class__) and self.elementid == other.elementid
class RenderRoots(object):
    ''' Wrap an ordered mapping from root models to element ids, offering
    index-, name-, and attribute-based lookup of ``RenderRoot`` records.

    '''

    def __init__(self, roots):
        self._roots = roots

    def __len__(self):
        return len(self._roots.items())

    def __getitem__(self, key):
        if isinstance(key, int):
            # positional lookup into the ordered mapping
            (root, elementid) = list(self._roots.items())[key]
            return RenderRoot(elementid, root._id, root.name, root.tags)
        # otherwise look the root up by its .name
        for root, elementid in self._roots.items():
            if root.name == key:
                return RenderRoot(elementid, root._id, root.name, root.tags)
        raise ValueError("root with '%s' name not found" % key)

    def __getattr__(self, key):
        # attribute access is an alias for name lookup
        return self.__getitem__(key)

    def to_json(self):
        return OrderedDict([ (root._id, elementid) for root, elementid in self._roots.items() ])
class RenderItem(object):
    ''' Represent a single embeddable item: either a standalone document
    (``docid``) or a server session (``sessionid``), plus the root models
    to render. Exactly one of ``docid``/``sessionid`` must be given.

    '''

    def __init__(self, docid=None, sessionid=None, elementid=None, roots=None, use_for_title=None):
        if (docid is None and sessionid is None) or (docid is not None and sessionid is not None):
            raise ValueError("either docid or sessionid must be provided")
        if roots is None:
            roots = OrderedDict()
        elif isinstance(roots, list):
            # a bare list of root models gets fresh element ids generated
            roots = OrderedDict([ (root, make_id()) for root in roots ])
        self.docid = docid
        self.sessionid = sessionid
        self.elementid = elementid
        self.roots = RenderRoots(roots)
        self.use_for_title = use_for_title

    def to_json(self):
        # only set keys are emitted, keeping the embedded payload small
        json = {}
        if self.docid is not None:
            json["docid"] = self.docid
        else:
            json["sessionid"] = self.sessionid
        if self.elementid is not None:
            json["elementid"] = self.elementid
        if self.roots:
            json["roots"] = self.roots.to_json()
        if self.use_for_title is not None:
            json["use_for_title"] = self.use_for_title
        return json

    def __eq__(self, other):
        # two items are equal iff their JSON representations match
        if not isinstance(other, self.__class__):
            return False
        else:
            return self.to_json() == other.to_json()
CALLBACKS_WARNING = """
You are generating standalone HTML/JS output, but trying to use real Python
callbacks (i.e. with on_change or on_event). This combination cannot work.
Only JavaScript callbacks may be used with standalone output. For more
information on JavaScript callbacks with Bokeh, see:
http://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html
Alternatively, to use real Python callbacks, a Bokeh server application may
be used. For more information on building and running Bokeh applications, see:
http://bokeh.pydata.org/en/latest/docs/user_guide/server.html
"""
def standalone_docs_json_and_render_items(models):
    ''' Compute the serialized documents JSON and the RenderItems needed
    to embed the given models (or documents) as standalone output.

    Args:
        models: a Model, a Document, or a sequence of them

    Returns:
        tuple: (docs_json, render_items)

    '''
    models = check_models_or_docs(models)
    # Python callbacks cannot work in standalone output; warn loudly.
    # NOTE(review): log.warn is deprecated in favor of log.warning
    if submodel_has_python_callbacks(models):
        log.warn(CALLBACKS_WARNING)
    docs = {}
    for model_or_doc in models:
        if isinstance(model_or_doc, Document):
            model = None
            doc = model_or_doc
        else:
            model = model_or_doc
            doc = model.document
        if doc is None:
            raise ValueError("to render a model as HTML it must be part of a document")
        if doc not in docs:
            # one (docid, roots) pair per distinct document
            docs[doc] = (make_id(), OrderedDict())
        (docid, roots) = docs[doc]
        if model is not None:
            # embed just this one model as a root
            roots[model] = make_id()
        else:
            # a whole document: every one of its roots gets embedded
            for model in doc.roots:
                roots[model] = make_id()
    docs_json = {}
    for doc, (docid, _) in docs.items():
        docs_json[docid] = doc.to_json()
    render_items = []
    for _, (docid, roots) in docs.items():
        render_items.append(RenderItem(docid, roots=roots))
    return (docs_json, render_items)
def wrap_in_onload(code):
    ''' Wrap JS code so it runs only after DOMContentLoaded (or at once
    if the document has already finished loading).

    '''
    return _ONLOAD % dict(code=indent(code, 4))
def wrap_in_safely(code):
    ''' Wrap JS code in a ``Bokeh.safely(...)`` guard (skipped in dev
    mode by the caller).

    '''
    return _SAFELY % dict(code=indent(code, 2))
def wrap_in_script_tag(js, type="text/javascript", id=None):
    ''' Wrap JS (or JSON) source in a ``<script>`` tag via the SCRIPT_TAG
    template, with optional MIME type and element id.

    '''
    return SCRIPT_TAG.render(js_code=indent(js, 2), type=type, id=id)
# based on `html` stdlib module (3.2+)
def escape(s, quote=("'", '"')):
    """
    Replace special characters "&", "<" and ">" to HTML-safe sequences.

    If the optional flag quote is true (the default), the quotation mark
    characters, both double quote (") and single quote (') characters are also
    translated.
    """
    # ampersand must be substituted first so the later entities are not
    # themselves re-escaped
    replacements = [("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")]
    if quote:
        if '"' in quote:
            replacements.append(('"', "&quot;"))
        if "'" in quote:
            replacements.append(("'", "&#x27;"))
    for char, entity in replacements:
        s = s.replace(char, entity)
    return s
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_ONLOAD = """\
(function() {
var fn = function() {
%(code)s
};
if (document.readyState != "loading") fn();
else document.addEventListener("DOMContentLoaded", fn);
})();\
"""
_SAFELY = """\
Bokeh.safely(function() {
%(code)s
});\
"""
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 30.965602 | 137 | 0.566611 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from collections import Sequence, OrderedDict
from six import string_types
from ..core.json_encoder import serialize_json
from ..core.templates import _env, DOC_JS, FILE, MACROS, PLOT_DIV, SCRIPT_TAG
from ..document.document import DEFAULT_TITLE, Document
from ..model import Model, collect_models
from ..settings import settings
from ..util.compiler import bundle_all_models
from ..util.serialization import make_id
from ..util.string import encode_utf8, indent
class FromCurdoc(object):
pass
def check_models_or_docs(models, allow_dict=False):
input_type_valid = False
if isinstance(models, (Model, Document)):
models = [models]
if isinstance(models, Sequence) and all(isinstance(x, (Model, Document)) for x in models):
input_type_valid = True
if allow_dict:
if isinstance(models, dict) and \
all(isinstance(x, string_types) for x in models.keys()) and \
all(isinstance(x, (Model, Document)) for x in models.values()):
input_type_valid = True
if not input_type_valid:
if allow_dict:
raise ValueError(
'Input must be a Model, a Document, a Sequence of Models and Document, or a dictionary from string to Model and Document'
)
else:
raise ValueError('Input must be a Model, a Document, or a Sequence of Models and Document')
return models
def check_one_model_or_doc(model):
models = check_models_or_docs(model)
if len(models) != 1:
raise ValueError("Input must be exactly one Model or Document")
return models[0]
def submodel_has_python_callbacks(models):
has_python_callback = False
for model in collect_models(models):
if len(model._callbacks) > 0 or len(model._event_callbacks) > 0:
has_python_callback = True
break
return has_python_callback
def div_for_render_item(item):
return PLOT_DIV.render(doc=item, macros=MACROS)
def find_existing_docs(models):
existing_docs = set(m if isinstance(m, Document) else m.document for m in models)
existing_docs.discard(None)
if len(existing_docs) == 0:
doc = Document()
elif len(existing_docs) == 1:
doc = existing_docs.pop()
else:
msg = ('Multiple items in models contain documents or are '
'themselves documents. (Models must be owned by only a '
'single document). This may indicate a usage error.')
raise RuntimeError(msg)
return doc
def html_page_for_render_items(bundle, docs_json, render_items, title,
template=None, template_variables={}):
if title is None:
title = DEFAULT_TITLE
bokeh_js, bokeh_css = bundle
json_id = make_id()
json = escape(serialize_json(docs_json), quote=False)
json = wrap_in_script_tag(json, "application/json", json_id)
script = bundle_all_models()
script += script_for_render_items(json_id, render_items)
script = wrap_in_script_tag(script)
context = template_variables.copy()
context.update(dict(
title = title,
bokeh_js = bokeh_js,
bokeh_css = bokeh_css,
plot_script = json + script,
docs = render_items,
base = FILE,
macros = MACROS,
))
if len(render_items) == 1:
context["doc"] = context["docs"][0]
context["roots"] = context["doc"].roots
context["plot_div"] = "\n".join([ div_for_render_item(item) for item in render_items ])
if template is None:
template = FILE
elif isinstance(template, string_types):
template = _env.from_string("{% extends base %}\n" + template)
html = template.render(context)
return encode_utf8(html)
def script_for_render_items(docs_json_or_id, render_items, app_path=None, absolute_url=None):
if isinstance(docs_json_or_id, string_types):
docs_json = "document.getElementById('%s').textContent" % docs_json_or_id
else:
# and encoding it would significantly increase size of generated files. Doing so
# is safe, because " in strings was already encoded by JSON, and the semi-encoded
# JSON string is included in JavaScript in single quotes.
docs_json = serialize_json(docs_json_or_id, pretty=False) # JSON string
docs_json = escape(docs_json, quote=("'",)) # make HTML-safe
docs_json = docs_json.replace("\\", "\\\\") # double encode escapes
docs_json = "'" + docs_json + "'" # JS string
js = DOC_JS.render(
docs_json=docs_json,
render_items=serialize_json([ item.to_json() for item in render_items ], pretty=False),
app_path=app_path,
absolute_url=absolute_url,
)
if not settings.dev:
js = wrap_in_safely(js)
return wrap_in_onload(js)
class RenderRoot(object):
def __init__(self, elementid, id, name=None, tags=None):
self.elementid = elementid
self.id = id
self.name = name or ""
self.tags = tags or []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
return self.elementid == other.elementid
class RenderRoots(object):
def __init__(self, roots):
self._roots = roots
def __len__(self):
return len(self._roots.items())
def __getitem__(self, key):
if isinstance(key, int):
(root, elementid) = list(self._roots.items())[key]
else:
for root, elementid in self._roots.items():
if root.name == key:
break
else:
raise ValueError("root with '%s' name not found" % key)
return RenderRoot(elementid, root._id, root.name, root.tags)
def __getattr__(self, key):
return self.__getitem__(key)
def to_json(self):
return OrderedDict([ (root._id, elementid) for root, elementid in self._roots.items() ])
class RenderItem(object):
def __init__(self, docid=None, sessionid=None, elementid=None, roots=None, use_for_title=None):
if (docid is None and sessionid is None) or (docid is not None and sessionid is not None):
raise ValueError("either docid or sessionid must be provided")
if roots is None:
roots = OrderedDict()
elif isinstance(roots, list):
roots = OrderedDict([ (root, make_id()) for root in roots ])
self.docid = docid
self.sessionid = sessionid
self.elementid = elementid
self.roots = RenderRoots(roots)
self.use_for_title = use_for_title
def to_json(self):
json = {}
if self.docid is not None:
json["docid"] = self.docid
else:
json["sessionid"] = self.sessionid
if self.elementid is not None:
json["elementid"] = self.elementid
if self.roots:
json["roots"] = self.roots.to_json()
if self.use_for_title is not None:
json["use_for_title"] = self.use_for_title
return json
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
return self.to_json() == other.to_json()
CALLBACKS_WARNING = """
You are generating standalone HTML/JS output, but trying to use real Python
callbacks (i.e. with on_change or on_event). This combination cannot work.
Only JavaScript callbacks may be used with standalone output. For more
information on JavaScript callbacks with Bokeh, see:
http://bokeh.pydata.org/en/latest/docs/user_guide/interaction/callbacks.html
Alternatively, to use real Python callbacks, a Bokeh server application may
be used. For more information on building and running Bokeh applications, see:
http://bokeh.pydata.org/en/latest/docs/user_guide/server.html
"""
def standalone_docs_json_and_render_items(models):
models = check_models_or_docs(models)
if submodel_has_python_callbacks(models):
log.warn(CALLBACKS_WARNING)
docs = {}
for model_or_doc in models:
if isinstance(model_or_doc, Document):
model = None
doc = model_or_doc
else:
model = model_or_doc
doc = model.document
if doc is None:
raise ValueError("to render a model as HTML it must be part of a document")
if doc not in docs:
docs[doc] = (make_id(), OrderedDict())
(docid, roots) = docs[doc]
if model is not None:
roots[model] = make_id()
else:
for model in doc.roots:
roots[model] = make_id()
docs_json = {}
for doc, (docid, _) in docs.items():
docs_json[docid] = doc.to_json()
render_items = []
for _, (docid, roots) in docs.items():
render_items.append(RenderItem(docid, roots=roots))
return (docs_json, render_items)
def wrap_in_onload(code):
return _ONLOAD % dict(code=indent(code, 4))
def wrap_in_safely(code):
return _SAFELY % dict(code=indent(code, 2))
def wrap_in_script_tag(js, type="text/javascript", id=None):
return SCRIPT_TAG.render(js_code=indent(js, 2), type=type, id=id)
# based on `html` stdlib module (3.2+)
def escape(s, quote=("'", '"')):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
if '"' in quote:
s = s.replace('"', """)
if "'" in quote:
s = s.replace("'", "'")
return s
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_ONLOAD = """\
(function() {
var fn = function() {
%(code)s
};
if (document.readyState != "loading") fn();
else document.addEventListener("DOMContentLoaded", fn);
})();\
"""
_SAFELY = """\
Bokeh.safely(function() {
%(code)s
});\
"""
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| true | true |
1c362224e270a6a441cc8331fe7cff5734ba93c3 | 5,627 | py | Python | main.py | gdaguilarc/intelligent-systems | af8c7374a3225ad5944e63956a666ff1ccb6532d | [
"MIT"
] | null | null | null | main.py | gdaguilarc/intelligent-systems | af8c7374a3225ad5944e63956a666ff1ccb6532d | [
"MIT"
] | 10 | 2021-02-02T22:58:28.000Z | 2022-02-10T02:10:26.000Z | main.py | gdaguilarc/intelligent-systems | af8c7374a3225ad5944e63956a666ff1ccb6532d | [
"MIT"
] | null | null | null | # Proyecto Final
# Clustering with kmeans a group of numbers from an image, crop and then predict the label with a NN
# Location:
# Tecnologico de Monterrey Campus Ciudad de México
# Contributors:
# Andrea Beatriz Becerra Bolaños - A01337434
# Guillermo David Aguilar Castilleja - A01337242
# Dependencies:
# tensorflow == 2.3.0
from datetime import date
import random
from pathlib import Path
from PIL import Image, ImageOps
from sklearn.cluster import KMeans
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from matplotlib import pyplot as plt
# IMAGES
IMAGE_ONE = Image.open("img/digits-handmade.jpg")
IMAGE_TWO = Image.open("img/prueba1.png")
IMAGE_THREE = Image.open("img/prueba2.png")
IMAGE_FOUR = Image.open("img/prueba3.jpeg")
# NET CONSTANTS
NUMBER_LAYERS = 3
EPOCHS = 15
OPTIMIZER = "Adam"
DATE = date.today().strftime("%d-%m-%Y")
FILENAME = "DigitNN-Layers{}-{}-{}epochs-{}.h5".format(
NUMBER_LAYERS, OPTIMIZER, EPOCHS, DATE)
BEST_NET = "DigitNN-3-adam-20-29-07-2020.h5"
# DATA: standard MNIST digits, (60000, 28, 28) train / (10000, 28, 28) test
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# NORMALIZING DATA: scale pixel values from [0, 255] into [0, 1]
x_train = x_train / 255
x_test = x_test / 255
# MODEL
def train(x_train, y_train, epochs=20, optimizer="adam", net=FILENAME):
    """Load a saved digit classifier if ``models/<net>`` exists; otherwise
    build, train, evaluate, plot, and save a new dense network.

    Args:
        x_train: normalized training images, one 28x28 array per sample.
        y_train: integer class labels for ``x_train``.
        epochs (int): number of training epochs.
        optimizer (str): Keras optimizer name.
        net (str): model file name under ``models/`` to load if present.

    Returns:
        A compiled (possibly freshly trained) Keras Sequential model.
    """
    # The original duplicated this logic in two branches that did exactly
    # the same thing for net == FILENAME and net != FILENAME; a single
    # Path expression covers both cases.
    model_path = Path("models") / net
    if model_path.is_file():
        return load_model(str(model_path))
    model = Sequential()
    model.add(Flatten())
    model.add(Dense(392, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation="softmax"))
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy', metrics=["accuracy"])
    history = model.fit(x_train, y_train, epochs=epochs,
                        shuffle=True, validation_split=0.25)
    model.summary()
    # new models are always saved under the default FILENAME
    model.save("models/" + FILENAME)
    print_history(history)
    # NOTE(review): evaluates against the module-level x_test/y_test, not
    # data passed by the caller -- confirm this is intentional.
    model.evaluate(x_test, y_test)
    return model
def print_history(history):
    """Plot the training accuracy and loss curves from a Keras History."""
    plt.figure(figsize=(10, 6))
    for metric, position in (("accuracy", 1), ("loss", 2)):
        values = history.history[metric]
        plt.subplot(2, 2, position)
        plt.plot(range(len(values)), values)
        plt.ylabel(metric)
        plt.xlabel('epochs')
    plt.show()
def image_preprocessing(img, k, inverted=True):
    """Split an image containing ``k`` digits into cropped 28x28 images.

    Args:
        img (PIL.Image): source image with handwritten digits.
        k (int): expected number of digits (used as the cluster count).
        inverted (bool): when True, invert the grayscale image first.

    Returns:
        list[PIL.Image]: one padded 28x28 digit image per cluster.
    """
    img = ImageOps.grayscale(img)
    if inverted:
        img = ImageOps.invert(img)
    img = np.asarray(img)
    # (row, col) coordinates of all dark pixels in the image
    pairs = pair_points(img)
    cluster_labels = cluster(pairs, k)
    images = []
    for i in range(k):
        images.append(cutted_digit(cluster_labels, img, pairs, i))
    return images
def cutted_digit(cluster, img, pairs, index, inverted=True):
    """Crop one cluster's bounding box out of ``img`` and return it as a
    28x28 MNIST-style image (20x20 digit plus 4px padding on each side).

    Args:
        cluster (np.ndarray): cluster label per point in ``pairs``.
        img (np.ndarray): grayscale source image.
        pairs (np.ndarray): (row, col) coordinates of dark pixels.
        index (int): which cluster to extract.
        inverted (bool): unused. NOTE(review): dead parameter kept for
            interface compatibility -- confirm before removing.
    """
    positions = np.where(cluster == index)
    img_boundaries = pairs[positions][:]
    # Get Square: bounding box of this cluster's points
    y_max = img_boundaries[:, 0].max()
    x_max = img_boundaries[:, 1].max()
    y_min = img_boundaries[:, 0].min()
    x_min = img_boundaries[:, 1].min()
    area = (x_min, y_min, x_max, y_max)
    cutted = Image.fromarray(img)
    cutted = cutted.crop(area)
    resized = cutted.resize((20, 20))
    # resized.show() # Borrar
    resized = np.array(resized)
    resized = Image.fromarray(resized)
    # invert back so the digit is white-on-black like MNIST samples
    resized = ImageOps.invert(resized)
    resized = np.asarray(resized)
    # pad to the 28x28 input size the network expects
    return Image.fromarray(np.pad(resized, ((4, 4), (4, 4)), "constant", constant_values=0))
def pair_points(data):
    """Return the (row, col) coordinates of all dark pixels in ``data``.

    A pixel counts as "dark" when its grayscale value is below 125.

    Args:
        data: 2-D grayscale image array (or anything np.asarray accepts).

    Returns:
        np.ndarray: shape (n, 2) array of (row, col) pairs, in the same
        row-major order as the original nested loops produced.
    """
    # np.argwhere vectorizes the double Python loop and, unlike the old
    # np.array([]) path, also returns a well-formed (0, 2) result when no
    # pixel is dark.
    return np.argwhere(np.asarray(data) < 125)
def cluster(pairs, k):
    """Group pixel coordinates into ``k`` clusters with KMeans and show a
    scatter plot of the assignment.

    Args:
        pairs (np.ndarray): (row, col) dark-pixel coordinates.
        k (int): number of clusters (digits expected in the image).

    Returns:
        np.ndarray: cluster label for each point in ``pairs``.
    """
    # FIX (naming): the estimator was misleadingly named `dbscan` even
    # though the algorithm used is KMeans.
    kmeans = KMeans(n_clusters=k)
    labels = kmeans.fit_predict(pairs)
    # x axis = image column, y axis = image row
    plt.scatter(pairs[:, 1], pairs[:, 0], c=labels, cmap='plasma')
    plt.show()
    return labels
def predict_images(images):
    """Predict the digit in each image and show the images with titles.

    Uses the module-level ``model``; displays one subplot per image,
    titled with the predicted digit.

    :param images: list of 28x28 grayscale PIL images.
    """
    _, axs = plt.subplots(1, len(images))
    for i in range(len(images)):
        image = np.asarray(images[i])
        # Add batch and channel axes: the model input is (1, 28, 28, 1).
        pred = model.predict(image.reshape(1, 28, 28, 1))
        axs[i].set_title(str(pred.argmax()))
        axs[i].imshow(image, cmap="gray")
        axs[i].axis('off')
    plt.show()
# TRAINING OR IMPORTING THE MODEL
# Loads the model from disk if it was already saved, otherwise trains and
# saves a new one.
model = train(x_train, y_train, EPOCHS, OPTIMIZER, net=FILENAME)
# Manually recorded accuracy of previously saved models
# (correct digits / total digits per test image):
# DigitNN-3-adam-20-29-07-2020:
#   IMAGE_ONE 12/16 (75%), IMAGE_TWO 7/10 (70%), IMAGE_THREE 8/10 (80%)
# DigitNN-Layers3-Adam-20epochs-30-07-2020:
#   IMAGE_ONE 12/16 (75%), IMAGE_TWO 7/10 (70%), IMAGE_THREE 5/10 (50%)
# DigitNN-Layers3-Adam-15epochs-30-07-2020:
#   IMAGE_ONE 12/16 (62%), IMAGE_TWO 7/10 (70%), IMAGE_THREE 7/10 (70%)
# DigitNN-3-Nadam-20epochs-30-07-2020 (second best):
#   IMAGE_ONE 12/16 (75%), IMAGE_TWO 8/10 (80%), IMAGE_THREE 6/10 (60%)
# DigitNN-3-Adam-15epochs-30-07-2020 (third best):
#   IMAGE_ONE 11/16 (68.75%), IMAGE_TWO 7/10 (70%), IMAGE_THREE 7/10 (70%)
# IMAGE CLUSTERING AND SEGMENTATION (clustering: KMeans), then prediction.
images = image_preprocessing(IMAGE_ONE, 16, False)
# RESULTS
predict_images(images)
| 25.461538 | 100 | 0.665897 |
from datetime import date
import random
from pathlib import Path
from PIL import Image, ImageOps
from sklearn.cluster import KMeans
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from matplotlib import pyplot as plt
IMAGE_ONE = Image.open("img/digits-handmade.jpg")
IMAGE_TWO = Image.open("img/prueba1.png")
IMAGE_THREE = Image.open("img/prueba2.png")
IMAGE_FOUR = Image.open("img/prueba3.jpeg")
NUMBER_LAYERS = 3
EPOCHS = 15
OPTIMIZER = "Adam"
DATE = date.today().strftime("%d-%m-%Y")
FILENAME = "DigitNN-Layers{}-{}-{}epochs-{}.h5".format(
NUMBER_LAYERS, OPTIMIZER, EPOCHS, DATE)
BEST_NET = "DigitNN-3-adam-20-29-07-2020.h5"
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255
x_test = x_test / 255
def train(x_train, y_train, epochs=20, optimizer="adam", net=FILENAME):
    """Load a saved model if it exists, otherwise build, train and save one.

    The network is a dense classifier for MNIST: Flatten -> 392 relu ->
    Dropout(0.4) -> 128 relu -> Dropout(0.2) -> 10 softmax.

    :param x_train: training images (values scaled to [0, 1]).
    :param y_train: integer training labels.
    :param epochs: number of training epochs.
    :param optimizer: Keras optimizer name or instance.
    :param net: model filename to load from ``models/`` if present.
    :return: a trained (or loaded) Keras model.

    NOTE(review): on the training path the model is always saved under
    ``FILENAME`` (not ``net``), and evaluation uses the module-level
    ``x_test``/``y_test`` globals.
    """
    if net != FILENAME:
        my_file = Path("models/" + net)
    else:
        my_file = Path("models/" + FILENAME)
    if my_file.is_file():
        # A saved model exists on disk -- load instead of retraining.
        if net != FILENAME:
            model = load_model("models/" + net)
        else:
            model = load_model("models/" + FILENAME)
        return model
    model = Sequential()
    model.add(Flatten())
    model.add(Dense(392, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation="softmax"))
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy', metrics=["accuracy"])
    history = model.fit(x_train, y_train, epochs=epochs,
                        shuffle=True, validation_split=0.25)
    model.summary()
    model.save("models/"+FILENAME)
    print_history(history)
    model.evaluate(x_test, y_test)
    return model
def print_history(history):
plt.figure(figsize=(10, 6))
plt.subplot(2, 2, 1)
plt.plot(
range(len(history.history['accuracy'])), history.history['accuracy'])
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.subplot(2, 2, 2)
plt.plot(range(len(history.history['loss'])), history.history['loss'])
plt.ylabel('loss')
plt.xlabel('epochs')
plt.show()
def image_preprocessing(img, k, inverted=True):
img = ImageOps.grayscale(img)
if inverted:
img = ImageOps.invert(img)
img = np.asarray(img)
pairs = pair_points(img)
cluster_labels = cluster(pairs, k)
images = []
for i in range(k):
images.append(cutted_digit(cluster_labels, img, pairs, i))
return images
def cutted_digit(cluster, img, pairs, index, inverted=True):
    """Crop the digit of cluster ``index`` and normalize it to 28x28.

    Crops the cluster's bounding box, resizes to 20x20, inverts the image
    (light ink on dark background, like MNIST) and pads with 4 black
    pixels per side.

    :param cluster: array of cluster labels, one per entry of ``pairs``.
    :param img: full grayscale image as a numpy array.
    :param pairs: (row, col) coordinates of the dark pixels of ``img``.
    :param index: cluster label of the digit to extract.
    :param inverted: unused; kept for backward compatibility.
    :return: 28x28 PIL image of the digit.
    """
    positions = np.where(cluster == index)
    img_boundaries = pairs[positions][:]
    y_max = img_boundaries[:, 0].max()
    x_max = img_boundaries[:, 1].max()
    y_min = img_boundaries[:, 0].min()
    x_min = img_boundaries[:, 1].min()
    area = (x_min, y_min, x_max, y_max)
    cutted = Image.fromarray(img)
    cutted = cutted.crop(area)
    resized = cutted.resize((20, 20))
    # BUG FIX: this line previously read "sized = np.array(resized)",
    # which left ``resized`` a PIL image and then passed a PIL image to
    # Image.fromarray below, raising at runtime.
    resized = np.array(resized)
    resized = Image.fromarray(resized)
    resized = ImageOps.invert(resized)
    resized = np.asarray(resized)
    return Image.fromarray(np.pad(resized, ((4, 4), (4, 4)), "constant", constant_values=0))
def pair_points(data):
points = []
max_x = len(data)
max_y = len(data[0])
for i in range(max_x):
for j in range(max_y):
if data[i][j] < 125:
points.append((i, j))
return np.array(points)
def cluster(pairs, k):
dbscan = KMeans(n_clusters=k)
cluster = dbscan.fit_predict(pairs)
plt.scatter(pairs[:, 1], pairs[:, 0], c=cluster, cmap='plasma')
plt.show()
return cluster
def predict_images(images):
_, axs = plt.subplots(1, len(images))
for i in range(len(images)):
image = np.asarray(images[i])
pred = model.predict(image.reshape(1, 28, 28, 1))
axs[i].set_title(str(pred.argmax()))
axs[i].imshow(image, cmap="gray")
axs[i].axis('off')
plt.show()
model = train(x_train, y_train, EPOCHS, OPTIMIZER, net=FILENAME)
images = image_preprocessing(IMAGE_ONE, 16, False)
predict_images(images)
| true | true |
1c3622adcfb1f767e507a0d34984f641b7721a84 | 4,248 | py | Python | core/circuit.py | ethanabrooks/pytorch-dnc | bf7a039e3062742654364fb80b1ab5d44e5746f8 | [
"MIT"
] | null | null | null | core/circuit.py | ethanabrooks/pytorch-dnc | bf7a039e3062742654364fb80b1ab5d44e5746f8 | [
"MIT"
] | null | null | null | core/circuit.py | ethanabrooks/pytorch-dnc | bf7a039e3062742654364fb80b1ab5d44e5746f8 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Circuit(
    nn.Module
):  # NOTE: basically this whole module is treated as a custom rnn cell
    """DNC-style circuit wiring a controller to an external-memory accessor.

    The whole module acts as one custom RNN cell: every ``forward`` call
    consumes one input step together with the read vectors from the
    previous step, writes to / reads from external memory through the
    accessor, and emits one output step.

    ``self.controller``, ``self.accessor`` and ``self.hid_to_out`` are
    expected to be created by subclasses; they are referenced but not
    defined here.
    """

    def __init__(self, args):
        """Copy hyper-parameters from ``args`` and propagate the shared
        dimensions into the controller/accessor parameter bundles."""
        super(Circuit, self).__init__()
        # logging
        self.logger = args.logger
        # general params
        self.use_cuda = args.use_cuda
        self.dtype = args.dtype
        # dimensions
        self.batch_size = args.batch_size
        self.input_dim = args.input_dim
        self.output_dim = args.output_dim
        self.hidden_dim = args.hidden_dim
        self.num_write_heads = args.num_write_heads
        self.num_read_heads = args.num_read_heads
        self.mem_hei = args.mem_hei
        self.mem_wid = args.mem_wid
        self.clip_value = args.clip_value
        # functional components
        self.controller_params = args.controller_params
        self.accessor_params = args.accessor_params
        # now we fill in the missing values for each module
        # one read vector of width mem_wid per read head
        self.read_vec_dim = self.num_read_heads * self.mem_wid
        # controller
        self.controller_params.batch_size = self.batch_size
        self.controller_params.input_dim = self.input_dim
        self.controller_params.read_vec_dim = self.read_vec_dim
        self.controller_params.output_dim = self.output_dim
        self.controller_params.hidden_dim = self.hidden_dim
        self.controller_params.mem_hei = self.mem_hei
        self.controller_params.mem_wid = self.mem_wid
        self.controller_params.clip_value = self.clip_value
        # accessor: {write_heads, read_heads, memory}
        self.accessor_params.batch_size = self.batch_size
        self.accessor_params.hidden_dim = self.hidden_dim
        self.accessor_params.num_write_heads = self.num_write_heads
        self.accessor_params.num_read_heads = self.num_read_heads
        self.accessor_params.mem_hei = self.mem_hei
        self.accessor_params.mem_wid = self.mem_wid
        self.accessor_params.clip_value = self.clip_value
        self.logger.warning(
            "<-----------------------------======> Circuit: {Controller, Accessor}"
        )

    def _init_weights(self):
        """Initialize module weights; must be implemented by subclasses."""
        # Typo fix: the message previously read "base calss".
        raise NotImplementedError("not implemented in base class")

    def print_model(self):
        """Log the overall module architecture."""
        self.logger.warning(
            "<-----------------------------======> Circuit: {Overall Architecture}"
        )
        self.logger.warning(self)

    def _reset_states(
        self
    ):  # should be called at the beginning of forwarding a new input sequence
        """Reset per-sequence state (read vector, controller, heads)."""
        # we first reset the previous read vector
        self.read_vec_vb = Variable(self.read_vec_ts).type(self.dtype)
        # we then reset the controller's hidden state
        self.controller._reset_states()
        # we then reset the write/read weights of heads
        self.accessor._reset_states()

    def _reset(self):
        """Full reset: weights, dtype, architecture log and states."""
        self._init_weights()
        self.type(self.dtype)
        self.print_model()
        # reset internal states (small constant avoids exact zeros)
        self.read_vec_ts = torch.zeros(self.batch_size, self.read_vec_dim).fill_(1e-6)
        self._reset_states()

    def forward(self, input_vb):
        """Run one RNN-cell step.

        :param input_vb: input variable for the current time step.
        :return: sigmoid output of shape (1, batch_size, output_dim).
        """
        # NOTE: the operation order must be the following: control, access{write, read}, output
        # 1. first feed {input, read_vec_{t-1}} to controller
        hidden_vb = self.controller.forward(input_vb, self.read_vec_vb)
        # 2. then we write to memory_{t-1} to get memory_{t}; then read from memory_{t} to get read_vec_{t}
        self.read_vec_vb = self.accessor.forward(hidden_vb)
        # 3. finally we concat the output from the controller and the current read_vec_{t} to get the final output
        output_vb = self.hid_to_out(
            torch.cat(
                (
                    hidden_vb.view(-1, self.hidden_dim),
                    self.read_vec_vb.view(-1, self.read_vec_dim),
                ),
                1,
            )
        )
        # we clip the output values here (torch.sigmoid replaces the
        # deprecated F.sigmoid; identical result)
        return torch.sigmoid(
            torch.clamp(output_vb, min=-self.clip_value, max=self.clip_value)
        ).view(1, self.batch_size, self.output_dim)
| 39.333333 | 114 | 0.649953 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Circuit(
nn.Module
):
def __init__(self, args):
super(Circuit, self).__init__()
self.logger = args.logger
self.use_cuda = args.use_cuda
self.dtype = args.dtype
self.batch_size = args.batch_size
self.input_dim = args.input_dim
self.output_dim = args.output_dim
self.hidden_dim = args.hidden_dim
self.num_write_heads = args.num_write_heads
self.num_read_heads = args.num_read_heads
self.mem_hei = args.mem_hei
self.mem_wid = args.mem_wid
self.clip_value = args.clip_value
self.controller_params = args.controller_params
self.accessor_params = args.accessor_params
self.read_vec_dim = self.num_read_heads * self.mem_wid
self.controller_params.batch_size = self.batch_size
self.controller_params.input_dim = self.input_dim
self.controller_params.read_vec_dim = self.read_vec_dim
self.controller_params.output_dim = self.output_dim
self.controller_params.hidden_dim = self.hidden_dim
self.controller_params.mem_hei = self.mem_hei
self.controller_params.mem_wid = self.mem_wid
self.controller_params.clip_value = self.clip_value
self.accessor_params.batch_size = self.batch_size
self.accessor_params.hidden_dim = self.hidden_dim
self.accessor_params.num_write_heads = self.num_write_heads
self.accessor_params.num_read_heads = self.num_read_heads
self.accessor_params.mem_hei = self.mem_hei
self.accessor_params.mem_wid = self.mem_wid
self.accessor_params.clip_value = self.clip_value
self.logger.warning(
"<-----------------------------======> Circuit: {Controller, Accessor}"
)
def _init_weights(self):
raise NotImplementedError("not implemented in base calss")
def print_model(self):
self.logger.warning(
"<-----------------------------======> Circuit: {Overall Architecture}"
)
self.logger.warning(self)
def _reset_states(
self
):
self.read_vec_vb = Variable(self.read_vec_ts).type(self.dtype)
self.controller._reset_states()
# we then reset the write/read weights of heads
self.accessor._reset_states()
def _reset(self):
self._init_weights()
self.type(self.dtype)
self.print_model()
# reset internal states
self.read_vec_ts = torch.zeros(self.batch_size, self.read_vec_dim).fill_(1e-6)
self._reset_states()
def forward(self, input_vb):
# NOTE: the operation order must be the following: control, access{write, read}, output
# 1. first feed {input, read_vec_{t-1}} to controller
hidden_vb = self.controller.forward(input_vb, self.read_vec_vb)
# 2. then we write to memory_{t-1} to get memory_{t}; then read from memory_{t} to get read_vec_{t}
self.read_vec_vb = self.accessor.forward(hidden_vb)
# 3. finally we concat the output from the controller and the current read_vec_{t} to get the final output
output_vb = self.hid_to_out(
torch.cat(
(
hidden_vb.view(-1, self.hidden_dim),
self.read_vec_vb.view(-1, self.read_vec_dim),
),
1,
)
)
# we clip the output values here
return F.sigmoid(
torch.clamp(output_vb, min=-self.clip_value, max=self.clip_value)
).view(1, self.batch_size, self.output_dim)
| true | true |
1c362625e0815188ed59394ae7a59039a798e9ab | 973 | py | Python | class10/exercises/ex4_cf_ascompleted_procs.py | ksannedhi/pyplus_course | fc3499f2dfef472dc49fe6caddf2e6e2be160f4b | [
"Apache-2.0"
] | 39 | 2019-03-03T18:16:55.000Z | 2022-02-17T17:05:18.000Z | class10/exercises/ex4_cf_ascompleted_procs.py | ksannedhi/pyplus_course | fc3499f2dfef472dc49fe6caddf2e6e2be160f4b | [
"Apache-2.0"
] | 1 | 2020-06-17T22:39:28.000Z | 2020-06-17T22:39:28.000Z | class10/exercises/ex4_cf_ascompleted_procs.py | ksannedhi/pyplus_course | fc3499f2dfef472dc49fe6caddf2e6e2be160f4b | [
"Apache-2.0"
] | 77 | 2019-01-25T10:41:23.000Z | 2022-03-14T21:35:59.000Z | from concurrent.futures import ProcessPoolExecutor, as_completed
import time
from my_devices import network_devices
from my_functions import ssh_command2
def main():
    """
    Use concurrent futures processes to simultaneously gather "show version"
    output from devices. Print each result as its process completes and
    record the amount of time required to do this.
    """
    start_time = time.time()
    max_procs = 5
    # Context manager guarantees the process pool is shut down even if a
    # worker raises (the bare executor was previously never shut down).
    with ProcessPoolExecutor(max_procs) as pool:
        # Submit one "show version" task per device.
        futures = [
            pool.submit(ssh_command2, device, "show version")
            for device in network_devices
        ]
        print("\n\n")
        # Print results in completion order, not submission order.
        for future in as_completed(futures):
            print("-" * 40)
            print("Result: " + future.result())
            print("-" * 40)
    print("\n\n")
    end_time = time.time()
    print(f"Finished in {end_time - start_time:.2f}")
    print("\n\n")
if __name__ == "__main__":
main()
| 26.297297 | 97 | 0.663926 | from concurrent.futures import ProcessPoolExecutor, as_completed
import time
from my_devices import network_devices
from my_functions import ssh_command2
def main():
start_time = time.time()
max_procs = 5
pool = ProcessPoolExecutor(max_procs)
futures = []
for device in network_devices:
futures.append(pool.submit(ssh_command2, device, "show version"))
print("\n\n")
for future in as_completed(futures):
print("-" * 40)
print("Result: " + future.result())
print("-" * 40)
print("\n\n")
end_time = time.time()
print(f"Finished in {end_time - start_time:.2f}")
print("\n\n")
if __name__ == "__main__":
main()
| true | true |
1c362689a0ad0e196248420c051bea54c265ad01 | 40,370 | py | Python | dsl_parser/rel_graph.py | mistio/cloudify-dsl-parser | 212864f77591a91ea401c4cfcf99f260d8e41ab3 | [
"Apache-2.0"
] | null | null | null | dsl_parser/rel_graph.py | mistio/cloudify-dsl-parser | 212864f77591a91ea401c4cfcf99f260d8e41ab3 | [
"Apache-2.0"
] | null | null | null | dsl_parser/rel_graph.py | mistio/cloudify-dsl-parser | 212864f77591a91ea401c4cfcf99f260d8e41ab3 | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import copy
import collections
from random import choice
from string import ascii_lowercase, digits
import networkx as nx
from dsl_parser import constants
from dsl_parser import exceptions
# Dict keys used for plan nodes throughout this module.
NODES = 'nodes'
RELATIONSHIPS = 'relationships'
# Relationship type names taken from the DSL constants.
DEPENDS_ON_REL_TYPE = constants.DEPENDS_ON_REL_TYPE
CONNECTED_TO_REL_TYPE = constants.CONNECTED_TO_REL_TYPE
CONTAINED_IN_REL_TYPE = constants.CONTAINED_IN_REL_TYPE
# Synthetic relationship type linking a member to its scaling group.
GROUP_CONTAINED_IN_REL_TYPE = '__group_contained_in__'
# 'connection_type' relationship property and its allowed values.
CONNECTION_TYPE = 'connection_type'
ALL_TO_ALL = 'all_to_all'
ALL_TO_ONE = 'all_to_one'
def build_node_graph(nodes, scaling_groups):
    """Build the directed plan node graph from plan nodes and scaling groups.

    Graph nodes are plan node ids and scaling group names (group nodes are
    marked with ``'group': True``); every graph node carries its ``node``
    dict and its ``scale_properties``. Edges are the plan relationships;
    a contained_in relationship whose source is a member of a scaling
    group is rerouted through the group (the original target is recorded
    under the relationship's ``'replaced'`` key).

    :param nodes: list of plan node dicts.
    :param scaling_groups: dict of scaling group name -> group spec
        (``members``, ``properties``).
    :return: an ``nx.DiGraph``.
    """
    graph = nx.DiGraph()
    # Auxiliary graph of group-membership only, used to find the
    # top-level group of a nesting chain.
    groups_graph = nx.DiGraph()
    node_ids = set()
    contained_in_group = {}
    for node in nodes:
        node_id = node['id']
        node_ids.add(node_id)
        if 'capabilities' in node:
            # This code path is used by unit tests
            scale_properties = node['capabilities']['scalable']['properties']
        else:
            # This code path is used by actual code
            scale_properties = {
                'current_instances': node['number_of_instances'],
                'default_instances':
                    node['deploy_number_of_instances'],
                'min_instances': node['min_number_of_instances'],
                'max_instances': node['max_number_of_instances']
            }
        graph.add_node(node_id,
                       node=node,
                       scale_properties=scale_properties)
    for group_name, group in list(scaling_groups.items()):
        scale_properties = group['properties']
        groups_graph.add_node(group_name)
        graph.add_node(group_name,
                       node={'id': group_name, 'group': True},
                       scale_properties=scale_properties)
    # Link each member (node or nested group) to its containing group.
    for group_name, group in list(scaling_groups.items()):
        for member in group['members']:
            graph.add_edge(member, group_name,
                           relationship={
                               'type': GROUP_CONTAINED_IN_REL_TYPE,
                               'type_hierarchy': [GROUP_CONTAINED_IN_REL_TYPE],
                               'target_id': group_name
                           },
                           index=-1)
            groups_graph.add_edge(member, group_name)
            if member in node_ids:
                contained_in_group[member] = group_name
    for node in nodes:
        node_id = node['id']
        for index, relationship in enumerate(node.get(RELATIONSHIPS, [])):
            target_id = relationship['target_id']
            if (CONTAINED_IN_REL_TYPE in relationship['type_hierarchy'] and
                    node_id in contained_in_group):
                # Reroute the contained_in relationship through the
                # node's scaling group; remember the original target.
                group_name = contained_in_group[node_id]
                relationship['target_id'] = group_name
                relationship['replaced'] = target_id
                graph.add_edge(node_id, group_name,
                               relationship=relationship,
                               index=index)
                # The outermost group of the nesting chain is the one
                # actually contained in the original target.
                top_level_group_name = nx.topological_sort(
                    groups_graph, nbunch=[group_name])[-1]
                graph.add_edge(
                    top_level_group_name, target_id,
                    relationship={
                        'type': GROUP_CONTAINED_IN_REL_TYPE,
                        'type_hierarchy': [GROUP_CONTAINED_IN_REL_TYPE],
                        'target_id': target_id
                    },
                    index=-1)
            else:
                graph.add_edge(node_id, target_id,
                               relationship=relationship,
                               index=index)
    return graph
def build_previous_deployment_node_graph(plan_node_graph,
                                         previous_node_instances):
    """Rebuild the node-instance graph of an existing deployment.

    :param plan_node_graph: the plan node graph (see ``build_node_graph``),
        used to recognize contained_in relationships that were replaced by
        scaling groups.
    :param previous_node_instances: list of node instance dicts of the
        currently deployed deployment.
    :return: tuple of (full instance graph, containment-only graph);
        both graphs also contain the scaling group instances.
    """
    graph = nx.DiGraph()
    contained_graph = nx.DiGraph()
    # First pass: add every instance and every group instance as nodes.
    for node_instance in previous_node_instances:
        node_instance_id = node_instance['id']
        node_instance_host_id = node_instance.get('host_id')
        graph.add_node(node_instance_id,
                       node=node_instance)
        contained_graph.add_node(node_instance_id,
                                 node=node_instance)
        scaling_groups = node_instance.get('scaling_groups') or ()
        for scaling_group in scaling_groups:
            group_id = scaling_group['id']
            group_name = scaling_group['name']
            node = {'id': group_id, 'name': group_name, 'group': True}
            if node_instance_host_id:
                node['host_id'] = node_instance_host_id
            graph.add_node(group_id, node=node)
            contained_graph.add_node(group_id, node=node)
    # Second pass: add relationship edges and group containment chains.
    for node_instance in previous_node_instances:
        node_instance_id = node_instance['id']
        node_id = _node_id_from_node_instance(node_instance)
        scaling_groups = node_instance.get('scaling_groups')
        contained_in_target_id = None
        contained_in_target_name = None
        for index, rel in enumerate(node_instance.get('relationships', [])):
            target_id = rel['target_id']
            target_name = rel['target_name']
            # if the original relationship does not exist in the plan node
            # graph, it means it was a contained_in relationship that was
            # replaced by a scaling group
            replaced_contained_in = target_name not in plan_node_graph[node_id]
            if replaced_contained_in:
                contained_in_target_id = target_id
                contained_in_target_name = target_name
                # for the purpose of containment, only the first group
                # is relevant
                scaling_group = scaling_groups[0]
                rel['target_id'] = scaling_group['id']
                rel['target_name'] = scaling_group['name']
                rel['replaced'] = True
                graph.add_edge(node_instance_id, scaling_group['id'],
                               relationship=rel,
                               index=index)
                contained_graph.add_edge(node_instance_id, scaling_group['id'])
            else:
                graph.add_edge(node_instance_id, target_id,
                               relationship=rel,
                               index=index)
                if _relationship_type_hierarchy_includes_one_of(
                        plan_node_graph[node_id][target_name]['relationship'],
                        [CONTAINED_IN_REL_TYPE]):
                    contained_graph.add_edge(node_instance_id, target_id)
        if scaling_groups:
            # Build the containment chain through the group instances:
            # instance -> group_1 -> ... -> group_n [-> original target].
            scaling_groups = scaling_groups[:]
            if contained_in_target_id:
                scaling_groups.append({
                    'id': contained_in_target_id,
                    'name': contained_in_target_name
                })
            else:
                scaling_groups.insert(0, {
                    'id': node_instance_id,
                    'name': node_id
                })
            for i in range(len(scaling_groups) - 1):
                graph.add_edge(
                    scaling_groups[i]['id'],
                    scaling_groups[i+1]['id'],
                    relationship={
                        'type': GROUP_CONTAINED_IN_REL_TYPE,
                        'target_id': scaling_groups[i+1]['id'],
                        'target_name': scaling_groups[i+1]['name']
                    },
                    index=-1)
                contained_graph.add_edge(scaling_groups[i]['id'],
                                         scaling_groups[i+1]['id'])
    return graph, contained_graph
def build_deployment_node_graph(plan_node_graph,
                                previous_deployment_node_graph=None,
                                previous_deployment_contained_graph=None,
                                modified_nodes=None):
    """Expand a plan node graph into a deployment node-instance graph.

    First materializes the containment trees (creating node instances),
    then adds connected_to / depends_on relationship instances.

    :param plan_node_graph: output of ``build_node_graph``.
    :param previous_deployment_node_graph: existing instance graph, when
        this is a deployment modification (otherwise None).
    :param previous_deployment_contained_graph: containment-only graph of
        the existing deployment, for modifications.
    :param modified_nodes: dict of node id -> modification spec, for
        modifications.
    :return: tuple of (deployment node graph, the ``Context`` used to
        build it).
    """
    _verify_no_unsupported_relationships(plan_node_graph)
    deployment_node_graph = nx.DiGraph()
    ctx = Context(
        plan_node_graph=plan_node_graph,
        deployment_node_graph=deployment_node_graph,
        previous_deployment_node_graph=previous_deployment_node_graph,
        previous_deployment_contained_graph=previous_deployment_contained_graph,  # noqa
        modified_nodes=modified_nodes)
    _handle_contained_in(ctx)
    # Re-index the instance ids created by the containment pass before
    # wiring the connected_to/depends_on relationships.
    ctx.node_instance_ids.clear()
    ctx.node_ids_to_node_instance_ids.clear()
    for node_instance_id, data in deployment_node_graph.nodes_iter(
            data=True):
        ctx.node_instance_ids.add(node_instance_id)
        node_id = _node_id_from_node_instance(data['node'])
        ctx.node_ids_to_node_instance_ids[node_id].add(node_instance_id)
    _handle_connected_to_and_depends_on(ctx)
    ctx.restore_plan_node_graph()
    return deployment_node_graph, ctx
def extract_node_instances(node_instances_graph,
                           ctx,
                           copy_instances=False,
                           contained_graph=None):
    """Convert a node-instance graph back into a list of instance dicts.

    Group instances are skipped; each real instance gets its
    relationships (sorted by their original plan index), its
    ``scaling_groups`` list, and any ``node_instance_attributes`` attached
    to the graph node (e.g. ``{'modification': 'added'}``). Relationships
    that were rerouted through a scaling group are restored to point at
    the actual containing instance.

    :param node_instances_graph: graph of node instances to extract.
    :param ctx: the ``Context`` used to build the deployment graph.
    :param copy_instances: when True, deep-copy instances and
        relationships instead of mutating the graph's dicts.
    :param contained_graph: containment graph used to resolve group
        nesting; defaults to ``ctx.deployment_contained_graph``.
    :return: list of node instance dicts.
    """
    contained_graph = contained_graph or ctx.deployment_contained_graph
    added_missing_node_instance_ids = set()
    node_instances = []
    for node_instance_id, data in node_instances_graph.nodes_iter(data=True):
        node_instance = data['node']
        if node_instance.get('group'):
            continue
        node_instance_attributes = data.get('node_instance_attributes')
        if copy_instances:
            node_instance = copy.deepcopy(node_instance)
        if node_instance_attributes:
            node_instance.update(node_instance_attributes)
        indexed_relationship_instances = []
        for target_node_instance_id in node_instances_graph.neighbors_iter(
                node_instance_id):
            edge_data = node_instances_graph[node_instance_id][
                target_node_instance_id]
            relationship_instance = edge_data['relationship']
            relationship_index = edge_data['index']
            if copy_instances:
                relationship_instance = copy.deepcopy(relationship_instance)
            group_rel = (relationship_instance['type'] ==
                         GROUP_CONTAINED_IN_REL_TYPE)
            replaced = relationship_instance.pop('replaced', None)
            if replaced or group_rel:
                # Rebuild the instance's scaling_groups chain from the
                # containment graph.
                group_name = relationship_instance['target_name']
                group_id = relationship_instance['target_id']
                scaling_groups = [{
                    'name': group_name,
                    'id': group_id
                }]
                containing_groups, parent = ctx.containing_group_instances(
                    instance_id=group_id,
                    contained_graph=contained_graph
                )
                scaling_groups += containing_groups
                node_instance['scaling_groups'] = scaling_groups
                if replaced:
                    # Point the contained_in relationship back at the
                    # real containing instance instead of the group.
                    target_node_instance = parent
                    target_name = target_node_instance['name']
                    target_id = target_node_instance['id']
                    relationship_instance['target_name'] = target_name
                    relationship_instance['target_id'] = target_id
                    # In deployment modification, if an instance is contained
                    # in a node and that node is not new, it needs to be added
                    # as a "related" node. Added and removed nodes are marked
                    # as such, so all we need to do is add this node with
                    # no relationships
                    if (target_id not in node_instances_graph and
                            target_id not in added_missing_node_instance_ids):
                        target_node_instance = contained_graph.node[target_id][
                            'node']
                        if copy_instances:
                            target_node_instance = copy.deepcopy(
                                target_node_instance)
                        target_node_instance[RELATIONSHIPS] = []
                        node_instances.append(target_node_instance)
                        added_missing_node_instance_ids.add(target_id)
            if not group_rel:
                indexed_relationship_instances.append(
                    (relationship_index, relationship_instance))
        # Restore the plan's relationship ordering.
        indexed_relationship_instances.sort(key=lambda index__: index__[0])
        relationship_instances = [r for _, r in indexed_relationship_instances]
        node_instance[RELATIONSHIPS] = relationship_instances
        node_instances.append(node_instance)
    return node_instances
def extract_added_node_instances(previous_deployment_node_graph,
                                 new_deployment_node_graph,
                                 ctx):
    """Return the instances that exist only in the new graph.

    Each returned instance is marked with ``'modification': 'added'``.
    """
    diff_graph = _graph_diff(
        new_deployment_node_graph,
        previous_deployment_node_graph,
        node_instance_attributes={'modification': 'added'})
    return extract_node_instances(
        diff_graph,
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.deployment_contained_graph)
def extract_removed_node_instances(previous_deployment_node_graph,
                                   new_deployment_node_graph,
                                   ctx):
    """Return the instances that exist only in the previous graph.

    Each returned instance is marked with ``'modification': 'removed'``.
    """
    diff_graph = _graph_diff(
        previous_deployment_node_graph,
        new_deployment_node_graph,
        node_instance_attributes={'modification': 'removed'})
    return extract_node_instances(
        diff_graph,
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.previous_deployment_contained_graph)
def extract_added_relationships(previous_deployment_node_graph,
                                new_deployment_node_graph,
                                ctx):
    """Return instances that gained relationships in the new graph.

    Each returned instance is marked with ``'modification': 'extended'``.
    """
    diff_graph = _graph_diff_relationships(
        new_deployment_node_graph,
        previous_deployment_node_graph,
        node_instance_attributes={'modification': 'extended'})
    return extract_node_instances(
        diff_graph,
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.deployment_contained_graph)
def extract_removed_relationships(previous_deployment_node_graph,
                                  new_deployment_node_graph,
                                  ctx):
    """Return instances that lost relationships in the new graph.

    Each returned instance is marked with ``'modification': 'reduced'``.
    """
    diff_graph = _graph_diff_relationships(
        previous_deployment_node_graph,
        new_deployment_node_graph,
        node_instance_attributes={'modification': 'reduced'})
    return extract_node_instances(
        diff_graph,
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.previous_deployment_contained_graph)
def _graph_diff(G, H, node_instance_attributes):
    """Return a graph of the nodes of ``G`` that are missing from ``H``.

    Each missing node is annotated with ``node_instance_attributes`` and
    is returned together with its direct successors and predecessors and
    the edges connecting them, so the result is usable as a standalone
    node-instances graph.
    """
    result = nx.DiGraph()
    for n1, data in G.nodes_iter(data=True):
        if n1 in H:
            continue
        result.add_node(n1, data,
                        node_instance_attributes=node_instance_attributes)
        for n2 in G.neighbors_iter(n1):
            result.add_node(n2, G.node[n2])
            result.add_edge(n1, n2, G[n1][n2])
        for n2 in G.predecessors_iter(n1):
            result.add_node(n2, G.node[n2])
            result.add_edge(n2, n1, G[n2][n1])
    return result
def _graph_diff_relationships(G, H, node_instance_attributes):
    """Return a graph of the edges of ``G`` that are missing from ``H``.

    G represents the base and H represents the changed graph. An edge
    (source, dest) whose source exists in H but whose edge does not means
    the source instance's relationships were modified; the source is
    deep-copied and annotated with ``node_instance_attributes``.

    :param G: base node-instances graph.
    :param H: changed node-instances graph.
    :param node_instance_attributes: attributes dict attached to each
        modified source node (e.g. ``{'modification': 'extended'}``).
    :return: ``nx.DiGraph`` of the modified sources, their targets and
        the differing edges.
    """
    result = nx.DiGraph()
    for source, dest, data in G.edges_iter(data=True):
        if source in H and dest not in H[source]:
            new_node = copy.deepcopy(G.node[source])
            result.add_node(source, new_node,
                            node_instance_attributes=node_instance_attributes)
            result.add_node(dest, G.node[dest])
            result.add_edge(source, dest, G[source][dest])
    return result
def _handle_contained_in(ctx):
    """Materialize node instances for every containment tree of the plan.

    Populates ``ctx.deployment_node_graph`` and then snapshots it into
    ``ctx.deployment_contained_graph`` (at this point the graph contains
    only containment edges).
    """
    # for each 'contained' tree, recursively build new trees based on
    # scaling groups with generated ids
    for contained_tree in nx.weakly_connected_component_subgraphs(
            ctx.plan_contained_graph.reverse(copy=True)):
        # extract tree root node id
        node_id = nx.topological_sort(contained_tree)[0]
        _build_multi_instance_node_tree_rec(
            node_id=node_id,
            contained_tree=contained_tree,
            ctx=ctx)
    ctx.deployment_contained_graph = ctx.deployment_node_graph.copy()
def _build_multi_instance_node_tree_rec(node_id,
                                        contained_tree,
                                        ctx,
                                        parent_relationship=None,
                                        parent_relationship_index=None,
                                        parent_node_instance_id=None,
                                        current_host_instance_id=None):
    """Recursively create node instances for one containment subtree.

    For every instance of ``node_id`` (existing or newly created), adds
    it to ``ctx.deployment_node_graph``, connects it to its parent
    instance, and recurses into each contained child subtree under that
    particular instance.

    :param node_id: root plan node of the current subtree.
    :param contained_tree: subtree of the reversed containment graph.
    :param ctx: the build ``Context``.
    :param parent_relationship: plan relationship to the containing node,
        or None at the tree root.
    :param parent_relationship_index: plan index of that relationship.
    :param parent_node_instance_id: instance id of the containing
        instance, or None at the tree root.
    :param current_host_instance_id: host instance id propagated down
        the containment chain.
    """
    node = contained_tree.node[node_id]['node']
    containers = _build_and_update_node_instances(
        ctx=ctx,
        node=node,
        parent_node_instance_id=parent_node_instance_id,
        parent_relationship=parent_relationship,
        current_host_instance_id=current_host_instance_id)
    for container in containers:
        node_instance = container.node_instance
        node_instance_id = node_instance['id']
        relationship_instance = container.relationship_instance
        new_current_host_instance_id = container.current_host_instance_id
        ctx.deployment_node_graph.add_node(node_instance_id,
                                           node=node_instance)
        if parent_node_instance_id is not None:
            ctx.deployment_node_graph.add_edge(
                node_instance_id, parent_node_instance_id,
                relationship=relationship_instance,
                index=parent_relationship_index)
        for child_node_id in contained_tree.neighbors_iter(node_id):
            # Recurse into the child's subtree once per parent instance.
            descendants = nx.descendants(contained_tree, child_node_id)
            descendants.add(child_node_id)
            child_contained_tree = contained_tree.subgraph(descendants)
            _build_multi_instance_node_tree_rec(
                node_id=child_node_id,
                contained_tree=child_contained_tree,
                ctx=ctx,
                parent_relationship=ctx.plan_node_graph[
                    child_node_id][node_id]['relationship'],
                parent_relationship_index=ctx.plan_node_graph[
                    child_node_id][node_id]['index'],
                parent_node_instance_id=node_instance_id,
                current_host_instance_id=new_current_host_instance_id)
def _build_and_update_node_instances(ctx,
                                     node,
                                     parent_node_instance_id,
                                     parent_relationship,
                                     current_host_instance_id):
    """Compute the ``Container`` list (instance, parent relationship,
    host instance id) for one plan node under one parent instance.

    In a fresh deployment this creates ``current_instances`` new
    instances. In a modification it keeps the surviving previous
    instances (removing excess ones according to the modification spec
    and hints) and creates only the missing ones.

    :return: list of ``Container`` objects -- previous ones first, then
        newly created ones.
    """
    node_id = node['id']
    current_instances_num = ctx.plan_node_graph.node[node_id][
        'scale_properties']['current_instances']
    new_instances_num = 0
    previous_containers = []
    if ctx.is_modification:
        all_previous_node_instance_ids = ctx.node_ids_to_node_instance_ids[
            node_id]
        # Only consider previous instances contained under this
        # particular parent instance (or all of them at the tree root).
        previous_node_instance_ids = [
            instance_id for instance_id in all_previous_node_instance_ids
            if not parent_node_instance_id or
            (instance_id in ctx.previous_deployment_node_graph and
             ctx.previous_deployment_node_graph[instance_id].get(
                 parent_node_instance_id))
        ]
        previous_instances_num = len(previous_node_instance_ids)
        if node_id in ctx.modified_nodes:
            modified_node = ctx.modified_nodes[node_id]
            total_instances_num = modified_node['instances']
            if total_instances_num > previous_instances_num:
                new_instances_num = (total_instances_num -
                                     previous_instances_num)
            else:
                # removed nodes are removed from the
                # 'previous_node_instance_ids' list which means they will
                # not be included in the resulting graph
                _handle_removed_instances(previous_node_instance_ids,
                                          previous_instances_num,
                                          total_instances_num,
                                          modified_node)
        else:
            new_instances_num = (current_instances_num -
                                 previous_instances_num)
        previous_node_instances = [
            ctx.previous_deployment_node_graph.node[node_instance_id]['node']
            for node_instance_id in previous_node_instance_ids]
        previous_containers = [Container(node_instance,
                                         _extract_contained(node,
                                                            node_instance),
                                         node_instance.get('host_id'))
                               for node_instance in previous_node_instances]
    else:
        new_instances_num = current_instances_num
    new_containers = []
    for _ in range(int(new_instances_num)):
        node_instance_id = _node_instance_id(node_id, ctx)
        node_instance = _node_instance_copy(
            node=node,
            node_instance_id=node_instance_id)
        new_current_host_instance_id = _handle_host_instance_id(
            current_host_instance_id=current_host_instance_id,
            node_id=node_id,
            node_instance_id=node_instance_id,
            node_instance=node_instance)
        if parent_node_instance_id is not None:
            relationship_instance = _relationship_instance_copy(
                relationship=parent_relationship,
                target_node_instance_id=parent_node_instance_id)
        else:
            relationship_instance = None
        new_containers.append(Container(node_instance,
                                        relationship_instance,
                                        new_current_host_instance_id))
    return previous_containers + new_containers
def _handle_removed_instances(
previous_node_instance_ids,
previous_instances_num,
total_instances_num,
modified_node):
removed_instances_num = previous_instances_num - total_instances_num
removed_ids_include_hint = modified_node.get(
'removed_ids_include_hint', [])
removed_ids_exclude_hint = modified_node.get(
'removed_ids_exclude_hint', [])
for removed_instance_id in removed_ids_include_hint:
if removed_instances_num <= 0:
break
if removed_instance_id in previous_node_instance_ids:
previous_node_instance_ids.remove(removed_instance_id)
removed_instances_num -= 1
for removed_instance_id in copy.copy(
previous_node_instance_ids):
if removed_instances_num <= 0:
break
if removed_instance_id in removed_ids_exclude_hint:
continue
previous_node_instance_ids.remove(removed_instance_id)
removed_instances_num -= 1
remaining_removed_instance_ids = previous_node_instance_ids[
:removed_instances_num]
for removed_instance_id in remaining_removed_instance_ids:
previous_node_instance_ids.remove(removed_instance_id)
def _extract_contained(node, node_instance):
    """Return the node instance's contained-in relationship instance.

    Looks up the node's contained-in relationship (if any) and returns the
    node-instance relationship of the same type. Returns None when the node
    has no contained-in relationship; raises RuntimeError when the node has
    one but the instance does not.
    """
    contained_node_relationship = next(
        (rel for rel in node.get('relationships', [])
         if CONTAINED_IN_REL_TYPE in rel['type_hierarchy']),
        None)
    if contained_node_relationship is None:
        return None
    expected_type = contained_node_relationship['type']
    for instance_relationship in node_instance['relationships']:
        if instance_relationship['type'] == expected_type:
            return instance_relationship
    raise RuntimeError("Failed extracting contained node instance "
                       "relationships for node instance '{0}'"
                       .format(node_instance['id']))
def _handle_host_instance_id(current_host_instance_id,
node_id,
node_instance_id,
node_instance):
# If this condition applies, we assume current root is a host node
if current_host_instance_id is None and \
node_instance.get('host_id') == node_id:
current_host_instance_id = node_instance_id
if current_host_instance_id is not None:
node_instance['host_id'] = current_host_instance_id
return current_host_instance_id
def _handle_connected_to_and_depends_on(ctx):
    """Instantiate connected-to/depends-on relationships between instances.

    For every such edge in the plan graph, resolves the connection type
    ('all_to_all' / 'all_to_one') and adds the corresponding edges between
    the already-created node instances in ``ctx.deployment_node_graph``.
    """
    # previous all_to_one targets (only populated during modification)
    relationship_target_ids = _build_previous_target_ids_for_all_to_one(ctx)
    connected_graph = ctx.plan_connected_graph
    for source_node_id, target_node_id, edge_data in connected_graph.edges(
            data=True):
        relationship = edge_data['relationship']
        index = edge_data['index']
        connection_type = _verify_and_get_connection_type(relationship)
        source_node_instance_ids = ctx.node_ids_to_node_instance_ids[
            source_node_id]
        target_node_instance_ids = ctx.node_ids_to_node_instance_ids[
            target_node_id]
        _add_connected_to_and_depends_on_relationships(
            ctx=ctx,
            relationship=relationship,
            index=index,
            source_node_id=source_node_id,
            target_node_id=target_node_id,
            source_node_instance_ids=source_node_instance_ids,
            target_node_instance_ids=target_node_instance_ids,
            connection_type=connection_type,
            relationship_target_ids=relationship_target_ids)
def _build_previous_target_ids_for_all_to_one(ctx):
relationship_target_ids = {}
if ctx.is_modification:
for s, t, e_data in ctx.previous_deployment_node_graph.edges_iter(
data=True):
s_node = ctx.previous_deployment_node_graph.node[s]['node']
t_node = ctx.previous_deployment_node_graph.node[t]['node']
rel = e_data['relationship']
key = (_node_id_from_node_instance(s_node),
_node_id_from_node_instance(t_node),
rel['type'])
if key not in relationship_target_ids:
relationship_target_ids[key] = set()
target_ids = relationship_target_ids[key]
target_ids.add(rel['target_id'])
return relationship_target_ids
def _get_all_to_one_relationship_target_id(
        ctx,
        relationship_target_ids,
        source_node_id,
        target_node_id,
        relationship,
        target_node_instance_ids):
    """Pick the single target instance id for an all_to_one relationship.

    During modification, reuse the one target recorded in the previous
    deployment (raising IllegalAllToOneState if it is ambiguous); for a
    fresh build, deterministically pick the smallest instance id.
    """
    key = (source_node_id, target_node_id, relationship['type'])
    if ctx.is_modification and key in relationship_target_ids:
        target_ids = relationship_target_ids[key]
        if len(target_ids) != 1:
            raise exceptions.IllegalAllToOneState(
                "Expected exactly one target id for relationship "
                "{0}->{1} of type '{2}')".format(source_node_id,
                                                 target_node_id,
                                                 relationship['type']))
        # copy() so the recorded set itself is not mutated by pop()
        return target_ids.copy().pop()
    else:
        return min(target_node_instance_ids)
def _add_connected_to_and_depends_on_relationships(
        ctx,
        relationship,
        index,
        source_node_id,
        target_node_id,
        source_node_instance_ids,
        target_node_instance_ids,
        connection_type,
        relationship_target_ids):
    """Add instance-level edges for one plan relationship.

    all_to_one narrows the targets to a single instance (and is rejected
    inside scaling groups); when source and target share a scaling group,
    sources are only connected to targets within the same group instance.
    """
    if not source_node_instance_ids or not target_node_instance_ids:
        return
    minimal_containing_group = ctx.minimal_containing_group(
        node_a=source_node_id,
        node_b=target_node_id)
    if connection_type == ALL_TO_ONE:
        if minimal_containing_group:
            raise exceptions.UnsupportedAllToOneInGroup(
                "'{0}' connection type is not supported within groups, "
                "but the source node '{1}' and target node '{2}' are both in "
                "group '{3}'"
                .format(ALL_TO_ONE, source_node_id, target_node_id,
                        minimal_containing_group))
        else:
            target_node_instance_id = _get_all_to_one_relationship_target_id(
                ctx=ctx,
                relationship_target_ids=relationship_target_ids,
                source_node_id=source_node_id,
                target_node_id=target_node_id,
                relationship=relationship,
                target_node_instance_ids=target_node_instance_ids)
            target_node_instance_ids = [target_node_instance_id]
    if minimal_containing_group:
        # pair up sources and targets living in the same group instance
        partitioned_node_instance_ids = _partition_source_and_target_instances(
            ctx=ctx,
            group=minimal_containing_group,
            source_node_instance_ids=source_node_instance_ids,
            target_node_instance_ids=target_node_instance_ids)
    else:
        partitioned_node_instance_ids = [
            (source_node_instance_ids, target_node_instance_ids)]
    for source_node_instance_ids, target_node_instance_ids in \
            partitioned_node_instance_ids:
        for source_node_instance_id in source_node_instance_ids:
            for target_node_instance_id in target_node_instance_ids:
                relationship_instance = _relationship_instance_copy(
                    relationship=relationship,
                    target_node_instance_id=target_node_instance_id)
                ctx.deployment_node_graph.add_edge(
                    source_node_instance_id, target_node_instance_id,
                    relationship=relationship_instance,
                    index=index)
def _partition_source_and_target_instances(
        ctx,
        group,
        source_node_instance_ids,
        target_node_instance_ids):
    """Partition sources and targets by their containing *group* instance.

    Returns a list of (source ids, target ids) pairs, one per group
    instance, so relationships are only created within the same group
    instance.
    """
    partitioned_node_instance_ids = []
    source_scaling_groups_map = _build_scaling_groups_map(
        ctx=ctx,
        node_instance_ids=source_node_instance_ids,
        group=group)
    target_scaling_groups_map = _build_scaling_groups_map(
        ctx=ctx,
        node_instance_ids=target_node_instance_ids,
        group=group)
    # invariant: both sides must occupy exactly the same group instances
    assert (set(source_scaling_groups_map.keys()) ==
            set(target_scaling_groups_map.keys()))
    for key, value in list(source_scaling_groups_map.items()):
        partitioned_node_instance_ids.append((value,
                                              target_scaling_groups_map[key]))
    return partitioned_node_instance_ids
def _build_scaling_groups_map(ctx, node_instance_ids, group):
    """Map each group instance id of *group* to the node instance ids it
    contains (restricted to *node_instance_ids*).

    Raises RuntimeError if any instance has no containing instance of the
    group, which would violate the partitioning invariant.
    """
    node_instances = [ctx.deployment_node_graph.node[n]['node']
                      for n in node_instance_ids]
    scaling_groups_map = collections.defaultdict(list)
    for node_instance in node_instances:
        node_instance_id = node_instance['id']
        group_id = ctx.containing_group_id(
            node_instance_id=node_instance_id,
            group_name=group)
        if not group_id:
            raise RuntimeError('Unexpected state')
        scaling_groups_map[group_id].append(node_instance_id)
    return scaling_groups_map
def _node_instance_id(node_id, ctx):
    """Return a fresh unique instance id of the form '<node_id>_<suffix>'.

    Draws random suffixes until an unused id is found, records it in
    ``ctx.node_instance_ids`` and returns it.
    """
    while True:
        candidate = '{0}_{1}'.format(node_id, _generate_id())
        if candidate not in ctx.node_instance_ids:
            break
    ctx.node_instance_ids.add(candidate)
    return candidate
def _generate_id(id_len=6):
return ''.join(choice(digits + ascii_lowercase) for _ in range(id_len))
def _node_instance_copy(node, node_instance_id):
node_id = _node_id_from_node(node)
result = {
'name': node_id,
'node_id': node_id,
'id': node_instance_id
}
if 'host_id' in node:
result['host_id'] = node['host_id']
if node.get('group'):
result['group'] = True
return result
def _relationship_instance_copy(relationship,
target_node_instance_id):
result = {
'type': relationship['type'],
'target_name': relationship['target_id'],
'target_id': target_node_instance_id
}
replaced = relationship.get('replaced')
if replaced:
result['replaced'] = replaced
return result
# currently we have decided not to support such relationships
# until we better understand what semantics are required for such
# relationships
def _verify_no_unsupported_relationships(graph):
    """Raise UnsupportedRelationship for any edge whose relationship type
    hierarchy contains none of the supported base types."""
    for s, t, edge in graph.edges_iter(data=True):
        if not _relationship_type_hierarchy_includes_one_of(
                edge['relationship'], [
                    DEPENDS_ON_REL_TYPE,
                    CONTAINED_IN_REL_TYPE,
                    CONNECTED_TO_REL_TYPE,
                    GROUP_CONTAINED_IN_REL_TYPE]):
            raise exceptions.UnsupportedRelationship()
def _verify_and_get_connection_type(relationship):
    """Return the relationship's 'connection_type' property.

    Raises IllegalConnectedToConnectionType unless it is 'all_to_all'
    or 'all_to_one'.
    """
    properties = relationship.get('properties', {})
    connection_type = properties.get(CONNECTION_TYPE)
    if connection_type in (ALL_TO_ALL, ALL_TO_ONE):
        return connection_type
    raise exceptions.IllegalConnectedToConnectionType()
def _relationship_type_hierarchy_includes_one_of(relationship, expected_types):
relationship_type_hierarchy = relationship['type_hierarchy']
return any([relationship_type in expected_types
for relationship_type in relationship_type_hierarchy])
def _node_id_from_node(node):
return node.get('name') or node.get('id')
def _node_id_from_node_instance(instance):
return instance.get('name') or instance.get('node_id')
class Context(object):
    """Bookkeeping for one multi-instance deployment-graph build.

    Holds the plan node graph plus two derived views (the contained-in
    tree and the connected-to/depends-on graph), the deployment node graph
    being built, and - when modifying an existing deployment - the
    previous deployment graphs and the requested node modifications.
    """
    def __init__(self,
                 plan_node_graph,
                 deployment_node_graph,
                 previous_deployment_node_graph=None,
                 previous_deployment_contained_graph=None,
                 modified_nodes=None):
        self.plan_node_graph = plan_node_graph
        self.plan_contained_graph = self._build_contained_in_graph(
            plan_node_graph)
        self.plan_connected_graph = self._build_connected_to_and_depends_on_graph( # noqa
            plan_node_graph)
        self.deployment_node_graph = deployment_node_graph
        self.deployment_contained_graph = None
        self.previous_deployment_node_graph = previous_deployment_node_graph
        self.previous_deployment_contained_graph = (
            previous_deployment_contained_graph)
        self.modified_nodes = modified_nodes
        # node id -> set of node instance ids (seeded from the previous
        # deployment during modification; rebuilt by the caller after the
        # contained-in pass)
        self.node_ids_to_node_instance_ids = collections.defaultdict(set)
        self.node_instance_ids = set()
        if self.is_modification:
            for node_instance_id, data in \
                    self.previous_deployment_node_graph.nodes_iter(data=True):
                self.node_instance_ids.add(node_instance_id)
                node_instance = data['node']
                self.node_ids_to_node_instance_ids[
                    _node_id_from_node_instance(node_instance)].add(
                    node_instance_id)
    @property
    def is_modification(self):
        """True when building against an existing deployment."""
        return self.previous_deployment_node_graph is not None
    def minimal_containing_group(self, node_a, node_b):
        """Return the innermost scaling group containing both nodes, or
        None when they share no group."""
        a_groups = self._containing_groups(node_a)
        b_groups = self._containing_groups(node_b)
        shared_groups = set(a_groups) & set(b_groups)
        if not shared_groups:
            return None
        return nx.topological_sort(self.plan_contained_graph,
                                   nbunch=shared_groups)[0]
    def _containing_groups(self, node_id):
        """Return ids of all scaling groups transitively containing
        *node_id*, innermost first."""
        graph = self.plan_contained_graph
        result = []
        while True:
            succ = graph.succ[node_id]
            if succ:
                # a node can be contained in at most one parent
                assert len(succ) == 1
                node_id = list(succ.keys())[0]
                if not graph.node[node_id]['node'].get('group'):
                    continue
                result.append(node_id)
            else:
                break
        return result
    def containing_group_id(self, node_instance_id, group_name):
        """Return the group *instance* id of the ancestor group named
        *group_name* for this node instance, or None."""
        graph = self.deployment_contained_graph
        while True:
            succ = graph.succ[node_instance_id]
            if succ:
                assert len(succ) == 1
                node_instance_id = list(succ.keys())[0]
                node = graph.node[node_instance_id]['node']
                if not node.get('group'):
                    continue
                if _node_id_from_node_instance(node) == group_name:
                    return node['id']
            else:
                return None
    @staticmethod
    def containing_group_instances(instance_id,
                                   contained_graph):
        """Walk up the contained-in chain from *instance_id*.

        Returns (group instance dicts, first non-group ancestor); the
        second element is None when the whole chain consists of groups.
        """
        result = []
        while True:
            succ = contained_graph.succ[instance_id]
            if succ:
                assert len(succ) == 1
                node_instance_id = list(succ.keys())[0]
                node = contained_graph.node[node_instance_id]['node']
                instance_id = node['id']
                result.append({
                    'name': _node_id_from_node_instance(node),
                    'id': instance_id
                })
                if not node.get('group'):
                    return result[:-1], result[-1]
            else:
                return result, None
    def restore_plan_node_graph(self):
        """Undo the group rewiring: restore each plan relationship's
        original 'target_id' from its 'replaced' marker."""
        for _, data in self.plan_node_graph.nodes_iter(data=True):
            node = data['node']
            for relationship in node.get('relationships', []):
                replaced = relationship.pop('replaced', None)
                if replaced:
                    relationship['target_id'] = replaced
    def _build_connected_to_and_depends_on_graph(self, graph):
        return self._build_graph_by_relationship_types(
            graph,
            build_from_types=[CONNECTED_TO_REL_TYPE, DEPENDS_ON_REL_TYPE],
            # because contained_in derived from depends_on
            exclude_types=[CONTAINED_IN_REL_TYPE])
    def _build_contained_in_graph(self, graph):
        # build graph based only on contained_in relationships
        result = self._build_graph_by_relationship_types(
            graph,
            build_from_types=[CONTAINED_IN_REL_TYPE,
                              GROUP_CONTAINED_IN_REL_TYPE],
            exclude_types=[])
        # don't forget to include nodes in this graph that no one is contained
        # in them (these will be considered 1 node trees)
        result.add_nodes_from(graph.nodes_iter(data=True))
        return result
    @staticmethod
    def _build_graph_by_relationship_types(graph,
                                           build_from_types,
                                           exclude_types):
        """Return a subgraph of *graph* keeping only edges whose
        relationship type hierarchy matches *build_from_types* and none of
        *exclude_types*."""
        relationship_base_graph = nx.DiGraph()
        for source, target, edge_data in graph.edges_iter(data=True):
            include_edge = (
                _relationship_type_hierarchy_includes_one_of(
                    edge_data['relationship'], build_from_types) and not
                _relationship_type_hierarchy_includes_one_of(
                    edge_data['relationship'], exclude_types))
            if include_edge:
                relationship_base_graph.add_node(source, graph.node[source])
                relationship_base_graph.add_node(target, graph.node[target])
                relationship_base_graph.add_edge(source, target, edge_data)
        return relationship_base_graph
class Container(object):
    """Bundle of a node instance, the relationship instance that contains
    it (None for root instances) and the host instance id it resolved
    to."""
    def __init__(self,
                 node_instance,
                 relationship_instance,
                 current_host_instance_id):
        self.node_instance = node_instance
        self.relationship_instance = relationship_instance
        self.current_host_instance_id = current_host_instance_id
import copy
import collections
from random import choice
from string import ascii_lowercase, digits
import networkx as nx
from dsl_parser import constants
from dsl_parser import exceptions
NODES = 'nodes'
RELATIONSHIPS = 'relationships'
DEPENDS_ON_REL_TYPE = constants.DEPENDS_ON_REL_TYPE
CONNECTED_TO_REL_TYPE = constants.CONNECTED_TO_REL_TYPE
CONTAINED_IN_REL_TYPE = constants.CONTAINED_IN_REL_TYPE
GROUP_CONTAINED_IN_REL_TYPE = '__group_contained_in__'
CONNECTION_TYPE = 'connection_type'
ALL_TO_ALL = 'all_to_all'
ALL_TO_ONE = 'all_to_one'
def build_node_graph(nodes, scaling_groups):
    """Build the plan node graph from nodes and scaling groups.

    Nodes and groups become graph nodes carrying 'node' and
    'scale_properties' attributes; relationships become edges carrying
    the relationship dict and its original index. A contained-in
    relationship of a node that is a group member is rewired to point at
    the group (recording the original target under 'replaced'), and the
    top-level group is wired to the original target instead.
    """
    graph = nx.DiGraph()
    groups_graph = nx.DiGraph()
    node_ids = set()
    contained_in_group = {}
    for node in nodes:
        node_id = node['id']
        node_ids.add(node_id)
        # newer plans expose scaling under capabilities; fall back to the
        # legacy flat fields otherwise
        if 'capabilities' in node:
            scale_properties = node['capabilities']['scalable']['properties']
        else:
            scale_properties = {
                'current_instances': node['number_of_instances'],
                'default_instances':
                    node['deploy_number_of_instances'],
                'min_instances': node['min_number_of_instances'],
                'max_instances': node['max_number_of_instances']
            }
        graph.add_node(node_id,
                       node=node,
                       scale_properties=scale_properties)
    for group_name, group in list(scaling_groups.items()):
        scale_properties = group['properties']
        groups_graph.add_node(group_name)
        graph.add_node(group_name,
                       node={'id': group_name, 'group': True},
                       scale_properties=scale_properties)
    for group_name, group in list(scaling_groups.items()):
        for member in group['members']:
            graph.add_edge(member, group_name,
                           relationship={
                               'type': GROUP_CONTAINED_IN_REL_TYPE,
                               'type_hierarchy': [GROUP_CONTAINED_IN_REL_TYPE],
                               'target_id': group_name
                           },
                           index=-1)
            groups_graph.add_edge(member, group_name)
            if member in node_ids:
                contained_in_group[member] = group_name
    for node in nodes:
        node_id = node['id']
        for index, relationship in enumerate(node.get(RELATIONSHIPS, [])):
            target_id = relationship['target_id']
            if (CONTAINED_IN_REL_TYPE in relationship['type_hierarchy'] and
                    node_id in contained_in_group):
                # reroute contained_in through the containing group and
                # hang the outermost group off the original target
                group_name = contained_in_group[node_id]
                relationship['target_id'] = group_name
                relationship['replaced'] = target_id
                graph.add_edge(node_id, group_name,
                               relationship=relationship,
                               index=index)
                top_level_group_name = nx.topological_sort(
                    groups_graph, nbunch=[group_name])[-1]
                graph.add_edge(
                    top_level_group_name, target_id,
                    relationship={
                        'type': GROUP_CONTAINED_IN_REL_TYPE,
                        'type_hierarchy': [GROUP_CONTAINED_IN_REL_TYPE],
                        'target_id': target_id
                    },
                    index=-1)
            else:
                graph.add_edge(node_id, target_id,
                               relationship=relationship,
                               index=index)
    return graph
def build_previous_deployment_node_graph(plan_node_graph,
                                         previous_node_instances):
    """Rebuild (node graph, contained-in graph) for an existing deployment.

    Node instances and their scaling-group instances become graph nodes.
    A relationship whose target is no longer a neighbor in the plan graph
    is assumed to have been replaced by a group and is rewired to the
    instance's innermost group (marked 'replaced'); group-to-group
    containment edges are added along each instance's scaling-group chain.
    """
    graph = nx.DiGraph()
    contained_graph = nx.DiGraph()
    for node_instance in previous_node_instances:
        node_instance_id = node_instance['id']
        node_instance_host_id = node_instance.get('host_id')
        graph.add_node(node_instance_id,
                       node=node_instance)
        contained_graph.add_node(node_instance_id,
                                 node=node_instance)
        scaling_groups = node_instance.get('scaling_groups') or ()
        for scaling_group in scaling_groups:
            group_id = scaling_group['id']
            group_name = scaling_group['name']
            node = {'id': group_id, 'name': group_name, 'group': True}
            if node_instance_host_id:
                node['host_id'] = node_instance_host_id
            graph.add_node(group_id, node=node)
            contained_graph.add_node(group_id, node=node)
    for node_instance in previous_node_instances:
        node_instance_id = node_instance['id']
        node_id = _node_id_from_node_instance(node_instance)
        scaling_groups = node_instance.get('scaling_groups')
        contained_in_target_id = None
        contained_in_target_name = None
        for index, rel in enumerate(node_instance.get('relationships', [])):
            target_id = rel['target_id']
            target_name = rel['target_name']
            # target no longer adjacent in the plan => contained_in was
            # rerouted through a scaling group
            replaced_contained_in = target_name not in plan_node_graph[node_id]
            if replaced_contained_in:
                contained_in_target_id = target_id
                contained_in_target_name = target_name
                scaling_group = scaling_groups[0]
                rel['target_id'] = scaling_group['id']
                rel['target_name'] = scaling_group['name']
                rel['replaced'] = True
                graph.add_edge(node_instance_id, scaling_group['id'],
                               relationship=rel,
                               index=index)
                contained_graph.add_edge(node_instance_id, scaling_group['id'])
            else:
                graph.add_edge(node_instance_id, target_id,
                               relationship=rel,
                               index=index)
                if _relationship_type_hierarchy_includes_one_of(
                        plan_node_graph[node_id][target_name]['relationship'],
                        [CONTAINED_IN_REL_TYPE]):
                    contained_graph.add_edge(node_instance_id, target_id)
        if scaling_groups:
            scaling_groups = scaling_groups[:]
            if contained_in_target_id:
                # close the chain with the original (non-group) parent
                scaling_groups.append({
                    'id': contained_in_target_id,
                    'name': contained_in_target_name
                })
            else:
                # the instance itself anchors the chain
                scaling_groups.insert(0, {
                    'id': node_instance_id,
                    'name': node_id
                })
            for i in range(len(scaling_groups) - 1):
                graph.add_edge(
                    scaling_groups[i]['id'],
                    scaling_groups[i+1]['id'],
                    relationship={
                        'type': GROUP_CONTAINED_IN_REL_TYPE,
                        'target_id': scaling_groups[i+1]['id'],
                        'target_name': scaling_groups[i+1]['name']
                    },
                    index=-1)
                contained_graph.add_edge(scaling_groups[i]['id'],
                                         scaling_groups[i+1]['id'])
    return graph, contained_graph
def build_deployment_node_graph(plan_node_graph,
                                previous_deployment_node_graph=None,
                                previous_deployment_contained_graph=None,
                                modified_nodes=None):
    """Expand the plan graph into a multi-instance deployment node graph.

    Runs the contained-in pass (creating instances per scale properties),
    rebuilds the id indexes, then instantiates connected-to/depends-on
    relationships. Returns (deployment node graph, build context).
    """
    _verify_no_unsupported_relationships(plan_node_graph)
    deployment_node_graph = nx.DiGraph()
    ctx = Context(
        plan_node_graph=plan_node_graph,
        deployment_node_graph=deployment_node_graph,
        previous_deployment_node_graph=previous_deployment_node_graph,
        previous_deployment_contained_graph=previous_deployment_contained_graph,  # noqa
        modified_nodes=modified_nodes)
    _handle_contained_in(ctx)
    # re-index against the instances that actually exist after the
    # contained-in pass
    ctx.node_instance_ids.clear()
    ctx.node_ids_to_node_instance_ids.clear()
    for node_instance_id, data in deployment_node_graph.nodes_iter(
            data=True):
        ctx.node_instance_ids.add(node_instance_id)
        node_id = _node_id_from_node_instance(data['node'])
        ctx.node_ids_to_node_instance_ids[node_id].add(node_instance_id)
    _handle_connected_to_and_depends_on(ctx)
    ctx.restore_plan_node_graph()
    return deployment_node_graph, ctx
def extract_node_instances(node_instances_graph,
                           ctx,
                           copy_instances=False,
                           contained_graph=None):
    """Flatten a node-instance graph into a list of node-instance dicts.

    Group nodes are dropped; each instance's relationships are rebuilt in
    original index order. Group-rewired relationships are restored to
    their real (non-group) target, the instance's 'scaling_groups' chain
    is recorded, and real targets missing from the graph are appended with
    empty relationships so callers see a closed set.
    """
    contained_graph = contained_graph or ctx.deployment_contained_graph
    added_missing_node_instance_ids = set()
    node_instances = []
    for node_instance_id, data in node_instances_graph.nodes_iter(data=True):
        node_instance = data['node']
        if node_instance.get('group'):
            continue
        node_instance_attributes = data.get('node_instance_attributes')
        if copy_instances:
            node_instance = copy.deepcopy(node_instance)
        if node_instance_attributes:
            # e.g. {'modification': 'added'} stamped by the diff helpers
            node_instance.update(node_instance_attributes)
        indexed_relationship_instances = []
        for target_node_instance_id in node_instances_graph.neighbors_iter(
                node_instance_id):
            edge_data = node_instances_graph[node_instance_id][
                target_node_instance_id]
            relationship_instance = edge_data['relationship']
            relationship_index = edge_data['index']
            if copy_instances:
                relationship_instance = copy.deepcopy(relationship_instance)
            group_rel = (relationship_instance['type'] ==
                         GROUP_CONTAINED_IN_REL_TYPE)
            replaced = relationship_instance.pop('replaced', None)
            if replaced or group_rel:
                # the edge points at a group instance: record the group
                # chain and, for replaced edges, restore the real target
                group_name = relationship_instance['target_name']
                group_id = relationship_instance['target_id']
                scaling_groups = [{
                    'name': group_name,
                    'id': group_id
                }]
                containing_groups, parent = ctx.containing_group_instances(
                    instance_id=group_id,
                    contained_graph=contained_graph
                )
                scaling_groups += containing_groups
                node_instance['scaling_groups'] = scaling_groups
                if replaced:
                    target_node_instance = parent
                    target_name = target_node_instance['name']
                    target_id = target_node_instance['id']
                    relationship_instance['target_name'] = target_name
                    relationship_instance['target_id'] = target_id
                    if (target_id not in node_instances_graph and
                            target_id not in added_missing_node_instance_ids):
                        target_node_instance = contained_graph.node[target_id][
                            'node']
                        if copy_instances:
                            target_node_instance = copy.deepcopy(
                                target_node_instance)
                        target_node_instance[RELATIONSHIPS] = []
                        node_instances.append(target_node_instance)
                        added_missing_node_instance_ids.add(target_id)
            if not group_rel:
                indexed_relationship_instances.append(
                    (relationship_index, relationship_instance))
        # restore the original relationship ordering
        indexed_relationship_instances.sort(key=lambda index__: index__[0])
        relationship_instances = [r for _, r in indexed_relationship_instances]
        node_instance[RELATIONSHIPS] = relationship_instances
        node_instances.append(node_instance)
    return node_instances
def extract_added_node_instances(previous_deployment_node_graph,
                                 new_deployment_node_graph,
                                 ctx):
    """Return node instances present only in the new deployment graph,
    stamped with modification 'added'."""
    return extract_node_instances(
        _graph_diff(
            new_deployment_node_graph,
            previous_deployment_node_graph,
            node_instance_attributes={'modification': 'added'}),
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.deployment_contained_graph)
def extract_removed_node_instances(previous_deployment_node_graph,
                                   new_deployment_node_graph,
                                   ctx):
    """Return node instances present only in the previous deployment
    graph, stamped with modification 'removed'."""
    return extract_node_instances(
        _graph_diff(
            previous_deployment_node_graph,
            new_deployment_node_graph,
            node_instance_attributes={'modification': 'removed'}),
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.previous_deployment_contained_graph)
def extract_added_relationships(previous_deployment_node_graph,
                                new_deployment_node_graph,
                                ctx):
    """Return node instances that gained relationships in the new graph,
    stamped with modification 'extended'."""
    return extract_node_instances(
        _graph_diff_relationships(
            new_deployment_node_graph,
            previous_deployment_node_graph,
            node_instance_attributes={'modification': 'extended'}),
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.deployment_contained_graph)
def extract_removed_relationships(previous_deployment_node_graph,
                                  new_deployment_node_graph,
                                  ctx):
    """Return node instances that lost relationships in the new graph,
    stamped with modification 'reduced'."""
    return extract_node_instances(
        _graph_diff_relationships(
            previous_deployment_node_graph,
            new_deployment_node_graph,
            node_instance_attributes={'modification': 'reduced'}),
        ctx=ctx,
        copy_instances=True,
        contained_graph=ctx.previous_deployment_contained_graph)
def _graph_diff(G, H, node_instance_attributes):
    """Return the subgraph of nodes in G but not in H.

    Each diff node is stamped with *node_instance_attributes*; its direct
    neighbors (both directions) and connecting edges are included for
    context. Uses the networkx 1.x positional attribute-dict API.
    """
    result = nx.DiGraph()
    for n1, data in G.nodes_iter(data=True):
        if n1 in H:
            continue
        result.add_node(n1, data,
                        node_instance_attributes=node_instance_attributes)
        for n2 in G.neighbors_iter(n1):
            result.add_node(n2, G.node[n2])
            result.add_edge(n1, n2, G[n1][n2])
        for n2 in G.predecessors_iter(n1):
            result.add_node(n2, G.node[n2])
            result.add_edge(n2, n1, G[n2][n1])
    return result
def _graph_diff_relationships(G, H, node_instance_attributes):
    """Return the subgraph of edges in G whose source exists in H but
    whose edge does not (i.e. relationship changes on surviving nodes).

    The source node is deep-copied and stamped with
    *node_instance_attributes*; node additions/removals themselves are
    handled by ``_graph_diff``.
    """
    result = nx.DiGraph()
    for source, dest, data in G.edges_iter(data=True):
        if source in H and dest not in H[source]:
            new_node = copy.deepcopy(G.node[source])
            result.add_node(source, new_node,
                            node_instance_attributes=node_instance_attributes)
            result.add_node(dest, G.node[dest])
            result.add_edge(source, dest, G[source][dest])
    return result
def _handle_contained_in(ctx):
    """Create node instances by walking each contained-in tree root-first.

    The contained-in graph is reversed so each weakly-connected component
    becomes a tree rooted at its container; recursion then multiplies
    instances down the tree. Afterwards the deployment contained-in graph
    is snapshotted (connected-to edges are added only later).
    """
    for contained_tree in nx.weakly_connected_component_subgraphs(
            ctx.plan_contained_graph.reverse(copy=True)):
        # the topologically-first node of the reversed tree is the root
        node_id = nx.topological_sort(contained_tree)[0]
        _build_multi_instance_node_tree_rec(
            node_id=node_id,
            contained_tree=contained_tree,
            ctx=ctx)
    ctx.deployment_contained_graph = ctx.deployment_node_graph.copy()
def _build_multi_instance_node_tree_rec(node_id,
                                        contained_tree,
                                        ctx,
                                        parent_relationship=None,
                                        parent_relationship_index=None,
                                        parent_node_instance_id=None,
                                        current_host_instance_id=None):
    """Recursively instantiate *node_id* and everything contained in it.

    Each created instance is added to the deployment graph, wired to its
    parent instance (when any), and recursion proceeds per child subtree
    under each instance so contained nodes multiply with their container.
    """
    node = contained_tree.node[node_id]['node']
    containers = _build_and_update_node_instances(
        ctx=ctx,
        node=node,
        parent_node_instance_id=parent_node_instance_id,
        parent_relationship=parent_relationship,
        current_host_instance_id=current_host_instance_id)
    for container in containers:
        node_instance = container.node_instance
        node_instance_id = node_instance['id']
        relationship_instance = container.relationship_instance
        new_current_host_instance_id = container.current_host_instance_id
        ctx.deployment_node_graph.add_node(node_instance_id,
                                           node=node_instance)
        if parent_node_instance_id is not None:
            ctx.deployment_node_graph.add_edge(
                node_instance_id, parent_node_instance_id,
                relationship=relationship_instance,
                index=parent_relationship_index)
        for child_node_id in contained_tree.neighbors_iter(node_id):
            descendants = nx.descendants(contained_tree, child_node_id)
            descendants.add(child_node_id)
            child_contained_tree = contained_tree.subgraph(descendants)
            _build_multi_instance_node_tree_rec(
                node_id=child_node_id,
                contained_tree=child_contained_tree,
                ctx=ctx,
                parent_relationship=ctx.plan_node_graph[
                    child_node_id][node_id]['relationship'],
                parent_relationship_index=ctx.plan_node_graph[
                    child_node_id][node_id]['index'],
                parent_node_instance_id=node_instance_id,
                current_host_instance_id=new_current_host_instance_id)
def _build_and_update_node_instances(ctx,
                                     node,
                                     parent_node_instance_id,
                                     parent_relationship,
                                     current_host_instance_id):
    """Return Containers for all instances of *node* under one parent.

    During modification, existing instances under this parent are reused
    (removing or adding per ``ctx.modified_nodes``); otherwise
    'current_instances' fresh instances are created, each wired to the
    parent instance and to the propagated host instance id.
    """
    node_id = node['id']
    current_instances_num = ctx.plan_node_graph.node[node_id][
        'scale_properties']['current_instances']
    new_instances_num = 0
    previous_containers = []
    if ctx.is_modification:
        all_previous_node_instance_ids = ctx.node_ids_to_node_instance_ids[
            node_id]
        # keep only instances attached to this particular parent instance
        previous_node_instance_ids = [
            instance_id for instance_id in all_previous_node_instance_ids
            if not parent_node_instance_id or
            (instance_id in ctx.previous_deployment_node_graph and
             ctx.previous_deployment_node_graph[instance_id].get(
                 parent_node_instance_id))
        ]
        previous_instances_num = len(previous_node_instance_ids)
        if node_id in ctx.modified_nodes:
            modified_node = ctx.modified_nodes[node_id]
            total_instances_num = modified_node['instances']
            if total_instances_num > previous_instances_num:
                new_instances_num = (total_instances_num -
                                     previous_instances_num)
            else:
                _handle_removed_instances(previous_node_instance_ids,
                                          previous_instances_num,
                                          total_instances_num,
                                          modified_node)
        else:
            new_instances_num = (current_instances_num -
                                 previous_instances_num)
        previous_node_instances = [
            ctx.previous_deployment_node_graph.node[node_instance_id]['node']
            for node_instance_id in previous_node_instance_ids]
        previous_containers = [Container(node_instance,
                                         _extract_contained(node,
                                                            node_instance),
                                         node_instance.get('host_id'))
                               for node_instance in previous_node_instances]
    else:
        new_instances_num = current_instances_num
    new_containers = []
    for _ in range(int(new_instances_num)):
        node_instance_id = _node_instance_id(node_id, ctx)
        node_instance = _node_instance_copy(
            node=node,
            node_instance_id=node_instance_id)
        new_current_host_instance_id = _handle_host_instance_id(
            current_host_instance_id=current_host_instance_id,
            node_id=node_id,
            node_instance_id=node_instance_id,
            node_instance=node_instance)
        if parent_node_instance_id is not None:
            relationship_instance = _relationship_instance_copy(
                relationship=parent_relationship,
                target_node_instance_id=parent_node_instance_id)
        else:
            relationship_instance = None
        new_containers.append(Container(node_instance,
                                        relationship_instance,
                                        new_current_host_instance_id))
    return previous_containers + new_containers
def _handle_removed_instances(
        previous_node_instance_ids,
        previous_instances_num,
        total_instances_num,
        modified_node):
    """Shrink ``previous_node_instance_ids`` in place to
    *total_instances_num*, honoring the include/exclude removal hints
    (include hint first, then unprotected instances, then any remainder).
    """
    removed_instances_num = previous_instances_num - total_instances_num
    removed_ids_include_hint = modified_node.get(
        'removed_ids_include_hint', [])
    removed_ids_exclude_hint = modified_node.get(
        'removed_ids_exclude_hint', [])
    for removed_instance_id in removed_ids_include_hint:
        if removed_instances_num <= 0:
            break
        if removed_instance_id in previous_node_instance_ids:
            previous_node_instance_ids.remove(removed_instance_id)
            removed_instances_num -= 1
    # iterate over a copy since the list is mutated while looping
    for removed_instance_id in copy.copy(
            previous_node_instance_ids):
        if removed_instances_num <= 0:
            break
        if removed_instance_id in removed_ids_exclude_hint:
            continue
        previous_node_instance_ids.remove(removed_instance_id)
        removed_instances_num -= 1
    remaining_removed_instance_ids = previous_node_instance_ids[
        :removed_instances_num]
    for removed_instance_id in remaining_removed_instance_ids:
        previous_node_instance_ids.remove(removed_instance_id)
def _extract_contained(node, node_instance):
    """Return the node instance's relationship matching the node's
    contained-in relationship, or None when the node has none; raises
    RuntimeError when the node has one but the instance does not."""
    for node_relationship in node.get('relationships', []):
        if CONTAINED_IN_REL_TYPE in node_relationship['type_hierarchy']:
            contained_node_relationship = node_relationship
            break
    else:
        return None
    for node_instance_relationship in node_instance['relationships']:
        if (node_instance_relationship['type'] ==
                contained_node_relationship['type']):
            return node_instance_relationship
    raise RuntimeError("Failed extracting contained node instance "
                       "relationships for node instance '{0}'"
                       .format(node_instance['id']))
def _handle_host_instance_id(current_host_instance_id,
                             node_id,
                             node_instance_id,
                             node_instance):
    """Resolve and stamp the host instance id on *node_instance*,
    returning it (the new instance becomes its own host when the current
    root is a host node)."""
    # If this condition applies, we assume the current root is a host node
    if current_host_instance_id is None and \
            node_instance.get('host_id') == node_id:
        current_host_instance_id = node_instance_id
    if current_host_instance_id is not None:
        node_instance['host_id'] = current_host_instance_id
    return current_host_instance_id
def _handle_connected_to_and_depends_on(ctx):
    """Instantiate connected-to/depends-on relationships between node
    instances for every such edge in the plan graph."""
    relationship_target_ids = _build_previous_target_ids_for_all_to_one(ctx)
    connected_graph = ctx.plan_connected_graph
    for source_node_id, target_node_id, edge_data in connected_graph.edges(
            data=True):
        relationship = edge_data['relationship']
        index = edge_data['index']
        connection_type = _verify_and_get_connection_type(relationship)
        source_node_instance_ids = ctx.node_ids_to_node_instance_ids[
            source_node_id]
        target_node_instance_ids = ctx.node_ids_to_node_instance_ids[
            target_node_id]
        _add_connected_to_and_depends_on_relationships(
            ctx=ctx,
            relationship=relationship,
            index=index,
            source_node_id=source_node_id,
            target_node_id=target_node_id,
            source_node_instance_ids=source_node_instance_ids,
            target_node_instance_ids=target_node_instance_ids,
            connection_type=connection_type,
            relationship_target_ids=relationship_target_ids)
def _build_previous_target_ids_for_all_to_one(ctx):
    """Map (source node id, target node id, relationship type) -> set of
    previous target instance ids, used to keep all_to_one targets stable
    across modifications; empty when not a modification."""
    relationship_target_ids = {}
    if ctx.is_modification:
        for s, t, e_data in ctx.previous_deployment_node_graph.edges_iter(
                data=True):
            s_node = ctx.previous_deployment_node_graph.node[s]['node']
            t_node = ctx.previous_deployment_node_graph.node[t]['node']
            rel = e_data['relationship']
            key = (_node_id_from_node_instance(s_node),
                   _node_id_from_node_instance(t_node),
                   rel['type'])
            if key not in relationship_target_ids:
                relationship_target_ids[key] = set()
            target_ids = relationship_target_ids[key]
            target_ids.add(rel['target_id'])
    return relationship_target_ids
def _get_all_to_one_relationship_target_id(
        ctx,
        relationship_target_ids,
        source_node_id,
        target_node_id,
        relationship,
        target_node_instance_ids):
    """Pick the single target instance for an all_to_one relationship:
    reuse the recorded previous target during modification (raising
    IllegalAllToOneState if ambiguous), else the smallest instance id."""
    key = (source_node_id, target_node_id, relationship['type'])
    if ctx.is_modification and key in relationship_target_ids:
        target_ids = relationship_target_ids[key]
        if len(target_ids) != 1:
            raise exceptions.IllegalAllToOneState(
                "Expected exactly one target id for relationship "
                "{0}->{1} of type '{2}')".format(source_node_id,
                                                 target_node_id,
                                                 relationship['type']))
        # copy() so the recorded set itself is not mutated by pop()
        return target_ids.copy().pop()
    else:
        return min(target_node_instance_ids)
def _add_connected_to_and_depends_on_relationships(
        ctx,
        relationship,
        index,
        source_node_id,
        target_node_id,
        source_node_instance_ids,
        target_node_instance_ids,
        connection_type,
        relationship_target_ids):
    """Add relationship edges between source and target node instances.

    For ALL_TO_ONE a single target instance is chosen (reusing the previous
    deployment's choice on modification); otherwise every source instance is
    connected to every target instance. When both endpoints share a
    containing group, instances are only connected within the same group
    instance.
    """
    if not source_node_instance_ids or not target_node_instance_ids:
        # Nothing to connect (e.g. one side scaled to zero instances).
        return
    minimal_containing_group = ctx.minimal_containing_group(
        node_a=source_node_id,
        node_b=target_node_id)
    if connection_type == ALL_TO_ONE:
        if minimal_containing_group:
            raise exceptions.UnsupportedAllToOneInGroup(
                "'{0}' connection type is not supported within groups, "
                "but the source node '{1}' and target node '{2}' are both in "
                "group '{3}'"
                .format(ALL_TO_ONE, source_node_id, target_node_id,
                        minimal_containing_group))
        else:
            target_node_instance_id = _get_all_to_one_relationship_target_id(
                ctx=ctx,
                relationship_target_ids=relationship_target_ids,
                source_node_id=source_node_id,
                target_node_id=target_node_id,
                relationship=relationship,
                target_node_instance_ids=target_node_instance_ids)
            # Collapse the target set to the single chosen instance.
            target_node_instance_ids = [target_node_instance_id]
    if minimal_containing_group:
        # Partition both sides by containing group instance so edges are
        # only created within the same group instance.
        partitioned_node_instance_ids = _partition_source_and_target_instances(
            ctx=ctx,
            group=minimal_containing_group,
            source_node_instance_ids=source_node_instance_ids,
            target_node_instance_ids=target_node_instance_ids)
    else:
        # Single partition: full cross product of sources and targets.
        partitioned_node_instance_ids = [
            (source_node_instance_ids, target_node_instance_ids)]
    for source_node_instance_ids, target_node_instance_ids in \
            partitioned_node_instance_ids:
        for source_node_instance_id in source_node_instance_ids:
            for target_node_instance_id in target_node_instance_ids:
                relationship_instance = _relationship_instance_copy(
                    relationship=relationship,
                    target_node_instance_id=target_node_instance_id)
                ctx.deployment_node_graph.add_edge(
                    source_node_instance_id, target_node_instance_id,
                    relationship=relationship_instance,
                    index=index)
def _partition_source_and_target_instances(
        ctx,
        group,
        source_node_instance_ids,
        target_node_instance_ids):
    """Pair source/target instance ids by their containing group instance.

    Both sides are grouped by the instance of ``group`` that contains them,
    so relationships are only created between instances living in the same
    group instance. Returns a list of
    ``(source_instance_ids, target_instance_ids)`` tuples.
    """
    source_scaling_groups_map = _build_scaling_groups_map(
        ctx=ctx,
        node_instance_ids=source_node_instance_ids,
        group=group)
    target_scaling_groups_map = _build_scaling_groups_map(
        ctx=ctx,
        node_instance_ids=target_node_instance_ids,
        group=group)
    # Both maps must be keyed by the same group instance ids.
    assert (set(source_scaling_groups_map.keys()) ==
            set(target_scaling_groups_map.keys()))
    # No mutation happens during iteration, so the defensive list() copy of
    # items() is unnecessary; build the result with a comprehension.
    return [(source_ids, target_scaling_groups_map[key])
            for key, source_ids in source_scaling_groups_map.items()]
def _build_scaling_groups_map(ctx, node_instance_ids, group):
node_instances = [ctx.deployment_node_graph.node[n]['node']
for n in node_instance_ids]
scaling_groups_map = collections.defaultdict(list)
for node_instance in node_instances:
node_instance_id = node_instance['id']
group_id = ctx.containing_group_id(
node_instance_id=node_instance_id,
group_name=group)
if not group_id:
raise RuntimeError('Unexpected state')
scaling_groups_map[group_id].append(node_instance_id)
return scaling_groups_map
def _node_instance_id(node_id, ctx):
    """Return a fresh unique instance id of the form '<node_id>_<suffix>'.

    Retries with a new random suffix until the id is not already taken,
    then registers it in ``ctx.node_instance_ids``.
    """
    while True:
        candidate = '{0}_{1}'.format(node_id, _generate_id())
        if candidate not in ctx.node_instance_ids:
            break
    ctx.node_instance_ids.add(candidate)
    return candidate
def _generate_id(id_len=6):
return ''.join(choice(digits + ascii_lowercase) for _ in range(id_len))
def _node_instance_copy(node, node_instance_id):
    """Build a minimal node-instance dict for ``node`` with the given id."""
    node_id = _node_id_from_node(node)
    instance = {
        'name': node_id,
        'node_id': node_id,
        'id': node_instance_id,
    }
    # Carry over the host id only when the node declares one.
    if 'host_id' in node:
        instance['host_id'] = node['host_id']
    # Mark group nodes so instance-level code can tell them apart.
    if node.get('group'):
        instance['group'] = True
    return instance
def _relationship_instance_copy(relationship,
target_node_instance_id):
result = {
'type': relationship['type'],
'target_name': relationship['target_id'],
'target_id': target_node_instance_id
}
replaced = relationship.get('replaced')
if replaced:
result['replaced'] = replaced
return result
def _verify_no_unsupported_relationships(graph):
    """Raise UnsupportedRelationship unless every edge's relationship derives
    from one of the four supported base relationship types."""
    for s, t, edge in graph.edges_iter(data=True):
        if not _relationship_type_hierarchy_includes_one_of(
                edge['relationship'], [
                    DEPENDS_ON_REL_TYPE,
                    CONTAINED_IN_REL_TYPE,
                    CONNECTED_TO_REL_TYPE,
                    GROUP_CONTAINED_IN_REL_TYPE]):
            raise exceptions.UnsupportedRelationship()
def _verify_and_get_connection_type(relationship):
    """Return the relationship's connection type (ALL_TO_ALL or ALL_TO_ONE).

    Raises IllegalConnectedToConnectionType for any other value — including
    a missing connection type property (``get`` yields None here).
    """
    connection_type = relationship.get('properties', {}).get(CONNECTION_TYPE)
    if connection_type not in [ALL_TO_ALL, ALL_TO_ONE]:
        raise exceptions.IllegalConnectedToConnectionType()
    return connection_type
def _relationship_type_hierarchy_includes_one_of(relationship, expected_types):
relationship_type_hierarchy = relationship['type_hierarchy']
return any([relationship_type in expected_types
for relationship_type in relationship_type_hierarchy])
def _node_id_from_node(node):
return node.get('name') or node.get('id')
def _node_id_from_node_instance(instance):
return instance.get('name') or instance.get('node_id')
class Context(object):
    """State shared while instantiating a deployment from a plan.

    Holds the plan node graph plus derived sub-graphs (contained-in and
    connected-to/depends-on views), the deployment graph being built, and —
    on a deployment modification — the previous deployment's graphs.
    Uses the networkx 1.x API (``nodes_iter``/``edges_iter``/``graph.node``).
    """
    def __init__(self,
                 plan_node_graph,
                 deployment_node_graph,
                 previous_deployment_node_graph=None,
                 previous_deployment_contained_graph=None,
                 modified_nodes=None):
        self.plan_node_graph = plan_node_graph
        # Sub-graph of (group-)contained-in edges only.
        self.plan_contained_graph = self._build_contained_in_graph(
            plan_node_graph)
        # Sub-graph of connected-to/depends-on edges (contained-in excluded).
        self.plan_connected_graph = self._build_connected_to_and_depends_on_graph(
            plan_node_graph)
        self.deployment_node_graph = deployment_node_graph
        self.deployment_contained_graph = None
        self.previous_deployment_node_graph = previous_deployment_node_graph
        self.previous_deployment_contained_graph = (
            previous_deployment_contained_graph)
        self.modified_nodes = modified_nodes
        # node id -> set of node instance ids; seeded from the previous
        # deployment when this is a modification.
        self.node_ids_to_node_instance_ids = collections.defaultdict(set)
        self.node_instance_ids = set()
        if self.is_modification:
            for node_instance_id, data in \
                    self.previous_deployment_node_graph.nodes_iter(data=True):
                self.node_instance_ids.add(node_instance_id)
                node_instance = data['node']
                self.node_ids_to_node_instance_ids[
                    _node_id_from_node_instance(node_instance)].add(
                    node_instance_id)
    @property
    def is_modification(self):
        """True when this run modifies an existing deployment."""
        return self.previous_deployment_node_graph is not None
    def minimal_containing_group(self, node_a, node_b):
        """Return the innermost group containing both nodes, or None."""
        a_groups = self._containing_groups(node_a)
        b_groups = self._containing_groups(node_b)
        shared_groups = set(a_groups) & set(b_groups)
        if not shared_groups:
            return None
        # networkx 1.x topological_sort returns a list; the first shared
        # group in topological order is the innermost containing one.
        return nx.topological_sort(self.plan_contained_graph,
                                   nbunch=shared_groups)[0]
    def _containing_groups(self, node_id):
        """Walk up the contained-in chain collecting enclosing group ids."""
        graph = self.plan_contained_graph
        result = []
        while True:
            succ = graph.succ[node_id]
            if succ:
                # A node can be contained in at most one parent.
                assert len(succ) == 1
                node_id = list(succ.keys())[0]
                if not graph.node[node_id]['node'].get('group'):
                    # Skip containers that are not groups.
                    continue
                result.append(node_id)
            else:
                break
        return result
    def containing_group_id(self, node_instance_id, group_name):
        """Return the instance id of the enclosing group named ``group_name``.

        Walks up the deployment contained-in chain; returns None when no
        enclosing group instance matches the name.
        """
        graph = self.deployment_contained_graph
        while True:
            succ = graph.succ[node_instance_id]
            if succ:
                assert len(succ) == 1
                node_instance_id = list(succ.keys())[0]
                node = graph.node[node_instance_id]['node']
                if not node.get('group'):
                    continue
                if _node_id_from_node_instance(node) == group_name:
                    return node['id']
            else:
                return None
    @staticmethod
    def containing_group_instances(instance_id,
                                   contained_graph):
        """Collect the chain of containing instances of ``instance_id``.

        Returns ``(instances, tail)``: when the walk ends at a non-group
        container, that container is split off as ``tail`` and the
        preceding (group) instances are returned first; otherwise ``tail``
        is None and all collected instances are returned.
        """
        result = []
        while True:
            succ = contained_graph.succ[instance_id]
            if succ:
                assert len(succ) == 1
                node_instance_id = list(succ.keys())[0]
                node = contained_graph.node[node_instance_id]['node']
                instance_id = node['id']
                result.append({
                    'name': _node_id_from_node_instance(node),
                    'id': instance_id
                })
                if not node.get('group'):
                    # Reached a non-group container: split it off.
                    return result[:-1], result[-1]
            else:
                return result, None
    def restore_plan_node_graph(self):
        """Undo relationship retargeting: restore 'replaced' target ids
        that were stashed on relationships and drop the markers."""
        for _, data in self.plan_node_graph.nodes_iter(data=True):
            node = data['node']
            for relationship in node.get('relationships', []):
                replaced = relationship.pop('replaced', None)
                if replaced:
                    relationship['target_id'] = replaced
    def _build_connected_to_and_depends_on_graph(self, graph):
        """Sub-graph of connected-to/depends-on edges, excluding contained-in."""
        return self._build_graph_by_relationship_types(
            graph,
            build_from_types=[CONNECTED_TO_REL_TYPE, DEPENDS_ON_REL_TYPE],
            exclude_types=[CONTAINED_IN_REL_TYPE])
    def _build_contained_in_graph(self, graph):
        """Sub-graph of (group-)contained-in edges, keeping isolated nodes."""
        result = self._build_graph_by_relationship_types(
            graph,
            build_from_types=[CONTAINED_IN_REL_TYPE,
                              GROUP_CONTAINED_IN_REL_TYPE],
            exclude_types=[])
        # Nodes with no containment relationships have no edges above, so
        # add every node explicitly; they become one-node containment trees
        # in them (these will be considered 1 node trees)
        result.add_nodes_from(graph.nodes_iter(data=True))
        return result
    @staticmethod
    def _build_graph_by_relationship_types(graph,
                                           build_from_types,
                                           exclude_types):
        """Copy the edges (and their endpoint nodes) whose relationship
        type hierarchy matches ``build_from_types`` but none of
        ``exclude_types`` into a fresh DiGraph."""
        relationship_base_graph = nx.DiGraph()
        for source, target, edge_data in graph.edges_iter(data=True):
            include_edge = (
                _relationship_type_hierarchy_includes_one_of(
                    edge_data['relationship'], build_from_types) and not
                _relationship_type_hierarchy_includes_one_of(
                    edge_data['relationship'], exclude_types))
            if include_edge:
                # networkx 1.x signature: add_node(n, attr_dict).
                relationship_base_graph.add_node(source, graph.node[source])
                relationship_base_graph.add_node(target, graph.node[target])
                relationship_base_graph.add_edge(source, target, edge_data)
        return relationship_base_graph
class Container(object):
    """Value object pairing a node instance with its containment context:
    the relationship instance connecting it to its container and the id of
    the host instance it currently resides on."""
    def __init__(self,
                 node_instance,
                 relationship_instance,
                 current_host_instance_id):
        # The contained node instance dict.
        self.node_instance = node_instance
        # The contained-in relationship instance pointing at the container.
        self.relationship_instance = relationship_instance
        # Id of the host instance this node instance currently lives on.
        self.current_host_instance_id = current_host_instance_id
| true | true |
1c362714be819cfd94e050bb3ff14a6468e2694a | 4,046 | py | Python | src/third_party/beaengine/tests/0f65.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | 1 | 2022-01-17T17:40:29.000Z | 2022-01-17T17:40:29.000Z | src/third_party/beaengine/tests/0f65.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | src/third_party/beaengine/tests/0f65.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Disassembly tests for opcode 0F 65 (pcmpgtw) covering the legacy
    66-prefixed SSE form and the VEX/EVEX (vpcmpgtw) encodings."""
    def test(self):
        """Decode hand-crafted byte buffers and check opcode, mnemonic,
        operand rendering and (for EVEX) the decoded prefix payload."""
        # 66 0F 65 /r
        # pcmpgtw mm1, mm2/m64
        # NOTE(review): with the 66 prefix the operands are xmm registers
        # (as the expected repr below shows); the mm form describes the
        # unprefixed MMX encoding — confirm intended comment.
        Buffer = bytes.fromhex('660f659011223344')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xf65')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'pcmpgtw')
        assert_equal(myDisasm.repr(), 'pcmpgtw xmm2, xmmword ptr [rax+44332211h]')
        # VEX.NDS.128.66.0F.WIG 65 /r
        # vpcmpgtw xmm1, xmm2, xmm3/m128
        Buffer = bytes.fromhex('c40101650e')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
        assert_equal(myDisasm.repr(), 'vpcmpgtw xmm9, xmm15, xmmword ptr [r14]')
        # VEX.NDS.256.66.0F.WIG 65 /r
        # vpcmpgtw ymm1, ymm2, ymm3/m256
        Buffer = bytes.fromhex('c40105650e')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
        assert_equal(myDisasm.repr(), 'vpcmpgtw ymm9, ymm15, ymmword ptr [r14]')
        # EVEX.NDS.128.66.0F.WIG 65 /r
        # vpcmpgtw xmm1 {k1}{z}, xmm2, xmm3/m128
        Buffer = bytes.fromhex('62010506650e')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        # EVEX payload bytes (P0/P1/P2) and the derived pp/mm fields.
        assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
        assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
        assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
        assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
        assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x65')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
        assert_equal(myDisasm.repr(), 'vpcmpgtw xmm25, xmm31, xmmword ptr [r14]')
        # EVEX.NDS.256.66.0F.WIG 65 /r
        # vpcmpgtw ymm1 {k1}{z}, ymm2, ymm3/m256
        Buffer = bytes.fromhex('62010520650e')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
        assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
        assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
        assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
        assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x65')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
        assert_equal(myDisasm.repr(), 'vpcmpgtw ymm25, ymm31, ymmword ptr [r14]')
        # EVEX.NDS.512.66.0F.WIG 65 /r
        # vpcmpgtw zmm1 {k1}{z}, zmm2, zmm3/m512
        Buffer = bytes.fromhex('62010540650e')
        myDisasm = Disasm(Buffer)
        myDisasm.read()
        assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
        assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
        assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
        assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
        assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
        assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x65')
        assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
        assert_equal(myDisasm.repr(), 'vpcmpgtw zmm25, zmm31, zmmword ptr [r14]')
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
Buffer = bytes.fromhex('660f659011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xf65')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'pcmpgtw')
assert_equal(myDisasm.repr(), 'pcmpgtw xmm2, xmmword ptr [rax+44332211h]')
Buffer = bytes.fromhex('c40101650e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
assert_equal(myDisasm.repr(), 'vpcmpgtw xmm9, xmm15, xmmword ptr [r14]')
Buffer = bytes.fromhex('c40105650e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
assert_equal(myDisasm.repr(), 'vpcmpgtw ymm9, ymm15, ymmword ptr [r14]')
Buffer = bytes.fromhex('62010506650e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x65')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
assert_equal(myDisasm.repr(), 'vpcmpgtw xmm25, xmm31, xmmword ptr [r14]')
Buffer = bytes.fromhex('62010520650e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x65')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
assert_equal(myDisasm.repr(), 'vpcmpgtw ymm25, ymm31, ymmword ptr [r14]')
Buffer = bytes.fromhex('62010540650e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0x65')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpcmpgtw')
assert_equal(myDisasm.repr(), 'vpcmpgtw zmm25, zmm31, zmmword ptr [r14]')
| true | true |
1c3628455a1625f4ac5ca3b93c99383ec6b9c2e6 | 6,391 | py | Python | test/paper_examples/test_axiv_paper_examples.py | ykaitao/gluon-ts | 9622550974e9e0819e25438fc45353f8a6474b55 | [
"Apache-2.0"
] | 1 | 2020-01-19T13:27:51.000Z | 2020-01-19T13:27:51.000Z | test/paper_examples/test_axiv_paper_examples.py | ykaitao/gluon-ts | 9622550974e9e0819e25438fc45353f8a6474b55 | [
"Apache-2.0"
] | 1 | 2021-08-29T08:20:37.000Z | 2021-09-02T09:00:52.000Z | test/paper_examples/test_axiv_paper_examples.py | ykaitao/gluon-ts | 9622550974e9e0819e25438fc45353f8a6474b55 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# First-party imports
from gluonts.dataset.artificial import constant_dataset
from gluonts.dataset.field_names import FieldName
def test_listing_1():
    """
    Test GluonTS paper examples from arxiv paper:
    https://arxiv.org/abs/1906.05264
    Listing 1
    """
    # NOTE(review): get_dataset is imported to mirror the paper listing but
    # is unused here (a constant dataset is substituted below) — confirm
    # before removing.
    from gluonts.dataset.repository.datasets import get_dataset
    from gluonts.model.deepar import DeepAREstimator
    from gluonts.trainer import Trainer
    from gluonts.evaluation import Evaluator
    from gluonts.evaluation.backtest import backtest_metrics
    # We use electricity in the paper but that would take too long to run in
    # the unit test
    dataset_info, train_ds, test_ds = constant_dataset()
    meta = dataset_info.metadata
    # Train a minimal DeepAR model (1 epoch) just to exercise the pipeline.
    estimator = DeepAREstimator(
        freq=meta.freq,
        prediction_length=1,
        trainer=Trainer(epochs=1, batch_size=32),
    )
    predictor = estimator.train(train_ds)
    evaluator = Evaluator(quantiles=(0.1, 0.5, 0.9))
    # Backtest end-to-end; metrics are computed but not asserted on.
    agg_metrics, item_metrics = backtest_metrics(
        train_dataset=train_ds,
        test_dataset=test_ds,
        forecaster=predictor,
        evaluator=evaluator,
    )
def test_appendix_c():
    """
    Test GluonTS paper examples from arxiv paper:
    https://arxiv.org/abs/1906.05264
    Appendix C

    Defines a minimal custom estimator (feed-forward network trained with
    an L1 loss) and runs it end-to-end on a constant dataset.
    """
    from typing import List
    from mxnet import gluon
    from gluonts.model.estimator import GluonEstimator
    from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
    from gluonts.trainer import Trainer
    from gluonts.transform import (
        InstanceSplitter,
        Transformation,
        ExpectedNumInstanceSampler,
    )
    from gluonts.core.component import validated
    from gluonts.support.util import copy_parameters
    class MyTrainNetwork(gluon.HybridBlock):
        # Feed-forward network: a stack of Dense layers ending in one that
        # emits `prediction_length` values.
        def __init__(self, prediction_length, cells, act_type, **kwargs):
            super().__init__(**kwargs)
            self.prediction_length = prediction_length
            with self.name_scope():
                # Set up a network that predicts the target
                self.nn = gluon.nn.HybridSequential()
                for c in cells:
                    self.nn.add(gluon.nn.Dense(units=c, activation=act_type))
                self.nn.add(
                    gluon.nn.Dense(
                        units=self.prediction_length, activation=act_type
                    )
                )
        def hybrid_forward(self, F, past_target, future_target):
            prediction = self.nn(past_target)
            # calculate L1 loss to learn the median
            return (prediction - future_target).abs().mean(axis=-1)
    class MyPredNetwork(MyTrainNetwork):
        # The prediction network only receives
        # past target and returns predictions
        def hybrid_forward(self, F, past_target):
            prediction = self.nn(past_target)
            return prediction.expand_dims(axis=1)
    class MyEstimator(GluonEstimator):
        @validated()
        def __init__(
            self,
            freq: str,
            prediction_length: int,
            act_type: str = "relu",
            context_length: int = 30,
            # NOTE(review): mutable default list is shared across calls;
            # presumably validated() re-validates/copies arguments — confirm.
            cells: List[int] = [40, 40, 40],
            trainer: Trainer = Trainer(epochs=10),
        ) -> None:
            super().__init__(trainer=trainer)
            self.freq = freq
            self.prediction_length = prediction_length
            self.act_type = act_type
            self.context_length = context_length
            self.cells = cells
        def create_training_network(self) -> MyTrainNetwork:
            return MyTrainNetwork(
                prediction_length=self.prediction_length,
                cells=self.cells,
                act_type=self.act_type,
            )
        def create_predictor(
            self,
            transformation: Transformation,
            trained_network: gluon.HybridBlock,
        ) -> Predictor:
            # Build the prediction-time network and transfer the weights
            # learned by the training network (same layers, shared shape).
            prediction_network = MyPredNetwork(
                prediction_length=self.prediction_length,
                cells=self.cells,
                act_type=self.act_type,
            )
            copy_parameters(trained_network, prediction_network)
            return RepresentableBlockPredictor(
                input_transform=transformation,
                prediction_net=prediction_network,
                batch_size=self.trainer.batch_size,
                freq=self.freq,
                prediction_length=self.prediction_length,
                ctx=self.trainer.ctx,
            )
        def create_transformation(self):
            # Model specific input transform
            # Here we use a transformation that randomly
            # selects training samples from all series.
            return InstanceSplitter(
                target_field=FieldName.TARGET,
                is_pad_field=FieldName.IS_PAD,
                start_field=FieldName.START,
                forecast_start_field=FieldName.FORECAST_START,
                train_sampler=ExpectedNumInstanceSampler(num_instances=1),
                past_length=self.context_length,
                future_length=self.prediction_length,
            )
    from gluonts.trainer import Trainer
    from gluonts.evaluation import Evaluator
    from gluonts.evaluation.backtest import backtest_metrics
    # Train and backtest the custom estimator on the constant dataset.
    dataset_info, train_ds, test_ds = constant_dataset()
    meta = dataset_info.metadata
    estimator = MyEstimator(
        freq=meta.freq,
        prediction_length=1,
        trainer=Trainer(epochs=1, batch_size=32),
    )
    predictor = estimator.train(train_ds)
    evaluator = Evaluator(quantiles=(0.1, 0.5, 0.9))
    agg_metrics, item_metrics = backtest_metrics(
        train_dataset=train_ds,
        test_dataset=test_ds,
        forecaster=predictor,
        evaluator=evaluator,
    )
| 34.923497 | 78 | 0.637615 |
from gluonts.dataset.artificial import constant_dataset
from gluonts.dataset.field_names import FieldName
def test_listing_1():
from gluonts.dataset.repository.datasets import get_dataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer
from gluonts.evaluation import Evaluator
from gluonts.evaluation.backtest import backtest_metrics
dataset_info, train_ds, test_ds = constant_dataset()
meta = dataset_info.metadata
estimator = DeepAREstimator(
freq=meta.freq,
prediction_length=1,
trainer=Trainer(epochs=1, batch_size=32),
)
predictor = estimator.train(train_ds)
evaluator = Evaluator(quantiles=(0.1, 0.5, 0.9))
agg_metrics, item_metrics = backtest_metrics(
train_dataset=train_ds,
test_dataset=test_ds,
forecaster=predictor,
evaluator=evaluator,
)
def test_appendix_c():
from typing import List
from mxnet import gluon
from gluonts.model.estimator import GluonEstimator
from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
from gluonts.trainer import Trainer
from gluonts.transform import (
InstanceSplitter,
Transformation,
ExpectedNumInstanceSampler,
)
from gluonts.core.component import validated
from gluonts.support.util import copy_parameters
class MyTrainNetwork(gluon.HybridBlock):
def __init__(self, prediction_length, cells, act_type, **kwargs):
super().__init__(**kwargs)
self.prediction_length = prediction_length
with self.name_scope():
self.nn = gluon.nn.HybridSequential()
for c in cells:
self.nn.add(gluon.nn.Dense(units=c, activation=act_type))
self.nn.add(
gluon.nn.Dense(
units=self.prediction_length, activation=act_type
)
)
def hybrid_forward(self, F, past_target, future_target):
prediction = self.nn(past_target)
return (prediction - future_target).abs().mean(axis=-1)
class MyPredNetwork(MyTrainNetwork):
def hybrid_forward(self, F, past_target):
prediction = self.nn(past_target)
return prediction.expand_dims(axis=1)
class MyEstimator(GluonEstimator):
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
act_type: str = "relu",
context_length: int = 30,
cells: List[int] = [40, 40, 40],
trainer: Trainer = Trainer(epochs=10),
) -> None:
super().__init__(trainer=trainer)
self.freq = freq
self.prediction_length = prediction_length
self.act_type = act_type
self.context_length = context_length
self.cells = cells
def create_training_network(self) -> MyTrainNetwork:
return MyTrainNetwork(
prediction_length=self.prediction_length,
cells=self.cells,
act_type=self.act_type,
)
def create_predictor(
self,
transformation: Transformation,
trained_network: gluon.HybridBlock,
) -> Predictor:
prediction_network = MyPredNetwork(
prediction_length=self.prediction_length,
cells=self.cells,
act_type=self.act_type,
)
copy_parameters(trained_network, prediction_network)
return RepresentableBlockPredictor(
input_transform=transformation,
prediction_net=prediction_network,
batch_size=self.trainer.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
)
def create_transformation(self):
return InstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
train_sampler=ExpectedNumInstanceSampler(num_instances=1),
past_length=self.context_length,
future_length=self.prediction_length,
)
from gluonts.trainer import Trainer
from gluonts.evaluation import Evaluator
from gluonts.evaluation.backtest import backtest_metrics
dataset_info, train_ds, test_ds = constant_dataset()
meta = dataset_info.metadata
estimator = MyEstimator(
freq=meta.freq,
prediction_length=1,
trainer=Trainer(epochs=1, batch_size=32),
)
predictor = estimator.train(train_ds)
evaluator = Evaluator(quantiles=(0.1, 0.5, 0.9))
agg_metrics, item_metrics = backtest_metrics(
train_dataset=train_ds,
test_dataset=test_ds,
forecaster=predictor,
evaluator=evaluator,
)
| true | true |
1c362a8ce127fd4d5b2b6dbfe44d6a581ddc66d6 | 2,651 | py | Python | ml_service/util/smoke_test_scoring_service.py | CloudBreadPaPa/MLOpsPython | 6f5744febf443a532ee94839823704aa04893f6c | [
"MIT"
] | null | null | null | ml_service/util/smoke_test_scoring_service.py | CloudBreadPaPa/MLOpsPython | 6f5744febf443a532ee94839823704aa04893f6c | [
"MIT"
] | null | null | null | ml_service/util/smoke_test_scoring_service.py | CloudBreadPaPa/MLOpsPython | 6f5744febf443a532ee94839823704aa04893f6c | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import requests
import time
from azureml.core import Workspace
from azureml.core.webservice import AksWebservice, AciWebservice
sys.path.append(os.path.abspath("./ml_service/util")) # NOQA: E402
from env_variables import Env
import secrets
input = {"data": [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]]}
output_len = 2
def call_web_service(e, service_type, service_name):
    """Resolve a deployed AzureML scoring service and smoke-test it.

    Args:
        e: environment settings (workspace name, subscription, resource group).
        service_type: "ACI" or "AKS" — selects the webservice wrapper.
        service_name: name of the deployed service in the workspace.

    Returns the parsed JSON response from the scoring endpoint.
    """
    aml_workspace = Workspace.get(
        name=e.workspace_name,
        subscription_id=e.subscription_id,
        resource_group=e.resource_group
    )
    print("Fetching service")
    headers = {}
    if service_type == "ACI":
        service = AciWebservice(aml_workspace, service_name)
    else:
        service = AksWebservice(aml_workspace, service_name)
    if service.auth_enabled:
        # Key-based auth: attach the primary key as a bearer token.
        service_keys = service.get_keys()
        headers['Authorization'] = 'Bearer ' + service_keys[0]
    print("Testing service")
    print(". url: %s" % service.scoring_uri)
    output = call_web_app(service.scoring_uri, headers)
    return output
def call_web_app(url, headers):
    """POST the module-level sample payload to ``url`` and return the
    parsed JSON response, retrying on HTTP errors up to 10 times."""
    # Generate an HTTP 'traceparent' distributed tracing header
    # (per the W3C Trace Context proposed specification).
    headers['traceparent'] = "00-{0}-{1}-00".format(
        secrets.token_hex(16), secrets.token_hex(8))
    retries = 10
    for attempt in range(1, retries + 1):
        try:
            print("url :", url)
            print("input :", input)
            print("headers :", headers)
            response = requests.post(
                url, json=input, headers=headers)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            if attempt == retries:
                # Out of retries: surface the last HTTP error.
                raise e
            print(e)
            print("Retrying...")
            time.sleep(1)
def main():
    """Parse CLI args and smoke-test the scoring service or web app.

    --type picks the deployment flavor: AKS/ACI (service name resolved via
    the AzureML workspace) or Webapp (--service is the full scoring URL).
    Fails by assertion when the response shape is wrong.
    """
    parser = argparse.ArgumentParser("smoke_test_scoring_service.py")
    parser.add_argument(
        "--type",
        type=str,
        choices=["AKS", "ACI", "Webapp"],
        required=True,
        help="type of service"
    )
    parser.add_argument(
        "--service",
        type=str,
        required=True,
        help="Name of the image to test"
    )
    args = parser.parse_args()
    e = Env()
    if args.type == "Webapp":
        # Web app: hit the URL directly, no workspace lookup or auth.
        output = call_web_app(args.service, {})
    else:
        output = call_web_service(e, args.type, args.service)
    print("Verifying service output")
    assert "result" in output
    assert len(output["result"]) == output_len
    print("Smoke test successful.")
if __name__ == '__main__':
    main()
| 27.05102 | 69 | 0.610336 | import os
import sys
import argparse
import requests
import time
from azureml.core import Workspace
from azureml.core.webservice import AksWebservice, AciWebservice
sys.path.append(os.path.abspath("./ml_service/util"))
from env_variables import Env
import secrets
input = {"data": [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]]}
output_len = 2
def call_web_service(e, service_type, service_name):
aml_workspace = Workspace.get(
name=e.workspace_name,
subscription_id=e.subscription_id,
resource_group=e.resource_group
)
print("Fetching service")
headers = {}
if service_type == "ACI":
service = AciWebservice(aml_workspace, service_name)
else:
service = AksWebservice(aml_workspace, service_name)
if service.auth_enabled:
service_keys = service.get_keys()
headers['Authorization'] = 'Bearer ' + service_keys[0]
print("Testing service")
print(". url: %s" % service.scoring_uri)
output = call_web_app(service.scoring_uri, headers)
return output
def call_web_app(url, headers):
headers['traceparent'] = "00-{0}-{1}-00".format(
secrets.token_hex(16), secrets.token_hex(8))
retries = 10
for i in range(retries):
try:
print("url :", url)
print("input :", input)
print("headers :", headers)
response = requests.post(
url, json=input, headers=headers)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
if i == retries-1:
raise e
print(e)
print("Retrying...")
time.sleep(1)
def main():
parser = argparse.ArgumentParser("smoke_test_scoring_service.py")
parser.add_argument(
"--type",
type=str,
choices=["AKS", "ACI", "Webapp"],
required=True,
help="type of service"
)
parser.add_argument(
"--service",
type=str,
required=True,
help="Name of the image to test"
)
args = parser.parse_args()
e = Env()
if args.type == "Webapp":
output = call_web_app(args.service, {})
else:
output = call_web_service(e, args.type, args.service)
print("Verifying service output")
assert "result" in output
assert len(output["result"]) == output_len
print("Smoke test successful.")
if __name__ == '__main__':
main()
| true | true |
1c362ad7486b0a1887b8ed6c9c386259848b5a4b | 863 | py | Python | iss.py | menosvelasco/se-q3-iss | 03f52a6ee2305ec4aa5fea1953212cb35a68db8d | [
"MIT"
] | null | null | null | iss.py | menosvelasco/se-q3-iss | 03f52a6ee2305ec4aa5fea1953212cb35a68db8d | [
"MIT"
] | null | null | null | iss.py | menosvelasco/se-q3-iss | 03f52a6ee2305ec4aa5fea1953212cb35a68db8d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Manuel Velasco'
import requests
def astronaut_info():
    """Fetch and print how many astronauts are in space and their craft.

    Queries the Open Notify astros endpoint; no error handling — a network
    failure or schema change raises to the caller.
    """
    response = requests.get("http://api.open-notify.org/astros.json")
    people = response.json()["people"]
    # Message fixes: 'number of astronaut' -> plural, 'on abort' -> 'aboard'.
    print(f'number of astronauts {len(people)}')
    for person in people:
        print(f"{person['name']} aboard {person['craft']}")
def location_iss():
    """Print and return the current ISS position.

    Returns the 'iss_position' dict ({'latitude': ..., 'longitude': ...})
    from the Open Notify iss-now endpoint.
    """
    response = requests.get("http://api.open-notify.org/iss-now.json")
    # Parse the response body once instead of calling .json() per field.
    payload = response.json()
    time_stamp = payload["timestamp"]
    lat_long = payload["iss_position"]
    print('current location:')
    print(f'latitude: {lat_long["latitude"]}')
    print(f'longitude: {lat_long["longitude"]}')
    print()
    print(f'time: {time_stamp}')
    return lat_long
def tur():
    """Fetch and print the current ISS location (return value unused)."""
    # The previous unused local binding of the result was removed.
    location_iss()
def main():
    """Run the full ISS report: astronaut roster, then current location."""
    astronaut_info()
    tur()
if __name__ == '__main__':
    main()
__author__ = 'Manuel Velasco'
import requests
def astronaut_info():
astronauts_names = requests.get("http://api.open-notify.org/astros.json")
list_name = astronauts_names.json()["people"]
print(f'number of astronaut {len(list_name)}')
for value in list_name:
print(f"{value['name']} on abort {value['craft']}")
def location_iss():
loc_iss = requests.get("http://api.open-notify.org/iss-now.json")
time_stamp = loc_iss.json()["timestamp"]
lat_long = loc_iss.json()["iss_position"]
print('current location:')
print(f'latitude: {lat_long["latitude"]}')
print(f'longitude: {lat_long["longitude"]}')
print()
print(f'time: {time_stamp}')
return lat_long
def tur():
a = location_iss()
def main():
astronaut_info()
tur()
if __name__ == '__main__':
main()
| true | true |
1c362b0ddeedbaabeba6cd52adde96baee9230d6 | 1,050 | py | Python | pytezos/rpc/__init__.py | jpic/pytezos | 39327363235902de9355e667b8661345865bddd1 | [
"MIT"
] | null | null | null | pytezos/rpc/__init__.py | jpic/pytezos | 39327363235902de9355e667b8661345865bddd1 | [
"MIT"
] | null | null | null | pytezos/rpc/__init__.py | jpic/pytezos | 39327363235902de9355e667b8661345865bddd1 | [
"MIT"
] | null | null | null | from pytezos.rpc.shell import *
from pytezos.rpc.protocol import *
from pytezos.rpc.helpers import *
from pytezos.rpc.search import *
from pytezos.rpc.node import RpcNode
class RpcProvider:
    """Named collection of Tezos RPC endpoints, exposed as attributes.

    Each keyword given to the constructor maps a network name to its RPC URL;
    accessing ``provider.<network>`` builds (and caches) a ShellQuery for it.
    """
    def __init__(self, **urls):
        # network name -> RPC base URL
        self.urls = urls
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every provider alive for the cache's lifetime (flake8-bugbear B019);
    # presumably acceptable here because providers are module-level singletons.
    # `lru_cache` must come from one of the star imports above — confirm.
    @lru_cache(maxsize=None)
    def __getattr__(self, network) -> ShellQuery:
        # Only invoked for attributes not found normally, i.e. network names.
        # An unknown network raises KeyError (not AttributeError) here.
        return ShellQuery(node=RpcNode(uri=self.urls[network], network=network))
    def __dir__(self):
        # Advertise configured networks for interactive tab completion.
        return list(super(RpcProvider, self).__dir__()) + list(self.urls.keys())
    def __repr__(self):
        # Human-readable listing of the configured networks and their URLs.
        res = [
            super(RpcProvider, self).__repr__(),
            '\nNetworks',
            *list(map(lambda x: f'.{x[0]} # {x[1]}', self.urls.items()))
        ]
        return '\n'.join(res)
# Provider pointing at a locally running sandbox node.
localhost = RpcProvider(
    sandboxnet='http://127.0.0.1:8732/'
)
# Public RPC endpoints hosted by TzKT.
tzkt = RpcProvider(
    mainnet='https://rpc.tzkt.io/mainnet/',
    babylonnet='https://rpc.tzkt.io/babylonnet/',
    zeronet='https://rpc.tzkt.io/zeronet/'
)
# Module-level shortcuts to the TzKT-backed networks.
mainnet = tzkt.mainnet
babylonnet = tzkt.babylonnet
zeronet = tzkt.zeronet
| 25.609756 | 80 | 0.642857 | from pytezos.rpc.shell import *
from pytezos.rpc.protocol import *
from pytezos.rpc.helpers import *
from pytezos.rpc.search import *
from pytezos.rpc.node import RpcNode
class RpcProvider:
def __init__(self, **urls):
self.urls = urls
@lru_cache(maxsize=None)
def __getattr__(self, network) -> ShellQuery:
return ShellQuery(node=RpcNode(uri=self.urls[network], network=network))
def __dir__(self):
return list(super(RpcProvider, self).__dir__()) + list(self.urls.keys())
def __repr__(self):
res = [
super(RpcProvider, self).__repr__(),
'\nNetworks',
*list(map(lambda x: f'.{x[0]} # {x[1]}', self.urls.items()))
]
return '\n'.join(res)
localhost = RpcProvider(
sandboxnet='http://127.0.0.1:8732/'
)
tzkt = RpcProvider(
mainnet='https://rpc.tzkt.io/mainnet/',
babylonnet='https://rpc.tzkt.io/babylonnet/',
zeronet='https://rpc.tzkt.io/zeronet/'
)
mainnet = tzkt.mainnet
babylonnet = tzkt.babylonnet
zeronet = tzkt.zeronet
| true | true |
1c362c8d86c2a4b4878f2d6fd129b56d26ac9d88 | 8,528 | py | Python | tests/models/test_listeners.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | tests/models/test_listeners.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | tests/models/test_listeners.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import datetime
import functools
from nose.tools import (
eq_,
)
from parameterized import parameterized
from .. import DatabaseTest
from ... import lane
from ... import model
from ...config import Configuration
from ...model import (
CachedFeed,
ConfigurationSetting,
create,
site_configuration_has_changed,
Timestamp,
WorkCoverageRecord,
)
class TestSiteConfigurationHasChanged(DatabaseTest):
    """Verify that site_configuration_has_changed() updates the
    site-configuration Timestamp, and that the relevant ORM event
    listeners invoke it."""
    class MockSiteConfigurationHasChanged(object):
        """Keep track of whether site_configuration_has_changed was
        ever called.
        """
        def __init__(self):
            self.was_called = False
        def run(self, _db):
            # Record the call, then delegate to the real implementation.
            self.was_called = True
            site_configuration_has_changed(_db)
        def assert_was_called(self):
            "Assert that `was_called` is True, then reset it for the next assertion."
            assert self.was_called
            self.was_called = False
        def assert_was_not_called(self):
            "Assert that site_configuration_has_changed was never called."
            assert not self.was_called
    def setup(self):
        super(TestSiteConfigurationHasChanged, self).setup()
        # Mock model.site_configuration_has_changed
        self.old_site_configuration_has_changed = model.listeners.site_configuration_has_changed
        self.mock = self.MockSiteConfigurationHasChanged()
        for module in model.listeners, lane:
            module.site_configuration_has_changed = self.mock.run
    def teardown(self):
        # Restore the real implementation patched out in setup().
        super(TestSiteConfigurationHasChanged, self).teardown()
        for module in model.listeners, lane:
            module.site_configuration_has_changed = self.old_site_configuration_has_changed
    def test_site_configuration_has_changed(self):
        """Test the site_configuration_has_changed() function and its
        effects on the Configuration object.
        """
        # The database configuration timestamp is initialized as part
        # of the default data. In that case, it happened during the
        # package_setup() for this test run.
        last_update = Configuration.site_configuration_last_update(self._db)
        # Helper: read the site-configuration Timestamp straight from the DB.
        def ts():
            return Timestamp.value(
                self._db, Configuration.SITE_CONFIGURATION_CHANGED,
                service_type=None, collection=None
            )
        timestamp_value = ts()
        eq_(timestamp_value, last_update)
        # Now let's call site_configuration_has_changed().
        #
        # Sending cooldown=0 ensures we can change the timestamp value
        # even though it changed less than one second ago.
        time_of_update = datetime.datetime.utcnow()
        site_configuration_has_changed(self._db, cooldown=0)
        # The Timestamp has changed in the database.
        assert ts() > timestamp_value
        # The locally-stored last update value has been updated.
        new_last_update_time = Configuration.site_configuration_last_update(
            self._db, timeout=0
        )
        assert new_last_update_time > last_update
        assert (new_last_update_time - time_of_update).total_seconds() < 1
        # Let's be sneaky and update the timestamp directly,
        # without calling site_configuration_has_changed(). This
        # simulates another process on a different machine calling
        # site_configuration_has_changed() -- they will know about the
        # change but we won't be informed.
        timestamp = Timestamp.stamp(
            self._db, Configuration.SITE_CONFIGURATION_CHANGED,
            service_type=None, collection=None
        )
        # Calling Configuration.check_for_site_configuration_update
        # with a timeout doesn't detect the change.
        eq_(new_last_update_time,
            Configuration.site_configuration_last_update(self._db, timeout=60)
        )
        # But the default behavior -- a timeout of zero -- forces
        # the method to go to the database and find the correct
        # answer.
        newer_update = Configuration.site_configuration_last_update(
            self._db
        )
        assert newer_update > last_update
        # The Timestamp that tracks the last configuration update has
        # a cooldown; the default cooldown is 1 second. This means the
        # last update time will only be set once per second, to avoid
        # spamming the Timestamp with updates.
        # It's been less than one second since we updated the timeout
        # (with the Timestamp.stamp call). If this call decided that
        # the cooldown had expired, it would try to update the
        # Timestamp, and the code would crash because we're passing in
        # None instead of a database connection.
        #
        # But it knows the cooldown has not expired, so nothing
        # happens.
        site_configuration_has_changed(None)
        # Verify that the Timestamp has not changed (how could it,
        # with no database connection to modify the Timestamp?)
        eq_(newer_update,
            Configuration.site_configuration_last_update(self._db))
    # We don't test every event listener, but we do test one of each type.
    def test_configuration_relevant_lifecycle_event_updates_configuration(self):
        """When you create or modify a relevant item such as a
        ConfigurationSetting, site_configuration_has_changed is called.
        """
        ConfigurationSetting.sitewide(self._db, "setting").value = "value"
        self.mock.assert_was_called()
        ConfigurationSetting.sitewide(self._db, "setting").value = "value2"
        self.mock.assert_was_called()
    def test_lane_change_updates_configuration(self):
        """Verify that configuration-relevant changes work the same way
        in the lane module as they do in the model module.
        """
        lane = self._lane()
        self.mock.assert_was_called()
        lane.add_genre("Science Fiction")
        self.mock.assert_was_called()
    def test_configuration_relevant_collection_change_updates_configuration(self):
        """When you add a relevant item to a SQLAlchemy collection, such as
        adding a Collection to library.collections,
        site_configuration_has_changed is called.
        """
        # Creating a collection calls the method via an 'after_insert'
        # event on Collection.
        library = self._default_library
        collection = self._collection()
        self._db.commit()
        self.mock.assert_was_called()
        # Adding the collection to the library calls the method via
        # an 'append' event on Collection.libraries.
        library.collections.append(collection)
        self._db.commit()
        self.mock.assert_was_called()
        # Associating a CachedFeed with the library does _not_ call
        # the method, because nothing changed on the Library object and
        # we don't listen for 'append' events on Library.cachedfeeds.
        create(self._db, CachedFeed, type='page', pagination='',
               facets='', library=library)
        self._db.commit()
        self.mock.assert_was_not_called()
# NOTE: test_work.py:TestWork.test_reindex_on_availability_change
# tests the circumstances under which a database change
# requires that a Work's entry in the search index be
# recreated.
def _set_property(obj, value, property_name):
    """Set attribute ``property_name`` on ``obj`` to ``value``.

    The (target, value, name) parameter order lets tests pre-bind
    ``property_name`` with functools.partial and use the result as a
    generic ``setter(target, value)`` callable.
    """
    # Renamed the first parameter from `object` (which shadowed the builtin)
    # to `obj`; all callers in this file pass it positionally.
    setattr(obj, property_name, value)
class TestListeners(DatabaseTest):
    """Verify that changing a LicensePool's storage-status fields queues a
    search-reindex coverage record."""
    @parameterized.expand([
        ('works_when_open_access_property_changes', functools.partial(_set_property, property_name='open_access')),
        ('works_when_self_hosted_property_changes', functools.partial(_set_property, property_name='self_hosted'))
    ])
    def test_licensepool_storage_status_change(self, name, status_property_setter):
        """Setting the parameterized field registers exactly one
        UPDATE_SEARCH_INDEX coverage record, even when set twice to the
        same value."""
        # Arrange
        work = self._work(with_license_pool=True)
        [pool] = work.license_pools
        # Clear out any WorkCoverageRecords created as the work was initialized.
        work.coverage_records = []
        # Act
        # Change the field
        status_property_setter(pool, True)
        # Then verify that if the field is 'set' to its existing value, this doesn't happen.
        # pool.self_hosted = True
        status_property_setter(pool, True)
        # Assert
        eq_(1, len(work.coverage_records))
        eq_(work.id, work.coverage_records[0].work_id)
        eq_(WorkCoverageRecord.UPDATE_SEARCH_INDEX_OPERATION, work.coverage_records[0].operation)
        eq_(WorkCoverageRecord.REGISTERED, work.coverage_records[0].status)
| 38.588235 | 115 | 0.681168 |
import datetime
import functools
from nose.tools import (
eq_,
)
from parameterized import parameterized
from .. import DatabaseTest
from ... import lane
from ... import model
from ...config import Configuration
from ...model import (
CachedFeed,
ConfigurationSetting,
create,
site_configuration_has_changed,
Timestamp,
WorkCoverageRecord,
)
class TestSiteConfigurationHasChanged(DatabaseTest):
class MockSiteConfigurationHasChanged(object):
def __init__(self):
self.was_called = False
def run(self, _db):
self.was_called = True
site_configuration_has_changed(_db)
def assert_was_called(self):
assert self.was_called
self.was_called = False
def assert_was_not_called(self):
assert not self.was_called
def setup(self):
super(TestSiteConfigurationHasChanged, self).setup()
self.old_site_configuration_has_changed = model.listeners.site_configuration_has_changed
self.mock = self.MockSiteConfigurationHasChanged()
for module in model.listeners, lane:
module.site_configuration_has_changed = self.mock.run
def teardown(self):
super(TestSiteConfigurationHasChanged, self).teardown()
for module in model.listeners, lane:
module.site_configuration_has_changed = self.old_site_configuration_has_changed
def test_site_configuration_has_changed(self):
last_update = Configuration.site_configuration_last_update(self._db)
def ts():
return Timestamp.value(
self._db, Configuration.SITE_CONFIGURATION_CHANGED,
service_type=None, collection=None
)
timestamp_value = ts()
eq_(timestamp_value, last_update)
#
# Sending cooldown=0 ensures we can change the timestamp value
# even though it changed less than one second ago.
time_of_update = datetime.datetime.utcnow()
site_configuration_has_changed(self._db, cooldown=0)
# The Timestamp has changed in the database.
assert ts() > timestamp_value
# The locally-stored last update value has been updated.
new_last_update_time = Configuration.site_configuration_last_update(
self._db, timeout=0
)
assert new_last_update_time > last_update
assert (new_last_update_time - time_of_update).total_seconds() < 1
# Let's be sneaky and update the timestamp directly,
timestamp = Timestamp.stamp(
self._db, Configuration.SITE_CONFIGURATION_CHANGED,
service_type=None, collection=None
)
# Calling Configuration.check_for_site_configuration_update
# with a timeout doesn't detect the change.
eq_(new_last_update_time,
Configuration.site_configuration_last_update(self._db, timeout=60)
)
newer_update = Configuration.site_configuration_last_update(
self._db
)
assert newer_update > last_update
# (with the Timestamp.stamp call). If this call decided that
# the cooldown had expired, it would try to update the
# Timestamp, and the code would crash because we're passing in
site_configuration_has_changed(None)
eq_(newer_update,
Configuration.site_configuration_last_update(self._db))
def test_configuration_relevant_lifecycle_event_updates_configuration(self):
ConfigurationSetting.sitewide(self._db, "setting").value = "value"
self.mock.assert_was_called()
ConfigurationSetting.sitewide(self._db, "setting").value = "value2"
self.mock.assert_was_called()
def test_lane_change_updates_configuration(self):
lane = self._lane()
self.mock.assert_was_called()
lane.add_genre("Science Fiction")
self.mock.assert_was_called()
def test_configuration_relevant_collection_change_updates_configuration(self):
# Creating a collection calls the method via an 'after_insert'
# event on Collection.
library = self._default_library
collection = self._collection()
self._db.commit()
self.mock.assert_was_called()
# Adding the collection to the library calls the method via
# an 'append' event on Collection.libraries.
library.collections.append(collection)
self._db.commit()
self.mock.assert_was_called()
# Associating a CachedFeed with the library does _not_ call
# the method, because nothing changed on the Library object and
# we don't listen for 'append' events on Library.cachedfeeds.
create(self._db, CachedFeed, type='page', pagination='',
facets='', library=library)
self._db.commit()
self.mock.assert_was_not_called()
# recreated.
def _set_property(object, value, property_name):
setattr(object, property_name, value)
class TestListeners(DatabaseTest):
@parameterized.expand([
('works_when_open_access_property_changes', functools.partial(_set_property, property_name='open_access')),
('works_when_self_hosted_property_changes', functools.partial(_set_property, property_name='self_hosted'))
])
def test_licensepool_storage_status_change(self, name, status_property_setter):
# Arrange
work = self._work(with_license_pool=True)
[pool] = work.license_pools
# Clear out any WorkCoverageRecords created as the work was initialized.
work.coverage_records = []
# Act
# Change the field
status_property_setter(pool, True)
# Then verify that if the field is 'set' to its existing value, this doesn't happen.
status_property_setter(pool, True)
eq_(1, len(work.coverage_records))
eq_(work.id, work.coverage_records[0].work_id)
eq_(WorkCoverageRecord.UPDATE_SEARCH_INDEX_OPERATION, work.coverage_records[0].operation)
eq_(WorkCoverageRecord.REGISTERED, work.coverage_records[0].status)
| true | true |
1c362ce6923ae92d87c53d68433d20989f95e1d0 | 5,593 | py | Python | tests/parser/functions/test_convert_to_bytes32.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | [
"MIT"
] | 1 | 2021-04-23T21:48:20.000Z | 2021-04-23T21:48:20.000Z | tests/parser/functions/test_convert_to_bytes32.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | [
"MIT"
] | null | null | null | tests/parser/functions/test_convert_to_bytes32.py | Solexplorer/vyper | 135edd6a91d47c72de105066d6e6c1bdfe9ea66e | [
"MIT"
] | 1 | 2020-01-27T05:21:39.000Z | 2020-01-27T05:21:39.000Z | from vyper.utils import (
DECIMAL_DIVISOR,
SizeLimits,
)
def test_convert_to_bytes32(w3, get_contract_with_gas_estimation, bytes_helper):
    """convert(..., bytes32) from int128/uint256/address/bytes, for values
    held in memory, in storage, and as literals."""
    code = """
a: int128
b: uint256
c: address
d: bytes[32]
@public
def int128_to_bytes32(inp: int128) -> (bytes32, bytes32, bytes32):
    self.a = inp
    memory: bytes32 = convert(inp, bytes32)
    storage: bytes32 = convert(self.a, bytes32)
    literal: bytes32 = convert(1, bytes32)
    return memory, storage, literal
@public
def uint256_to_bytes32(inp: uint256) -> (bytes32, bytes32, bytes32):
    self.b = inp
    memory: bytes32 = convert(inp, bytes32)
    storage: bytes32 = convert(self.b, bytes32)
    literal: bytes32 = convert(1, bytes32)
    return memory, storage, literal
@public
def address_to_bytes32(inp: address) -> (bytes32, bytes32):
    self.c = inp
    memory: bytes32 = convert(inp, bytes32)
    storage: bytes32 = convert(self.c, bytes32)
    return memory, storage
@public
def bytes_to_bytes32(inp: bytes[32]) -> (bytes32, bytes32):
    self.d = inp
    memory: bytes32 = convert(inp, bytes32)
    storage: bytes32 = convert(self.d, bytes32)
    return memory, storage
@public
def bytes_to_bytes32_from_smaller(inp: bytes[10]) -> bytes32:
    memory: bytes32 = convert(inp, bytes32)
    return memory
"""
    c = get_contract_with_gas_estimation(code)
    # Integers are left-padded with zero bytes; addresses get 12 bytes of padding.
    assert c.int128_to_bytes32(1) == [bytes_helper('', 31) + b'\x01'] * 3
    assert c.uint256_to_bytes32(1) == [bytes_helper('', 31) + b'\x01'] * 3
    assert c.address_to_bytes32(w3.eth.accounts[0]) == [bytes_helper('', 12) + w3.toBytes(hexstr=w3.eth.accounts[0])] * 2 # noqa: E501
    # Byte strings shorter than 32 are right-padded with zero bytes.
    assert c.bytes_to_bytes32(bytes_helper('', 32)) == [bytes_helper('', 32)] * 2
    assert c.bytes_to_bytes32_from_smaller(b'hello') == bytes_helper('hello', 32)
def test_convert_from_address(get_contract_with_gas_estimation):
    """A 20-byte address converts to a bytes32 with 12 leading zero bytes."""
    test_address = "0xF5D4020dCA6a62bB1efFcC9212AAF3c9819E30D7"
    # Same address as above, left-padded with 12 zero bytes to fill 32 bytes.
    test_bytes = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF5\xD4\x02\x0d\xCA\x6a\x62\xbB\x1e\xfF\xcC\x92\x12\xAA\xF3\xc9\x81\x9E\x30\xD7" # noqa: E501
    test_address_to_bytes = """
@public
def test_address_to_bytes(x: address) -> bytes32:
    return convert(x, bytes32)
"""
    c = get_contract_with_gas_estimation(test_address_to_bytes)
    assert c.test_address_to_bytes(test_address) == test_bytes
def test_convert_from_bool(get_contract_with_gas_estimation):
    """A bool converts to a 32-byte word whose last byte is 0x00 or 0x01."""
    exp_code = """
@public
def testConvertBytes32(flag: bool) -> bytes32:
    flagBytes: bytes32 = convert(flag, bytes32)
    return flagBytes
"""
    contract = get_contract_with_gas_estimation(exp_code)
    # Check both truth values: full 32-byte width, correct final byte.
    for flag, last_byte in ((False, b'\x00'), (True, b'\x01')):
        result = contract.testConvertBytes32(flag)
        assert result[31:32] == last_byte
        assert len(result) == 32
def int_to_bytes_helper(val):
    """Return ``val`` as a 32-byte big-endian two's-complement byte string."""
    return val.to_bytes(32, "big", signed=True)
#################################################################################
# NOTE: Vyper uses a decimal divisor of 10000000000 (or 10^10).
#
# This means that `decimal` type variables can store values
# in increments of 1/10000000000 (i.e. up to ten decimal places).
#
# Because of this, when converting from `decimal` to `bytes32`,
# the conversion can be thought of as converting integer result of
# the decimal value of interest multiplied by 10000000000.
#
# For example, converting the decimal value `5.0` to `byte32`
# can be thought of as giving the `bytes32` value of the integer
# result of 5 * 10000000000 = 50000000000
#################################################################################
def test_convert_from_decimal(get_contract_with_gas_estimation):
    """convert(decimal, bytes32) yields the scaled integer (value * 10**10)
    as a 32-byte two's-complement word, for literals and storage values."""
    code = """
temp: decimal
@public
def convert_literal_zero() -> bytes32:
    return convert(0.0, bytes32)
@public
def convert_literal_zero_storage() -> bytes32:
    self.temp = 0.0
    return convert(self.temp, bytes32)
@public
def convert_min_decimal() -> bytes32:
    return convert(MIN_DECIMAL, bytes32)
@public
def convert_min_decimal_storage() -> bytes32:
    self.temp = MIN_DECIMAL
    return convert(self.temp, bytes32)
@public
def convert_max_decimal() -> bytes32:
    return convert(MAX_DECIMAL, bytes32)
@public
def convert_max_decimal_storage() -> bytes32:
    self.temp = MAX_DECIMAL
    return convert(self.temp, bytes32)
@public
def convert_positive_decimal() -> bytes32:
    return convert(5.0, bytes32)
@public
def convert_positive_decimal_storage() -> bytes32:
    self.temp = 5.0
    return convert(self.temp, bytes32)
@public
def convert_negative_decimal() -> bytes32:
    return convert(-5.0, bytes32)
@public
def convert_negative_decimal_storage() -> bytes32:
    self.temp = -5.0
    return convert(self.temp, bytes32)
"""
    c = get_contract_with_gas_estimation(code)
    # 0.0 -> an all-zero word.
    _temp = (b"\x00" * 32)
    assert _temp == c.convert_literal_zero()
    assert _temp == c.convert_literal_zero_storage()
    # MIN/MAX decimals: the pre-scaled integer limits, two's complement.
    _temp = int_to_bytes_helper(SizeLimits.MINDECIMAL)
    assert _temp == c.convert_min_decimal()
    assert _temp == c.convert_min_decimal_storage()
    _temp = int_to_bytes_helper(SizeLimits.MAXDECIMAL)
    assert _temp == c.convert_max_decimal()
    assert _temp == c.convert_max_decimal_storage()
    # +/-5.0 -> +/-5 * 10**10 (see the divisor note above).
    _temp = int_to_bytes_helper(5 * DECIMAL_DIVISOR)
    assert _temp == c.convert_positive_decimal()
    assert _temp == c.convert_positive_decimal_storage()
    _temp = int_to_bytes_helper(-5 * DECIMAL_DIVISOR)
    assert _temp == c.convert_negative_decimal()
    assert _temp == c.convert_negative_decimal_storage()
| 31.24581 | 162 | 0.690506 | from vyper.utils import (
DECIMAL_DIVISOR,
SizeLimits,
)
def test_convert_to_bytes32(w3, get_contract_with_gas_estimation, bytes_helper):
code = """
a: int128
b: uint256
c: address
d: bytes[32]
@public
def int128_to_bytes32(inp: int128) -> (bytes32, bytes32, bytes32):
self.a = inp
memory: bytes32 = convert(inp, bytes32)
storage: bytes32 = convert(self.a, bytes32)
literal: bytes32 = convert(1, bytes32)
return memory, storage, literal
@public
def uint256_to_bytes32(inp: uint256) -> (bytes32, bytes32, bytes32):
self.b = inp
memory: bytes32 = convert(inp, bytes32)
storage: bytes32 = convert(self.b, bytes32)
literal: bytes32 = convert(1, bytes32)
return memory, storage, literal
@public
def address_to_bytes32(inp: address) -> (bytes32, bytes32):
self.c = inp
memory: bytes32 = convert(inp, bytes32)
storage: bytes32 = convert(self.c, bytes32)
return memory, storage
@public
def bytes_to_bytes32(inp: bytes[32]) -> (bytes32, bytes32):
self.d = inp
memory: bytes32 = convert(inp, bytes32)
storage: bytes32 = convert(self.d, bytes32)
return memory, storage
@public
def bytes_to_bytes32_from_smaller(inp: bytes[10]) -> bytes32:
memory: bytes32 = convert(inp, bytes32)
return memory
"""
c = get_contract_with_gas_estimation(code)
assert c.int128_to_bytes32(1) == [bytes_helper('', 31) + b'\x01'] * 3
assert c.uint256_to_bytes32(1) == [bytes_helper('', 31) + b'\x01'] * 3
assert c.address_to_bytes32(w3.eth.accounts[0]) == [bytes_helper('', 12) + w3.toBytes(hexstr=w3.eth.accounts[0])] * 2
assert c.bytes_to_bytes32(bytes_helper('', 32)) == [bytes_helper('', 32)] * 2
assert c.bytes_to_bytes32_from_smaller(b'hello') == bytes_helper('hello', 32)
def test_convert_from_address(get_contract_with_gas_estimation):
test_address = "0xF5D4020dCA6a62bB1efFcC9212AAF3c9819E30D7"
test_bytes = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF5\xD4\x02\x0d\xCA\x6a\x62\xbB\x1e\xfF\xcC\x92\x12\xAA\xF3\xc9\x81\x9E\x30\xD7"
test_address_to_bytes = """
@public
def test_address_to_bytes(x: address) -> bytes32:
return convert(x, bytes32)
"""
c = get_contract_with_gas_estimation(test_address_to_bytes)
assert c.test_address_to_bytes(test_address) == test_bytes
def test_convert_from_bool(get_contract_with_gas_estimation):
exp_code = """
@public
def testConvertBytes32(flag: bool) -> bytes32:
flagBytes: bytes32 = convert(flag, bytes32)
return flagBytes
"""
c = get_contract_with_gas_estimation(exp_code)
falseBytes = c.testConvertBytes32(False)
assert falseBytes[31:32] == b'\x00'
assert len(falseBytes) == 32
trueBytes = c.testConvertBytes32(True)
assert trueBytes[31:32] == b'\x01'
assert len(trueBytes) == 32
def int_to_bytes_helper(val):
return (val).to_bytes(32, byteorder="big", signed=True)
| true | true |
1c362cfb5912a83a53a94f889b24b3ad052684b9 | 912 | py | Python | testproject/testapp/migrations/0001_initial.py | tumb1er/rest-framework-smoke | c011918de728ba20c93961f979da408a9bbd95e9 | [
"MIT"
] | 5 | 2021-04-22T08:39:49.000Z | 2021-06-02T00:40:38.000Z | testproject/testapp/migrations/0001_initial.py | tumb1er/rest-framework-smoke | c011918de728ba20c93961f979da408a9bbd95e9 | [
"MIT"
] | 61 | 2020-03-25T10:49:50.000Z | 2022-03-01T13:27:16.000Z | testproject/testapp/migrations/0001_initial.py | tumb1er/rest-framework-smoke | c011918de728ba20c93961f979da408a9bbd95e9 | [
"MIT"
] | 5 | 2020-03-25T07:45:58.000Z | 2021-11-10T16:12:22.000Z | # Generated by Django 3.0.4 on 2020-03-25 07:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for testapp: Project and Task tables.
    # First migration for the app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                # Each task belongs to one project; deleting the project
                # cascades to its tasks.
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Project')),
            ],
        ),
    ]
| 29.419355 | 114 | 0.576754 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Project')),
],
),
]
| true | true |
1c362cfcd82b4292b1b1b46edbeee9a97e7fba89 | 9,756 | py | Python | invconv/xlsx.py | TechPowerAwaits/ax-toolkit | d49924ef2dcd3f54f494ba3859afb070cc12ef91 | [
"0BSD"
] | null | null | null | invconv/xlsx.py | TechPowerAwaits/ax-toolkit | d49924ef2dcd3f54f494ba3859afb070cc12ef91 | [
"0BSD"
] | 16 | 2021-04-14T03:46:37.000Z | 2022-02-11T16:15:00.000Z | invconv/xlsx.py | TechPowerAwaits/ax-toolkit | d49924ef2dcd3f54f494ba3859afb070cc12ef91 | [
"0BSD"
] | null | null | null | # Copyright 2021 Richard Johnston <techpowerawaits@outlook.com>
# SPDX-license-identifier: 0BSD
import string
from loguru import logger
try:
import cell_pos
from exceptions import InvconvMissingHeaders
import ftype
import msg_handler
except ModuleNotFoundError:
import invconv.cell_pos as cell_pos
from invconv.exceptions import InvconvMissingHeaders
import invconv.ftype as ftype
import invconv.msg_handler as msg_handler
# `used` records whether openpyxl is importable; the handler is only
# registered with ftype at the bottom of the module when it is.
used = True
try:
    from openpyxl import load_workbook
except ModuleNotFoundError:
    used = False
# load_workbook is used repeatedly with similar settings
# every time.
WB_SETTINGS = {
    "read_only": True,
    "keep_vba": False,
    "data_only": True,
    "keep_links": False,
}
class XlsxDataTuple(ftype.BasicFtypeDataClass):
    """Per-worksheet data source: tracks a (filename, worksheet) pair plus its
    headers, and yields cell values one at a time via parser()."""
    def __init__(self, filename, wsname, headers):
        self.filename = filename
        self.wsname = wsname
        self.headers = headers
        # Cursor state for parser(); None means iteration has not started
        # (or has been reset after finishing a pass).
        self.cur_row = None
        self.cur_col = None
        super().__init__(
            filename=self.filename, sectionname=self.wsname, headers=self.headers
        )
    # Set relevant values and gets the number of operations
    # to be performed based on the dimensions.
    def set_oper_num(self, min_row, max_row, max_col):
        self.min_row = min_row
        self.min_col = 1
        self.max_row = max_row
        self.max_col = max_col
        delta_col = self.max_col - self.min_col + 1
        delta_row = self.max_row - self.min_row + 1
        self.num_oper = delta_col * delta_row
        return self.num_oper
    def load_workbook(self):
        # Read-only, data-only view of this tuple's workbook (see WB_SETTINGS).
        return load_workbook(self.filename, **WB_SETTINGS)
    def parser(self):
        """Return the next cell value as a string (left-to-right, then
        top-to-bottom), or None once the sheet region is exhausted."""
        if self.cur_row is None:
            self.cur_row = self.min_row
        if self.cur_col is None:
            self.cur_col = self.min_col
        # Wrap to the next row once the current one is done.
        if self.cur_col > self.max_col:
            self.cur_col = self.min_col
            self.cur_row += 1
        if self.cur_row > self.max_row:
            # Finished: reset the cursor so a new pass can start.
            self.cur_row = None
            self.cur_col = None
            return None
        col_letter = cell_pos.get_col_letter(self.cur_col)
        row_str = str(self.cur_row)
        # NOTE(review): the workbook is reopened for every single cell, which
        # looks expensive for large sheets — confirm whether this is deliberate
        # (e.g. to keep no file handles open between calls).
        wb = self.load_workbook()
        ws = wb[self.wsname]
        cell_val = ws[col_letter + row_str].value
        return_str = str(cell_val)
        if cell_val is None:
            return_str = ""
        if return_str == "#REF!":
            # Broken cell reference in the sheet; substitute a placeholder.
            logger.warning(
                string.Template(
                    'Unknown reference found at $cell_pos in $id. Defaulting to "unknown".'
                ).substitute(
                    cell_pos=col_letter + row_str,
                    id=msg_handler.get_id((self.filename, self.wsname), "WS"),
                )
            )
            return_str = "unknown"
        self.cur_col += 1
        wb.close()
        return return_str
# Will store a file, worksheet tuple-like class
# with additional data accessible.
xlsx_data_list = ftype.FtypeDataList()
# Contains just a list of file, worksheet tuples.
xlsx_tuple_list = []
# xlsx files always start counting at 1.
# (The 0 sentinel is also reused for invalid column counts below.)
INVALID_ROW = 0
def start(input_files):
    """Entry point for xlsx handling: scan the given files and return the
    list of worksheet data tuples with valid headers.

    Raises InvconvMissingHeaders when no worksheet at all has a usable header;
    individual header-less worksheets are reported and the user is asked
    whether to continue.
    """
    # Gets the name of worksheets and
    # adds it to xlsx_tuple_list.
    get_worksheets(input_files)
    # Sometimes, openpyxl can't get
    # the proper dimensions of a worksheet,
    # so it handles that. It also deals with
    # headers in the worksheets and removes
    # blank cells from the size of the sheet.
    set_data()
    # Check if some file worksheet pairs don't
    # have a valid header.
    if not xlsx_data_list:
        raise InvconvMissingHeaders
    # Can't directly check for membership of
    # items from xlsx_tuple_list in xlsx_data_list,
    # for they are different types.
    for file_section in xlsx_tuple_list:
        found_file_section = False
        for data_file_section in xlsx_data_list:
            # The first element in if statement
            # has to be XlsxDataTuple, as it
            # contains a __eq__() function
            # that should work in this case.
            if data_file_section == file_section:
                found_file_section = True
                break
        if not found_file_section:
            logger.error(
                f"{msg_handler.get_id(file_section, 'ws')} contains no valid headers."
            )
            msg_handler.does_continue()
    return xlsx_data_list
def get_worksheets(input_files):
    """Record every (filename, worksheet name) pair found in the given files."""
    for path in input_files:
        workbook = load_workbook(path, **WB_SETTINGS)
        xlsx_tuple_list.extend((path, sheet) for sheet in workbook.sheetnames)
        workbook.close()
def set_data():
    """Populate xlsx_data_list with an XlsxDataTuple for every worksheet in
    xlsx_tuple_list that has a usable header row."""
    for filename, wsname in xlsx_tuple_list:
        wb = load_workbook(filename, **WB_SETTINGS)
        ws = wb[wsname]
        # max_col and max_row can be None.
        cur_max_col = ws.max_column
        cur_max_row = ws.max_row
        # Close workbook right away so
        # it won't remain open in case script
        # gets closed or crashes.
        wb.close()
        max_col = get_max_col(filename, wsname, cur_max_col)
        max_row = get_max_row(filename, wsname, cur_max_row)
        # Get the row where a header was found.
        header_row = get_header_row(filename, wsname, max_row)
        # check_header_row() ensures that a non-blank row
        # is after header row. If not, it might not
        # actually be a header row.
        if (
            header_row == INVALID_ROW
            or header_row == max_row
            or not check_header_row(filename, wsname, max_col, header_row)
        ):
            continue
        # The first row after the header_row.
        min_row = header_row + 1
        header_list = get_header_list(filename, wsname, max_col, header_row)
        if max_col > len(header_list):
            # BUG FIX: the Template was previously passed to logger.info()
            # without calling .substitute(), so the raw template text (with
            # the $placeholders) would have been logged.
            logger.info(
                string.Template(
                    "Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos."
                ).substitute(
                    id=msg_handler.get_id((filename, wsname), "WS"),
                    cur_col=max_col,
                    new_col=len(header_list),
                    # get_header_list() stops at the first blank header, so the
                    # blank cell is one column past the last named header.
                    cell_pos=cell_pos.get_col_letter(len(header_list) + 1)
                    + str(header_row),
                )
            )
            max_col = len(header_list)
        DataTuple = XlsxDataTuple(filename, wsname, header_list)
        DataTuple.set_oper_num(min_row, max_row, max_col)
        xlsx_data_list.append(DataTuple)
def get_max_col(filename, wsname, max_col):
    """Validate the worksheet's column count, prompting the user until it is
    an int >= 1 (openpyxl can report None for max_column)."""
    xlsx_id = msg_handler.get_id((filename, wsname), "WS")
    # NOTE(review): INVALID_ROW (0) doubles as the invalid-*column* sentinel
    # here; functionally fine since columns also start at 1.
    while (not isinstance(max_col, int)) or (max_col <= INVALID_ROW):
        logger.error(f"Max col for {xlsx_id} is {str(max_col)}.")
        msg_handler.does_continue()
        try:
            logger.info("User providing number of columns (starting at 1).")
            max_col = int(
                input("Please provide the number of columns (starting at 1) > ")
            )
        except (ValueError, TypeError):
            # Non-numeric input: reset and ask again on the next pass.
            logger.log("FAILURE", "Input could not be converted to int.")
            max_col = None
        if (isinstance(max_col, int)) and (max_col <= 0):
            logger.log("FAILURE", "Input is less than one.")
    return max_col
def get_max_row(filename, wsname, max_row):
    """Validate the worksheet's row count, prompting the user until it is
    an int >= 1 (openpyxl can report None for max_row)."""
    # NOTE(review): unlike get_max_col, no "WS" type argument is passed to
    # get_id here — confirm whether the default produces the same id format.
    xlsx_id = msg_handler.get_id((filename, wsname))
    while (not isinstance(max_row, int)) or (max_row <= 0):
        logger.error(f"Max row for {xlsx_id} is {str(max_row)}.")
        msg_handler.does_continue()
        try:
            logger.info("User providing number of rows (starting at 1).")
            max_row = int(input("Please provide the number of rows (starting at 1) > "))
        except (ValueError, TypeError):
            # Non-numeric input: reset and ask again on the next pass.
            logger.log("FAILURE", "Input could not be converted to int.")
            max_row = None
        if (isinstance(max_row, int)) and (max_row <= 0):
            logger.log("FAILURE", "Input is less than one.")
    return max_row
def get_header_row(filename, wsname, max_row):
    """Locate the first row whose first two cells are both non-empty.

    Returns the 1-based row number, or INVALID_ROW (0) when no such
    row exists within max_row.
    """
    workbook = load_workbook(filename, **WB_SETTINGS)
    sheet = workbook[wsname]
    found = INVALID_ROW
    for row_num in cell_pos.row_iter(max_row):
        suffix = str(row_num)
        # A title-only row would not span the full width, leaving None
        # in column A or B, so requiring both filters those rows out.
        first = sheet["A" + suffix].value
        second = sheet["B" + suffix].value
        if not (first is None or second is None):
            found = row_num
            break
    workbook.close()
    return found
def check_header_row(filename, wsname, max_col, header_row):
    """Return True when the row right after *header_row* has any content.

    A candidate header with nothing beneath it is probably not a real
    header, so the caller rejects it when this returns False.
    """
    workbook = load_workbook(filename, **WB_SETTINGS)
    sheet = workbook[wsname]
    suffix = str(header_row + 1)
    values = [
        str(sheet[cell_pos.get_col_letter(col) + suffix].value)
        for col in cell_pos.col_iter(max_col)
    ]
    workbook.close()
    # Every cell stringifying to "None" means the row is blank.
    return values.count("None") != len(values)
def get_header_list(filename, wsname, max_col, header_row):
    """Collect header names from *header_row*, stopping at the first blank cell."""
    workbook = load_workbook(filename, **WB_SETTINGS)
    sheet = workbook[wsname]
    suffix = str(header_row)
    headers = []
    for col in cell_pos.col_iter(max_col):
        col_letter = cell_pos.get_col_letter(col)
        value = sheet[col_letter + suffix].value
        if value is None:
            # Headers are assumed contiguous: a blank cell marks the end,
            # and any columns after it are ignored.
            logger.warning(
                f"Blank header {col_letter+suffix} in {msg_handler.get_id((filename, wsname), 'WS')} will be ignored."
            )
            break
        headers.append(value)
    workbook.close()
    return headers
# Register this parser for .xlsx input only when openpyxl imported cleanly.
if used:
    ftype.add("xlsx", start)
| 33.410959 | 119 | 0.625974 |
import string
from loguru import logger
try:
import cell_pos
from exceptions import InvconvMissingHeaders
import ftype
import msg_handler
except ModuleNotFoundError:
import invconv.cell_pos as cell_pos
from invconv.exceptions import InvconvMissingHeaders
import invconv.ftype as ftype
import invconv.msg_handler as msg_handler
used = True
try:
from openpyxl import load_workbook
except ModuleNotFoundError:
used = False
# Shared openpyxl options: open workbooks read-only with computed values
# only, dropping VBA macros and external links for speed and safety.
WB_SETTINGS = {
    "read_only": True,
    "keep_vba": False,
    "data_only": True,
    "keep_links": False,
}
class XlsxDataTuple(ftype.BasicFtypeDataClass):
    """Cell-by-cell iterator over one worksheet's data window.

    Wraps a (filename, worksheet) pair plus its header list; parser()
    yields each cell value in turn, reopening the workbook per call.
    """

    def __init__(self, filename, wsname, headers):
        self.filename = filename
        self.wsname = wsname
        self.headers = headers
        # Current parse position; None means "not started" or "finished".
        self.cur_row = None
        self.cur_col = None
        super().__init__(
            filename=self.filename, sectionname=self.wsname, headers=self.headers
        )

    def set_oper_num(self, min_row, max_row, max_col):
        """Record the inclusive cell window to parse and return its cell count."""
        self.min_row = min_row
        self.min_col = 1
        self.max_row = max_row
        self.max_col = max_col
        delta_col = self.max_col - self.min_col + 1
        delta_row = self.max_row - self.min_row + 1
        self.num_oper = delta_col * delta_row
        return self.num_oper

    def load_workbook(self):
        """Open this tuple's workbook with the shared read-only settings."""
        return load_workbook(self.filename, **WB_SETTINGS)

    def parser(self):
        """Return the next cell value as a string, or None when the window is done.

        Advances column-first, wrapping to the next row. Empty cells
        become "" and unresolved "#REF!" references become "unknown".
        """
        if self.cur_row is None:
            self.cur_row = self.min_row
        if self.cur_col is None:
            self.cur_col = self.min_col
        if self.cur_col > self.max_col:
            # Past the last column: wrap to the start of the next row.
            self.cur_col = self.min_col
            self.cur_row += 1
            if self.cur_row > self.max_row:
                # Walked past the window: reset state and signal the end.
                self.cur_row = None
                self.cur_col = None
                return None
        col_letter = cell_pos.get_col_letter(self.cur_col)
        row_str = str(self.cur_row)
        wb = self.load_workbook()
        ws = wb[self.wsname]
        cell_val = ws[col_letter + row_str].value
        return_str = str(cell_val)
        if cell_val is None:
            return_str = ""
        if return_str == "#REF!":
            logger.warning(
                string.Template(
                    'Unknown reference found at $cell_pos in $id. Defaulting to "unknown".'
                ).substitute(
                    cell_pos=col_letter + row_str,
                    id=msg_handler.get_id((self.filename, self.wsname), "WS"),
                )
            )
            return_str = "unknown"
        self.cur_col += 1
        wb.close()
        return return_str
# Parsed worksheet data (XlsxDataTuple entries) accumulated by set_data().
xlsx_data_list = ftype.FtypeDataList()
# Every (filename, worksheet) pair discovered, valid or not.
xlsx_tuple_list = []
# Header rows are 1-based, so 0 is the "not found" sentinel.
INVALID_ROW = 0
def start(input_files):
    """Entry point for the xlsx parser: collect data for all *input_files*.

    Returns the global xlsx_data_list; raises InvconvMissingHeaders
    when no worksheet produced usable headers at all.
    """
    get_worksheets(input_files)
    # set_data() cannot always trust openpyxl to report
    # the proper dimensions of a worksheet,
    # so it handles that. It also deals with
    # headers in the worksheets and removes
    # blank cells from the size of the sheet.
    set_data()
    # Check if some file worksheet pairs don't
    # exist at all — nothing usable was parsed.
    if not xlsx_data_list:
        raise InvconvMissingHeaders
    # Can't directly look up
    # items from xlsx_tuple_list in xlsx_data_list,
    # for they are different types.
    for file_section in xlsx_tuple_list:
        found_file_section = False
        for data_file_section in xlsx_data_list:
            # The first element in if statement
            # has to be XlsxDataTuple, as it
            # contains a __eq__() function
            # that should work in this case.
            if data_file_section == file_section:
                found_file_section = True
                break
        if not found_file_section:
            # Worksheet was discovered but yielded no data; warn and
            # let the user decide whether to carry on.
            logger.error(
                f"{msg_handler.get_id(file_section, 'ws')} contains no valid headers."
            )
            msg_handler.does_continue()
    return xlsx_data_list
def get_worksheets(input_files):
    """Record every (filename, sheetname) pair found in the input workbooks."""
    for path in input_files:
        workbook = load_workbook(path, **WB_SETTINGS)
        for sheet_name in workbook.sheetnames:
            xlsx_tuple_list.append((path, sheet_name))
        workbook.close()
def set_data():
    """Build an XlsxDataTuple for every usable (file, worksheet) pair.

    Determines each worksheet's real dimensions (prompting the user when
    openpyxl reports None), locates the header row, trims trailing blank
    header columns, and appends the result to the global xlsx_data_list.
    Worksheets with no header, or no data under it, are skipped.
    """
    for filename, wsname in xlsx_tuple_list:
        wb = load_workbook(filename, **WB_SETTINGS)
        ws = wb[wsname]
        # max_column / max_row can be None on malformed sheets.
        cur_max_col = ws.max_column
        cur_max_row = ws.max_row
        # Close right away so the workbook is not left open while the
        # helpers below prompt the user.
        wb.close()
        max_col = get_max_col(filename, wsname, cur_max_col)
        max_row = get_max_row(filename, wsname, cur_max_row)
        header_row = get_header_row(filename, wsname, max_row)
        # A header must exist, must not be the last row, and must be
        # followed by at least one non-blank row; otherwise skip.
        if (
            header_row == INVALID_ROW
            or header_row == max_row
            or not check_header_row(filename, wsname, max_col, header_row)
        ):
            continue
        min_row = header_row + 1
        header_list = get_header_list(filename, wsname, max_col, header_row)
        if max_col > len(header_list):
            # BUG FIX: the Template was previously logged without calling
            # substitute(), so the raw "$id ... $cell_pos" placeholders
            # were printed instead of the actual values.
            new_max_col = len(header_list)
            blank_cell = cell_pos.get_col_letter(new_max_col + 1) + str(header_row)
            logger.info(
                string.Template(
                    "Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos."
                ).substitute(
                    id=msg_handler.get_id((filename, wsname), "WS"),
                    cur_col=max_col,
                    new_col=new_max_col,
                    cell_pos=blank_cell,
                )
            )
            max_col = new_max_col
        DataTuple = XlsxDataTuple(filename, wsname, header_list)
        DataTuple.set_oper_num(min_row, max_row, max_col)
        xlsx_data_list.append(DataTuple)
def get_max_col(filename, wsname, max_col):
xlsx_id = msg_handler.get_id((filename, wsname), "WS")
while (not isinstance(max_col, int)) or (max_col <= INVALID_ROW):
logger.error(f"Max col for {xlsx_id} is {str(max_col)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of columns (starting at 1).")
max_col = int(
input("Please provide the number of columns (starting at 1) > ")
)
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_col = None
if (isinstance(max_col, int)) and (max_col <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_col
def get_max_row(filename, wsname, max_row):
xlsx_id = msg_handler.get_id((filename, wsname))
while (not isinstance(max_row, int)) or (max_row <= 0):
logger.error(f"Max row for {xlsx_id} is {str(max_row)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of rows (starting at 1).")
max_row = int(input("Please provide the number of rows (starting at 1) > "))
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_row = None
if (isinstance(max_row, int)) and (max_row <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_row
def get_header_row(filename, wsname, max_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
header_row = INVALID_ROW
for row in cell_pos.row_iter(max_row):
row_str = str(row)
# A row with just a title would not fill up the entire max_column.
# As a result, there would be None at either the first or second
# position.
cell1 = ws["A" + row_str].value
cell2 = ws["B" + row_str].value
if cell1 is not None and cell2 is not None:
header_row = row
break
wb.close()
return header_row
def check_header_row(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# Check the row after the header row
# for content.
post_header_row = header_row + 1
row_str = str(post_header_row)
# List of items in row.
row_list = []
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
row_list.append(str(ws[col_letter + row_str].value))
wb.close()
# Ensure the row is not blank.
if row_list.count("None") != len(row_list):
return True
return False
def get_header_list(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
header_list = []
row_str = str(header_row)
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
header_item = ws[col_letter + row_str].value
# Assuming the header doesn't have blank
if header_item is None:
logger.warning(
f"Blank header {col_letter+row_str} in {msg_handler.get_id((filename, wsname), 'WS')} will be ignored."
)
break
header_list.append(header_item)
wb.close()
return header_list
if used:
ftype.add("xlsx", start)
| true | true |
1c362d4340d19824636817119f7e5ba8f4d31102 | 5,185 | py | Python | netconf_client/rpc.py | hschulzbln/netconf_client | b776e6f7ec59dc2ce9517bc4513eadc6c7ca8cdb | [
"Apache-2.0"
] | null | null | null | netconf_client/rpc.py | hschulzbln/netconf_client | b776e6f7ec59dc2ce9517bc4513eadc6c7ca8cdb | [
"Apache-2.0"
] | null | null | null | netconf_client/rpc.py | hschulzbln/netconf_client | b776e6f7ec59dc2ce9517bc4513eadc6c7ca8cdb | [
"Apache-2.0"
] | null | null | null | import uuid
from lxml import etree
def make_rpc(guts, msg_id=None):
    """Wrap *guts* in a NETCONF <rpc> envelope and return it as UTF-8 bytes.

    A random UUID is used as the message-id when none is supplied.
    """
    message_id = msg_id if msg_id else uuid.uuid4()
    envelope = (
        '<rpc message-id="{id}" '
        'xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">{guts}</rpc>'
    ).format(guts=guts, id=message_id)
    return envelope.encode("utf-8")
def edit_config(
    config,
    target="running",
    default_operation=None,
    test_option=None,
    error_option=None,
    msg_id=None,
):
    """Build an <edit-config> RPC applying *config* to the *target* datastore."""
    body = ['<edit-config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">']
    body.append("<target><{}/></target>".format(target))
    # Optional RFC 6241 knobs, emitted in order only when supplied.
    for tag, value in (
        ("default-operation", default_operation),
        ("test-option", test_option),
        ("error-option", error_option),
    ):
        if value:
            body.append("<{0}>{1}</{0}>".format(tag, value))
    body.append(config)
    body.append("</edit-config>")
    return make_rpc("".join(body), msg_id=msg_id)
def get(filter=None, with_defaults=None, msg_id=None):
    """Build a <get> RPC, optionally carrying a filter and a with-defaults mode."""
    body = ['<get xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">']
    if filter:
        body.append(filter)
    if with_defaults:
        body.append(make_with_defaults(with_defaults))
    body.append("</get>")
    return make_rpc("".join(body), msg_id=msg_id)
def get_config(source="running", filter=None, with_defaults=None, msg_id=None):
    """Build a <get-config> RPC against the *source* datastore."""
    body = [
        '<get-config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">',
        "<source><{}/></source>".format(source),
    ]
    if filter:
        body.append(filter)
    if with_defaults:
        body.append(make_with_defaults(with_defaults))
    body.append("</get-config>")
    return make_rpc("".join(body), msg_id=msg_id)
def copy_config(target, source, filter=None, with_defaults=None, msg_id=None):
    """Build a <copy-config> RPC replacing datastore *target* with *source*.

    NOTE(review): *filter* is accepted but never used in the body —
    RFC 6241 <copy-config> defines no filter element, so it is likely
    kept only for signature symmetry with get/get_config. Confirm
    before removing.
    """
    pieces = []
    pieces.append("<copy-config>")
    pieces.append("<target><{}/></target>".format(target))
    pieces.append("<source><{}/></source>".format(source))
    if with_defaults:
        pieces.append(make_with_defaults(with_defaults))
    pieces.append("</copy-config>")
    return make_rpc("".join(pieces), msg_id=msg_id)
def discard_changes(msg_id=None):
    """Build a <discard-changes> RPC (revert candidate datastore changes)."""
    return make_rpc("<discard-changes/>", msg_id=msg_id)
def commit(
    confirmed=False, confirm_timeout=None, persist=None, persist_id=None, msg_id=None
):
    """Build a <commit> RPC, optionally a confirmed commit (RFC 6241 8.4)."""
    body = ["<commit>"]
    if confirmed:
        body.append("<confirmed/>")
    # Optional confirmed-commit parameters, emitted in order when given.
    for tag, value in (
        ("confirm-timeout", confirm_timeout),
        ("persist", persist),
        ("persist-id", persist_id),
    ):
        if value:
            body.append("<{0}>{1}</{0}>".format(tag, value))
    body.append("</commit>")
    return make_rpc("".join(body), msg_id=msg_id)
def lock(target, msg_id=None):
    """Build a <lock> RPC for the *target* datastore."""
    guts = "<lock><target><{}/></target></lock>".format(target)
    return make_rpc(guts, msg_id=msg_id)
def unlock(target, msg_id=None):
    """Build an <unlock> RPC for the *target* datastore."""
    guts = "<unlock><target><{}/></target></unlock>".format(target)
    return make_rpc(guts, msg_id=msg_id)
def kill_session(session_id, msg_id=None):
    """Build a <kill-session> RPC terminating another NETCONF session."""
    guts = "<kill-session><session-id>{}</session-id></kill-session>".format(
        session_id
    )
    return make_rpc(guts, msg_id=msg_id)
def close_session(msg_id=None):
    """Build a <close-session> RPC ending the current session gracefully."""
    return make_rpc("<close-session/>", msg_id=msg_id)
def create_subscription(
    stream=None, filter=None, start_time=None, stop_time=None, msg_id=None
):
    """Build a <create-subscription> RPC for NETCONF notifications (RFC 5277)."""
    body = [
        '<create-subscription xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">'
    ]
    if stream:
        body.append("<stream>{}</stream>".format(stream))
    if filter:
        body.append(filter)
    # Replay window, only meaningful when the stream supports replay.
    if start_time:
        body.append("<startTime>{}</startTime>".format(start_time))
    if stop_time:
        body.append("<stopTime>{}</stopTime>".format(stop_time))
    body.append("</create-subscription>")
    return make_rpc("".join(body), msg_id=msg_id)
def validate(source, msg_id=None):
    """Build a <validate> RPC.

    *source* may be a datastore name (e.g. "candidate") or an lxml
    element holding inline configuration to validate.
    """
    pieces = []
    pieces.append("<validate>")
    if etree.iselement(source):
        # Inline config: serialize the element inside <source>.
        pieces.append(
            "<source>{}</source>".format(etree.tostring(source).decode("utf-8"))
        )
    else:
        pieces.append("<source><{}/></source>".format(source))
    pieces.append("</validate>")
    return make_rpc("".join(pieces), msg_id=msg_id)
def delete_config(target, msg_id=None):
    """Build a <delete-config> RPC removing the *target* datastore."""
    guts = "<delete-config><target><{}/></target></delete-config>".format(target)
    return make_rpc(guts, msg_id=msg_id)
def make_with_defaults(with_defaults):
    """Wrap a with-defaults retrieval mode (RFC 6243) in its element."""
    template = (
        '<with-defaults xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults">'
        "{}"
        "</with-defaults>"
    )
    return template.format(with_defaults)
| 30.5 | 104 | 0.647059 | import uuid
from lxml import etree
def make_rpc(guts, msg_id=None):
if not msg_id:
msg_id = uuid.uuid4()
return '<rpc message-id="{id}" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">{guts}</rpc>'.format(
guts=guts, id=msg_id
).encode(
"utf-8"
)
def edit_config(
config,
target="running",
default_operation=None,
test_option=None,
error_option=None,
msg_id=None,
):
pieces = []
pieces.append('<edit-config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">')
pieces.append("<target><{}/></target>".format(target))
if default_operation:
pieces.append(
"<default-operation>{}</default-operation>".format(default_operation)
)
if test_option:
pieces.append("<test-option>{}</test-option>".format(test_option))
if error_option:
pieces.append("<error-option>{}</error-option>".format(error_option))
pieces.append(config)
pieces.append("</edit-config>")
return make_rpc("".join(pieces), msg_id=msg_id)
def get(filter=None, with_defaults=None, msg_id=None):
pieces = []
pieces.append('<get xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">')
if filter:
pieces.append(filter)
if with_defaults:
pieces.append(make_with_defaults(with_defaults))
pieces.append("</get>")
return make_rpc("".join(pieces), msg_id=msg_id)
def get_config(source="running", filter=None, with_defaults=None, msg_id=None):
pieces = []
pieces.append('<get-config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">')
pieces.append("<source><{}/></source>".format(source))
if filter:
pieces.append(filter)
if with_defaults:
pieces.append(make_with_defaults(with_defaults))
pieces.append("</get-config>")
return make_rpc("".join(pieces), msg_id=msg_id)
def copy_config(target, source, filter=None, with_defaults=None, msg_id=None):
pieces = []
pieces.append("<copy-config>")
pieces.append("<target><{}/></target>".format(target))
pieces.append("<source><{}/></source>".format(source))
if with_defaults:
pieces.append(make_with_defaults(with_defaults))
pieces.append("</copy-config>")
return make_rpc("".join(pieces), msg_id=msg_id)
def discard_changes(msg_id=None):
return make_rpc("<discard-changes/>", msg_id=msg_id)
def commit(
confirmed=False, confirm_timeout=None, persist=None, persist_id=None, msg_id=None
):
pieces = []
pieces.append("<commit>")
if confirmed:
pieces.append("<confirmed/>")
if confirm_timeout:
pieces.append("<confirm-timeout>{}</confirm-timeout>".format(confirm_timeout))
if persist:
pieces.append("<persist>{}</persist>".format(persist))
if persist_id:
pieces.append("<persist-id>{}</persist-id>".format(persist_id))
pieces.append("</commit>")
return make_rpc("".join(pieces), msg_id=msg_id)
def lock(target, msg_id=None):
pieces = []
pieces.append("<lock>")
pieces.append("<target><{}/></target>".format(target))
pieces.append("</lock>")
return make_rpc("".join(pieces), msg_id=msg_id)
def unlock(target, msg_id=None):
pieces = []
pieces.append("<unlock>")
pieces.append("<target><{}/></target>".format(target))
pieces.append("</unlock>")
return make_rpc("".join(pieces), msg_id=msg_id)
def kill_session(session_id, msg_id=None):
pieces = []
pieces.append("<kill-session>")
pieces.append("<session-id>{}</session-id>".format(session_id))
pieces.append("</kill-session>")
return make_rpc("".join(pieces), msg_id=msg_id)
def close_session(msg_id=None):
return make_rpc("<close-session/>", msg_id=msg_id)
def create_subscription(
stream=None, filter=None, start_time=None, stop_time=None, msg_id=None
):
pieces = []
pieces.append(
'<create-subscription xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">'
)
if stream:
pieces.append("<stream>{}</stream>".format(stream))
if filter:
pieces.append(filter)
if start_time:
pieces.append("<startTime>{}</startTime>".format(start_time))
if stop_time:
pieces.append("<stopTime>{}</stopTime>".format(stop_time))
pieces.append("</create-subscription>")
return make_rpc("".join(pieces), msg_id=msg_id)
def validate(source, msg_id=None):
pieces = []
pieces.append("<validate>")
if etree.iselement(source):
pieces.append(
"<source>{}</source>".format(etree.tostring(source).decode("utf-8"))
)
else:
pieces.append("<source><{}/></source>".format(source))
pieces.append("</validate>")
return make_rpc("".join(pieces), msg_id=msg_id)
def delete_config(target, msg_id=None):
pieces = []
pieces.append("<delete-config>")
pieces.append("<target><{}/></target>".format(target))
pieces.append("</delete-config>")
return make_rpc("".join(pieces), msg_id=msg_id)
def make_with_defaults(with_defaults):
return (
'<with-defaults xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults">'
"{}"
"</with-defaults>"
).format(with_defaults)
| true | true |
1c362dc94151362e3d05a442f9f69965597c4e5d | 3,050 | py | Python | python/dye/window.py | statiolake/DyeVim | cf3d86c556c6ab0d80de6c58d3589a7982e7467e | [
"MIT"
] | 54 | 2016-09-22T12:11:52.000Z | 2021-12-01T11:18:54.000Z | python/dye/window.py | statiolake/DyeVim | cf3d86c556c6ab0d80de6c58d3589a7982e7467e | [
"MIT"
] | 12 | 2016-12-03T21:48:11.000Z | 2020-05-07T18:28:14.000Z | python/dye/window.py | statiolake/DyeVim | cf3d86c556c6ab0d80de6c58d3589a7982e7467e | [
"MIT"
] | 5 | 2017-04-14T11:21:38.000Z | 2020-05-16T07:32:18.000Z | #!/usr/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Davit Samvelyan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from .utils import log, viewport
class Window( object ):
    """Keeps highlight matches for one Vim window in sync with its
    buffer's tokens, restricted to the currently visible viewport."""

    def __init__( self, wid, buffer ):
        # wid: window id used when adding/removing matches.
        self._wid = wid
        self._buffer = buffer
        # Visible line interval at creation time.
        self._viewport = viewport.Current()

    def OnUpdateTokens( self ):
        """Re-apply all matches after the buffer's token set was refreshed."""
        self._RemoveMatchesFromInterval( self._viewport )
        self._buffer.Reset()
        self._CreateMatchesForInterval( self._viewport )

    def OnBufferChanged( self, buffer ):
        """Swap the underlying buffer and rebuild matches for the viewport."""
        self._RemoveMatchesFromInterval( self._viewport )
        self._buffer = buffer
        self._CreateMatchesForInterval( self._viewport )

    def ClearWindow( self ):
        """Remove every match this window currently displays."""
        self._RemoveMatchesFromInterval( self._viewport )

    def _CreateMatchesForInterval( self, view ):
        # Add matches for the skipped ranges and tokens inside *view*.
        for sr in self._buffer.GetSkippedRanges( view ):
            sr.AddMatch( self._wid )
        for token in self._buffer.GetTokens( view ):
            token.AddMatch( self._wid )

    def _RemoveMatchesFromInterval( self, view ):
        # NOTE(review): the False flag presumably suppresses refreshing
        # stale data on lookup -- confirm against the Buffer API.
        for sr in self._buffer.GetSkippedRanges( view, False ):
            sr.RemoveMatch( self._wid )
        for token in self._buffer.GetTokens( view, False ):
            token.RemoveMatch( self._wid )

    def OnCursorMoved( self ):
        """Detect viewport changes caused by cursor movement."""
        current = viewport.Current()
        if self._viewport != current:
            self.OnViewportChanged( current )

    def OnViewportChanged( self, current ):
        """Diff the old and new viewports and only touch changed regions."""
        log.debug( "Viewport Changed {0} -> {1}"
                   .format( self._viewport, current ) )
        # Regions no longer visible lose their matches ...
        remove_views = self._viewport - current
        for view in remove_views:
            self._RemoveMatchesFromInterval( view )
        # ... and newly visible regions gain them.
        apply_views = current - self._viewport
        for view in apply_views:
            self._CreateMatchesForInterval( view )
        self._viewport = current
| 33.888889 | 80 | 0.698361 |
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from .utils import log, viewport
class Window( object ):
def __init__( self, wid, buffer ):
self._wid = wid
self._buffer = buffer
self._viewport = viewport.Current()
def OnUpdateTokens( self ):
self._RemoveMatchesFromInterval( self._viewport )
self._buffer.Reset()
self._CreateMatchesForInterval( self._viewport )
def OnBufferChanged( self, buffer ):
self._RemoveMatchesFromInterval( self._viewport )
self._buffer = buffer
self._CreateMatchesForInterval( self._viewport )
def ClearWindow( self ):
self._RemoveMatchesFromInterval( self._viewport )
def _CreateMatchesForInterval( self, view ):
for sr in self._buffer.GetSkippedRanges( view ):
sr.AddMatch( self._wid )
for token in self._buffer.GetTokens( view ):
token.AddMatch( self._wid )
def _RemoveMatchesFromInterval( self, view ):
for sr in self._buffer.GetSkippedRanges( view, False ):
sr.RemoveMatch( self._wid )
for token in self._buffer.GetTokens( view, False ):
token.RemoveMatch( self._wid )
def OnCursorMoved( self ):
current = viewport.Current()
if self._viewport != current:
self.OnViewportChanged( current )
def OnViewportChanged( self, current ):
log.debug( "Viewport Changed {0} -> {1}"
.format( self._viewport, current ) )
remove_views = self._viewport - current
for view in remove_views:
self._RemoveMatchesFromInterval( view )
apply_views = current - self._viewport
for view in apply_views:
self._CreateMatchesForInterval( view )
self._viewport = current
| true | true |
1c362e1a2638166aa2752fde82ea2249b2fe4683 | 4,226 | py | Python | Commands/test.py | trand2/Discord-Levels-Bot | ab445ef3291efecf0f0ba36907eab99121d51b89 | [
"Apache-2.0"
] | 38 | 2021-07-10T07:02:58.000Z | 2022-03-30T20:06:58.000Z | Commands/test.py | trand2/Discord-Levels-Bot | ab445ef3291efecf0f0ba36907eab99121d51b89 | [
"Apache-2.0"
] | 6 | 2021-02-20T18:28:37.000Z | 2021-04-12T05:24:42.000Z | Commands/test.py | trand2/Discord-Levels-Bot | ab445ef3291efecf0f0ba36907eab99121d51b89 | [
"Apache-2.0"
] | 34 | 2021-07-05T04:31:16.000Z | 2022-03-29T16:28:02.000Z |
from discord.ext import commands
from ruamel.yaml import YAML
from kumoslab.get import *
from kumoslab.getServer import *
from kumoslab.set import *
yaml = YAML()
with open("Configs/config.yml", "r", encoding="utf-8") as file:
config = yaml.load(file)
# Debug / levelling utility commands cog.
class test(commands.Cog):
    """Commands for inspecting and mutating a user's level profile."""

    def __init__(self, client):
        self.client = client

    # Profile inspection command
    @commands.command()
    @commands.guild_only()
    async def test(self, ctx, member: discord.Member = None):
        """Show the stored profile (level, XP, colours, background) for *member*."""
        if member is None:
            member = ctx.author
        xp_colour = getXPColour(id=member.id, guildID=ctx.guild.id)
        colour_xp = await xp_colour
        # Embed colours want a bare hex value, without the leading '#'.
        without_tag = colour_xp.replace("#", '')
        embed = discord.Embed(title=f"TEST | USER | {member.name}", colour=int(f"0x{without_tag}", 0))
        level = getLevel(id=member.id, guildID=ctx.guild.id)
        embed.add_field(name="Level:", value="`" + str(await level) + "`")
        xp = getXP(id=member.id, guildID=ctx.guild.id)
        embed.add_field(name="XP:", value="`" + str(await xp) + "`")
        embed.add_field(name="XP Colour:", value="`" + str(colour_xp) + "`")
        circle = getCirlce(id=member.id, guildID=ctx.guild.id)
        embed.add_field(name="Circle Pic?:", value="`" + str(await circle) + "`")
        background = backgroundUrl(id=member.id, guildID=ctx.guild.id)
        embed.set_image(url=str(await background))
        await ctx.send(embed=embed)

    @commands.command()
    @commands.guild_only()
    async def server(self, ctx):
        """Show this guild's levelling configuration."""
        xp_colour = getXPColour(id=ctx.author.id, guildID=ctx.guild.id)
        colour_xp = await xp_colour
        without_tag = colour_xp.replace("#", '')
        embed = discord.Embed(title=f"TEST | SERVER | {ctx.guild.name}", colour=int(f"0x{without_tag}", 0))
        xp = xpPerMessage(guildID=ctx.guild.id)
        embed.add_field(name="XP/Message:", value='`' + str(await xp) + '`')
        double_xp = doubleXPRole(guildID=ctx.guild.id)
        embed.add_field(name="x2 XP Role:", value='`' + str(await double_xp) + '`')
        level_channel = levelChannel(guildID=ctx.guild.id)
        embed.add_field(name="Level Channel: ", value='`#' + str(await level_channel) + '`')
        levels = getLevels(guildID=ctx.guild.id)
        embed.add_field(name="Levels for Roles:", value='`' + str(await levels) + '`')
        roles = getRoles(guildID=ctx.guild.id)
        embed.add_field(name="Roles for Levels:", value='`' + str(await roles) + '`')
        ignored_role = ignoredRole(guildID=ctx.guild.id)
        embed.add_field(name="Ignored Role:", value='`' + str(await ignored_role) + '`')
        embed.set_thumbnail(url=ctx.guild.icon_url)
        await ctx.send(embed=embed)

    @commands.command()
    @commands.guild_only()
    async def setxp(self, ctx, amount=None):
        """Set the invoking user's XP to *amount*."""
        if amount is None:
            # BUG FIX: previously fell through and called setXP with None.
            await ctx.send("amount not set")
            return
        await setXP(id=ctx.author.id, guildID=ctx.guild.id, amount=amount)
        await ctx.send(f"Set <@{ctx.author.id}>'s xp to {amount}xp ")

    @commands.command()
    @commands.guild_only()
    async def setbackground(self, ctx, link=None):
        """Set the invoking user's rank-card background image URL."""
        if link is None:
            # BUG FIX: missing return and copy-pasted "amount not set" message.
            await ctx.send("link not set")
            return
        await setBackground(id=ctx.author.id, guildID=ctx.guild.id, link=link)
        await ctx.send(f"Set <@{ctx.author.id}>'s background to {link}")

    @commands.command()
    @commands.guild_only()
    async def setxpcolour(self, ctx, hex_code=None):
        """Set the invoking user's XP bar colour to *hex_code*."""
        if hex_code is None:
            # BUG FIX: previously compared the builtin `hex` (never None)
            # instead of hex_code, and did not return on failure.
            await ctx.send("hex not set")
            return
        await setXPColour(id=ctx.author.id, guildID=ctx.guild.id, hex_code=hex_code)
        await ctx.send(f"Set <@{ctx.author.id}>'s xp colour to {hex_code}")

    @commands.command()
    @commands.guild_only()
    async def setcircle(self, ctx, state=None):
        """Toggle whether the invoking user's avatar is rendered as a circle."""
        if state is None:
            # BUG FIX: previously compared the builtin `hex` instead of
            # `state`, and did not return on failure.
            await ctx.send("state not set")
            return
        await setCircle(id=ctx.author.id, guildID=ctx.guild.id, value=state)
        # BUG FIX: confirmation message previously said "xp colour".
        await ctx.send(f"Set <@{ctx.author.id}>'s circle setting to {state}")
# Cog registration hook invoked by discord.py's load_extension().
def setup(client):
    client.add_cog(test(client))
| 40.634615 | 108 | 0.614529 |
from discord.ext import commands
from ruamel.yaml import YAML
from kumoslab.get import *
from kumoslab.getServer import *
from kumoslab.set import *
yaml = YAML()
with open("Configs/config.yml", "r", encoding="utf-8") as file:
config = yaml.load(file)
class test(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
@commands.guild_only()
async def test(self, ctx, member: discord.Member = None):
if member is None:
member = ctx.author
xp_colour = getXPColour(id=member.id, guildID=ctx.guild.id)
colour_xp = await xp_colour
without_tag = colour_xp.replace("#", '')
embed = discord.Embed(title=f"TEST | USER | {member.name}", colour=int(f"0x{without_tag}", 0))
level = getLevel(id=member.id, guildID=ctx.guild.id)
embed.add_field(name="Level:", value="`" + str(await level) + "`")
xp = getXP(id=member.id, guildID=ctx.guild.id)
embed.add_field(name="XP:", value="`" + str(await xp) + "`")
embed.add_field(name="XP Colour:", value="`" + str(colour_xp) + "`")
circle = getCirlce(id=member.id, guildID=ctx.guild.id)
embed.add_field(name="Circle Pic?:", value="`" + str(await circle) + "`")
background = backgroundUrl(id=member.id, guildID=ctx.guild.id)
embed.set_image(url=str(await background))
await ctx.send(embed=embed)
@commands.command()
@commands.guild_only()
async def server(self, ctx):
xp_colour = getXPColour(id=ctx.author.id, guildID=ctx.guild.id)
colour_xp = await xp_colour
without_tag = colour_xp.replace("#", '')
embed = discord.Embed(title=f"TEST | SERVER | {ctx.guild.name}", colour=int(f"0x{without_tag}", 0))
xp = xpPerMessage(guildID=ctx.guild.id)
embed.add_field(name="XP/Message:", value='`' + str(await xp) + '`')
double_xp = doubleXPRole(guildID=ctx.guild.id)
embed.add_field(name="x2 XP Role:", value='`' + str(await double_xp) + '`')
level_channel = levelChannel(guildID=ctx.guild.id)
embed.add_field(name="Level Channel: ", value='`#' + str(await level_channel) + '`')
levels = getLevels(guildID=ctx.guild.id)
embed.add_field(name="Levels for Roles:", value='`' + str(await levels) + '`')
roles = getRoles(guildID=ctx.guild.id)
embed.add_field(name="Roles for Levels:", value='`' + str(await roles) + '`')
ignored_role = ignoredRole(guildID=ctx.guild.id)
embed.add_field(name="Ignored Role:", value='`' + str(await ignored_role) + '`')
embed.set_thumbnail(url=ctx.guild.icon_url)
await ctx.send(embed=embed)
@commands.command()
@commands.guild_only()
async def setxp(self, ctx, amount=None):
if amount is None:
await ctx.send("amount not set")
await setXP(id=ctx.author.id, guildID=ctx.guild.id, amount=amount)
await ctx.send(f"Set <@{ctx.author.id}>'s xp to {amount}xp ")
@commands.command()
@commands.guild_only()
async def setbackground(self, ctx, link=None):
if link is None:
await ctx.send("amount not set")
await setBackground(id=ctx.author.id, guildID=ctx.guild.id, link=link)
await ctx.send(f"Set <@{ctx.author.id}>'s background to {link}")
@commands.command()
@commands.guild_only()
async def setxpcolour(self, ctx, hex_code=None):
if hex is None:
await ctx.send("hex not set")
await setXPColour(id=ctx.author.id, guildID=ctx.guild.id, hex_code=hex_code)
await ctx.send(f"Set <@{ctx.author.id}>'s xp colour to {hex_code}")
@commands.command()
@commands.guild_only()
async def setcircle(self, ctx, state=None):
if hex is None:
await ctx.send("state not set")
await setCircle(id=ctx.author.id, guildID=ctx.guild.id, value=state)
await ctx.send(f"Set <@{ctx.author.id}>'s xp colour to {state}")
def setup(client):
client.add_cog(test(client))
| true | true |
1c362fbf208c98dbe0a37831fc57b5812ad94de3 | 1,808 | py | Python | aliyun-python-sdk-core/aliyunsdkcore/endpoint/user_customized_endpoint_resolver.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-core/aliyunsdkcore/endpoint/user_customized_endpoint_resolver.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-core/aliyunsdkcore/endpoint/user_customized_endpoint_resolver.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with self work for additional information
# regarding copyright ownership. The ASF licenses self file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use self file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from aliyunsdkcore.endpoint.endpoint_resolver_base import EndpointResolverBase
class UserCustomizedEndpointResolver(EndpointResolverBase):
    """Endpoint resolver backed by entries the user registers explicitly.

    Entries are keyed by a lower-cased "<product>.<region>" composite key.
    """

    def __init__(self):
        EndpointResolverBase.__init__(self)
        # Region ids for which at least one endpoint has been registered.
        self._valid_region_ids = set()

    def put_endpoint_entry(self, region_id, product_code, endpoint):
        # Store the endpoint under the composite key and remember the region.
        EndpointResolverBase.put_endpoint_entry(
            self, self._make_endpoint_entry_key(product_code, region_id), endpoint)
        self._valid_region_ids.add(region_id)

    def resolve(self, request):
        # Return the user-registered endpoint for this request, if any
        # (lookup semantics come from the base class).
        return self.fetch_endpoint_entry(request)

    def get_endpoint_key_from_request(self, request):
        return self._make_endpoint_entry_key(request.product_code, request.region_id)

    def _make_endpoint_entry_key(self, product_code, region_id):
        # Keys are case-insensitive: "<product>.<region>" in lower case.
        return product_code.lower() + "." + region_id.lower()

    def is_region_id_valid(self, request):
        # Delegates to the base-class check; region ids compare lower-cased.
        return self.verify_region_id(request.region_id.lower())

    def reset(self):
        # NOTE(review): clears registered endpoints but leaves
        # _valid_region_ids populated — confirm whether stale region ids are
        # intentionally still considered valid after a reset.
        self.endpoints_data = dict()
| 38.468085 | 85 | 0.754425 |
from aliyunsdkcore.endpoint.endpoint_resolver_base import EndpointResolverBase
class UserCustomizedEndpointResolver(EndpointResolverBase):
    """Resolves endpoints from entries registered explicitly by the user."""

    def __init__(self):
        EndpointResolverBase.__init__(self)
        self._valid_region_ids = set()

    def _make_endpoint_entry_key(self, product_code, region_id):
        # Composite, case-insensitive key: "<product>.<region>".
        return "{0}.{1}".format(product_code.lower(), region_id.lower())

    def put_endpoint_entry(self, region_id, product_code, endpoint):
        """Register *endpoint* for the given region/product pair."""
        entry_key = self._make_endpoint_entry_key(product_code, region_id)
        EndpointResolverBase.put_endpoint_entry(self, entry_key, endpoint)
        self._valid_region_ids.add(region_id)

    def resolve(self, request):
        """Return the registered endpoint matching *request*, if any."""
        return self.fetch_endpoint_entry(request)

    def get_endpoint_key_from_request(self, request):
        return self._make_endpoint_entry_key(request.product_code, request.region_id)

    def is_region_id_valid(self, request):
        return self.verify_region_id(request.region_id.lower())

    def reset(self):
        """Forget every registered endpoint entry."""
        self.endpoints_data = {}
| true | true |
1c362fcdf6f20203626f83c58fe53ad3cc5cbe40 | 9,045 | py | Python | tests/utils/test_models.py | fossabot/BALSAMIC | f68931a2dc92ae47236147a77e1f4554a34eb35d | [
"MIT"
] | null | null | null | tests/utils/test_models.py | fossabot/BALSAMIC | f68931a2dc92ae47236147a77e1f4554a34eb35d | [
"MIT"
] | null | null | null | tests/utils/test_models.py | fossabot/BALSAMIC | f68931a2dc92ae47236147a77e1f4554a34eb35d | [
"MIT"
] | null | null | null | import os
import pytest
from pathlib import Path
from pydantic import ValidationError
from BALSAMIC.utils.models import (VCFAttributes, VarCallerFilter, QCModel,
VarcallerAttribute, VCFModel, AnalysisModel,
SampleInstanceModel, BioinfoToolsModel,
ReferenceUrlsModel, ReferenceMeta)
def test_referencemeta():
    """ReferenceMeta should build nested reference models relative to basedir."""
    # GIVEN a reference model with two reference-file entries
    reference_files = {
        "basedir": "basedir",
        "reference_genome": {
            "url": "gs://some_path/b37/human_g1k_v37.fasta.gz",
            "file_type": "fasta",
            "gzip": True,
            "genome_version": "hg19",
            "output_file": "genome.fa",
            "output_path": "genome",
        },
        "dbsnp": {
            "url": "gs://some_path/b37/dbsnp_138.b37.vcf.gz",
            "file_type": "fasta",
            "gzip": True,
            "genome_version": "hg19",
            "output_file": "dbsnp.vcf"
        }
    }

    # WHEN building the model
    # (renamed from `build_model` for consistency with the sibling tests'
    # `built_model` naming)
    built_model = ReferenceMeta.parse_obj(reference_files)

    # THEN model should have correct attributes and derived output paths
    assert built_model.reference_genome.genome_version == "hg19"
    assert built_model.dbsnp.genome_version == "hg19"
    assert built_model.reference_genome.get_output_file == "basedir/genome/genome.fa"
def test_referenceurlsmodel_build_model():
    """ReferenceUrlsModel should parse a valid entry and expose derived fields."""
    # GIVEN a valid reference-file description
    out_file = "some_random_file"
    out_dir = "some_path"
    expected_output = Path(out_dir, out_file).as_posix()

    reference_entry = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": out_file,
        "output_path": out_dir,
    }

    # WHEN building the model
    built_model = ReferenceUrlsModel.parse_obj(reference_entry)

    # THEN the URL scheme and the joined output path are exposed
    assert built_model.url.scheme == "gs"
    assert built_model.get_output_file == expected_output
def test_referenceurlsmodel_validate_file_type():
    """ReferenceUrlsModel should reject an unsupported reference file type."""
    # GIVEN a reference entry whose file_type is not a supported format
    # (the unused path locals from the original were dropped)
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "wrong_type",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": "some_random_file",
        "output_path": "some_path",
    }

    # WHEN building the model
    # THEN the model raises a validation error mentioning the file format
    with pytest.raises(ValidationError) as excinfo:
        ReferenceUrlsModel.parse_obj(dummy_reference)
    # Bug fix: membership must be tested against the exception's text —
    # `"…" in excinfo.value` raises TypeError on a non-iterable exception.
    assert "not a valid reference file format" in str(excinfo.value)
def test_referenceurlsmodel_write_md5(tmp_path_factory):
    """write_md5 should create a sibling .md5 file for an existing output file."""
    # GIVEN a reference model whose output file exists on disk
    out_file = "some_random_file"
    out_dir = tmp_path_factory.mktemp("some_path")
    Path(out_dir, out_file).write_bytes(os.urandom(8196))
    expected_md5 = Path(out_dir, out_file + ".md5")

    built_model = ReferenceUrlsModel.parse_obj({
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": out_file,
        "output_path": out_dir.as_posix(),
    })

    # WHEN computing the md5 of the output file
    built_model.write_md5

    # THEN the md5 side-car file exists
    assert expected_md5.is_file()
def test_referenceurlsmodel_write_md5_no_output_file(tmp_path_factory):
    """write_md5 should fail when the referenced output file does not exist."""
    # GIVEN a reference model whose output file was never written to disk
    dummy_output_file = "some_random_file"
    dummy_output_path = tmp_path_factory.mktemp("some_path")
    actual_md5_file = Path(dummy_output_path, dummy_output_file + ".md5")

    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path.as_posix(),
    }

    # WHEN building the model
    built_model = ReferenceUrlsModel.parse_obj(dummy_reference)

    # THEN accessing write_md5 raises, and no md5 side-car file is created
    with pytest.raises(FileNotFoundError) as excinfo:
        built_model.write_md5
    # Bug fix: compare against the exception text, not the exception object
    # (`in excinfo.value` raises TypeError). The previously unused
    # `actual_md5_file` now backs an explicit no-side-effect check.
    assert "file does not exist" in str(excinfo.value)
    assert not actual_md5_file.is_file()
def test_referenceurlsmodel_validate_genome_version():
    """ReferenceUrlsModel should reject an unknown genome version."""
    # GIVEN a reference entry with an invalid genome_version
    # (the unused path locals from the original were dropped)
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "wrong_genome",
        "output_file": "some_random_file",
        "output_path": "some_path",
    }

    # WHEN building the model
    # THEN a validation error mentioning the genome version is raised
    with pytest.raises(ValidationError) as excinfo:
        ReferenceUrlsModel.parse_obj(dummy_reference)
    # Bug fix: the assert was nested inside the `with` block after the raising
    # call, so it could never execute; it also must compare against the
    # exception's string form.
    assert "not a valid genome version" in str(excinfo.value)
def test_vcfattributes():
    """VCFAttributes should retain tag value, filter name and field."""
    # GIVEN a VCF attribute, built via keyword arguments
    attributes = VCFAttributes(
        tag_value=5.0,
        filter_name="dummy_filter_name",
        field="INFO",
    )

    # THEN every field can be read back unchanged
    assert attributes.tag_value == 5.0
    assert attributes.field == "INFO"
    assert attributes.filter_name == "dummy_filter_name"
def test_varcallerfilter():
    """VarCallerFilter should expose the required filter attributes."""
    # GIVEN the two per-tag attribute blocks and the filter metadata
    alt_depth = {"tag_value": 5.0, "filter_name": "dummy_alt_depth", "field": "INFO"}
    depth = {"tag_value": 100.0, "filter_name": "dummy_depth", "field": "INFO"}

    # WHEN building the filter model
    varcaller_filter = VarCallerFilter(
        AD=alt_depth,
        DP=depth,
        varcaller_name="dummy_varcaller",
        filter_type="dummy_ffpe_filter",
        analysis_type="dummy_tumor_only",
        description="dummy description of this filter",
    )

    # THEN the required values are set
    assert varcaller_filter.AD.tag_value == 5.0
    assert varcaller_filter.DP.tag_value == 100.0
    assert varcaller_filter.analysis_type == "dummy_tumor_only"
def test_qc_model():
    """QCModel should accept a valid set of QC options."""
    # GIVEN valid input arguments
    # THEN we can successfully create a config dict
    assert QCModel.parse_obj(
        {"umi_trim": True, "min_seq_length": 25, "umi_trim_length": 5}
    )
def test_varcaller_attribute():
    """VarcallerAttribute should accept known mutation/type values only."""
    # GIVEN valid input arguments
    valid_args = {"mutation": "somatic", "type": "SNV"}
    # THEN we can successfully create a config dict
    assert VarcallerAttribute.parse_obj(valid_args)

    # GIVEN invalid input arguments
    invalid_args = {"mutation": "strange", "type": "unacceptable"}
    # THEN should trigger ValueError
    with pytest.raises(ValueError) as excinfo:
        VarcallerAttribute.parse_obj(invalid_args)
    # Bug fix: compare against the exception's text — `in excinfo.value`
    # raises TypeError on a non-iterable exception object.
    assert "not a valid argument" in str(excinfo.value)
def test_analysis_model():
    """AnalysisModel should accept only supported analysis/sequencing types."""
    # GIVEN valid input arguments
    valid_args = {
        "case_id": "case_id",
        "analysis_type": "paired",
        "sequencing_type": "targeted",
        "analysis_dir": "tests/test_data"
    }
    # THEN we can successfully create a config dict
    assert AnalysisModel.parse_obj(valid_args)

    # GIVEN invalid input arguments
    invalid_args = {
        "case_id": "case_id",
        "analysis_type": "odd",
        "sequencing_type": "wrong",
        "analysis_dir": "tests/test_data"
    }
    # THEN should trigger ValueError
    with pytest.raises(ValueError) as excinfo:
        AnalysisModel.parse_obj(invalid_args)
    # Bug fix: membership must be tested against the exception's string form.
    assert "not supported" in str(excinfo.value)
def test_sample_instance_model():
    """SampleInstanceModel should accept only supported sample types."""
    # GIVEN valid input arguments
    valid_args = {
        "file_prefix": "S2_R",
        "type": "normal",
    }
    # THEN we can successfully create a config dict
    assert SampleInstanceModel.parse_obj(valid_args)

    # GIVEN invalid input arguments
    invalid_args = {
        "file_prefix": "S2_R",
        "type": "fungal",
    }
    # THEN should trigger ValueError
    with pytest.raises(ValueError) as excinfo:
        SampleInstanceModel.parse_obj(invalid_args)
    # Bug fix: membership must be tested against the exception's string form.
    assert "not supported" in str(excinfo.value)
| 32.419355 | 86 | 0.66733 | import os
import pytest
from pathlib import Path
from pydantic import ValidationError
from BALSAMIC.utils.models import (VCFAttributes, VarCallerFilter, QCModel,
VarcallerAttribute, VCFModel, AnalysisModel,
SampleInstanceModel, BioinfoToolsModel,
ReferenceUrlsModel, ReferenceMeta)
def test_referencemeta():
    """ReferenceMeta should build nested reference models relative to basedir."""
    # GIVEN a reference model with two reference-file entries
    reference_files = {
        "basedir": "basedir",
        "reference_genome": {
            "url": "gs://some_path/b37/human_g1k_v37.fasta.gz",
            "file_type": "fasta",
            "gzip": True,
            "genome_version": "hg19",
            "output_file": "genome.fa",
            "output_path": "genome",
        },
        "dbsnp": {
            "url": "gs://some_path/b37/dbsnp_138.b37.vcf.gz",
            "file_type": "fasta",
            "gzip": True,
            "genome_version": "hg19",
            "output_file": "dbsnp.vcf"
        }
    }
    # WHEN building the model
    build_model = ReferenceMeta.parse_obj(reference_files)
    # THEN the nested models and derived output paths are populated
    assert build_model.reference_genome.genome_version == "hg19"
    assert build_model.dbsnp.genome_version == "hg19"
    assert build_model.reference_genome.get_output_file == "basedir/genome/genome.fa"
def test_referenceurlsmodel_build_model():
    """ReferenceUrlsModel should parse a valid entry and expose derived fields."""
    # GIVEN a valid reference-file description
    dummy_output_file = "some_random_file"
    dummy_output_path = "some_path"
    actual_path = Path(dummy_output_path, dummy_output_file).as_posix()
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path,
    }
    # WHEN building the model
    built_model = ReferenceUrlsModel.parse_obj(dummy_reference)
    # THEN the URL scheme and the joined output path are exposed
    assert built_model.url.scheme == "gs"
    assert built_model.get_output_file == actual_path
def test_referenceurlsmodel_validate_file_type():
    """ReferenceUrlsModel should reject an unsupported reference file type."""
    # GIVEN a reference entry whose file_type is not a supported format
    # (the unused path locals from the original were dropped)
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "wrong_type",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": "some_random_file",
        "output_path": "some_path",
    }
    # WHEN building the model
    # THEN the model raises a validation error mentioning the file format
    with pytest.raises(ValidationError) as excinfo:
        ReferenceUrlsModel.parse_obj(dummy_reference)
    # Bug fix: compare against the exception's text — `in excinfo.value`
    # raises TypeError on a non-iterable exception object.
    assert "not a valid reference file format" in str(excinfo.value)
def test_referenceurlsmodel_write_md5(tmp_path_factory):
    """write_md5 should create a sibling .md5 file for an existing output file."""
    # GIVEN a reference model whose output file exists on disk
    dummy_output_file = "some_random_file"
    dummy_output_path = tmp_path_factory.mktemp("some_path")
    Path(dummy_output_path, dummy_output_file).write_bytes(os.urandom(8196))
    actual_md5_file = Path(dummy_output_path, dummy_output_file + ".md5")
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path.as_posix(),
    }
    # WHEN building the model and computing the md5 of the output file
    built_model = ReferenceUrlsModel.parse_obj(dummy_reference)
    built_model.write_md5
    # THEN the md5 side-car file exists
    assert actual_md5_file.is_file()
def test_referenceurlsmodel_write_md5_no_output_file(tmp_path_factory):
    """write_md5 should fail when the referenced output file does not exist."""
    # GIVEN a reference model whose output file was never written to disk
    dummy_output_file = "some_random_file"
    dummy_output_path = tmp_path_factory.mktemp("some_path")
    actual_md5_file = Path(dummy_output_path, dummy_output_file + ".md5")
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "hg19",
        "output_file": dummy_output_file,
        "output_path": dummy_output_path.as_posix(),
    }
    # WHEN building the model
    built_model = ReferenceUrlsModel.parse_obj(dummy_reference)
    # THEN accessing write_md5 raises, and no md5 side-car file is created
    with pytest.raises(FileNotFoundError) as excinfo:
        built_model.write_md5
    # Bug fix: compare against the exception's text, not the exception object
    # (`in excinfo.value` raises TypeError). The previously unused
    # `actual_md5_file` now backs an explicit no-side-effect check.
    assert "file does not exist" in str(excinfo.value)
    assert not actual_md5_file.is_file()
def test_referenceurlsmodel_validate_genome_version():
    """ReferenceUrlsModel should reject an unknown genome version."""
    # GIVEN a reference entry with an invalid genome_version
    # (the unused path locals from the original were dropped)
    dummy_reference = {
        "url": "gs://domain/file_name",
        "file_type": "fasta",
        "gzip": True,
        "genome_version": "wrong_genome",
        "output_file": "some_random_file",
        "output_path": "some_path",
    }
    # WHEN building the model
    # THEN a validation error mentioning the genome version is raised
    with pytest.raises(ValidationError) as excinfo:
        ReferenceUrlsModel.parse_obj(dummy_reference)
    # Bug fix: membership must be tested against the exception's string form.
    assert "not a valid genome version" in str(excinfo.value)
def test_vcfattributes():
    """VCFAttributes should retain tag value, filter name and field."""
    # GIVEN a VCF attribute description
    dummy_attribute = {
        "tag_value": 5.0,
        "filter_name": "dummy_filter_name",
        "field": "INFO"
    }
    # WHEN building the model
    dummy_attribute_built = VCFAttributes(**dummy_attribute)
    # THEN every field can be read back unchanged
    assert dummy_attribute_built.tag_value == 5.0
    assert dummy_attribute_built.field == "INFO"
    assert dummy_attribute_built.filter_name == "dummy_filter_name"
def test_varcallerfilter():
    """VarCallerFilter should expose the required filter attributes."""
    # GIVEN a variant-caller filter description with AD/DP attribute blocks
    dummy_varcaller = {
        "AD": {
            "tag_value": 5.0,
            "filter_name": "dummy_alt_depth",
            "field": "INFO"
        },
        "DP": {
            "tag_value": 100.0,
            "filter_name": "dummy_depth",
            "field": "INFO"
        },
        "varcaller_name": "dummy_varcaller",
        "filter_type": "dummy_ffpe_filter",
        "analysis_type": "dummy_tumor_only",
        "description": "dummy description of this filter"
    }
    # WHEN building the model
    dummy_varcaller_filter = VarCallerFilter(**dummy_varcaller)
    # THEN the required values are set
    assert dummy_varcaller_filter.AD.tag_value == 5.0
    assert dummy_varcaller_filter.DP.tag_value == 100.0
    assert dummy_varcaller_filter.analysis_type == "dummy_tumor_only"
def test_qc_model():
    """QCModel should accept a valid set of QC options."""
    # GIVEN valid input arguments, THEN the config model builds successfully
    valid_args = {"umi_trim": True, "min_seq_length": 25, "umi_trim_length": 5}
    assert QCModel.parse_obj(valid_args)
def test_varcaller_attribute():
    """VarcallerAttribute should accept known mutation/type values only."""
    # GIVEN valid input arguments, THEN the model builds successfully
    valid_args = {"mutation": "somatic", "type": "SNV"}
    assert VarcallerAttribute.parse_obj(valid_args)
    # GIVEN invalid input arguments, THEN a ValueError is raised
    invalid_args = {"mutation": "strange", "type": "unacceptable"}
    with pytest.raises(ValueError) as excinfo:
        VarcallerAttribute.parse_obj(invalid_args)
    # Bug fix: membership must be tested against the exception's string form.
    assert "not a valid argument" in str(excinfo.value)
def test_analysis_model():
    """AnalysisModel should accept only supported analysis/sequencing types."""
    # GIVEN valid input arguments, THEN the model builds successfully
    valid_args = {
        "case_id": "case_id",
        "analysis_type": "paired",
        "sequencing_type": "targeted",
        "analysis_dir": "tests/test_data"
    }
    assert AnalysisModel.parse_obj(valid_args)
    # GIVEN invalid input arguments, THEN a ValueError is raised
    invalid_args = {
        "case_id": "case_id",
        "analysis_type": "odd",
        "sequencing_type": "wrong",
        "analysis_dir": "tests/test_data"
    }
    with pytest.raises(ValueError) as excinfo:
        AnalysisModel.parse_obj(invalid_args)
    # Bug fix: membership must be tested against the exception's string form.
    assert "not supported" in str(excinfo.value)
def test_sample_instance_model():
    """SampleInstanceModel should accept only supported sample types."""
    # GIVEN valid input arguments, THEN the model builds successfully
    valid_args = {
        "file_prefix": "S2_R",
        "type": "normal",
    }
    assert SampleInstanceModel.parse_obj(valid_args)
    # GIVEN invalid input arguments, THEN a ValueError is raised
    invalid_args = {
        "file_prefix": "S2_R",
        "type": "fungal",
    }
    with pytest.raises(ValueError) as excinfo:
        SampleInstanceModel.parse_obj(invalid_args)
    # Bug fix: membership must be tested against the exception's string form.
    assert "not supported" in str(excinfo.value)
| true | true |
1c362febd2da79bb92f50166516a732aaf4e0292 | 653 | py | Python | tests/test_agent.py | johnnoone/aioconsul | 02f7a529d7dc2e49bed942111067aa5faf320e90 | [
"BSD-3-Clause"
] | 7 | 2015-03-17T18:29:14.000Z | 2020-01-03T06:45:43.000Z | tests/test_agent.py | johnnoone/aioconsul | 02f7a529d7dc2e49bed942111067aa5faf320e90 | [
"BSD-3-Clause"
] | 1 | 2015-06-04T03:06:46.000Z | 2015-06-04T03:06:46.000Z | tests/test_agent.py | johnnoone/aioconsul | 02f7a529d7dc2e49bed942111067aa5faf320e90 | [
"BSD-3-Clause"
] | 2 | 2015-06-03T16:53:11.000Z | 2021-12-16T13:38:23.000Z | import pytest
@pytest.mark.asyncio
async def test_endpoint(client):
    # The agent endpoint's repr should embed the client's address.
    assert repr(client.agent) == "<AgentEndpoint(%r)>" % str(client.address)
@pytest.mark.asyncio
async def test_info(client):
    # agent.info() should return the local agent's configuration and member
    # data as a plain dict with the fixture's well-known values.
    result = await client.agent.info()
    assert isinstance(result, dict)
    assert result["Config"]["ClientAddr"] == "127.0.0.1"
    assert result["Config"]["Datacenter"] == "dc1"
    assert result["Member"]["Addr"] == "127.0.0.1"
@pytest.mark.asyncio
async def test_maintenance(client, server):
    # `server` is requested only for its side effect of running an agent.
    # Entering and leaving maintenance mode should both report success.
    result = await client.agent.disable("testing")
    assert result is True
    result = await client.agent.enable("testing")
    assert result is True
| 27.208333 | 76 | 0.69219 | import pytest
@pytest.mark.asyncio
async def test_endpoint(client):
    # The agent endpoint's repr should embed the client's address.
    assert repr(client.agent) == "<AgentEndpoint(%r)>" % str(client.address)
@pytest.mark.asyncio
async def test_info(client):
    # agent.info() should return the local agent's configuration and member
    # data as a plain dict with the fixture's well-known values.
    result = await client.agent.info()
    assert isinstance(result, dict)
    assert result["Config"]["ClientAddr"] == "127.0.0.1"
    assert result["Config"]["Datacenter"] == "dc1"
    assert result["Member"]["Addr"] == "127.0.0.1"
@pytest.mark.asyncio
async def test_maintenance(client, server):
    # `server` is requested only for its side effect of running an agent.
    # Entering and leaving maintenance mode should both report success.
    result = await client.agent.disable("testing")
    assert result is True
    result = await client.agent.enable("testing")
    assert result is True
| true | true |
1c3631c201d055d6da590d47ef021b01d38d3e12 | 1,482 | py | Python | terzani/utils/clean_text.py | JanMaxime/terzani_colorization | 6538e0053c9119b1bf67da930f309b22cbdece30 | [
"MIT"
] | null | null | null | terzani/utils/clean_text.py | JanMaxime/terzani_colorization | 6538e0053c9119b1bf67da930f309b22cbdece30 | [
"MIT"
] | 1 | 2020-12-16T14:16:16.000Z | 2020-12-24T10:35:27.000Z | terzani/utils/clean_text.py | JanMaxime/terzani_colorization | 6538e0053c9119b1bf67da930f309b22cbdece30 | [
"MIT"
] | null | null | null | # Import nltk to process text
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
import nltk
import string
nltk.download('punkt')
def clean_text(text: str, lower: bool = True, rmv_punc: bool = True, stem: bool = True):
"""
This function accepts a string and performs preprocessing steps on it.
:param text (str): The string or text on which the preprocessing has to be performed.
:param lower (bool): Default=True, indicates if the text has to be made into lower case.
:param rmv_punc (bool): Default=True, indicates if the punctuation should be removed in the text.
:param stem (bool): Default=True, indicates if the stemming should be performed on the words in the text.
:return cleaned_text (list): The modified text is returned as list after performing the indicated operations.
"""
# split into words
tokens = word_tokenize(text)
if lower:
# convert to lower case
tokens = [w.lower() for w in tokens]
if rmv_punc:
# remove punctuation from each word
table = str.maketrans('', '', string.punctuation)
tokens = [w.translate(table)
for w in tokens if w.translate(table) != '']
if stem:
# stemming of words
porter = PorterStemmer()
stemmed_tokens = [porter.stem(word) for word in tokens]
tokens.extend(stemmed_tokens)
cleaned_text = sorted(list(set(tokens)), key=str.lower)
return cleaned_text
| 40.054054 | 113 | 0.684211 |
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
import nltk
import string
# Fetch the Punkt tokenizer models required by word_tokenize (no-op if cached).
nltk.download('punkt')
def clean_text(text: str, lower: bool = True, rmv_punc: bool = True, stem: bool = True):
    """Tokenize *text*, optionally lower-case, strip punctuation and add stems.

    Returns a case-insensitively sorted list of unique tokens.
    """
    # Split the raw text into word tokens.
    tokens = word_tokenize(text)
    if lower:
        # Normalise case.
        tokens = [w.lower() for w in tokens]
    if rmv_punc:
        # Remove punctuation from each word, dropping tokens that become empty.
        table = str.maketrans('', '', string.punctuation)
        tokens = [w.translate(table)
                  for w in tokens if w.translate(table) != '']
    if stem:
        # Keep both the original tokens and their Porter stems.
        porter = PorterStemmer()
        stemmed_tokens = [porter.stem(word) for word in tokens]
        tokens.extend(stemmed_tokens)
    cleaned_text = sorted(list(set(tokens)), key=str.lower)
    return cleaned_text
return cleaned_text
| true | true |
1c36347d156955c0e1e0b19d2c9fddcbb5147fe6 | 569 | py | Python | model/group.py | Oilg/python_traning | b29e5e95be55c10f0d15e2001f6483004aa163a8 | [
"Apache-2.0"
] | null | null | null | model/group.py | Oilg/python_traning | b29e5e95be55c10f0d15e2001f6483004aa163a8 | [
"Apache-2.0"
] | null | null | null | model/group.py | Oilg/python_traning | b29e5e95be55c10f0d15e2001f6483004aa163a8 | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return '%s:%s;%s;%s' % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 25.863636 | 103 | 0.58348 | from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return '%s:%s;%s;%s' % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| true | true |
1c3634d2850b1af0b826da326e86fcff4093ad36 | 523 | py | Python | GUI_demos/SimpleExitButton.py | ssklykov/traininPy | b0ae12ebae27dcfb74db2712f1153b6a69154338 | [
"Unlicense"
] | null | null | null | GUI_demos/SimpleExitButton.py | ssklykov/traininPy | b0ae12ebae27dcfb74db2712f1153b6a69154338 | [
"Unlicense"
] | null | null | null | GUI_demos/SimpleExitButton.py | ssklykov/traininPy | b0ae12ebae27dcfb74db2712f1153b6a69154338 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple demo of Exit button
@author: ssklykov
"""
from tkinter import Tk, Button, RIGHT
topLevelWidget = Tk()
topLevelWidget.geometry("250x200")
topLevelWidget.title("Window with Exit")
buttn = Button(topLevelWidget, text='Exit', command=topLevelWidget.destroy) # Actually, Tk().quit() doesn't work
# But the .destroy works - suprisingly. Proper indentation for this comment - ?
buttn.config(font=('Liberation Sans', 12))
buttn.pack(side=RIGHT)
topLevelWidget.mainloop()
| 30.764706 | 113 | 0.73805 |
from tkinter import Tk, Button, RIGHT
# Create the top-level window.
topLevelWidget = Tk()
topLevelWidget.geometry("250x200")
topLevelWidget.title("Window with Exit")
# Button that destroys the window when clicked (Tk().quit() would not close it).
buttn = Button(topLevelWidget, text='Exit', command=topLevelWidget.destroy)
buttn.config(font=('Liberation Sans', 12))
buttn.pack(side=RIGHT)
# Enter the Tk event loop.
topLevelWidget.mainloop()
| true | true |
1c3634d673a3381de49763ef69562fb954a620d6 | 1,779 | py | Python | bert_pytorch/model/language_model.py | luomou97/BERT-pytorch | 61bb990d75a23dc39b5a1ec27787c4a596ba5352 | [
"Apache-2.0"
] | null | null | null | bert_pytorch/model/language_model.py | luomou97/BERT-pytorch | 61bb990d75a23dc39b5a1ec27787c4a596ba5352 | [
"Apache-2.0"
] | null | null | null | bert_pytorch/model/language_model.py | luomou97/BERT-pytorch | 61bb990d75a23dc39b5a1ec27787c4a596ba5352 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
from .bert import BERT
class BERTLM(nn.Module):
    """
    BERT Language Model
    Next Sentence Prediction Model + Masked Language Model
    """

    def __init__(self, bert: BERT, vocab_size):
        """
        :param bert: BERT model which should be trained
        :param vocab_size: total vocab size for masked_lm
        """
        super().__init__()
        self.bert = bert
        self.next_sentence = NextSentencePrediction(self.bert.hidden)  # next sentence prediction task
        self.mask_lm = MaskedLanguageModel(self.bert.hidden, vocab_size)  # masked language model task

    def forward(self, x, segment_label):
        # Encode the tokens with BERT, then score both pre-training objectives.
        x = self.bert(x, segment_label)
        return self.next_sentence(x), self.mask_lm(x)
class NextSentencePrediction(nn.Module):
    """2-class classifier over the first token: is_next vs is_not_next."""

    def __init__(self, hidden):
        """
        :param hidden: BERT model output size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        # Only the first position ([CLS]) feeds the sentence-level classifier
        # (see the BERT paper); output is (batch_size, 2) log-probabilities.
        cls_state = x[:, 0]
        return self.softmax(self.linear(cls_state))
class MaskedLanguageModel(nn.Module):
    """Per-position vocab_size-way classifier predicting the original tokens.

    n-class classification problem with n = vocab_size.
    """

    def __init__(self, hidden, vocab_size):
        """
        :param hidden: output size of BERT model
        :param vocab_size: total vocab size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        # (batch, seq, hidden) -> (batch, seq, vocab_size) log-probabilities.
        logits = self.linear(x)
        return self.softmax(logits)
| 28.693548 | 129 | 0.631816 | import torch.nn as nn
from .bert import BERT
class BERTLM(nn.Module):
    """BERT pre-training head: next-sentence prediction + masked language model."""

    def __init__(self, bert: BERT, vocab_size):
        """
        :param bert: BERT encoder which should be trained
        :param vocab_size: total vocab size for the masked LM head
        """
        super().__init__()
        self.bert = bert
        # Sentence-level classifier over the first ([CLS]) position.
        self.next_sentence = NextSentencePrediction(self.bert.hidden)
        # Token-level classifier over every position.
        self.mask_lm = MaskedLanguageModel(self.bert.hidden, vocab_size)

    def forward(self, x, segment_label):
        # Encode the tokens with BERT, then score both pre-training objectives.
        x = self.bert(x, segment_label)
        return self.next_sentence(x), self.mask_lm(x)
class NextSentencePrediction(nn.Module):
    """2-class log-softmax classifier (is_next / is_not_next) over the first token."""

    def __init__(self, hidden):
        """:param hidden: BERT model output size"""
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        # Classify from the first ([CLS]) position only: (batch, 2) output.
        return self.softmax(self.linear(x[:, 0]))
class MaskedLanguageModel(nn.Module):
    """Per-position vocab_size-way classifier predicting the original tokens."""

    def __init__(self, hidden, vocab_size):
        """
        :param hidden: output size of BERT model
        :param vocab_size: total vocab size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        # (batch, seq, hidden) -> (batch, seq, vocab_size) log-probabilities.
        return self.softmax(self.linear(x))
| true | true |
1c3634fd0074817fb3353ed0690404e572d19535 | 85 | py | Python | QuestoesBeecrowd-Iniciante/1011.py | AtosNeves/Beecrowd | f1192218eac3f6300290fe8234bbc720e9fb859e | [
"MIT"
] | null | null | null | QuestoesBeecrowd-Iniciante/1011.py | AtosNeves/Beecrowd | f1192218eac3f6300290fe8234bbc720e9fb859e | [
"MIT"
] | null | null | null | QuestoesBeecrowd-Iniciante/1011.py | AtosNeves/Beecrowd | f1192218eac3f6300290fe8234bbc720e9fb859e | [
"MIT"
] | null | null | null | pi = 3.14159
r = float(input())
v = 4/3*pi*r**3
print("VOLUME = {:.3f}".format(v))
| 12.142857 | 34 | 0.541176 | pi = 3.14159
r = float(input())
v = 4/3*pi*r**3
print("VOLUME = {:.3f}".format(v))
| true | true |
1c363535e34fb0e70e850fa96dec71197c5cb022 | 8,977 | py | Python | config/settings/production.py | blueNoteb5/simssadb | 85eeaeb09cf2172831aa47d1fbf66416030d43c6 | [
"Apache-1.1"
] | null | null | null | config/settings/production.py | blueNoteb5/simssadb | 85eeaeb09cf2172831aa47d1fbf66416030d43c6 | [
"Apache-1.1"
] | null | null | null | config/settings/production.py | blueNoteb5/simssadb | 85eeaeb09cf2172831aa47d1fbf66416030d43c6 | [
"Apache-1.1"
] | null | null | null | import logging
# Production settings: every secret or deployment-specific value is read
# from environment variables via django-environ's ``env`` helper, layered
# on top of the shared defaults in ``.base``.
from .base import *  # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['localhost'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL')  # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True  # noqa F405
# Persist DB connections for up to a minute to avoid per-request reconnects.
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)  # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': env('REDIS_URL'),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            # Mimicking memcached behavior: treat Redis outages as cache
            # misses instead of raising, so a cache failure never 500s the site.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
            'IGNORE_EXCEPTIONS': True,
        }
    }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages']  # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_AUTO_CREATE_BUCKET = True
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
# One week, in seconds, used for the S3 object Cache-Control header below.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
    'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# MEDIA
# ------------------------------------------------------------------------------
# User-uploaded media lives in S3; static files are served by WhiteNoise above.
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Cached template loader: templates are compiled once per process in prod.
TEMPLATES[0]['OPTIONS']['loaders'] = [  # noqa F405
    (
        'django.template.loaders.cached.Loader',
        [
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ]
    ),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
    'DJANGO_DEFAULT_FROM_EMAIL',
    default='simssadb <noreply@localhost>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[simssadb]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex (kept secret in production, hence env-driven).
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail']  # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
    'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
    'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn']  # noqa F405
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
# WhiteNoise must run early in the middleware stack to serve static files.
MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware'] + MIDDLEWARE  # noqa F405
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL
# raven
# ------------------------------------------------------------------------------
# https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat']  # noqa F405
# Prepend so Sentry can attach its error-id header to every response.
MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware'] + MIDDLEWARE
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env('SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Root logger goes to Sentry at WARNING+; noisy subsystems are routed to the
# console instead (propagate=False) so they do not spam Sentry.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'dsn': SENTRY_DSN
}
# Your stuff...
# ------------------------------------------------------------------------------
| 41.560185 | 106 | 0.606104 | import logging
from .base import *
from .base import env
= env('DJANGO_SECRET_KEY')
= env.list('DJANGO_ALLOWED_HOSTS', default=['localhost'])
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
': True,
}
}
}
= ('HTTP_X_FORWARDED_PROTO', 'https')
= env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
= True
= True
= True
= True
env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
= env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
= True
= 'DENY'
PS += ['storages']
SS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
ET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
_CREATE_BUCKET = True
YSTRING_AUTH = False
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='simssadb <noreply@localhost>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[simssadb]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware'] + MIDDLEWARE # noqa F405
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL
# raven
# ------------------------------------------------------------------------------
# https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat'] # noqa F405
MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware'] + MIDDLEWARE
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env('SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'dsn': SENTRY_DSN
}
# Your stuff...
# ------------------------------------------------------------------------------
| true | true |
1c36353b19456de449d0f9ed0272bd5e24ec3728 | 401 | py | Python | profile/migrations/0005_notifications_threshold.py | ritstudentgovernment/PawPrints | 6f52d721d4c367a8524f49881e62a162a81469b4 | [
"Apache-2.0"
] | 15 | 2017-04-03T14:01:44.000Z | 2022-03-18T06:38:56.000Z | profile/migrations/0005_notifications_threshold.py | ritstudentgovernment/PawPrints | 6f52d721d4c367a8524f49881e62a162a81469b4 | [
"Apache-2.0"
] | 87 | 2016-10-13T01:53:38.000Z | 2022-02-11T03:39:55.000Z | profile/migrations/0005_notifications_threshold.py | ritstudentgovernment/PawPrints | 6f52d721d4c367a8524f49881e62a162a81469b4 | [
"Apache-2.0"
] | 8 | 2017-10-19T18:30:48.000Z | 2021-04-03T02:26:01.000Z | # Generated by Django 2.1.3 on 2019-02-18 17:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add ``Notifications.threshold``.

    The flag defaults to False, so existing rows keep threshold
    notifications disabled until a user opts in.
    """
    dependencies = [
        ('profile', '0004_notifications_reported'),
    ]
    operations = [
        migrations.AddField(
            model_name='notifications',
            name='threshold',
            field=models.BooleanField(default=False),
        ),
    ]
| 21.105263 | 53 | 0.613466 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile', '0004_notifications_reported'),
]
operations = [
migrations.AddField(
model_name='notifications',
name='threshold',
field=models.BooleanField(default=False),
),
]
| true | true |
1c36359b36a691522aa396f2dacea67ffcbf7cf9 | 2,985 | py | Python | aiida/backends/sqlalchemy/migrations/versions/ce56d84bcc35_delete_trajectory_symbols_array.py | yakutovicha/aiida-core | 35b5c341e24df22b9b920c094348cef4f1a72846 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/backends/sqlalchemy/migrations/versions/ce56d84bcc35_delete_trajectory_symbols_array.py | yakutovicha/aiida-core | 35b5c341e24df22b9b920c094348cef4f1a72846 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/backends/sqlalchemy/migrations/versions/ce56d84bcc35_delete_trajectory_symbols_array.py | yakutovicha/aiida-core | 35b5c341e24df22b9b920c094348cef4f1a72846 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Delete trajectory symbols array from the repository and the reference in the attributes
Revision ID: ce56d84bcc35
Revises: 12536798d4d3
Create Date: 2019-01-21 15:35:07.280805
"""
# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed
# pylint: disable=no-member,no-name-in-module,import-error
import numpy
from alembic import op
from sqlalchemy.sql import table, column, select, func, text
from sqlalchemy import String, Integer, cast
from sqlalchemy.dialects.postgresql import UUID, JSONB
from aiida.backends.general.migrations import utils
# revision identifiers, used by Alembic.
revision = 'ce56d84bcc35'  # this migration's identifier
down_revision = '12536798d4d3'  # parent revision this one applies on top of
branch_labels = None  # not the head of a named branch
depends_on = None  # no cross-branch dependency
def upgrade():
    """Delete the ``symbols`` array from every TrajectoryData node.

    For each node of type ``node.data.array.trajectory.TrajectoryData.``
    this removes the ``array|symbols`` key from the JSONB attributes and
    deletes the corresponding numpy array file from the file repository.
    """
    # yapf:disable
    connection = op.get_bind()
    # Lightweight table stub: just enough of db_dbnode to build queries
    # without importing the (possibly drifted) ORM model.
    DbNode = table('db_dbnode', column('id', Integer), column('uuid', UUID), column('type', String),
                   column('attributes', JSONB))
    nodes = connection.execute(
        select(DbNode.c.id, DbNode.c.uuid).where(
            DbNode.c.type == op.inline_literal('node.data.array.trajectory.TrajectoryData.'))).fetchall()
    for pk, uuid in nodes:
        # ``#-`` is PostgreSQL's JSONB delete-path operator; `pk` is an
        # integer primary key read from the DB itself, so the f-string
        # interpolation is not an injection vector.
        connection.execute(
            text(f"""UPDATE db_dbnode SET attributes = attributes #- '{{array|symbols}}' WHERE id = {pk}"""))
        utils.delete_numpy_array_from_repository(uuid, 'symbols')
def downgrade():
    """Recreate the ``symbols`` array for every TrajectoryData node.

    Rebuilds the repository numpy array from the node's ``symbols``
    attribute and restores the ``array|symbols`` bookkeeping entry
    (the array's shape) in the JSONB attributes via ``jsonb_set``.
    """
    # yapf:disable
    connection = op.get_bind()
    DbNode = table('db_dbnode', column('id', Integer), column('uuid', UUID), column('type', String),
                   column('attributes', JSONB))
    nodes = connection.execute(
        select(DbNode.c.id, DbNode.c.uuid).where(
            DbNode.c.type == op.inline_literal('node.data.array.trajectory.TrajectoryData.'))).fetchall()
    for pk, uuid in nodes:
        # NOTE(review): `attributes` is a result Row for the single JSONB
        # column; indexing it with 'symbols' assumes the row's mapping
        # access falls through to the JSONB payload — verify against the
        # SQLAlchemy version pinned for this revision.
        attributes = connection.execute(select(DbNode.c.attributes).where(DbNode.c.id == pk)).fetchone()
        symbols = numpy.array(attributes['symbols'])
        utils.store_numpy_array_in_repository(uuid, 'symbols', symbols)
        key = op.inline_literal('{"array|symbols"}')
        connection.execute(DbNode.update().where(DbNode.c.id == pk).values(
            attributes=func.jsonb_set(DbNode.c.attributes, key, cast(list(symbols.shape), JSONB))))
| 40.337838 | 109 | 0.621106 | true | true | |
1c3636aa178df5e66b3264f7dc77edc6bdc57574 | 3,959 | py | Python | translate.py | mmubarak0/google-translate-cli | 5fa7593efbbf61a8bb295b50443f98958385b229 | [
"MIT"
] | null | null | null | translate.py | mmubarak0/google-translate-cli | 5fa7593efbbf61a8bb295b50443f98958385b229 | [
"MIT"
] | null | null | null | translate.py | mmubarak0/google-translate-cli | 5fa7593efbbf61a8bb295b50443f98958385b229 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from google.cloud import translate
from language_to_iso import lang_to_iso
from speech import text_to_speech
from web_page import web_page_translation
from print_languages import print_languages, print_language_name
from helpers import decode, valid_lang, credentials, print_usage
# CLI flag state shared between main() (which sets these) and
# translate_text() (which reads them). None means "flag not given".
opt_b = None  # --bare: suppress echoing of source text and translation
opt_s = None  # --speech: also synthesize speech for the translation
opt_c = None  # --confidence: also print language-detection confidence
def translate_text(text, target_language):
    """Translate `text` into `target_language` and print the result.

    Honours the module-level CLI flags: `opt_c` prints the detection
    confidence, `opt_b` suppresses echoing of source/translation text,
    and `opt_s` additionally synthesizes speech for the translation.
    Exits the process on oversized input or on any API/credential error.
    """
    if len(text) > 10000:
        print('Error: Text too large. Maximum: 10000 characters')
        sys.exit()
    try:
        client = translate.Client()
    except Exception:
        # Most likely GOOGLE_APPLICATION_CREDENTIALS is missing/invalid;
        # credentials() explains how to set it up.
        credentials()
        sys.exit()
    try:
        confidence = client.detect_language(text)
        result = client.translate(text, target_language)
        if opt_c is True:
            # `end=''` keeps label and value on one line; the old
            # `print(...),` trailing comma was a Python 2 idiom that under
            # Python 3 printed them on two lines.
            print('Detected language confidence: ', end='')
            print('{:.2%}'.format(confidence['confidence']))  # convert to %
        if opt_b is not True:
            if result['detectedSourceLanguage'] != target_language:
                print_language_name(result['detectedSourceLanguage'])
                print(result['input'])
            print_language_name(target_language)
            # BUG FIX: the old code did `print(...).encode('utf-8')`, which
            # calls .encode() on print's return value (None) and raises
            # AttributeError under the python3 shebang. print() already
            # writes str correctly.
            print(decode(result['translatedText']))
        if opt_s is True:
            text_to_speech(result['translatedText'], target_language)
    except Exception as e:
        print('Error: ', end='')
        print(e)
        sys.exit()
def file_translation(argv):
    """Translate the contents of the file named in argv[2].

    argv[3:] lists target languages (defaults to English when omitted).
    Exits on unreadable or oversized (>= 10000 characters) files.
    """
    try:
        # Context manager guarantees the handle is closed on every path
        # (the old code leaked it when exiting early on oversized files).
        with open(argv[2], 'r') as f:
            text = f.read(10000)
    except Exception:
        print("Error: Can't find file or read data")
        sys.exit()
    if len(text) >= 10000:
        # read(10000) filled the whole buffer, so the file may be larger.
        print('Error: File too large. Maximum: 10000 characters')
        sys.exit()
    # BUG FIX: the old `if text[-1] == '\n'` raised IndexError on empty
    # files; endswith() is safe for the empty string.
    if text.endswith('\n'):
        text = text[:-1]  # drop the trailing newline
    if len(argv) == 3:  # if no language is given, default to English
        translate_text(text, 'en')
    else:
        for l in argv[3:]:  # iterate through requested languages
            lang = lang_to_iso(l, False, False)
            if valid_lang(lang) is True:
                translate_text(text, lang)
def interactive_translation():
    """Interactive loop: pick a target language, then translate lines.

    Special inputs: CHANGE switches the target language, EXIT quits.
    Ctrl-C / Ctrl-D also exit cleanly.
    """
    print('Type \033[1;37mCHANGE\033[0;0m to change target language')
    print('Type \033[1;37mEXIT\033[0;0m to exit')
    try:
        # BUG FIX: raw_input() is Python 2 only and raised NameError under
        # the python3 shebang; input() is the Python 3 equivalent.
        lang = input('Enter target language: ')
    except (EOFError, KeyboardInterrupt):  # handles Ctrl-D / Ctrl-C
        print('')
        sys.exit()
    if lang == 'EXIT':
        sys.exit()
    lang = lang_to_iso(lang, True, False)
    if valid_lang(lang) is True:
        print('\033[1;32m✔︎\033[0;0m')
    text = ''
    try:
        while True:
            while valid_lang(lang) is False or text == 'CHANGE':
                text = ''
                lang = input('Enter target language: ')
                if lang == 'EXIT':
                    sys.exit()
                lang = lang_to_iso(lang, True, False)
                if valid_lang(lang) is True:
                    print('\033[1;32m✔︎\033[0;0m')
            text = input('Enter text to translate: ')
            if text == 'EXIT':
                sys.exit()
            if text == 'CHANGE':
                continue
            translate_text(text, lang)
    # BUG FIX: the old bare `except:` also swallowed SystemExit from the
    # sys.exit() calls above; only the interrupt signals need handling.
    except (EOFError, KeyboardInterrupt):  # handles Ctrl-D / Ctrl-C
        print('')
        sys.exit()
def main(argv):
    """Parse options/commands from `argv` and dispatch the translation.

    Boolean flags (-b/--bare, -s/--speech, -c/--confidence) may appear
    anywhere and are stripped from argv before command dispatch; their
    presence is recorded in the module-level flags read by translate_text().
    """
    global opt_b
    global opt_s
    global opt_c

    def _consume_flag(*forms):
        """Remove each given flag spelling from argv once; True if any was present."""
        present = None
        for form in forms:
            if form in argv:
                argv.remove(form)
                present = True
        return present

    # Deduplicates the six copy-pasted if/remove stanzas of the old code.
    if _consume_flag('-b', '--bare'):
        opt_b = True
    if _consume_flag('-s', '--speech'):
        opt_s = True
    if _consume_flag('-c', '--confidence'):
        opt_c = True
    if len(argv) < 2 or argv[1] in ('-h', '--help'):
        print_usage(1, argv[0])
        sys.exit()
    elif len(argv) == 2 and argv[1] in ('-l', '--languages'):
        print_languages(1)
        sys.exit()
    elif len(argv) >= 3 and argv[1] in ('-f', '--file'):
        file_translation(argv)
    elif len(argv) >= 3 and argv[1] in ('-u', '--url'):
        web_page_translation(argv)
    elif len(argv) >= 2 and argv[1] in ('-i', '--interactive'):
        interactive_translation()
    elif len(argv) == 2:
        translate_text(argv[1], 'en')  # no language given: default to English
    elif len(argv) > 2:
        for l in argv[2:]:
            lang = lang_to_iso(l, False, False)
            if valid_lang(lang) is True:
                translate_text(argv[1], lang)
# Script entry point: pass the raw argv (argv[0], the program name, is
# used by print_usage in main()).
if __name__ == "__main__":
    main(sys.argv)
| 27.116438 | 73 | 0.647891 |
import sys
from google.cloud import translate
from language_to_iso import lang_to_iso
from speech import text_to_speech
from web_page import web_page_translation
from print_languages import print_languages, print_language_name
from helpers import decode, valid_lang, credentials, print_usage
opt_b = None
opt_s = None
opt_c = None
def translate_text(text, target_language):
if len(text) > 10000:
print ('Error: Text too large. Maximum: 10000 characters')
sys.exit()
try:
client = translate.Client()
except:
credentials()
sys.exit()
try:
confidence = client.detect_language(text)
result = client.translate(text, target_language)
if opt_c == True:
print('Detected language confidence: '),
print('{:.2%}'.format(confidence['confidence']))
if opt_b != True:
if result['detectedSourceLanguage'] != target_language:
print_language_name(result['detectedSourceLanguage'])
print(result['input'])
print_language_name(target_language)
print(decode(result['translatedText'])).encode('utf-8')
if opt_s == True:
text_to_speech(result['translatedText'], target_language)
except Exception as e:
print('Error: '),
print(e)
sys.exit()
def file_translation(argv):
try:
f = open(argv[2], 'r')
except Exception:
print("Error: Can't find file or read data")
sys.exit()
else:
if f.mode == 'r':
text = f.read(10000)
if len(text) >= 10000:
print ('Error: File too large. Maximum: 10000 characters')
f.close()
sys.exit()
if text[-1] == '\n': # if last char is \n
text = text[:-1] # remove it
if len(argv) == 3: # if no language is given, default to English
translate_text(text, 'en')
else:
for l in argv[3:]: # iterate through languages
lang = lang_to_iso(l, False, False)
if valid_lang(lang) == True:
translate_text(text, lang)
f.close()
def interactive_translation():
print ('Type \033[1;37mCHANGE\033[0;0m to change target language')
print('Type \033[1;37mEXIT\033[0;0m to exit')
try:
lang = raw_input('Enter target language: ')
except: # handles Ctrl-D / Ctrl-C
print('')
sys.exit()
if lang == 'EXIT':
sys.exit()
lang = lang_to_iso(lang, True, False)
if valid_lang(lang) == True:
print('\033[1;32m✔︎\033[0;0m')
text = ''
try:
while True:
while valid_lang(lang) == False or text == 'CHANGE':
text = ''
lang = raw_input('Enter target language: ')
if lang == 'EXIT':
sys.exit()
lang = lang_to_iso(lang, True, False)
if valid_lang(lang) == True:
print('\033[1;32m✔︎\033[0;0m')
text = raw_input('Enter text to translate: ')
if text == 'EXIT':
sys.exit()
if text == 'CHANGE':
continue
translate_text(text, lang)
except: # handles Ctrl-D / Ctrl-C
print('')
sys.exit()
def main(argv):
global opt_b
global opt_s
global opt_c
if '-b' in argv:
opt_b = True
argv.remove('-b')
if '--bare' in argv:
opt_b = True
argv.remove('--bare')
if '-s' in argv:
opt_s = True
argv.remove('-s')
if '--speech' in argv:
opt_s = True
argv.remove('--speech')
if '-c' in argv:
opt_c = True
argv.remove('-c')
if '--confidence' in argv:
opt_c = True
argv.remove('--confidence')
if len(argv) < 2 or argv[1] == '-h' or argv[1] == '--help':
print_usage(1, argv[0])
sys.exit()
elif len(argv) == 2 and (argv[1] == '-l' or argv[1] == '--languages'):
print_languages(1)
sys.exit()
elif len(argv) >= 3 and (argv[1] == '-f' or argv[1] == '--file'):
file_translation(argv)
elif len(argv) >= 3 and (argv[1] == '-u' or argv[1] == '--url'):
web_page_translation(argv)
elif len(argv) >= 2 and (argv[1] == '-i' or argv[1] == '--interactive'):
interactive_translation()
elif len(argv) == 2:
translate_text(argv[1], 'en')
elif len(argv) > 2:
for l in argv[2:]:
lang = lang_to_iso(l, False, False)
if valid_lang(lang) == True:
translate_text(argv[1], lang)
if __name__ == "__main__":
main(sys.argv)
| true | true |
1c3636afe04726a49037190e2e3e669ee9b16056 | 11,023 | py | Python | apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/google/protobuf/service_reflection.py | tharindu1st/apim-migration-resources | dd68aa8c53cf310392bb72e699dd24c57b109cfb | [
"Apache-2.0"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/google/protobuf/service_reflection.py | tharindu1st/apim-migration-resources | dd68aa8c53cf310392bb72e699dd24c57b109cfb | [
"Apache-2.0"
] | 4,673 | 2017-03-29T10:43:43.000Z | 2022-03-31T08:33:44.000Z | apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/google/protobuf/service_reflection.py | tharindu1st/apim-migration-resources | dd68aa8c53cf310392bb72e699dd24c57b109cfb | [
"Apache-2.0"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains metaclasses used to create protocol service and service stub
classes from ServiceDescriptor objects at runtime.
The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to
inject all useful functionality into the classes output by the protocol
compiler at compile-time.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class GeneratedServiceType(type):

  """Metaclass for service classes created at runtime from ServiceDescriptors.

  Implementations for all methods described in the Service class are added here
  by this class. We also create properties to allow getting/setting all fields
  in the protocol message.

  The protocol compiler currently uses this metaclass to create protocol service
  classes at runtime. Clients can also manually create their own classes at
  runtime, as in this example (Python 2 ``__metaclass__`` form):

  mydescriptor = ServiceDescriptor(.....)
  class MyProtoService(service.Service):
    __metaclass__ = GeneratedServiceType
    DESCRIPTOR = mydescriptor
  myservice_instance = MyProtoService()
  ...
  """

  # Class-dictionary key under which generated classes carry their descriptor.
  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __init__(cls, name, bases, dictionary):
    """Creates a message service class.

    Args:
      name: Name of the class (ignored, but required by the metaclass
        protocol).
      bases: Base classes of the class being constructed.
      dictionary: The class dictionary of the class being constructed.
        dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
        describing this protocol service type.
    """
    # Don't do anything if this class doesn't have a descriptor. This happens
    # when a service class is subclassed.
    if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
      return
    descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
    # The builder injects CallMethod/GetDescriptor/GetRequestClass/etc. plus
    # one placeholder per declared RPC onto the class being created.
    service_builder = _ServiceBuilder(descriptor)
    service_builder.BuildService(cls)
class GeneratedServiceStubType(GeneratedServiceType):

  """Metaclass for service stubs created at runtime from ServiceDescriptors.

  This class has similar responsibilities as GeneratedServiceType, except that
  it creates the service stub classes.
  """

  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __init__(cls, name, bases, dictionary):
    """Creates a message service stub class.

    Args:
      name: Name of the class (ignored, here).
      bases: Base classes of the class being constructed.
      dictionary: The class dictionary of the class being constructed.
        dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
        describing this protocol service type.
    """
    # Run the parent metaclass first so the stub gets the full service
    # surface; the stub builder then layers channel-delegating methods on top.
    super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
    # Don't do anything if this class doesn't have a descriptor. This happens
    # when a service stub is subclassed.
    if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
      return
    descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
    service_stub_builder = _ServiceStubBuilder(descriptor)
    service_stub_builder.BuildServiceStub(cls)
class _ServiceBuilder(object):
"""This class constructs a protocol service class using a service descriptor.
Given a service descriptor, this class constructs a class that represents
the specified service descriptor. One service builder instance constructs
exactly one service class. That means all instances of that class share the
same builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
service class.
"""
self.descriptor = service_descriptor
def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback)
def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback))
def _NonImplementedMethod(self, method_name, rpc_controller, callback):
"""The body of all methods in the generated service class.
Args:
method_name: Name of the method being executed.
rpc_controller: RPC controller used to execute this method.
callback: A callback which will be invoked when the method finishes.
"""
rpc_controller.SetFailed('Method %s not implemented.' % method_name)
callback(None)
class _ServiceStubBuilder(object):
"""Constructs a protocol service stub class using a service descriptor.
Given a service descriptor, this class constructs a suitable stub class.
A stub is just a type-safe wrapper around an RpcChannel which emulates a
local implementation of the service.
One service stub builder instance constructs exactly one class. It means all
instances of that class share the same service stub builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service stub class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
stub class.
"""
self.descriptor = service_descriptor
def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
"""
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method))
def _GenerateStubMethod(self, method):
return (lambda inst, rpc_controller, request, callback=None:
self._StubMethod(inst, method, rpc_controller, request, callback))
def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback)
| 38.677193 | 80 | 0.741268 |
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'petar@google.com (Petar Petrov)'
class GeneratedServiceType(type):
  """Metaclass for service classes created at runtime from ServiceDescriptors.

  When the class dictionary carries a 'DESCRIPTOR' entry, the full service
  API is attached to the newly created class via _ServiceBuilder.
  """

  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __init__(cls, name, bases, dictionary):
    """Creates a message service class.

    Args:
      name: Name of the class (required by the metaclass protocol, unused).
      bases: Base classes of the class being constructed.
      dictionary: The class dictionary of the class being constructed;
        dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor.
    """
    # Don't do anything if this class doesn't have a descriptor. This happens
    # when a service class is subclassed.
    if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
      return
    descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
    service_builder = _ServiceBuilder(descriptor)
    service_builder.BuildService(cls)
class GeneratedServiceStubType(GeneratedServiceType):
  """Metaclass for service stub classes created from ServiceDescriptors.

  Same contract as GeneratedServiceType, but additionally attaches
  channel-forwarding stub methods via _ServiceStubBuilder.
  """

  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __init__(cls, name, bases, dictionary):
    """Creates a service stub class from dictionary['DESCRIPTOR']."""
    super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
    # Don't do anything if this class doesn't have a descriptor. This happens
    # when a service stub is subclassed.
    if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
      return
    descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
    service_stub_builder = _ServiceStubBuilder(descriptor)
    service_stub_builder.BuildServiceStub(cls)
class _ServiceBuilder(object):
  """Constructs a protocol service class using a ServiceDescriptor.

  One builder instance builds exactly one service class: BuildService()
  attaches CallMethod, GetDescriptor, request/response class lookups and a
  not-implemented placeholder for every declared method.
  """
  def __init__(self, service_descriptor):
    """Stores the ServiceDescriptor used to construct the service class."""
    self.descriptor = service_descriptor
  def BuildService(self, cls):
    """Constructs the service class `cls` from the stored descriptor."""
    # CallMethod needs to operate with an instance of the Service class. This
    # internal wrapper function exists only to be able to pass the service
    # instance to the method that does the real CallMethod work.
    def _WrapCallMethod(srvc, method_descriptor,
                        rpc_controller, request, callback):
      return self._CallMethod(srvc, method_descriptor,
                              rpc_controller, request, callback)
    self.cls = cls
    cls.CallMethod = _WrapCallMethod
    cls.GetDescriptor = staticmethod(lambda: self.descriptor)
    cls.GetDescriptor.__doc__ = "Returns the service descriptor."
    cls.GetRequestClass = self._GetRequestClass
    cls.GetResponseClass = self._GetResponseClass
    for method in self.descriptor.methods:
      setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
  def _CallMethod(self, srvc, method_descriptor,
                  rpc_controller, request, callback):
    """Dispatches to the method on `srvc` named by `method_descriptor`."""
    if method_descriptor.containing_service != self.descriptor:
      raise RuntimeError(
          'CallMethod() given method descriptor for wrong service type.')
    method = getattr(srvc, method_descriptor.name)
    return method(rpc_controller, request, callback)
  def _GetRequestClass(self, method_descriptor):
    """Returns the concrete class of the method's input message."""
    if method_descriptor.containing_service != self.descriptor:
      raise RuntimeError(
          'GetRequestClass() given method descriptor for wrong service type.')
    return method_descriptor.input_type._concrete_class
  def _GetResponseClass(self, method_descriptor):
    """Returns the concrete class of the method's output message."""
    if method_descriptor.containing_service != self.descriptor:
      raise RuntimeError(
          'GetResponseClass() given method descriptor for wrong service type.')
    return method_descriptor.output_type._concrete_class
  def _GenerateNonImplementedMethod(self, method):
    """Returns a placeholder implementation for one service method."""
    return lambda inst, rpc_controller, request, callback: (
        self._NonImplementedMethod(method.name, rpc_controller, callback))
  def _NonImplementedMethod(self, method_name, rpc_controller, callback):
    """Reports failure via the controller, then completes the callback."""
    rpc_controller.SetFailed('Method %s not implemented.' % method_name)
    callback(None)
class _ServiceStubBuilder(object):
  """Constructs a service stub class from a ServiceDescriptor.

  The stub is a type-safe wrapper around an RpcChannel: every method
  forwards to the channel's CallMethod.
  """
  def __init__(self, service_descriptor):
    """Stores the ServiceDescriptor used to construct the stub class."""
    self.descriptor = service_descriptor
  def BuildServiceStub(self, cls):
    """Constructs the stub class `cls` from the stored descriptor."""
    def _ServiceStubInit(stub, rpc_channel):
      stub.rpc_channel = rpc_channel
    self.cls = cls
    cls.__init__ = _ServiceStubInit
    for method in self.descriptor.methods:
      setattr(cls, method.name, self._GenerateStubMethod(method))
  def _GenerateStubMethod(self, method):
    """Returns one stub method bound to `method`'s descriptor."""
    return (lambda inst, rpc_controller, request, callback=None:
        self._StubMethod(inst, method, rpc_controller, request, callback))
  def _StubMethod(self, stub, method_descriptor,
                  rpc_controller, request, callback):
    """Forwards one call to the stub's RpcChannel; returns its result."""
    return stub.rpc_channel.CallMethod(
        method_descriptor, rpc_controller, request,
        method_descriptor.output_type._concrete_class, callback)
| true | true |
1c3637cec9c2b33936f2a86c81707b3e82bca411 | 1,135 | py | Python | hub_module/tests/unittests/test_ernie_tiny_couplet.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | 4 | 2021-02-25T03:27:38.000Z | 2021-05-15T03:20:23.000Z | hub_module/tests/unittests/test_ernie_tiny_couplet.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | null | null | null | hub_module/tests/unittests/test_ernie_tiny_couplet.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | 2 | 2021-03-01T07:04:01.000Z | 2021-05-14T05:54:18.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, main
import paddlehub as hub
class ErnieTinyCoupletTestCase(TestCase):
    """Smoke test for the PaddleHub 'ernie_tiny_couplet' module.

    Feeds two Chinese couplet upper lines to predict() and checks only the
    shape of the result; the generated text itself is not asserted.
    """

    def setUp(self):
        # Loads the hub module (requires paddlehub and the module package).
        self.module = hub.Module(name='ernie_tiny_couplet')
        # Two 7-character upper lines used as model input.
        self.left = ["风吹云乱天垂泪", "若有经心风过耳"]

    def test_predict(self):
        rights = self.module.predict(self.left)
        # One result list per input line.
        self.assertEqual(len(rights), 2)
        # NOTE(review): 10 presumably is the number of candidate lower
        # lines returned per input -- confirm against the module docs.
        self.assertEqual(len(rights[0]), 10)
        # Each candidate lower line mirrors the 7-character input length.
        self.assertEqual(len(rights[0][0]), 7)
        self.assertEqual(len(rights[1][0]), 7)
if __name__ == '__main__':
main()
| 34.393939 | 74 | 0.710132 |
from unittest import TestCase, main
import paddlehub as hub
class ErnieTinyCoupletTestCase(TestCase):
def setUp(self):
self.module = hub.Module(name='ernie_tiny_couplet')
self.left = ["风吹云乱天垂泪", "若有经心风过耳"]
def test_predict(self):
rights = self.module.predict(self.left)
self.assertEqual(len(rights), 2)
self.assertEqual(len(rights[0]), 10)
self.assertEqual(len(rights[0][0]), 7)
self.assertEqual(len(rights[1][0]), 7)
if __name__ == '__main__':
main()
| true | true |
1c3638583bdef0dc4f94bb3ed3e45831bb87811e | 852 | py | Python | google-cloud-sdk/lib/third_party/pygments/styles/rrt.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 166 | 2015-01-04T19:31:45.000Z | 2021-11-10T15:54:09.000Z | thirdparty/pygments/pygments/styles/rrt.py | ipwnosx/hatebin | cafb8d34adeb13fc2187b6fb2b6fea34ad4c64ef | [
"MIT"
] | 3 | 2017-12-28T16:49:19.000Z | 2021-03-31T05:15:51.000Z | thirdparty/pygments/pygments/styles/rrt.py | ipwnosx/hatebin | cafb8d34adeb13fc2187b6fb2b6fea34ad4c64ef | [
"MIT"
] | 39 | 2015-01-04T09:56:35.000Z | 2021-05-16T09:10:18.000Z | # -*- coding: utf-8 -*-
"""
pygments.styles.rrt
~~~~~~~~~~~~~~~~~~~
pygments "rrt" theme, based on Zap and Emacs defaults.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
    """
    Minimalistic "rrt" theme, based on Zap and Emacs defaults.
    """

    # Black background with a blue selection highlight.
    background_color = '#000000'
    highlight_color = '#0000ff'

    # Token colours; any token not listed falls back to the defaults
    # inherited from pygments.style.Style.
    styles = {
        Comment: '#00ff00',          # green
        Name.Function: '#ffff00',    # yellow
        Name.Variable: '#eedd82',    # light goldenrod
        Name.Constant: '#7fffd4',    # aquamarine
        Keyword: '#ff0000',          # red
        Comment.Preproc: '#e5e5e5',  # light grey
        String: '#87ceeb',           # sky blue
        Keyword.Type: '#ee82ee',     # violet
    }
| 25.058824 | 70 | 0.550469 |
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
    """Minimalistic "rrt" theme, based on Zap and Emacs defaults."""

    # Black background with a blue selection highlight.
    background_color = '#000000'
    highlight_color = '#0000ff'

    # Token colours; unlisted tokens use the Style defaults.
    styles = {
        Comment: '#00ff00',          # green
        Name.Function: '#ffff00',    # yellow
        Name.Variable: '#eedd82',    # light goldenrod
        Name.Constant: '#7fffd4',    # aquamarine
        Keyword: '#ff0000',          # red
        Comment.Preproc: '#e5e5e5',  # light grey
        String: '#87ceeb',           # sky blue
        Keyword.Type: '#ee82ee',     # violet
    }
| true | true |
1c3638f51c04f9cca713bbfb99b56e5e417f1a93 | 6,752 | py | Python | SOFTWARE/custom_script_linear_dwell_time.py | sbooeshaghi/colosseum | 07f51d95a576924880d6f958fe44fb067781e415 | [
"BSD-2-Clause"
] | 16 | 2021-01-29T10:11:46.000Z | 2022-03-20T17:50:11.000Z | SOFTWARE/custom_script_linear_dwell_time.py | sbooeshaghi/colosseum | 07f51d95a576924880d6f958fe44fb067781e415 | [
"BSD-2-Clause"
] | 1 | 2021-06-11T05:46:33.000Z | 2021-06-11T05:46:33.000Z | SOFTWARE/custom_script_linear_dwell_time.py | sbooeshaghi/colosseum | 07f51d95a576924880d6f958fe44fb067781e415 | [
"BSD-2-Clause"
] | 1 | 2022-03-20T17:50:31.000Z | 2022-03-20T17:50:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial
import sys
import glob
import time
startMarker = 60 # <
endMarker = 62 # >
midMarker = 44 # ,
def populate_ports():
    """Find serial ports and return the last one that can be opened.

    Each platform-specific candidate port is probed by opening and
    immediately closing it; candidates that cannot be opened are skipped.

    :raises EnvironmentError:
        On unsupported or unknown platforms, or when no usable serial
        port is found.
    :returns:
        The device name (str) of the last usable serial port.
    """
    if sys.platform.startswith('win'):
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        ports = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        ports = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')

    result = []
    for port in ports:
        try:
            # Probe: a port that opens cleanly is considered usable.
            s = serial.Serial(port)
            s.close()
            result.append(port)
        except (OSError, serial.SerialException):
            pass
    if not result:
        # Previously this fell through to result[-1] and crashed with an
        # opaque IndexError; fail with a descriptive error instead.
        raise EnvironmentError('No usable serial ports found')
    return result[-1]
def connect(port):
    """Open serial *port* at 2 Mbaud, 8N1, with a 1-second read timeout.

    Returns the opened serial.Serial object; the caller is responsible
    for closing it.
    """
    s = serial.Serial()
    s.port = port
    s.baudrate = 2000000
    s.parity = serial.PARITY_NONE
    s.stopbits = serial.STOPBITS_ONE
    s.bytesize = serial.EIGHTBITS
    s.timeout = 1  # seconds; read() returns b'' once this elapses
    s.open()
    return s
def write_to_Arduino(s, string):
    """Encode *string* and send it over the serial connection *s*.

    The input buffer is flushed afterwards so stale bytes from the device
    do not interfere with the next read.
    """
    payload = string.encode()
    s.write(payload)
    s.flushInput()
def listen(s):
    """Read one '<'...'>'-framed message from serial port *s*.

    Blocks until a start marker arrives, then accumulates bytes until the
    end marker; the markers themselves are not part of the returned string.
    """
    chunks = []
    byte = "z"  # sentinel: any value that is neither marker
    # Skip input until the start marker shows up.
    while ord(byte) != startMarker:
        byte = s.read()
    # Collect payload bytes until the end marker terminates the frame.
    while ord(byte) != endMarker:
        if ord(byte) != startMarker:
            chunks.append(byte.decode())
        byte = s.read()
    return "".join(chunks)
def talk(s, commands):
    """Send each valid command in *commands* and wait for its reply.

    Commands that fail cmd_valid() are skipped silently. For each command
    sent, this blocks (busy-waits) until at least one byte is available on
    the serial port, then reads one framed reply via listen().
    """
    waitingForReply = False
    for teststr in commands: # could use a while loop + numloops iterator?
        if not cmd_valid(teststr):
            continue # returns to beginning of for loop and grabs next string
        if waitingForReply == False:
            write_to_Arduino(s, teststr)
            print("Sent from PC -- " + teststr)
            waitingForReply = True
        if waitingForReply == True:
            # Busy-wait until the device has started responding.
            while s.inWaiting() == 0:
                pass
            dataRecvd = listen(s)
            print("Reply Received -- " + dataRecvd)
            waitingForReply = False
            # Short pause so the device can settle before the next command.
            time.sleep(0.1)
    print("Send and receive complete")
# all this does is check if a command is formatted properly
# used in talk() prior to sending a command to the arduino
def cmd_valid(cmd):
    """Return True if *cmd* is a well-formed Arduino command string.

    A valid command looks like ``<CMD,IND,...>`` where CMD is one of the
    known verbs and IND is a three-character binary motor-selection mask
    (e.g. ``101``). Only the first two fields are validated.
    """
    cmds = {"RUN", "STOP", "RESUME", "PAUSE", "SET_SPEED", "SET_ACCEL"}
    inds = {"000", "100", "010", "001", "110", "101", "011", "111"}
    # Reject anything that is not "<...>"-framed or lacks comma-separated
    # fields; the length guard keeps cmd[0]/cmd[-1] safe for short inputs.
    if len(cmd) < 2 or cmd[0] != '<' or cmd[-1] != '>' or ',' not in cmd:
        return False
    # Split past the opening '<' only, mirroring the historical parse: the
    # second field must be exactly a mask, with no stray characters.
    fields = cmd[1:].split(",")
    return fields[0] in cmds and fields[1] in inds
if __name__ == "__main__":
    # Command strings use the "<CMD,IND,f1,f2,f3>" wire format checked by
    # cmd_valid(); IND is a 3-bit motor-selection mask.
    setup_cmds = [
        # NOTE(review): "1000.0.0" is not a valid float. cmd_valid() only
        # inspects the first two fields, so this command IS sent as-is --
        # confirm the firmware tolerates it, or fix to "1000.0".
        "<SET_ACCEL,111,1000.0,1000.0.0,1000.0>",
        "<SET_SPEED,111,1000.0,1000.0,1000.0>",
    ]
    # Every entry below fails cmd_valid(), so talk() sends nothing for
    # this list -- it exercises the validator, not the device.
    run_cmds = [
        "<this should not work>",
        "<Neither should this>",
        "Or this",
        "Or even, this>",
        "<RUN, 123, 0.0, 0.0, 0.0>", # this shouldn't run either
        #"<RUN,111,5000.0,200.0,200.0>"
    ]
    stop_cmds = [
        "<STOP,111,0.0,0.0,0.0>"
    ]
    # pause_cmds and resume_cmds are defined but never sent below.
    pause_cmds = [
        "<PAUSE,100,0.0,0.0,0.0>"
    ]
    resume_cmds = [
        "<RESUME,100,0.0,0.0,0.0>"
    ]
    # Per-tube RUN commands for motor 1 (mask "100"); the three numeric
    # fields decrease from tube 1 to tube 22.
    tube_1 = ["<RUN,100,84,84,84>"]
    tube_2 = ["<RUN,100,78,78,78>"]
    tube_3 = ["<RUN,100,75,75,75>"]
    tube_4 = ["<RUN,100,70,70,70>"]
    tube_5 = ["<RUN,100,64,64,64>"]
    tube_6 = ["<RUN,100,61,61,61>"]
    tube_7 = ["<RUN,100,58,58,58>"]
    tube_8 = ["<RUN,100,56,56,56>"]
    tube_9 = ["<RUN,100,54,54,54>"]
    tube_10 = ["<RUN,100,52,52,52>"]
    tube_11 = ["<RUN,100,50,50,50>"]
    tube_12 = ["<RUN,100,48,48,48>"]
    tube_13 = ["<RUN,100,47,47,47>"]
    tube_14 = ["<RUN,100,46,46,46>"]
    tube_15 = ["<RUN,100,45,45,45>"]
    tube_16 = ["<RUN,100,44,44,44>"]
    tube_17 = ["<RUN,100,43,43,43>"]
    tube_18 = ["<RUN,100,42,42,42>"]
    tube_19 = ["<RUN,100,41,41,41>"]
    tube_20 = ["<RUN,100,40,40,40>"]
    tube_21 = ["<RUN,100,39,39,39>"]
    tube_22 = ["<RUN,100,39,39,39>"]
    port = populate_ports()
    print("\n[setup] Connecting to port: {}".format(port))
    s = connect(port)
    time.sleep(5) # wait for the arduino to initialize
    print(listen(s))
    print("\n[setup] Sending setup commands..")
    talk(s, setup_cmds)
    time.sleep(1)
    print("\n[action] Sending run commands..")
    talk(s, run_cmds)
    #longer so that we can initialize flow
    time.sleep(20)
    # Dwell at each tube: the sleep after tube_k grows linearly by 12 s
    # per tube (12, 24, ..., 240) up to tube_20.
    print("\n[action] Sending run commands..")
    talk(s, tube_1)
    time.sleep(12)
    print("\n[action] Sending run commands..")
    talk(s, tube_2)
    time.sleep(24)
    print("\n[action] Sending run commands..")
    talk(s, tube_3)
    time.sleep(36)
    print("\n[action] Sending run commands..")
    talk(s, tube_4)
    time.sleep(48)
    print("\n[action] Sending run commands..")
    talk(s, tube_5)
    time.sleep(60)
    print("\n[action] Sending run commands..")
    talk(s, tube_6)
    time.sleep(72)
    print("\n[action] Sending run commands..")
    talk(s, tube_7)
    time.sleep(84)
    print("\n[action] Sending run commands..")
    talk(s, tube_8)
    time.sleep(96)
    print("\n[action] Sending run commands..")
    talk(s, tube_9)
    time.sleep(108)
    print("\n[action] Sending run commands..")
    talk(s, tube_10)
    time.sleep(120)
    print("\n[action] Sending run commands..")
    talk(s, tube_11)
    time.sleep(132)
    print("\n[action] Sending run commands")
    talk(s, tube_12)
    time.sleep(144)
    print("\n[action] Sending run commands")
    talk(s, tube_13)
    time.sleep(156)
    print("\n[action] Sending run commands")
    talk(s, tube_14)
    time.sleep(168)
    print("\n[action] Sending run commands")
    talk(s, tube_15)
    time.sleep(180)
    print("\n[action] Sending run commands")
    talk(s, tube_16)
    time.sleep(192)
    print("\n[action] Sending run commands")
    talk(s, tube_17)
    time.sleep(204)
    print("\n[action] Sending run commands")
    talk(s, tube_18)
    time.sleep(216)
    print("\n[action] Sending run commands")
    talk(s, tube_19)
    time.sleep(228)
    print("\n[action] Sending run commands")
    talk(s, tube_20)
    time.sleep(240)
    print("\n[action] Sending run commands")
    talk(s, tube_21)
    # NOTE(review): the 20 s sleep here and 200 s after tube_22 break the
    # 12 s-per-tube pattern above -- confirm these are intentional.
    time.sleep(20)
    print("\n[action] Sending run commands")
    talk(s, tube_22)
    time.sleep(200)
    print("\n[action] Sending stop commands..")
    talk(s, stop_cmds)
    print("\n[action] Closing port..")
    s.close()
| 24.823529 | 79 | 0.572719 |
import serial
import sys
import glob
import time
startMarker = 60
endMarker = 62
midMarker = 44
def populate_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result[-1]
def connect(port):
s = serial.Serial()
s.port = port
s.baudrate = 2000000
s.parity = serial.PARITY_NONE
s.stopbits = serial.STOPBITS_ONE
s.bytesize = serial.EIGHTBITS
s.timeout = 1
s.open()
return s
def write_to_Arduino(s, string):
s.write(string.encode())
s.flushInput()
return
def listen(s):
char = ""
x = "z"
while ord(x) != startMarker:
x = s.read()
while ord(x) != endMarker:
if ord(x) != startMarker:
char = char + x.decode()
x = s.read()
return(char)
def talk(s, commands):
waitingForReply = False
for teststr in commands:
if not cmd_valid(teststr):
continue
if waitingForReply == False:
write_to_Arduino(s, teststr)
print("Sent from PC -- " + teststr)
waitingForReply = True
if waitingForReply == True:
while s.inWaiting() == 0:
pass
dataRecvd = listen(s)
print("Reply Received -- " + dataRecvd)
waitingForReply = False
time.sleep(0.1)
print("Send and receive complete")
def cmd_valid(cmd):
cmds = ["RUN", "STOP", "RESUME", "PAUSE", "SET_SPEED", "SET_ACCEL"]
inds = ["000", "100", "010", "001", "110", "101", "011", "111"]
valid = False
if "," in cmd and cmd[0] == '<' and cmd[-1]=='>':
testcmd = cmd[1:].split(",")[0]
ind = cmd[1:].split(",")[1]
if testcmd in cmds and ind in inds:
valid = True
return valid
return valid
if __name__ == "__main__":
setup_cmds = [
"<SET_ACCEL,111,1000.0,1000.0.0,1000.0>",
"<SET_SPEED,111,1000.0,1000.0,1000.0>",
]
run_cmds = [
"<this should not work>",
"<Neither should this>",
"Or this",
"Or even, this>",
"<RUN, 123, 0.0, 0.0, 0.0>",
#"<RUN,111,5000.0,200.0,200.0>"
]
stop_cmds = [
"<STOP,111,0.0,0.0,0.0>"
]
pause_cmds = [
"<PAUSE,100,0.0,0.0,0.0>"
]
resume_cmds = [
"<RESUME,100,0.0,0.0,0.0>"
]
tube_1 = ["<RUN,100,84,84,84>"]
tube_2 = ["<RUN,100,78,78,78>"]
tube_3 = ["<RUN,100,75,75,75>"]
tube_4 = ["<RUN,100,70,70,70>"]
tube_5 = ["<RUN,100,64,64,64>"]
tube_6 = ["<RUN,100,61,61,61>"]
tube_7 = ["<RUN,100,58,58,58>"]
tube_8 = ["<RUN,100,56,56,56>"]
tube_9 = ["<RUN,100,54,54,54>"]
tube_10 = ["<RUN,100,52,52,52>"]
tube_11 = ["<RUN,100,50,50,50>"]
tube_12 = ["<RUN,100,48,48,48>"]
tube_13 = ["<RUN,100,47,47,47>"]
tube_14 = ["<RUN,100,46,46,46>"]
tube_15 = ["<RUN,100,45,45,45>"]
tube_16 = ["<RUN,100,44,44,44>"]
tube_17 = ["<RUN,100,43,43,43>"]
tube_18 = ["<RUN,100,42,42,42>"]
tube_19 = ["<RUN,100,41,41,41>"]
tube_20 = ["<RUN,100,40,40,40>"]
tube_21 = ["<RUN,100,39,39,39>"]
tube_22 = ["<RUN,100,39,39,39>"]
port = populate_ports()
print("\n[setup] Connecting to port: {}".format(port))
s = connect(port)
time.sleep(5) # wait for the arduino to initialize
print(listen(s))
print("\n[setup] Sending setup commands..")
talk(s, setup_cmds)
time.sleep(1)
print("\n[action] Sending run commands..")
talk(s, run_cmds)
#longer so that we can initialize flow
time.sleep(20)
print("\n[action] Sending run commands..")
talk(s, tube_1)
time.sleep(12)
print("\n[action] Sending run commands..")
talk(s, tube_2)
time.sleep(24)
print("\n[action] Sending run commands..")
talk(s, tube_3)
time.sleep(36)
print("\n[action] Sending run commands..")
talk(s, tube_4)
time.sleep(48)
print("\n[action] Sending run commands..")
talk(s, tube_5)
time.sleep(60)
print("\n[action] Sending run commands..")
talk(s, tube_6)
time.sleep(72)
print("\n[action] Sending run commands..")
talk(s, tube_7)
time.sleep(84)
print("\n[action] Sending run commands..")
talk(s, tube_8)
time.sleep(96)
print("\n[action] Sending run commands..")
talk(s, tube_9)
time.sleep(108)
print("\n[action] Sending run commands..")
talk(s, tube_10)
time.sleep(120)
print("\n[action] Sending run commands..")
talk(s, tube_11)
time.sleep(132)
print("\n[action] Sending run commands")
talk(s, tube_12)
time.sleep(144)
print("\n[action] Sending run commands")
talk(s, tube_13)
time.sleep(156)
print("\n[action] Sending run commands")
talk(s, tube_14)
time.sleep(168)
print("\n[action] Sending run commands")
talk(s, tube_15)
time.sleep(180)
print("\n[action] Sending run commands")
talk(s, tube_16)
time.sleep(192)
print("\n[action] Sending run commands")
talk(s, tube_17)
time.sleep(204)
print("\n[action] Sending run commands")
talk(s, tube_18)
time.sleep(216)
print("\n[action] Sending run commands")
talk(s, tube_19)
time.sleep(228)
print("\n[action] Sending run commands")
talk(s, tube_20)
time.sleep(240)
print("\n[action] Sending run commands")
talk(s, tube_21)
time.sleep(20)
print("\n[action] Sending run commands")
talk(s, tube_22)
time.sleep(200)
print("\n[action] Sending stop commands..")
talk(s, stop_cmds)
print("\n[action] Closing port..")
s.close()
| true | true |
1c36392d32f0668c77c831b3f1f6c1dde1d2a96d | 13 | py | Python | PTTLibrary/Version.py | shihyu/PTTLibrary | b40f0d45e1ca2fc8e072473ece63a8628ac7168e | [
"MIT"
] | 1 | 2019-04-30T22:56:38.000Z | 2019-04-30T22:56:38.000Z | PTTLibrary/Version.py | shihyu/PTTLibrary | b40f0d45e1ca2fc8e072473ece63a8628ac7168e | [
"MIT"
] | null | null | null | PTTLibrary/Version.py | shihyu/PTTLibrary | b40f0d45e1ca2fc8e072473ece63a8628ac7168e | [
"MIT"
] | null | null | null | Ver = '0.7.5' | 13 | 13 | 0.461538 | Ver = '0.7.5' | true | true |
1c363c9ad9549524446ff58c32f867f3d750ffbd | 741 | py | Python | src/ims/element/incident/incidents_template/__init__.py | burningmantech/ranger-ims-server | 66c8158b93c80ca342856666acb2689287db8043 | [
"Apache-2.0"
] | 26 | 2015-11-04T19:14:28.000Z | 2022-01-28T14:31:06.000Z | src/ims/element/incident/incidents_template/__init__.py | burningmantech/ranger-ims-server | 66c8158b93c80ca342856666acb2689287db8043 | [
"Apache-2.0"
] | 508 | 2015-04-28T00:24:51.000Z | 2022-03-30T17:29:47.000Z | src/ims/element/incident/incidents_template/__init__.py | burningmantech/ranger-ims-server | 66c8158b93c80ca342856666acb2689287db8043 | [
"Apache-2.0"
] | 6 | 2015-04-15T13:58:40.000Z | 2019-12-09T00:47:40.000Z | ##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Incidents template page element.
"""
from ._incidents_template import IncidentsTemplatePage
__all__ = ("IncidentsTemplatePage",)
| 29.64 | 74 | 0.761134 |
from ._incidents_template import IncidentsTemplatePage
__all__ = ("IncidentsTemplatePage",)
| true | true |
1c363d33f4846c8c45da214041c3dc6b2be4ff76 | 4,919 | py | Python | precommit.py | mristin/icontract | 940977aacb2160b43eb93fc5e1a257907a9a23f1 | [
"MIT"
] | null | null | null | precommit.py | mristin/icontract | 940977aacb2160b43eb93fc5e1a257907a9a23f1 | [
"MIT"
] | null | null | null | precommit.py | mristin/icontract | 940977aacb2160b43eb93fc5e1a257907a9a23f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Runs precommit checks on the repository."""
import argparse
import os
import pathlib
import subprocess
import sys
import cpuinfo
import icontract
def benchmark_against_dpcontracts(repo_root: pathlib.Path, overwrite: bool) -> None:
    """Run benchmarks against dpcontracts and optionally splice them into the Readme.

    :param repo_root: path to the repository root
    :param overwrite: if True, rewrite the benchmark section of README.rst
        between the start/end markers; otherwise just run the scripts and
        let their output go to stdout.
    """
    script_rel_paths = [
        'benchmarks/against_dpcontracts/compare_invariant.py', 'benchmarks/against_dpcontracts/compare_precondition.py',
        'benchmarks/against_dpcontracts/compare_postcondition.py'
    ]
    if not overwrite:
        # Report-only mode: run each benchmark, separating outputs by a
        # blank line.
        for i, script_rel_path in enumerate(script_rel_paths):
            if i > 0:
                print()
            subprocess.check_call(['python3', str(repo_root / script_rel_path)])
    else:
        # Build the replacement README section as a list of text chunks.
        out = ['The following scripts were run:\n\n']
        for script_rel_path in script_rel_paths:
            out.append('* `{0} <https://github.com/Parquery/icontract/tree/master/{0}>`_\n'.format(script_rel_path))
        out.append('\n')
        out.append(('The benchmarks were executed on {}.\nWe used icontract {} and dpcontracts 0.6.0.\n\n').format(
            cpuinfo.get_cpu_info()['brand'], icontract.__version__))
        out.append('The following tables summarize the results.\n\n')
        # Captured stdout of each benchmark; informational type comment
        # (``List`` is not imported in this module).
        stdouts = []  # type: List[str]
        for script_rel_path in script_rel_paths:
            stdout = subprocess.check_output(['python3', str(repo_root / script_rel_path)]).decode()
            stdouts.append(stdout)
            out.append(stdout)
            out.append('\n')
        readme_path = repo_root / 'README.rst'
        readme = readme_path.read_text()
        # NOTE(review): 'Becnhmark' is misspelled; the marker must match the
        # README literally, so presumably the README carries the same typo --
        # fix both together or not at all.
        marker_start = '.. Becnhmark report from precommit.py starts.'
        marker_end = '.. Benchmark report from precommit.py ends.'
        lines = readme.splitlines()
        try:
            index_start = lines.index(marker_start)
        except ValueError as exc:
            raise ValueError('Could not find the marker for the benchmarks in the {}: {}'.format(
                readme_path, marker_start)) from exc
        try:
            index_end = lines.index(marker_end)
        except ValueError as exc:
            raise ValueError('Could not find the start marker for the benchmarks in the {}: {}'.format(
                readme_path, marker_end)) from exc
        assert index_start < index_end, 'Unexpected end marker before start marker for the benchmarks.'
        # Keep everything outside the markers, replace everything between.
        lines = lines[:index_start + 1] + ['\n'] + (''.join(out)).splitlines() + ['\n'] + lines[index_end:]
        readme_path.write_text('\n'.join(lines) + '\n')
        print('\n\n'.join(stdouts))
def main() -> int:
    """Execute the precommit checks (format, lint, type-check, test, docs).

    Returns 0 on success; any failing check raises via check_call.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--overwrite",
        help="Overwrites the unformatted source files with the well-formatted code in place. "
        "If not set, an exception is raised if any of the files do not conform to the style guide.",
        action='store_true')
    args = parser.parse_args()
    overwrite = bool(args.overwrite)

    # The script lives at the repository root.
    repo_root = pathlib.Path(__file__).parent

    print("YAPF'ing...")
    if overwrite:
        subprocess.check_call(
            [
                "yapf", "--in-place", "--style=style.yapf", "--recursive", "tests", "icontract", "setup.py",
                "precommit.py"
            ],
            cwd=repo_root.as_posix())
    else:
        # Diff-only mode: yapf exits non-zero on style violations.
        subprocess.check_call(
            ["yapf", "--diff", "--style=style.yapf", "--recursive", "tests", "icontract", "setup.py", "precommit.py"],
            cwd=repo_root.as_posix())
    print("Mypy'ing...")
    subprocess.check_call(["mypy", "--strict", "icontract", "tests"], cwd=repo_root.as_posix())
    print("Pylint'ing...")
    subprocess.check_call(["pylint", "--rcfile=pylint.rc", "tests", "icontract"], cwd=repo_root.as_posix())
    print("Pydocstyle'ing...")
    subprocess.check_call(["pydocstyle", "icontract"], cwd=repo_root.as_posix())
    print("Testing...")
    # ICONTRACT_SLOW enables the slow test paths in the test suite.
    env = os.environ.copy()
    env['ICONTRACT_SLOW'] = 'true'
    # yapf: disable
    subprocess.check_call(
        ["coverage", "run",
         "--source", "icontract",
         "-m", "unittest", "discover", "tests"],
        cwd=repo_root.as_posix(),
        env=env)
    # yapf: enable
    subprocess.check_call(["coverage", "report"])
    print("Benchmarking against dpcontracts...")
    benchmark_against_dpcontracts(repo_root=repo_root, overwrite=overwrite)
    print("Doctesting...")
    subprocess.check_call(["python3", "-m", "doctest", "README.rst"])
    # Run doctests embedded in every module of the package.
    for pth in (repo_root / "icontract").glob("**/*.py"):
        subprocess.check_call(["python3", "-m", "doctest", pth.as_posix()])
    print("Checking the restructured text of the readme...")
    subprocess.check_call(['python3', 'setup.py', 'check', '--restructuredtext', '--strict'])
    return 0
if __name__ == "__main__":
sys.exit(main())
| 35.388489 | 120 | 0.622688 |
import argparse
import os
import pathlib
import subprocess
import sys
import cpuinfo
import icontract
def benchmark_against_dpcontracts(repo_root: pathlib.Path, overwrite: bool) -> None:
    """Run the benchmark scripts against dpcontracts and optionally update the readme.

    :param repo_root: root directory of the repository
    :param overwrite: if True, run the benchmarks, capture their output and splice
        it into README.rst between the report markers; if False, only run the
        scripts and stream their output to stdout.
    :raises ValueError: if either report marker is missing from README.rst
    """
    script_rel_paths = [
        'benchmarks/against_dpcontracts/compare_invariant.py', 'benchmarks/against_dpcontracts/compare_precondition.py',
        'benchmarks/against_dpcontracts/compare_postcondition.py'
    ]
    if not overwrite:
        for i, script_rel_path in enumerate(script_rel_paths):
            if i > 0:
                print()
            subprocess.check_call(['python3', str(repo_root / script_rel_path)])
    else:
        out = ['The following scripts were run:\n\n']
        for script_rel_path in script_rel_paths:
            out.append('* `{0} <https://github.com/Parquery/icontract/tree/master/{0}>`_\n'.format(script_rel_path))
        out.append('\n')
        out.append(('The benchmarks were executed on {}.\nWe used icontract {} and dpcontracts 0.6.0.\n\n').format(
            cpuinfo.get_cpu_info()['brand'], icontract.__version__))
        out.append('The following tables summarize the results.\n\n')

        stdouts = []
        for script_rel_path in script_rel_paths:
            stdout = subprocess.check_output(['python3', str(repo_root / script_rel_path)]).decode()
            stdouts.append(stdout)
            out.append(stdout)
            out.append('\n')

        readme_path = repo_root / 'README.rst'
        readme = readme_path.read_text()
        # NOTE(review): the misspelling "Becnhmark" must match the marker actually
        # present in README.rst; confirm before "fixing" either side.
        marker_start = '.. Becnhmark report from precommit.py starts.'
        marker_end = '.. Benchmark report from precommit.py ends.'
        lines = readme.splitlines()

        try:
            index_start = lines.index(marker_start)
        except ValueError as exc:
            raise ValueError('Could not find the start marker for the benchmarks in the {}: {}'.format(
                readme_path, marker_start)) from exc

        try:
            index_end = lines.index(marker_end)
        except ValueError as exc:
            # Fixed: this message previously said "start marker" although it
            # concerns the end marker.
            raise ValueError('Could not find the end marker for the benchmarks in the {}: {}'.format(
                readme_path, marker_end)) from exc

        assert index_start < index_end, 'Unexpected end marker before start marker for the benchmarks.'

        # Replace everything between the two markers with the fresh report.
        lines = lines[:index_start + 1] + ['\n'] + (''.join(out)).splitlines() + ['\n'] + lines[index_end:]
        readme_path.write_text('\n'.join(lines) + '\n')

        # Fixed: this print was previously dedented to function level, where
        # ``stdouts`` is unbound on the non-overwrite path (NameError).
        print('\n\n'.join(stdouts))
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
"--overwrite",
help="Overwrites the unformatted source files with the well-formatted code in place. "
"If not set, an exception is raised if any of the files do not conform to the style guide.",
action='store_true')
args = parser.parse_args()
overwrite = bool(args.overwrite)
repo_root = pathlib.Path(__file__).parent
print("YAPF'ing...")
if overwrite:
subprocess.check_call(
[
"yapf", "--in-place", "--style=style.yapf", "--recursive", "tests", "icontract", "setup.py",
"precommit.py"
],
cwd=repo_root.as_posix())
else:
subprocess.check_call(
["yapf", "--diff", "--style=style.yapf", "--recursive", "tests", "icontract", "setup.py", "precommit.py"],
cwd=repo_root.as_posix())
print("Mypy'ing...")
subprocess.check_call(["mypy", "--strict", "icontract", "tests"], cwd=repo_root.as_posix())
print("Pylint'ing...")
subprocess.check_call(["pylint", "--rcfile=pylint.rc", "tests", "icontract"], cwd=repo_root.as_posix())
print("Pydocstyle'ing...")
subprocess.check_call(["pydocstyle", "icontract"], cwd=repo_root.as_posix())
print("Testing...")
env = os.environ.copy()
env['ICONTRACT_SLOW'] = 'true'
subprocess.check_call(
["coverage", "run",
"--source", "icontract",
"-m", "unittest", "discover", "tests"],
cwd=repo_root.as_posix(),
env=env)
subprocess.check_call(["coverage", "report"])
print("Benchmarking against dpcontracts...")
benchmark_against_dpcontracts(repo_root=repo_root, overwrite=overwrite)
print("Doctesting...")
subprocess.check_call(["python3", "-m", "doctest", "README.rst"])
for pth in (repo_root / "icontract").glob("**/*.py"):
subprocess.check_call(["python3", "-m", "doctest", pth.as_posix()])
print("Checking the restructured text of the readme...")
subprocess.check_call(['python3', 'setup.py', 'check', '--restructuredtext', '--strict'])
return 0
if __name__ == "__main__":
sys.exit(main())
| true | true |
1c363dd5d9593792cc8cb2b27569d7c77eec3110 | 1,951 | py | Python | test/trace_processor/chrome/modified_rail_modes_no_vsyncs.py | proton-vayu/android_external_perfetto | b8f131ee57a09e9f2bb8378364fad80fc753ff97 | [
"Apache-2.0"
] | 933 | 2019-12-10T10:45:28.000Z | 2022-03-31T03:43:44.000Z | test/trace_processor/chrome/modified_rail_modes_no_vsyncs.py | proton-vayu/android_external_perfetto | b8f131ee57a09e9f2bb8378364fad80fc753ff97 | [
"Apache-2.0"
] | 252 | 2019-12-10T16:13:57.000Z | 2022-03-31T09:56:46.000Z | test/trace_processor/chrome/modified_rail_modes_no_vsyncs.py | proton-vayu/android_external_perfetto | b8f131ee57a09e9f2bb8378364fad80fc753ff97 | [
"Apache-2.0"
] | 153 | 2020-01-08T20:17:27.000Z | 2022-03-30T20:53:21.000Z | #!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fixed: this used to be "from os import sys", which only worked because the
# os module happens to expose its internal reference to sys; import it directly.
import sys

import synth_common
from synth_common import s_to_ns

trace = synth_common.create_trace()
trace.add_chrome_metadata(os_name="Android")

# Two process-scoped tracks receive the RAIL-mode slices below.
track1 = 1234
track2 = 4567
trace.add_process_track_descriptor(track1, pid=0)
trace.add_process_track_descriptor(track2, pid=2)

# Track 1: RESPONSE [0s, 1s), LOAD [1s, 3s), then IDLE with dur=-1
# (presumably an open-ended slice lasting until the trace end -- confirm).
trace.add_rail_mode_slice(
    ts=0,
    dur=s_to_ns(1),
    track=track1,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_RESPONSE)
trace.add_rail_mode_slice(
    ts=s_to_ns(1),
    dur=s_to_ns(2),
    track=track1,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_LOAD)
trace.add_rail_mode_slice(
    ts=s_to_ns(3),
    dur=-1,
    track=track1,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)

# Track 2: ANIMATION/IDLE alternation.  Note the IDLE slice [1s, 3.5s)
# overlaps the ANIMATION slice starting at 2.5s, as in the original test data.
trace.add_rail_mode_slice(
    ts=0,
    dur=s_to_ns(1),
    track=track2,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_ANIMATION)
trace.add_rail_mode_slice(
    ts=s_to_ns(1),
    dur=s_to_ns(2.5),
    track=track2,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
trace.add_rail_mode_slice(
    ts=s_to_ns(2.5),
    dur=s_to_ns(1),
    track=track2,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_ANIMATION)
trace.add_rail_mode_slice(
    ts=s_to_ns(3.5),
    dur=s_to_ns(1),
    track=track2,
    mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)

# Emit the serialized protobuf trace on stdout (binary).
sys.stdout.buffer.write(trace.trace.SerializeToString())
| 28.275362 | 74 | 0.754485 |
from os import sys
import synth_common
from synth_common import s_to_ns
trace = synth_common.create_trace()
trace.add_chrome_metadata(os_name="Android")
track1 = 1234
track2 = 4567
trace.add_process_track_descriptor(track1, pid=0)
trace.add_process_track_descriptor(track2, pid=2)
trace.add_rail_mode_slice(
ts=0,
dur=s_to_ns(1),
track=track1,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_RESPONSE)
trace.add_rail_mode_slice(
ts=s_to_ns(1),
dur=s_to_ns(2),
track=track1,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_LOAD)
trace.add_rail_mode_slice(
ts=s_to_ns(3),
dur=-1,
track=track1,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
trace.add_rail_mode_slice(
ts=0,
dur=s_to_ns(1),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_ANIMATION)
trace.add_rail_mode_slice(
ts=s_to_ns(1),
dur=s_to_ns(2.5),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
trace.add_rail_mode_slice(
ts=s_to_ns(2.5),
dur=s_to_ns(1),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_ANIMATION)
trace.add_rail_mode_slice(
ts=s_to_ns(3.5),
dur=s_to_ns(1),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
sys.stdout.buffer.write(trace.trace.SerializeToString())
| true | true |
1c363dd99927d61683b93e7ad53acd35ed85011f | 3,913 | py | Python | app/src/lambda_cleanup.py | troydieter/aws-auto-cleanup | 523bae5cc57b81d3a2f0d43c87b9f1ef5390e3a4 | [
"MIT"
] | 322 | 2019-04-15T01:59:57.000Z | 2022-03-09T00:06:55.000Z | app/src/lambda_cleanup.py | troydieter/aws-auto-cleanup | 523bae5cc57b81d3a2f0d43c87b9f1ef5390e3a4 | [
"MIT"
] | 70 | 2019-04-15T01:27:21.000Z | 2022-03-02T00:39:29.000Z | app/src/lambda_cleanup.py | troydieter/aws-auto-cleanup | 523bae5cc57b81d3a2f0d43c87b9f1ef5390e3a4 | [
"MIT"
] | 49 | 2019-04-15T06:36:42.000Z | 2022-01-17T11:37:32.000Z | import sys
import boto3
from src.helper import Helper
class LambdaCleanup:
    """Cleans up stale AWS Lambda resources within a single region."""

    def __init__(self, logging, whitelist, settings, execution_log, region):
        self.logging = logging
        self.whitelist = whitelist
        self.settings = settings
        self.execution_log = execution_log
        self.region = region

        # Lazily created boto3 client (see the client_lambda property).
        self._client_lambda = None
        self.is_dry_run = Helper.get_setting(self.settings, "general.dry_run", True)

    @property
    def client_lambda(self):
        """boto3 Lambda client for the configured region, created on first access."""
        if not self._client_lambda:
            self._client_lambda = boto3.client("lambda", region_name=self.region)
        return self._client_lambda

    def run(self):
        """Run all Lambda cleanup routines."""
        self.functions()

    def functions(self):
        """
        Deletes Lambda Functions last modified more than the configured TTL
        (days) ago, unless whitelisted, in dry-run mode, or cleaning is disabled.

        Returns False only when listing the functions fails; True otherwise.
        """
        self.logging.debug("Started cleanup of Lambda Functions.")

        is_cleaning_enabled = Helper.get_setting(
            self.settings, "services.lambda.function.clean", False
        )
        resource_maximum_age = Helper.get_setting(
            self.settings, "services.lambda.function.ttl", 7
        )
        resource_whitelist = Helper.get_whitelist(self.whitelist, "lambda.function")

        # Guard clause instead of wrapping the whole routine in an if-block.
        if not is_cleaning_enabled:
            self.logging.info("Skipping cleanup of Lambda Functions.")
            return True

        try:
            paginator = self.client_lambda.get_paginator("list_functions")
            resources = paginator.paginate().build_full_result().get("Functions")
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        except Exception:
            self.logging.error("Could not list all Lambda Functions.")
            self.logging.error(sys.exc_info()[1])
            return False

        for resource in resources:
            resource_id = resource.get("FunctionName")
            resource_age = Helper.get_day_delta(resource.get("LastModified")).days
            resource_action = None

            if resource_id in resource_whitelist:
                self.logging.debug(
                    f"Lambda Function '{resource_id}' has been whitelisted and has not been deleted."
                )
                resource_action = "SKIP - WHITELIST"
            elif resource_age <= resource_maximum_age:
                self.logging.debug(
                    f"Lambda Function '{resource_id}' was last modified {resource_age} days ago "
                    "(less than TTL setting) and has not been deleted."
                )
                resource_action = "SKIP - TTL"
            else:
                try:
                    # In dry-run mode the deletion is skipped but still logged as DELETE.
                    if not self.is_dry_run:
                        self.client_lambda.delete_function(FunctionName=resource_id)
                except Exception:
                    self.logging.error(
                        f"Could not delete Lambda Function '{resource_id}'."
                    )
                    self.logging.error(sys.exc_info()[1])
                    resource_action = "ERROR"
                else:
                    self.logging.info(
                        f"Lambda Function '{resource_id}' was last modified {resource_age} days ago "
                        "and has been deleted."
                    )
                    resource_action = "DELETE"

            Helper.record_execution_log_action(
                self.execution_log,
                self.region,
                "Lambda",
                "Function",
                resource_id,
                resource_action,
            )

        self.logging.debug("Finished cleanup of Lambda Functions.")
        return True
| 37.990291 | 109 | 0.509839 | import sys
import boto3
from src.helper import Helper
class LambdaCleanup:
def __init__(self, logging, whitelist, settings, execution_log, region):
self.logging = logging
self.whitelist = whitelist
self.settings = settings
self.execution_log = execution_log
self.region = region
self._client_lambda = None
self.is_dry_run = Helper.get_setting(self.settings, "general.dry_run", True)
@property
def client_lambda(self):
if not self._client_lambda:
self._client_lambda = boto3.client("lambda", region_name=self.region)
return self._client_lambda
def run(self):
self.functions()
def functions(self):
self.logging.debug("Started cleanup of Lambda Functions.")
is_cleaning_enabled = Helper.get_setting(
self.settings, "services.lambda.function.clean", False
)
resource_maximum_age = Helper.get_setting(
self.settings, "services.lambda.function.ttl", 7
)
resource_whitelist = Helper.get_whitelist(self.whitelist, "lambda.function")
if is_cleaning_enabled:
try:
paginator = self.client_lambda.get_paginator("list_functions")
resources = paginator.paginate().build_full_result().get("Functions")
except:
self.logging.error("Could not list all Lambda Functions.")
self.logging.error(sys.exc_info()[1])
return False
for resource in resources:
resource_id = resource.get("FunctionName")
resource_date = resource.get("LastModified")
resource_age = Helper.get_day_delta(resource_date).days
resource_action = None
if resource_id not in resource_whitelist:
if resource_age > resource_maximum_age:
try:
if not self.is_dry_run:
self.client_lambda.delete_function(
FunctionName=resource_id
)
except:
self.logging.error(
f"Could not delete Lambda Function '{resource_id}'."
)
self.logging.error(sys.exc_info()[1])
resource_action = "ERROR"
else:
self.logging.info(
f"Lambda Function '{resource_id}' was last modified {resource_age} days ago "
"and has been deleted."
)
resource_action = "DELETE"
else:
self.logging.debug(
f"Lambda Function '{resource_id}' was last modified {resource_age} days ago "
"(less than TTL setting) and has not been deleted."
)
resource_action = "SKIP - TTL"
else:
self.logging.debug(
f"Lambda Function '{resource_id}' has been whitelisted and has not been deleted."
)
resource_action = "SKIP - WHITELIST"
Helper.record_execution_log_action(
self.execution_log,
self.region,
"Lambda",
"Function",
resource_id,
resource_action,
)
self.logging.debug("Finished cleanup of Lambda Functions.")
return True
else:
self.logging.info("Skipping cleanup of Lambda Functions.")
return True
| true | true |
1c363e036998894d50d5ab79c027c7d1820b7b07 | 7,359 | py | Python | routes.py | Crazy-Ideas/happy-rainbow | f6939cab858dd98950ac2ffbbe54c2189449029e | [
"MIT"
] | null | null | null | routes.py | Crazy-Ideas/happy-rainbow | f6939cab858dd98950ac2ffbbe54c2189449029e | [
"MIT"
] | null | null | null | routes.py | Crazy-Ideas/happy-rainbow | f6939cab858dd98950ac2ffbbe54c2189449029e | [
"MIT"
] | null | null | null | from functools import wraps
from typing import List
from flask import render_template, request, url_for, Response, make_response, redirect, current_app, flash, send_file
from flask_login import login_user, current_user, logout_user
from werkzeug.urls import url_parse
from app import app, get_user_from_token
from certificate import create_certificate, certificate_download, certificate_delete, batch_certificate_delete
from config import Config, today
from forms import LoginForm, WorkshopForm, WorkshopDeleteForm, ParticipantForm, ParticipantDeleteForm
from models import Workshop, Participant
def cookie_login_required(route_function):
    """Decorate a view so it requires either a live session or a valid "token" cookie.

    When the Flask-Login session is not authenticated, the "token" cookie is
    resolved to a user and logged in; otherwise the login manager's
    unauthorized handler is returned.
    """

    @wraps(route_function)
    def wrapper(*args, **kwargs):
        if not current_user.is_authenticated:
            user = get_user_from_token(request.cookies.get("token"))
            if not user:
                return current_app.login_manager.unauthorized()
            login_user(user=user)
        return route_function(*args, **kwargs)

    return wrapper
@app.route("/")
@app.route("/home")
def home():
    """Render the public landing page."""
    return render_template("home.html")
@app.route("/workshops/future", methods=["GET", "POST"])
@cookie_login_required
def upcoming_workshops():
    """List workshops dated today or later; a valid POST deletes one workshop."""
    # Note: workshops dated exactly today also match completed_workshops
    # (both filters include today).
    workshops: List[Workshop] = Workshop.objects.filter("date", ">=", today()).get()
    workshops.sort(key=lambda workshop: workshop.date)
    form = WorkshopDeleteForm()
    if not form.validate_on_submit():
        return render_template("upcoming_workshop.html", title="Upcoming Workshops", workshops=workshops, form=form)
    # Valid POST: the form has resolved the workshop to delete.
    form.workshop.delete()
    return redirect(url_for("upcoming_workshops"))
@app.route("/workshops/past", methods=["GET", "POST"])
@cookie_login_required
def completed_workshops():
    """List past workshops (newest first); a valid POST batch-deletes certificates."""
    workshops: List[Workshop] = Workshop.objects.filter("date", "<=", today()).get()
    workshops.sort(key=lambda workshop: workshop.date, reverse=True)
    form = WorkshopDeleteForm()
    if not form.validate_on_submit():
        return render_template("completed_workshop.html", title="Completed Workshops", workshops=workshops, form=form)
    # Valid POST: delete all certificates (and attendees) for this workshop.
    batch_certificate_delete(form.workshop_id.data)
    return redirect(url_for("completed_workshops"))
@app.route("/workshops/<workshop_id>/certificate_url")
@cookie_login_required
def certificate_url(workshop_id: str):
    """Generate and display the signed certificate link for a workshop."""
    workshop: Workshop = Workshop.get_by_id(workshop_id)
    if not workshop:
        flash("Invalid workshop id.")
        return redirect(url_for("completed_workshops"))
    workshop.generate_url()
    return render_template("certificate_url.html", title="Certificate Link", workshop=workshop)
@app.route("/workshops/<workshop_id>/certificate/<signature>", methods=["GET", "POST"])
def certificate_preparation(workshop_id: str, signature: str):
    """Public page where an attendee submits their details to get a certificate.

    Access is granted through a signed link rather than a login; GET renders
    the form, a valid POST creates or refreshes the participant record and PDF.
    """
    workshop: Workshop = Workshop.get_by_id(workshop_id)
    # Unknown workshop or tampered signature: render the generic fallback page.
    if not workshop or not workshop.valid_signature(signature):
        return render_template("participant_form.html", workshop=None)
    form = ParticipantForm()
    if not form.validate_on_submit():
        return render_template("participant_form.html", workshop=workshop, form=form, participant=Participant())
    # Deduplicate by name_key within this workshop (presumably a normalized
    # form of the name -- confirm in the Participant model).
    participant: Participant = Participant.objects.filter_by(workshop_id=workshop_id,
                                                             name_key=form.participant.name_key).first()
    if participant:
        # Re-submission: reuse the stored record, generating the PDF only if missing.
        if not participant.certificate_pdf:
            participant.certificate_pdf = create_certificate(workshop, participant)
        participant.name = form.participant.name
        participant.phone = form.participant.phone
        participant.save()
    else:
        # First submission: create the certificate and count the new attendee.
        participant = form.participant
        participant.certificate_pdf = create_certificate(workshop, participant)
        participant.workshop_id = workshop_id
        participant.create()
        workshop.participants += 1
        workshop.save()
    return render_template("participant_form.html", workshop=workshop, form=form, participant=participant,
                           signature=signature)
@app.route("/workshops/<workshop_id>/participants", methods=["GET", "POST"])
@cookie_login_required
def view_participants(workshop_id: str):
    """List a workshop's attendees; a valid POST deletes one attendee and their PDF.

    Redirects back to the completed-workshops overview when the workshop does
    not exist or when no attendees remain.
    """
    workshop: Workshop = Workshop.get_by_id(workshop_id)
    if not workshop:
        # Fixed: an unknown id previously flashed "All attendees deleted.",
        # which was misleading; use the same message as certificate_url.
        flash("Invalid workshop id.")
        return redirect(url_for("completed_workshops"))
    if workshop.participants == 0:
        flash("All attendees deleted.")
        return redirect(url_for("completed_workshops"))
    participants: List[Participant] = Participant.objects.filter_by(workshop_id=workshop_id).get()
    form = ParticipantDeleteForm()
    if not form.validate_on_submit():
        return render_template("participants.html", workshop=workshop, participants=participants, form=form,
                               title="Workshop Attendees")
    # Valid POST: remove the stored PDF first, then the record, then the counter.
    certificate_delete(form.participant.certificate_pdf)
    form.participant.delete()
    workshop.participants -= 1
    workshop.save()
    return redirect(url_for("view_participants", workshop_id=workshop_id))
@app.route("/workshops/create", methods=["GET", "POST"])
@cookie_login_required
def create_workshop():
    """Render the workshop creation form; a valid POST persists the workshop."""
    form = WorkshopForm()
    if not form.validate_on_submit():
        return render_template("workshop_form.html", form=form, title="Add Workshop", create=True)
    form.workshop.create()
    return redirect(url_for("upcoming_workshops"))
@app.route("/workshops/update/<workshop_id>", methods=["GET", "POST"])
@cookie_login_required
def update_workshop(workshop_id: str):
    """Render the workshop edit form; a valid POST saves the changes."""
    workshop: Workshop = Workshop.get_by_id(workshop_id)
    if not workshop:
        flash("Error in retrieving workshop details.")
        return redirect(url_for("home"))
    # Pre-populate the form from the existing workshop.
    form = WorkshopForm(workshop)
    if not form.validate_on_submit():
        return render_template("workshop_form.html", form=form, title="Edit Workshop", create=False)
    form.workshop.save()
    return redirect(url_for("upcoming_workshops"))
@app.route("/workshops/<workshop_id>/participants/<participant_id>/download/<signature>")
def download(workshop_id: str, participant_id: str, signature: str):
    """Stream a participant's certificate PDF as an attachment.

    Anonymous users must present a valid signature; logged-in users may
    download without one.
    """
    workshop: Workshop = Workshop.get_by_id(workshop_id)
    participant: Participant = Participant.get_by_id(participant_id)
    if not workshop or not participant or (not workshop.valid_signature(signature) and current_user.is_anonymous):
        return render_template("participant_form.html", workshop=None)
    file_path = certificate_download(participant.certificate_pdf)
    filename = f"Happy Rainbow Certificate - {participant.name}.pdf"
    # NOTE(review): attachment_filename was renamed download_name in Flask 2.0
    # -- confirm the pinned Flask version before upgrading.
    return send_file(file_path, as_attachment=True, attachment_filename=filename)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Render the login form; a valid POST logs in and sets the "token" cookie."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template("login.html", form=form)
    token = form.user.get_token()
    login_user(user=form.user)
    next_page = request.args.get("next")
    # Only follow relative "next" targets (empty netloc) to avoid open redirects.
    if not next_page or url_parse(next_page).netloc != str():
        next_page = url_for("upcoming_workshops")
    response: Response = make_response(redirect(next_page))
    # Persist the token for cookie-based re-authentication (see cookie_login_required).
    response.set_cookie("token", token, max_age=Config.TOKEN_EXPIRY, secure=Config.CI_SECURITY, httponly=True,
                        samesite="Strict")
    return response
@app.route("/logout")
def logout():
    """Revoke the active token (if any), log the user out, and go home."""
    if current_user.is_authenticated:
        current_user.revoke_token()
    logout_user()
    return redirect(url_for("home"))
| 42.293103 | 118 | 0.725234 | from functools import wraps
from typing import List
from flask import render_template, request, url_for, Response, make_response, redirect, current_app, flash, send_file
from flask_login import login_user, current_user, logout_user
from werkzeug.urls import url_parse
from app import app, get_user_from_token
from certificate import create_certificate, certificate_download, certificate_delete, batch_certificate_delete
from config import Config, today
from forms import LoginForm, WorkshopForm, WorkshopDeleteForm, ParticipantForm, ParticipantDeleteForm
from models import Workshop, Participant
def cookie_login_required(route_function):
@wraps(route_function)
def decorated_route(*args, **kwargs):
if current_user.is_authenticated:
return route_function(*args, **kwargs)
token: str = request.cookies.get("token")
user = get_user_from_token(token)
if user:
login_user(user=user)
return route_function(*args, **kwargs)
return current_app.login_manager.unauthorized()
return decorated_route
@app.route("/")
@app.route("/home")
def home():
return render_template("home.html")
@app.route("/workshops/future", methods=["GET", "POST"])
@cookie_login_required
def upcoming_workshops():
workshops: List[Workshop] = Workshop.objects.filter("date", ">=", today()).get()
workshops.sort(key=lambda workshop: workshop.date)
form = WorkshopDeleteForm()
if not form.validate_on_submit():
return render_template("upcoming_workshop.html", title="Upcoming Workshops", workshops=workshops, form=form)
form.workshop.delete()
return redirect(url_for("upcoming_workshops"))
@app.route("/workshops/past", methods=["GET", "POST"])
@cookie_login_required
def completed_workshops():
workshops: List[Workshop] = Workshop.objects.filter("date", "<=", today()).get()
workshops.sort(key=lambda workshop: workshop.date, reverse=True)
form = WorkshopDeleteForm()
if not form.validate_on_submit():
return render_template("completed_workshop.html", title="Completed Workshops", workshops=workshops, form=form)
batch_certificate_delete(form.workshop_id.data)
return redirect(url_for("completed_workshops"))
@app.route("/workshops/<workshop_id>/certificate_url")
@cookie_login_required
def certificate_url(workshop_id: str):
workshop: Workshop = Workshop.get_by_id(workshop_id)
if not workshop:
flash("Invalid workshop id.")
return redirect(url_for("completed_workshops"))
workshop.generate_url()
return render_template("certificate_url.html", title="Certificate Link", workshop=workshop)
@app.route("/workshops/<workshop_id>/certificate/<signature>", methods=["GET", "POST"])
def certificate_preparation(workshop_id: str, signature: str):
workshop: Workshop = Workshop.get_by_id(workshop_id)
if not workshop or not workshop.valid_signature(signature):
return render_template("participant_form.html", workshop=None)
form = ParticipantForm()
if not form.validate_on_submit():
return render_template("participant_form.html", workshop=workshop, form=form, participant=Participant())
participant: Participant = Participant.objects.filter_by(workshop_id=workshop_id,
name_key=form.participant.name_key).first()
if participant:
if not participant.certificate_pdf:
participant.certificate_pdf = create_certificate(workshop, participant)
participant.name = form.participant.name
participant.phone = form.participant.phone
participant.save()
else:
participant = form.participant
participant.certificate_pdf = create_certificate(workshop, participant)
participant.workshop_id = workshop_id
participant.create()
workshop.participants += 1
workshop.save()
return render_template("participant_form.html", workshop=workshop, form=form, participant=participant,
signature=signature)
@app.route("/workshops/<workshop_id>/participants", methods=["GET", "POST"])
@cookie_login_required
def view_participants(workshop_id: str):
workshop: Workshop = Workshop.get_by_id(workshop_id)
if not workshop or workshop.participants == 0:
flash("All attendees deleted.")
return redirect(url_for("completed_workshops"))
participants: List[Participant] = Participant.objects.filter_by(workshop_id=workshop_id).get()
form = ParticipantDeleteForm()
if not form.validate_on_submit():
return render_template("participants.html", workshop=workshop, participants=participants, form=form,
title="Workshop Attendees")
certificate_delete(form.participant.certificate_pdf)
form.participant.delete()
workshop.participants -= 1
workshop.save()
return redirect(url_for("view_participants", workshop_id=workshop_id))
@app.route("/workshops/create", methods=["GET", "POST"])
@cookie_login_required
def create_workshop():
form = WorkshopForm()
if not form.validate_on_submit():
return render_template("workshop_form.html", form=form, title="Add Workshop", create=True)
form.workshop.create()
return redirect(url_for("upcoming_workshops"))
@app.route("/workshops/update/<workshop_id>", methods=["GET", "POST"])
@cookie_login_required
def update_workshop(workshop_id: str):
workshop: Workshop = Workshop.get_by_id(workshop_id)
if not workshop:
flash("Error in retrieving workshop details.")
return redirect(url_for("home"))
form = WorkshopForm(workshop)
if not form.validate_on_submit():
return render_template("workshop_form.html", form=form, title="Edit Workshop", create=False)
form.workshop.save()
return redirect(url_for("upcoming_workshops"))
@app.route("/workshops/<workshop_id>/participants/<participant_id>/download/<signature>")
def download(workshop_id: str, participant_id: str, signature: str):
workshop: Workshop = Workshop.get_by_id(workshop_id)
participant: Participant = Participant.get_by_id(participant_id)
if not workshop or not participant or (not workshop.valid_signature(signature) and current_user.is_anonymous):
return render_template("participant_form.html", workshop=None)
file_path = certificate_download(participant.certificate_pdf)
filename = f"Happy Rainbow Certificate - {participant.name}.pdf"
return send_file(file_path, as_attachment=True, attachment_filename=filename)
@app.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if not form.validate_on_submit():
return render_template("login.html", form=form)
token = form.user.get_token()
login_user(user=form.user)
next_page = request.args.get("next")
if not next_page or url_parse(next_page).netloc != str():
next_page = url_for("upcoming_workshops")
response: Response = make_response(redirect(next_page))
response.set_cookie("token", token, max_age=Config.TOKEN_EXPIRY, secure=Config.CI_SECURITY, httponly=True,
samesite="Strict")
return response
@app.route("/logout")
def logout():
if current_user.is_authenticated:
current_user.revoke_token()
logout_user()
return redirect(url_for("home"))
| true | true |
1c363f50c6a203b78ffbb057034c4dda407a0f28 | 8,976 | py | Python | docs/conf.py | sangfrois/NeuroKit | a7cc6dc9e33182892929834644809d63d332dd22 | [
"MIT"
] | 1 | 2020-08-12T09:26:27.000Z | 2020-08-12T09:26:27.000Z | docs/conf.py | sangfrois/NeuroKit | a7cc6dc9e33182892929834644809d63d332dd22 | [
"MIT"
] | null | null | null | docs/conf.py | sangfrois/NeuroKit | a7cc6dc9e33182892929834644809d63d332dd22 | [
"MIT"
] | 1 | 2021-11-14T21:18:48.000Z | 2021-11-14T21:18:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# neurokit2 documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import re
import sys
import mock
from m2r import MdInclude
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../'))
# -- Mock modules ---------------------------------------------
MOCK_MODULES = ['scipy', 'scipy.signal', 'scipy.ndimage', 'scipy.stats', 'scipy.misc', 'scipy.interpolate', 'scipy.sparse', 'scipy.linalg',
'scipy.spatial', 'scipy.special', 'scipy.integrate', 'scipy.cluster', 'scipy.optimize',
'sklearn', 'sklearn.neighbors', 'sklearn.mixture', 'sklearn.datasets', 'sklearn.metrics', 'sklearn.metrics.pairwise', 'sklearn.decomposition',
'mne', 'bioread', 'cvxopt', 'pywt']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
'nbsphinx',
'sphinx_nbexamples',
'matplotlib.sphinxext.plot_directive',
'sphinx_copybutton',
'recommonmark'
]
# matplotlib plot directive
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = True
plot_html_show_source_link = False
plot_pre_code = """import numpy as np
import pandas as pd"""
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# sphinx-nbexamples
process_examples = not os.path.exists(os.path.join(os.path.dirname(__file__), 'examples'))
not_document_data = 'sphinx_nbexamples.gallery_config'
# Style autodoc
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = False
napoleon_use_ivar = False
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
def find_author(init_path='../neurokit2/__init__.py'):
    """Return the package ``__author__`` string parsed from *init_path*.

    The path defaults to the package ``__init__.py`` relative to the docs
    directory, preserving the original hard-coded behavior.
    """
    # Read via a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(init_path) as init_file:
        content = init_file.read()
    result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__author__"), content)
    return str(result.group(1))
project = u'NeuroKit'
copyright = u"2020, Dominique Makowski"
author = find_author()
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
def find_version(path='../neurokit2/__init__.py'):
    """Extract the package ``__version__`` string from *path*.

    Mirrors :func:`find_author`; the default *path* preserves the original
    hard-coded behavior for existing callers.
    """
    # Context manager guarantees the file handle is closed (the original
    # relied on garbage collection to close it).
    with open(path) as fp:
        result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), fp.read())
    return result.group(1)
version = find_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai' # 'default', 'monokai'
# nbsphinx_codecell_lexer = 'default' # Doesn't do anything :/
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# -- Options for HTML THEME: sphinx_rtd_theme -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_favicon = "img/icon.ico"
html_logo = "img/neurokit.png"
html_static_path = ['_static'] # Folder that contain custom static files (e.g., CSS files)
# Theme options are theme-specific and customize the look and feel of a theme further.
# For a list of options available for each theme, see https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
html_theme_options = {
'collapse_navigation': False # Expandables entries
}
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'neurokit2doc'
# Bootstrap theme
# html_theme = 'bootstrap'
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# html_theme_options = {
# 'source_link_position': "footer",
# 'bootswatch_theme': "readable",
# 'navbar_sidebarrel': False,
# 'nosidebar': True,
# 'navbar_pagenav': False,
# 'bootstrap_version': "3",
# 'navbar_links': [
# ("Installation", "installation"),
# ("What's new", "news"),
# ("Functions", "functions"),
# ("Contributing", "contributing"),
# ("Authors", "credits")
# ],
#
# }
# -- Options for LaTeX output ------------------------------------------
pdf_title = u'NeuroKit2'
author_field = u'Official Documentation'
latex_elements = {
'sphinxsetup': r"""
VerbatimColor={RGB}{38,50,56},
verbatimwithframe=false,
"""
# Background color of chunks
# '
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc,
'neurokit2.tex',
pdf_title,
author_field,
'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc,
'neurokit2',
pdf_title,
[author_field],
1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc,
'neurokit2',
pdf_title,
author_field,
'neurokit2',
'The Python Toolbox for Neurophysiological Signal Processing.',
'Miscellaneous'),
]
# Other
add_module_names = False # so functions aren’t prepended with the name of the package/module
add_function_parentheses = True # to ensure that parentheses are added to the end of all function names
# -- Setup for recommonmark ---------------------------------------------
def setup(app):
    """Sphinx extension hook (auto-discovered because conf.py defines ``setup``).

    Wires up recommonmark's AutoStructify transform for Markdown sources and
    registers m2r's ``mdinclude`` directive and its config values.
    """
    # Use m2r only for mdinclude and recommonmark for everything else
    # https://github.com/readthedocs/recommonmark/issues/191#issuecomment-622369992
    app.add_config_value('recommonmark_config', {
        # 'url_resolver': lambda url: github_doc_root + url,
        'auto_toc_tree_section': 'Contents',
    }, True)
    app.add_transform(AutoStructify)
    # from m2r to make `mdinclude` work
    app.add_config_value('no_underscore_emphasis', False, 'env')
    app.add_config_value('m2r_parse_relative_links', False, 'env')
    app.add_config_value('m2r_anonymous_references', False, 'env')
    app.add_config_value('m2r_disable_inline_math', False, 'env')
    app.add_directive('mdinclude', MdInclude)
| 31.605634 | 158 | 0.66455 |
import os
import re
import sys
import mock
from m2r import MdInclude
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../'))
MOCK_MODULES = ['scipy', 'scipy.signal', 'scipy.ndimage', 'scipy.stats', 'scipy.misc', 'scipy.interpolate', 'scipy.sparse', 'scipy.linalg',
'scipy.spatial', 'scipy.special', 'scipy.integrate', 'scipy.cluster', 'scipy.optimize',
'sklearn', 'sklearn.neighbors', 'sklearn.mixture', 'sklearn.datasets', 'sklearn.metrics', 'sklearn.metrics.pairwise', 'sklearn.decomposition',
'mne', 'bioread', 'cvxopt', 'pywt']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
'nbsphinx',
'sphinx_nbexamples',
'matplotlib.sphinxext.plot_directive',
'sphinx_copybutton',
'recommonmark'
]
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = True
plot_html_show_source_link = False
plot_pre_code = """import numpy as np
import pandas as pd"""
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
process_examples = not os.path.exists(os.path.join(os.path.dirname(__file__), 'examples'))
not_document_data = 'sphinx_nbexamples.gallery_config'
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = False
napoleon_use_ivar = False
napoleon_use_rtype = False
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
def find_author(path='../neurokit2/__init__.py'):
    """Extract the package ``__author__`` string from *path*.

    A regex is used instead of importing the package so the docs can build
    without the package's runtime dependencies. The *path* parameter defaults
    to the original hard-coded location, so existing calls are unchanged.
    """
    # Context manager guarantees the file handle is closed (the original
    # relied on garbage collection to close it).
    with open(path) as fp:
        result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__author__"), fp.read())
    return str(result.group(1))
project = u'NeuroKit'
copyright = u"2020, Dominique Makowski"
author = find_author()
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
def find_version(path='../neurokit2/__init__.py'):
    """Extract the package ``__version__`` string from *path*.

    Mirrors :func:`find_author`; the default *path* preserves the original
    hard-coded behavior for existing callers.
    """
    # Context manager guarantees the file handle is closed (the original
    # relied on garbage collection to close it).
    with open(path) as fp:
        result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), fp.read())
    return result.group(1)
version = find_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai' # 'default', 'monokai'
# nbsphinx_codecell_lexer = 'default' # Doesn't do anything :/
todo_include_todos = False
html_show_sphinx = False
html_theme = 'sphinx_rtd_theme'
html_favicon = "img/icon.ico"
html_logo = "img/neurokit.png"
html_static_path = ['_static']
html_theme_options = {
'collapse_navigation': False
}
htmlhelp_basename = 'neurokit2doc'
# ("Functions", "functions"),
# ("Contributing", "contributing"),
# ("Authors", "credits")
# ],
#
# }
# -- Options for LaTeX output ------------------------------------------
pdf_title = u'NeuroKit2'
author_field = u'Official Documentation'
latex_elements = {
'sphinxsetup': r"""
VerbatimColor={RGB}{38,50,56},
verbatimwithframe=false,
"""
# Background color of chunks
# '
}
latex_documents = [
(master_doc,
'neurokit2.tex',
pdf_title,
author_field,
'manual'),
]
man_pages = [
(master_doc,
'neurokit2',
pdf_title,
[author_field],
1)
]
texinfo_documents = [
(master_doc,
'neurokit2',
pdf_title,
author_field,
'neurokit2',
'The Python Toolbox for Neurophysiological Signal Processing.',
'Miscellaneous'),
]
add_module_names = False
add_function_parentheses = True
def setup(app):
    """Sphinx extension hook: wire up recommonmark's AutoStructify and m2r.

    NOTE(review): the first statement in this copy was truncated by an
    extraction step to ``ue('recommonmark_config', ...)``; reconstructed here
    as ``app.add_config_value`` to match the intact copy of this config file.
    """
    # Use m2r only for mdinclude and recommonmark for everything else.
    app.add_config_value('recommonmark_config', {
        'auto_toc_tree_section': 'Contents',
    }, True)
    app.add_transform(AutoStructify)
    # m2r config values required to make the `mdinclude` directive work.
    app.add_config_value('no_underscore_emphasis', False, 'env')
    app.add_config_value('m2r_parse_relative_links', False, 'env')
    app.add_config_value('m2r_anonymous_references', False, 'env')
    app.add_config_value('m2r_disable_inline_math', False, 'env')
    app.add_directive('mdinclude', MdInclude)
| true | true |
1c36412a2ca1c8f5c84a14e573c9c6d729e8918c | 32,280 | py | Python | pytorch/libs/training/optim.py | yuyq96/asv-subtools | e3f43f380f26a25a131a9b5b0dfb6fb5bef71474 | [
"Apache-2.0"
] | 370 | 2020-05-31T16:28:08.000Z | 2022-03-24T07:27:50.000Z | pytorch/libs/training/optim.py | ts0923/asv-subtools | a678b8f3327de0e99c445a79a9e91e5e0e006b11 | [
"Apache-2.0"
] | 35 | 2020-07-01T12:08:31.000Z | 2021-12-15T03:18:14.000Z | pytorch/libs/training/optim.py | ts0923/asv-subtools | a678b8f3327de0e99c445a79a9e91e5e0e006b11 | [
"Apache-2.0"
] | 119 | 2020-06-08T11:27:09.000Z | 2022-03-31T05:31:53.000Z | # -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: Snowdar 2019-08-01)
import logging
import types
import math
import itertools as it
from torch._six import inf
from functools import partial, wraps
import warnings
from bisect import bisect_right
import torch
import torch.optim as optim
from torch.optim.optimizer import Optimizer
import libs.support.utils as utils
# Logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
## Wrapper ✿
def get_optimizer(model, params: dict = None):
    """Build an optimizer for ``model.parameters()`` from a params dict.

    Parameters
    ----------
    model : torch.nn.Module
        The model whose parameters are optimized.
    params : dict, optional
        Overrides for the defaults below (name, learn_rate, betas,
        weight_decay, lookahead settings, gc flag).

    Returns
    -------
    torch.optim.Optimizer
        The selected base optimizer, wrapped in :class:`Lookahead` when
        ``lookahead.alpha > 0``.

    Raises
    ------
    TypeError
        If gradient centralization is requested for an unsupported optimizer.
    ValueError
        If ``name`` does not match a supported optimizer.
    """
    # Fixed: the original signature used a mutable default (``params:dict={}``),
    # which is shared across calls; use a None sentinel instead.
    if params is None:
        params = {}
    # Suggested weight_decay: 1e-4 for l2 regularization (sgd, adam) and
    # 1e-1 for decouped weight decay (sgdw, adamw, radam, ralamb, adamod etc.)
    default_params = {
        "name":"adamW",
        "learn_rate":0.001,
        "beta1":0.9,
        "beta2":0.999,
        "beta3":0.999,
        "weight_decay":1e-4,
        "lookahead.k":5,
        "lookahead.alpha":0.,
        "gc":False
    }

    used_params = utils.assign_params_dict(default_params, params)

    # Base params
    name = used_params["name"]
    learn_rate = used_params["learn_rate"]
    beta1 = used_params["beta1"]
    beta2 = used_params["beta2"]
    beta3 = used_params["beta3"]
    weight_decay = used_params["weight_decay"]
    gc = used_params["gc"]

    extra_params = {}

    # Gradient centralization:
    # Yong, H., Huang, J., Hua, X., & Zhang, L. (2020). Gradient Centralization:
    # A New Optimization Technique for Deep Neural Networks. arXiv:2004.01461.
    # Github: https://github.com/Yonghongwei/Gradient-Centralization
    if gc:
        # Specify this list by developer.
        default_support_gc_list = ["adamW", "ralamb"]
        if name not in default_support_gc_list:
            raise TypeError("Optimizer {} does not support gradient centralization (GC) now.".format(name))
        extra_params["gc"] = True

    # Select optimizer
    if name == "sgd":
        base_optimizer = optim.SGD(model.parameters(), lr=learn_rate, momentum=beta1, weight_decay=weight_decay)
    elif name == "sgdW":
        base_optimizer = SGDW(model.parameters(), lr=learn_rate, momentum=beta1, weight_decay=weight_decay)
    elif name == "adam":
        base_optimizer = optim.Adam(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    elif name == "adamW":
        base_optimizer = AdamW(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay, **extra_params)
    elif name == "radam":
        base_optimizer = RAdam(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    elif name == "ralamb":
        base_optimizer = Ralamb(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay, **extra_params)
    elif name == "adamod":
        base_optimizer = AdaMod(model.parameters(), lr=learn_rate, betas=(beta1, beta2), beta3=beta3, weight_decay=weight_decay)
    elif name == "novograd":
        base_optimizer = Novograd(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    else:
        raise ValueError("Do not support {0} optimizer now.".format(name))

    # Using alpha to decide whether to use lookahead
    if used_params["lookahead.alpha"] > 0:
        logger.info("Use lookahead optimizer with alpha={} and k={}".format(used_params["lookahead.alpha"], used_params["lookahead.k"]))
        optimizer = Lookahead(base_optimizer, k=used_params["lookahead.k"], alpha=used_params["lookahead.alpha"])
    else:
        optimizer = base_optimizer

    return optimizer
## Optim-wrapper ✿
class Lookahead(Optimizer):
    """Lookahead optimizer wrapper (slow/fast weights).

    Delegates actual updates to ``base_optimizer`` and every ``k`` steps pulls
    each parameter back toward a slow copy with interpolation factor ``alpha``.
    ``alpha == 0`` disables the lookahead behavior entirely.
    Reference implementation:
    https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py
    """
    def __init__(self, base_optimizer, k=5, alpha=0.5):
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        # NOTE: super().__init__ is deliberately not called; this wrapper
        # shares param_groups with the inner optimizer instead.
        self.optimizer = base_optimizer
        self.param_groups = self.optimizer.param_groups
        self.alpha = alpha
        self.k = k
        # True after a step in which slow weights were synced back.
        self.is_back_step = False
        # Slow weights are created lazily on the first step (see below).
        self.init_weights = False
        for group in self.param_groups:
            group["step_counter"] = 0
    def step(self, closure=None):
        self.is_back_step = False
        # Init weights after model in a certain device and keep the device of weights same to model. [Snowdar 2018-09-01]
        if not self.init_weights and self.alpha > 0:
            self.slow_weights = [[p.clone().detach() for p in group['params']]
                                for group in self.param_groups]
            for w in it.chain(*self.slow_weights):
                w.requires_grad = False
            self.init_weights = True
        loss = None
        if closure is not None:
            loss = closure()
        # NOTE(review): the closure's loss is overwritten here by the inner
        # step's return value (typically None) — looks unintended; confirm.
        loss = self.optimizer.step()
        if self.alpha > 0:
            for group,slow_weights in zip(self.param_groups,self.slow_weights):
                group['step_counter'] += 1
                if group['step_counter'] % self.k != 0:
                    continue
                else:
                    self.is_back_step = True
                    # slow <- slow + alpha * (fast - slow); fast <- slow
                    for p,q in zip(group['params'],slow_weights):
                        if p.grad is None:
                            continue
                        q.data.add_(self.alpha * (p.data - q.data))
                        p.data.copy_(q.data)
        return loss
## Optimizer ✿
class SGDW(Optimizer):
    r"""Stochastic gradient descent (optionally with momentum) with decoupled
    weight decay (SGDW, https://arxiv.org/abs/1711.05101).

    Unlike L2 regularization, decay here shrinks the weights directly (scaled
    by lr) and never enters the gradient/momentum buffer.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        weight_decay (float, optional): decoupled weight decay (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    Example:
        >>> optimizer = SGDW(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    .. note::
        The momentum update follows the PyTorch convention
        ``v = mu*v + g; p = p - lr*v`` (not the Sutskever formulation).
    """

    def __init__(self, params, lr=0.1, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGDW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGDW, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints saved before the nesterov option existed lack the key.
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step: seed the buffer with the raw gradient.
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        # buf <- momentum * buf + (1 - dampening) * grad
                        # (alpha keyword avoids allocating a temporary tensor)
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        # Fixed: was the deprecated positional-alpha form
                        # ``d_p.add(momentum, buf)``, removed in modern PyTorch.
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                # Decoupled weight decay: p <- p - lr * wd * p (not via grad).
                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'] * group['lr'] * p.data)

                p.data.add_(d_p, alpha=-group['lr'])

        return loss
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False, gc=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
self.gc = gc
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data) #, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data) #, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data) #, memory_format=torch.preserve_format)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if self.gc:
# For linear layer Y=WX+b, the tensor shape of weight is (outplanes, inplanes),
# but for CNN layer(1d and 2d etc.), the tensor shape of weight is (outplanes, inplanes, [cnn-core]).
# And here the gc is used in both linear and CNN layer.
# It is not influenced by weight decay for weight decay directly changes the p.data rather than p.grad.
# But when using gc in adam, the order question should be considered for L2 regularization changes
# the p.grad.
if len(list(grad.size()))>=2:
grad.add_(-grad.mean(dim = tuple(range(1,len(list(grad.size())))), keepdim = True))
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_((1 - beta1) * grad)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
class RAdam(Optimizer):
    '''Rectified Adam (RAdam) with decoupled weight decay.

    PyTorch implementation of the RAdam optimizer from the paper
    "On the Variance of the Adaptive Learning Rate and Beyond"
    (https://arxiv.org/abs/1908.03265), based on
    https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py

    Example:
        >>> optimizer = RAdam(model.parameters(), lr=0.001)

    Note, here the weight decay is not L2 regularization (it is applied
    directly to the weights, scaled by lr).
    '''

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), N_sma_threshhold=4, eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Below this length of the SMA, fall back to un-adapted (SGD-like) updates.
        self.N_sma_threshhold = N_sma_threshhold
        # Cache of (step, N_sma, step_size) keyed by step % 10: the rectification
        # term depends only on the step count, so it is shared across params.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        # closure (callable, optional): re-evaluates the model, returns the loss.
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # All arithmetic is done in fp32 and copied back at the end,
                # so half-precision params remain numerically stable.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # First and second moment EMAs, as in Adam.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    # Rectification already computed for this step count.
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # Length of the approximated simple moving average (SMA).
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        # Variance is tractable: apply the rectification term.
                        step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        # Warmup phase: un-adapted step (no second-moment scaling).
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    # Decoupled weight decay (applied to weights, not grad).
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'] * p_data_fp32)

                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(-step_size * exp_avg)

                p.data.copy_(p_data_fp32)

        return loss
class Ralamb(Optimizer):
    '''Ralamb optimizer: RAdam plus the Layer-wise Adaptive Rate Scaling
    (LARS) trick, optionally with gradient centralization.

    Based on
    https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py
    '''

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), N_sma_threshhold=4, eps=1e-8, weight_decay=0, gc=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Below this length of the SMA, fall back to un-adapted updates.
        self.N_sma_threshhold = N_sma_threshhold
        # Cache of (step, N_sma, radam_step) keyed by step % 10; the
        # rectification term depends only on the step count.
        self.buffer = [[None, None, None] for ind in range(10)]
        # Whether to apply gradient centralization (see step()).
        self.gc = gc
        super(Ralamb, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Ralamb, self).__setstate__(state)

    def step(self, closure=None):
        # closure (callable, optional): re-evaluates the model, returns the loss.
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Arithmetic in fp32, copied back at the end.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ralamb does not support sparse gradients')
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                if self.gc:
                    # For linear layer Y=WX+b, the tensor shape of weight is (outplanes, inplanes),
                    # but for CNN layer(1d and 2d etc.), the tensor shape of weight is (outplanes, inplanes, [cnn-core]).
                    # And here the gc is used in both linear and CNN layer.
                    if len(list(grad.size()))>=2:
                        grad.add_(-grad.mean(dim = tuple(range(1,len(list(grad.size())))), keepdim = True))

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, radam_step = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # Length of the approximated simple moving average (SMA).
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma > self.N_sma_threshhold:
                        radam_step = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        radam_step = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = radam_step

                if group['weight_decay'] != 0:
                    # Decoupled weight decay (applied to weights, not grad).
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'] * p_data_fp32)

                # LARS trust ratio: ||w|| / ||w_after_decay|| (weight norm
                # clamped to [0, 10]); scales the per-layer step size.
                # NOTE(review): both norms are 0-dim tensors, so trust_ratio is
                # a 0-dim tensor passed as `alpha` below — works via implicit
                # scalar conversion; confirm on the targeted torch version.
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
                radam_norm = p_data_fp32.pow(2).sum().sqrt()
                if weight_norm == 0 or radam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / radam_norm

                state['weight_norm'] = weight_norm
                state['adam_norm'] = radam_norm
                state['trust_ratio'] = trust_ratio

                # more conservative since it's an approximated value
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-radam_step * trust_ratio)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-radam_step * trust_ratio)

                p.data.copy_(p_data_fp32)

        return loss
class AdaMod(Optimizer):
    """Implements AdaMod algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)
    It has been proposed in `Adaptive and Momental Bounds for Adaptive Learning Rate Methods`_.

    AdaMod is Adam with an extra EMA (coefficient ``beta3``) over the
    per-element effective learning rates, which upper-bounds each step.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        beta3 (float, optional): smoothing coefficient for adaptive learning rates (default: 0.9999)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay rather than L2 penalty (default: 0)
    Reference: https://github.com/lancopku/AdaMod.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), beta3=0.999,
                 eps=1e-8, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= beta3 < 1.0:
            raise ValueError("Invalid beta3 parameter: {}".format(beta3))
        defaults = dict(lr=lr, betas=betas, beta3=beta3, eps=eps,
                        weight_decay=weight_decay)
        super(AdaMod, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdaMod, self).__setstate__(state)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'AdaMod does not support sparse gradients')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    # Exponential moving average of actual learning rates
                    state['exp_avg_lr'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq, exp_avg_lr = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_lr']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # Decoupled weight decay (applied to weights, not grad).
                    p.data.add_(-group['weight_decay'] * group['lr'] * p.data)

                # Applies momental bounds on actual learning rates:
                # clip each element's effective lr by its own EMA (beta3).
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom)
                exp_avg_lr.mul_(group['beta3']).add_((1 - group['beta3']) * step_size)
                step_size = torch.min(step_size, exp_avg_lr)
                step_size.mul_(exp_avg)

                p.data.add_(-step_size)

        return loss
class Novograd(Optimizer):
    """Implements the Novograd optimization algorithm.

    Novograd keeps a per-element first moment but only a scalar (per-tensor)
    second moment computed from the squared gradient norm, with decoupled
    weight decay applied directly to the weights.

    Args:
        params (iterable): parameters to optimize or dicts defining groups.
        lr (float, optional): learning rate (default: 1e-3).
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and of its squared norm
            (default: (0.95, 0.25)).
        eps (float, optional): denominator term for numerical stability
            (default: 1e-8).
        weight_decay (float, optional): decoupled weight decay (default: 0).
        grad_averaging: if True, scale the gradient by (1 - beta1) before
            accumulating it into the first moment.
        amsgrad (boolean, optional): use the maximum of all second-moment
            estimates, as in AMSGrad (default: False).
    Reference: https://github.com/NVIDIA/DeepLearningExamples/
        blob/22f122183da1d46052a114bfcc1727921829e705/PyTorch/SpeechRecognition/
        Jasper/optimizers.py
    """
    def __init__(self, params, lr=1e-3, betas=(0.95, 0.25), eps=1e-8,
                 weight_decay=0, grad_averaging=False, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        group_defaults = dict(lr=lr, betas=betas, eps=eps,
                              weight_decay=weight_decay,
                              grad_averaging=grad_averaging,
                              amsgrad=amsgrad)
        super(Novograd, self).__init__(params, group_defaults)
    def __setstate__(self, state):
        super(Novograd, self).__setstate__(state)
        # Checkpoints saved before the amsgrad option existed lack the key.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): reevaluates the model and returns
                the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            beta1, beta2 = group['betas']
            use_amsgrad = group['amsgrad']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Sparse gradients are not supported.')
                state = self.state[p]
                if not state:
                    # First moment is per-element; second moment is a scalar.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                    if use_amsgrad:
                        # Running maximum of the scalar second moment.
                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                # Decoupled weight decay, applied straight to the weights.
                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'] * group['lr'] * p.data)
                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                state['step'] += 1
                sq_norm = torch.sum(torch.pow(grad, 2))
                if exp_avg_sq == 0:
                    # Bootstrap the scalar moment on the very first step.
                    exp_avg_sq.copy_(sq_norm)
                else:
                    exp_avg_sq.mul_(beta2).add_((1 - beta2) * sq_norm)
                if use_amsgrad:
                    # Normalize by the maximum of all 2nd-moment estimates.
                    max_exp_avg_sq = state['max_exp_avg_sq']
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                # NOTE: normalizes p.grad.data in place, same as the original.
                grad.div_(denom)
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)
                exp_avg.mul_(beta1).add_(grad)
                p.data.add_(-group['lr'] * exp_avg)
        return loss
| 41.813472 | 190 | 0.564405 |
import logging
import types
import math
import itertools as it
from torch._six import inf
from functools import partial, wraps
import warnings
from bisect import bisect_right
import torch
import torch.optim as optim
from torch.optim.optimizer import Optimizer
import libs.support.utils as utils
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def get_optimizer(model, params: dict = None):
    """Build an optimizer for ``model`` from a params dict.

    NOTE(review): the ``def`` line of this copy was truncated by extraction
    ("timizer(..."); restored as ``get_optimizer`` to match this module's
    conventions. The mutable ``{}`` default was replaced by a None sentinel.

    params keys (with defaults): name ("adamW"), learn_rate, beta1, beta2,
    beta3, weight_decay, lookahead.k, lookahead.alpha, gc.
    Returns the configured optimizer, optionally wrapped in Lookahead.
    Raises TypeError for unsupported GC combinations and ValueError for an
    unknown optimizer name.
    """
    if params is None:
        params = {}
    default_params = {
        "name":"adamW",
        "learn_rate":0.001,
        "beta1":0.9,
        "beta2":0.999,
        "beta3":0.999,
        "weight_decay":1e-4,
        "lookahead.k":5,
        "lookahead.alpha":0.,
        "gc":False
    }
    used_params = utils.assign_params_dict(default_params, params)
    name = used_params["name"]
    learn_rate = used_params["learn_rate"]
    beta1 = used_params["beta1"]
    beta2 = used_params["beta2"]
    beta3 = used_params["beta3"]
    weight_decay = used_params["weight_decay"]
    gc = used_params["gc"]
    extra_params = {}
    # Gradient centralization is implemented only by some optimizers here.
    if gc:
        default_support_gc_list = ["adamW", "ralamb"]
        if name not in default_support_gc_list:
            raise TypeError("Optimizer {} does not support gradient centralization (GC) now.".format(name))
        extra_params["gc"] = True
    if name == "sgd":
        base_optimizer = optim.SGD(model.parameters(), lr=learn_rate, momentum=beta1, weight_decay=weight_decay)
    elif name == "sgdW":
        base_optimizer = SGDW(model.parameters(), lr=learn_rate, momentum=beta1, weight_decay=weight_decay)
    elif name == "adam":
        base_optimizer = optim.Adam(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    elif name == "adamW":
        base_optimizer = AdamW(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay, **extra_params)
    elif name == "radam":
        base_optimizer = RAdam(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    elif name == "ralamb":
        base_optimizer = Ralamb(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay, **extra_params)
    elif name == "adamod":
        base_optimizer = AdaMod(model.parameters(), lr=learn_rate, betas=(beta1, beta2), beta3=beta3, weight_decay=weight_decay)
    elif name == "novograd":
        base_optimizer = Novograd(model.parameters(), lr=learn_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    else:
        raise ValueError("Do not support {0} optimizer now.".format(name))
    # Optionally wrap with Lookahead; alpha == 0 means no wrapping.
    if used_params["lookahead.alpha"] > 0:
        logger.info("Use lookahead optimizer with alpha={} and k={}".format(used_params["lookahead.alpha"], used_params["lookahead.k"]))
        optimizer = Lookahead(base_optimizer, k=used_params["lookahead.k"], alpha=used_params["lookahead.alpha"])
    else:
        optimizer = base_optimizer
    return optimizer
class Lookahead(Optimizer):
    """Lookahead wrapper: k fast steps forward, then one step back.

    NOTE(review): the class header of this copy was truncated by extraction
    ("Optimizer):"); restored as ``class Lookahead(Optimizer):`` to match how
    this module instantiates it (``Lookahead(base_optimizer, k, alpha)``).
    It deliberately does not call ``super().__init__`` and instead shares the
    wrapped optimizer's param_groups.
    """
    def __init__(self, base_optimizer, k=5, alpha=0.5):
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        self.optimizer = base_optimizer
        self.param_groups = self.optimizer.param_groups
        self.alpha = alpha
        self.k = k
        self.is_back_step = False   # True right after a slow-weight sync
        self.init_weights = False   # slow weights are cloned lazily
        for group in self.param_groups:
            group["step_counter"] = 0
    def step(self, closure=None):
        """Step the wrapped optimizer; every k steps pull the fast weights
        toward the slow copy by ``alpha`` (alpha == 0 disables lookahead)."""
        self.is_back_step = False
        # Clone the slow weights the first time step() is called.
        if not self.init_weights and self.alpha > 0:
            self.slow_weights = [[p.clone().detach() for p in group['params']]
                                 for group in self.param_groups]
            for w in it.chain(*self.slow_weights):
                w.requires_grad = False
            self.init_weights = True
        loss = None
        if closure is not None:
            loss = closure()
        # NOTE(review): the base optimizer's return value overwrites the
        # closure's loss, matching the original behavior.
        loss = self.optimizer.step()
        if self.alpha > 0:
            for group,slow_weights in zip(self.param_groups,self.slow_weights):
                group['step_counter'] += 1
                if group['step_counter'] % self.k != 0:
                    continue
                else:
                    self.is_back_step = True
                    for p,q in zip(group['params'],slow_weights):
                        if p.grad is None:
                            continue
                        # slow += alpha * (fast - slow); fast <- slow
                        q.data.add_(self.alpha * (p.data - q.data))
                        p.data.copy_(q.data)
        return loss
class SGDW(Optimizer):
    """SGD with decoupled weight decay (SGDW).

    NOTE(review): the class header of this copy was truncated by extraction
    ("ptimizer):"); restored as ``class SGDW(Optimizer):`` to match the
    ``SGDW(...)`` call in get_optimizer.

    Unlike torch.optim.SGD, weight decay is not folded into the gradient;
    it is applied directly to the weights (p -= wd * lr * p).
    """
    def __init__(self, params, lr=0.1, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGDW, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(SGDW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): reevaluates the model and returns
                the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step: buffer starts as a copy of the gradient.
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_((1 - dampening) * d_p)
                    if nesterov:
                        # Keyword form of the deprecated d_p.add(momentum, buf).
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                if group['weight_decay'] != 0:
                    # Decoupled weight decay, applied to the weights directly.
                    p.data.add_(-group['weight_decay'] * group['lr'] * p.data)
                p.data.add_(-group['lr'] * d_p)
        return loss
class AdamW(Optimizer):
    """Adam with decoupled weight decay (AdamW) and optional AMSGrad.

    Weight decay is applied multiplicatively to the weights
    (p *= 1 - lr * weight_decay) before the Adam update instead of being
    added to the gradient. ``gc=True`` enables gradient centralization
    (subtracting the per-filter gradient mean) for tensors of rank >= 2.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=1e-2, amsgrad=False, gc=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        self.gc = gc
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        # Checkpoints saved before the amsgrad option existed lack the key.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): reevaluates the model and returns
                the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Decoupled weight decay, applied before the Adam update.
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization on the first step for this parameter.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Gradient centralization: remove the mean over all axes but 0.
                if self.gc:
                    if len(list(grad.size()))>=2:
                        grad.add_(-grad.mean(dim = tuple(range(1,len(list(grad.size())))), keepdim = True))
                # Decay the first and second moment running averages.
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Normalize by the maximum of all 2nd-moment estimates.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
class RAdam(Optimizer):
    """Rectified Adam (RAdam).

    Rectifies the adaptive learning rate's variance in the early steps: when
    the approximated simple moving average length N_sma exceeds
    ``N_sma_threshhold`` the variance-rectified Adam step is used, otherwise
    an unadapted (SGD-like) momentum step is taken.

    NOTE(review): ``self.buffer`` caches (step, N_sma, step_size) keyed by
    step % 10 and is shared across all param groups — this presumes every
    group uses the same lr/betas and advances in lockstep; confirm if groups
    with differing hyperparameters are ever used.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), N_sma_threshhold=4, eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.N_sma_threshhold = N_sma_threshhold
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)
    def step(self, closure=None):
        """Performs a single optimization step; math is done in fp32 and the
        result copied back into the parameter's native dtype."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    # Keep the moments in fp32 to match the working copy.
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)
                state['step'] += 1
                # Reuse the rectification term computed for this step number.
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # Variance is tractable only once N_sma is large enough.
                    if N_sma > self.N_sma_threshhold:
                        step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                # Decoupled weight decay, applied to the weights directly.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'] * p_data_fp32)
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # Early steps: plain momentum update, no adaptivity.
                    p_data_fp32.add_(-step_size * exp_avg)
                p.data.copy_(p_data_fp32)
        return loss
class Ralamb(Optimizer):
    """RAdam combined with LARS-style layer-wise trust ratio scaling (Ralamb).

    Each parameter's RAdam step is rescaled by trust_ratio =
    ||w|| / ||w_after_decay||, with the weight norm clamped to [0, 10].
    ``gc=True`` enables gradient centralization for tensors of rank >= 2.

    NOTE(review): ``self.buffer`` caches the rectification term keyed by
    step % 10 and is shared across param groups — presumes identical
    hyperparameters and lockstep steps across groups; confirm.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), N_sma_threshhold=4, eps=1e-8, weight_decay=0, gc=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.N_sma_threshhold = N_sma_threshhold
        self.buffer = [[None, None, None] for ind in range(10)]
        self.gc = gc
        super(Ralamb, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(Ralamb, self).__setstate__(state)
    def step(self, closure=None):
        """Performs a single optimization step (fp32 working copy, copied
        back into the parameter's native dtype at the end)."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ralamb does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Gradient centralization: remove the mean over all axes but 0.
                if self.gc:
                    if len(list(grad.size()))>=2:
                        grad.add_(-grad.mean(dim = tuple(range(1,len(list(grad.size())))), keepdim = True))
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                state['step'] += 1
                # Reuse the RAdam rectification term cached for this step.
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, radam_step = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        radam_step = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        radam_step = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = radam_step
                # Decoupled weight decay, applied to the working copy.
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'] * p_data_fp32)
                # LARS trust ratio between the (clamped) weight norms.
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
                radam_norm = p_data_fp32.pow(2).sum().sqrt()
                if weight_norm == 0 or radam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / radam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = radam_norm
                state['trust_ratio'] = trust_ratio
                # more conservative since it's an approximated value
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-radam_step * trust_ratio)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-radam_step * trust_ratio)
                p.data.copy_(p_data_fp32)
        return loss
class AdaMod(Optimizer):
    """Adam with momental bounds on the effective learning rates (AdaMod).

    Keeps an exponential moving average of the per-element step sizes
    (``exp_avg_lr``, smoothed by ``beta3``) and clips each step to it, which
    damps unexpectedly large adaptive learning rates.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), beta3=0.999,
                 eps=1e-8, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= beta3 < 1.0:
            raise ValueError("Invalid beta3 parameter: {}".format(beta3))
        defaults = dict(lr=lr, betas=betas, beta3=beta3, eps=eps,
                        weight_decay=weight_decay)
        super(AdaMod, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(AdaMod, self).__setstate__(state)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): reevaluates the model and returns
                the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'AdaMod does not support sparse gradients')
                state = self.state[p]
                # State initialization on the first step for this parameter.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    # Exponential moving average of actual learning rates.
                    state['exp_avg_lr'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq, exp_avg_lr = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_lr']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running averages.
                exp_avg.mul_(beta1).add_((1 - beta1) * grad)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                # Decoupled weight decay, applied to the weights directly.
                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'] * group['lr'] * p.data)
                # Per-element step sizes, bounded by their running average.
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom)
                exp_avg_lr.mul_(group['beta3']).add_((1 - group['beta3']) * step_size)
                step_size = torch.min(step_size, exp_avg_lr)
                step_size.mul_(exp_avg)
                p.data.add_(-step_size)
        return loss
class Novograd(Optimizer):
    """Implements the Novograd algorithm (comment-stripped copy).

    Per-element first moment; scalar per-tensor second moment built from the
    squared gradient norm; decoupled weight decay; optional AMSGrad-style
    maximum on the second moment.
    """
    def __init__(self, params, lr=1e-3, betas=(0.95, 0.25), eps=1e-8,
                 weight_decay=0, grad_averaging=False, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        amsgrad=amsgrad)
        super(Novograd, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(Novograd, self).__setstate__(state)
        # Checkpoints saved before the amsgrad option existed lack the key.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): reevaluates the model and returns
                the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Sparse gradients are not supported.')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # First moment is per-element; second moment is a scalar.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                # Decoupled weight decay, applied to the weights directly.
                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'] * group['lr'] * p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                norm = torch.sum(torch.pow(grad, 2))
                # Bootstrap the scalar second moment on the first step.
                if exp_avg_sq == 0:
                    exp_avg_sq.copy_(norm)
                else:
                    exp_avg_sq.mul_(beta2).add_((1 - beta2) * norm)
                if amsgrad:
                    # Normalize by the maximum of all 2nd-moment estimates.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                # NOTE: normalizes p.grad.data in place.
                grad.div_(denom)
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)
                exp_avg.mul_(beta1).add_(grad)
                p.data.add_(-group['lr'] * exp_avg)
        return loss
| true | true |
1c36419bcef06a7f8db4b50d1abb0f833dc1d196 | 228 | py | Python | zairachem/estimators/from_manifolds/pipe.py | ersilia-os/ersilia-automl-chem | fabb1f05d17cff11ec0e084495eed4c0152f2f63 | [
"MIT"
] | null | null | null | zairachem/estimators/from_manifolds/pipe.py | ersilia-os/ersilia-automl-chem | fabb1f05d17cff11ec0e084495eed4c0152f2f63 | [
"MIT"
] | null | null | null | zairachem/estimators/from_manifolds/pipe.py | ersilia-os/ersilia-automl-chem | fabb1f05d17cff11ec0e084495eed4c0152f2f63 | [
"MIT"
] | null | null | null | from .estimate import Estimator
class ManifoldPipeline(object):
    """Thin pipeline wrapper around an :class:`Estimator` rooted at ``path``."""

    def __init__(self, path):
        # All real work is delegated to the estimator instance.
        self.e = Estimator(path=path)

    def run(self, time_budget_sec=None):
        """Run the estimator, optionally bounded by a wall-clock budget (seconds)."""
        self.e.run(time_budget_sec=time_budget_sec)
| 22.8 | 51 | 0.710526 | from .estimate import Estimator
class ManifoldPipeline(object):
    """Thin wrapper that delegates pipeline execution to an Estimator."""
    def __init__(self, path):
        # The estimator is rooted at the given results path.
        self.e = Estimator(path=path)
    def run(self, time_budget_sec=None):
        # time_budget_sec: optional wall-clock budget forwarded as-is.
        self.e.run(time_budget_sec=time_budget_sec)
| true | true |
1c3641dd85bad030ce179280d5fc28ecd8d70b9a | 4,610 | py | Python | scripts/run_model.py | davidhalladay/DSP-Auto-drawer-Generating-and-Modifying-Images-Continually-using-Knowledge-graph | 1610bbd567a5caba0478d8f7026f98766e6e39f8 | [
"Apache-2.0"
] | null | null | null | scripts/run_model.py | davidhalladay/DSP-Auto-drawer-Generating-and-Modifying-Images-Continually-using-Knowledge-graph | 1610bbd567a5caba0478d8f7026f98766e6e39f8 | [
"Apache-2.0"
] | 4 | 2021-03-19T10:11:14.000Z | 2022-03-12T00:11:24.000Z | scripts/run_model.py | davidhalladay/DSP-Auto-drawer-Generating-and-Modifying-Images-Continually-using-Knowledge-graph | 1610bbd567a5caba0478d8f7026f98766e6e39f8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, json, os
from imageio import imwrite
import torch
from sg2im.model import Sg2ImModel
from sg2im.data.utils import imagenet_deprocess_batch
import sg2im.vis as vis
import pickle
import pprint
from sklearn_crfsuite import CRF
from sklearn_crfsuite import metrics
import gensim
from gensim.models import Word2Vec
import nltk
from nltk import word_tokenize
from nltk.tag.util import untag
from nltk.tree import ParentedTree, Tree
from nltk.corpus import brown, movie_reviews, treebank
from sg2im.drawer import *
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='sg2im-models/vg128.pt')
parser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')
parser.add_argument('--crf_model_pretrained', default='./simple_crf/crf_model.pkl')
parser.add_argument('--coco_cls_txt', default='./datasets/coco_cls.txt')
parser.add_argument('--word2vec_sample', default='models/word2vec_sample/pruned.word2vec.txt')
parser.add_argument('--output_dir', default='outputs')
parser.add_argument('--draw_scene_graphs', type=int, default=0)
parser.add_argument('--device', default='gpu', choices=['cpu', 'gpu'])
def main(args):
  """Interactive loop: parse a sentence into a scene graph and render it.

  Loads the sg2im model from args.checkpoint, the CRF POS tagger and
  word-embedding helpers, then repeatedly reads a sentence from stdin,
  extracts subject/object/predicate, updates the running scene graph and
  writes the generated image(s) into args.output_dir.
  """
  if not os.path.isfile(args.checkpoint):
    print('ERROR: Checkpoint file "%s" not found' % args.checkpoint)
    print('Maybe you forgot to download pretraind models? Try running:')
    print('bash scripts/download_models.sh')
    return
  if not os.path.isdir(args.output_dir):
    print('Output directory "%s" does not exist; creating it' % args.output_dir)
    os.makedirs(args.output_dir)
  if args.device == 'cpu':
    device = torch.device('cpu')
  elif args.device == 'gpu':
    device = torch.device('cuda:0')
    if not torch.cuda.is_available():
      print('WARNING: CUDA not available; falling back to CPU')
      device = torch.device('cpu')
  # Load the model, with a bit of care in case there are no GPUs
  map_location = 'cpu' if device == torch.device('cpu') else None
  checkpoint = torch.load(args.checkpoint, map_location=map_location)
  model = Sg2ImModel(**checkpoint['model_kwargs'])
  model.load_state_dict(checkpoint['model_state'])
  model.eval()
  model.to(device)
  # Load the scene graphs
  # with open(args.scene_graphs_json, 'r') as f:
  # scene_graphs = json.load(f)
  crf_model_path = args.crf_model_pretrained
  crf_model = pickle.load(open(crf_model_path, 'rb'))
  cate_list = load_cate(args.coco_cls_txt)
  pos_lists, feat_x, feat_y, pca, clf, wn_model = construct_pos_list(args.word2vec_sample)
  print("Start drawing something!")
  count = 0
  # sg_list accumulates objects/relationships across sentences.
  sg_list = [{'objects': [], 'relationships': []}]
  while 1:
    sentence = input("Please input a sentence: ")
    # Run the model forward
    # scene_graphs only with one graph
    token_sentence = word_tokenize(sentence)
    t = pos_tag(token_sentence, crf_model)
    print(t)
    so_list, p_list = spo_extractor(t, cate_list)
    # Exactly two recognized COCO objects are required per sentence.
    if len(so_list) != 2:
      print("please make sure that input sentence contain 2 objects in coco_list.")
      print("Only find ",so_list)
      continue
    so_list = so_extractor(so_list, cate_list)
    p_list = p_extractor(p_list, pos_lists, feat_x, feat_y, pca, clf, wn_model)
    scene_graphs = sg_constructor(so_list, p_list, sg_list)
    print(sg_list)
    with torch.no_grad():
      imgs, boxes_pred, masks_pred, _ = model.forward_json(scene_graphs)
    imgs = imagenet_deprocess_batch(imgs)
    # Save the generated images
    # NOTE(review): every image in the batch is written to the same
    # 'img%06d.png' % count path, so with batch > 1 earlier images are
    # overwritten -- confirm whether the index i should be in the name.
    for i in range(imgs.shape[0]):
      img_np = imgs[i].numpy().transpose(1, 2, 0)
      img_path = os.path.join(args.output_dir, 'img%06d.png' % count)
      imwrite(img_path, img_np)
    # Draw the scene graphs
    if args.draw_scene_graphs == 1:
      for i, sg in enumerate(scene_graphs):
        sg_img = vis.draw_scene_graph(sg['objects'], sg['relationships'])
        sg_img_path = os.path.join(args.output_dir, 'sg%06d.png' % count)
        imwrite(sg_img_path, sg_img)
    count += 1
# Script entry point: parse CLI args and start the interactive drawing loop.
if __name__ == '__main__':
  args = parser.parse_args()
  main(args)
| 36.299213 | 94 | 0.723861 |
import argparse, json, os
from imageio import imwrite
import torch
from sg2im.model import Sg2ImModel
from sg2im.data.utils import imagenet_deprocess_batch
import sg2im.vis as vis
import pickle
import pprint
from sklearn_crfsuite import CRF
from sklearn_crfsuite import metrics
import gensim
from gensim.models import Word2Vec
import nltk
from nltk import word_tokenize
from nltk.tag.util import untag
from nltk.tree import ParentedTree, Tree
from nltk.corpus import brown, movie_reviews, treebank
from sg2im.drawer import *
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='sg2im-models/vg128.pt')
parser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')
parser.add_argument('--crf_model_pretrained', default='./simple_crf/crf_model.pkl')
parser.add_argument('--coco_cls_txt', default='./datasets/coco_cls.txt')
parser.add_argument('--word2vec_sample', default='models/word2vec_sample/pruned.word2vec.txt')
parser.add_argument('--output_dir', default='outputs')
parser.add_argument('--draw_scene_graphs', type=int, default=0)
parser.add_argument('--device', default='gpu', choices=['cpu', 'gpu'])
def main(args):
  """Interactive sentence-to-image loop (comment-stripped copy).

  Loads the sg2im model, the CRF POS tagger and embedding helpers, then
  repeatedly reads sentences, builds/updates a scene graph, and writes the
  generated image(s) into args.output_dir.
  """
  if not os.path.isfile(args.checkpoint):
    print('ERROR: Checkpoint file "%s" not found' % args.checkpoint)
    print('Maybe you forgot to download pretraind models? Try running:')
    print('bash scripts/download_models.sh')
    return
  if not os.path.isdir(args.output_dir):
    print('Output directory "%s" does not exist; creating it' % args.output_dir)
    os.makedirs(args.output_dir)
  if args.device == 'cpu':
    device = torch.device('cpu')
  elif args.device == 'gpu':
    device = torch.device('cuda:0')
    if not torch.cuda.is_available():
      print('WARNING: CUDA not available; falling back to CPU')
      device = torch.device('cpu')
  # CPU-only machines need map_location to deserialize GPU checkpoints.
  map_location = 'cpu' if device == torch.device('cpu') else None
  checkpoint = torch.load(args.checkpoint, map_location=map_location)
  model = Sg2ImModel(**checkpoint['model_kwargs'])
  model.load_state_dict(checkpoint['model_state'])
  model.eval()
  model.to(device)
  crf_model_path = args.crf_model_pretrained
  crf_model = pickle.load(open(crf_model_path, 'rb'))
  cate_list = load_cate(args.coco_cls_txt)
  pos_lists, feat_x, feat_y, pca, clf, wn_model = construct_pos_list(args.word2vec_sample)
  print("Start drawing something!")
  count = 0
  # Scene graph state accumulates across sentences.
  sg_list = [{'objects': [], 'relationships': []}]
  while 1:
    sentence = input("Please input a sentence: ")
    token_sentence = word_tokenize(sentence)
    t = pos_tag(token_sentence, crf_model)
    print(t)
    so_list, p_list = spo_extractor(t, cate_list)
    # Exactly two recognized COCO objects are required per sentence.
    if len(so_list) != 2:
      print("please make sure that input sentence contain 2 objects in coco_list.")
      print("Only find ",so_list)
      continue
    so_list = so_extractor(so_list, cate_list)
    p_list = p_extractor(p_list, pos_lists, feat_x, feat_y, pca, clf, wn_model)
    scene_graphs = sg_constructor(so_list, p_list, sg_list)
    print(sg_list)
    with torch.no_grad():
      imgs, boxes_pred, masks_pred, _ = model.forward_json(scene_graphs)
    imgs = imagenet_deprocess_batch(imgs)
    # NOTE(review): all batch images share the same 'img%06d.png' % count
    # path, so with batch > 1 earlier images are overwritten -- confirm.
    for i in range(imgs.shape[0]):
      img_np = imgs[i].numpy().transpose(1, 2, 0)
      img_path = os.path.join(args.output_dir, 'img%06d.png' % count)
      imwrite(img_path, img_np)
    if args.draw_scene_graphs == 1:
      for i, sg in enumerate(scene_graphs):
        sg_img = vis.draw_scene_graph(sg['objects'], sg['relationships'])
        sg_img_path = os.path.join(args.output_dir, 'sg%06d.png' % count)
        imwrite(sg_img_path, sg_img)
    count += 1
# Script entry point: parse CLI args and start the interactive drawing loop.
if __name__ == '__main__':
  args = parser.parse_args()
  main(args)
| true | true |
1c36429c5af7746ce881193eb02e651c5922a10c | 3,283 | py | Python | project/tools/topics.py | holdenweb/nbtools | cafe088735221f19320450e9f1eb62b83034a422 | [
"MIT"
] | null | null | null | project/tools/topics.py | holdenweb/nbtools | cafe088735221f19320450e9f1eb62b83034a422 | [
"MIT"
] | null | null | null | project/tools/topics.py | holdenweb/nbtools | cafe088735221f19320450e9f1eb62b83034a422 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# topics.py
#
# A program to correspond notebook titles to outline topics.
#
# # # edit, from the README:
"""This program reports on the state of the outline.
- tools/topics.py [word ...] lists all topics in the outline for
which there is a source notebook.
- tools/topics.py -a [word ...] lists all topics.
- tools/topics.py -u [word ...] lists topics for which there is
currently no source notebook.
- tools/topics.py -o lists orphaned notebooks, i.e. files for which there
is currently no corresponding topic in the outline."""
from glob import glob
import os
import sys
import lib
from lib import nullstrip, slugify, get_project_dir
# XXX Currently runs only from the project directory.
# I am inclined to leave it that way for now
# template_env = Environment(loader=FileSystemLoader("data/templates"))
# -t template would be nice, but this will do for now
# src_template = template_env.get_template("base.ipynb")
def matching(word, title_words):
    """Return True if *word* is a substring of some word in *title_words*.

    An empty search word matches every title.
    """
    if len(word) == 0:
        return True
    for candidate in title_words:
        if word in candidate:
            return True
    return False
def get_topics():
    """Parse outline.txt into (title, indent) tuples, preserving order.

    Trailing whitespace and ' *' markers are stripped; the indent is the
    number of leading spaces removed from the left of each line.
    """
    result = []
    for raw in nullstrip(open("outline.txt")):
        trimmed = raw.rstrip().rstrip(" *")
        title = trimmed.lstrip()
        result.append((title, len(trimmed) - len(title)))
    return result
def topic_and_file(words, exists=True):
    """Print outline topics whose title contains every word in *words*.

    exists: False -> print topics whose source notebook exists,
            True  -> print topics whose source notebook does NOT exist,
            None  -> print all matching topics.
    NOTE(review): a topic is printed when os.path.exists(<notebook>) !=
    exists, so the parameter name reads inverted relative to its effect;
    the CLI relies on this (default False lists existing notebooks)."""
    search_words = [word.lower() for word in words]
    topics = get_topics()
    slugs = [slugify(topic[0].strip()) for topic in topics]
    for (title, indent), slug in zip(topics, slugs):
        title_words = set([word.lower() for word in title.split()])
        # Every search word must be a substring of some title word.
        if all(matching(word, title_words) for word in search_words):
            if (exists is None) or (os.path.exists(
                os.path.join("project", "nbsource", slug+".ipynb")) != exists):
                print(" "*indent+title)
def orphaned_topic_files():
    """Print the expected notebook path for every outline topic.

    Topics whose notebook file is missing are suffixed with ' ***'.
    NOTE(review): despite the name, this lists all topics (flagging missing
    files); it does not list notebook files lacking a topic."""
    topics = get_topics()
    slugs = [slugify(topic[0].strip()) for topic in topics]
    filenames = glob(os.path.join("nbsource", "*.ipynb"))
    file_slugs = [os.path.splitext(os.path.basename(f))[0] for f in filenames]
    for slug in slugs:
        comment = " ***" if slug not in file_slugs else ""
        print(os.path.join("project", "nbsource", slug+'.ipynb'), comment)
# Command-line entry point.  Exactly one optional switch is recognized:
#   -u  list topics without a source notebook
#   -o  list notebook paths, flagging missing ones
#   -a  list all topics
# Remaining arguments are substring-matched against topic titles.
if __name__ == "__main__":
    # possible -d option for directory?
    exists = orphaned = False
    # XXX one switch only ... use proper arg parsing
    if len(sys.argv) > 1 and sys.argv[1][0] == "-":
        if sys.argv[1] == "-u":
            exists = True
        elif sys.argv[1] == "-o":
            orphaned = True
        elif sys.argv[1] == "-a":
            exists = None
        else:
            # sys is already imported at module level; the redundant local
            # "import sys" that used to sit here was removed.
            sys.exit("topics.py [-o | -u | -a] [List of words]")
        del sys.argv[1]
    os.chdir(get_project_dir())
    if orphaned:
        orphaned_topic_files()
    else:
        topic_list = slugify(" ".join(sys.argv[1:])).split("-")
        # if topic_list == [""]: # Annoying special case?
        # topic_list = []
        topic_and_file(topic_list, exists=exists)
ort lib
from lib import nullstrip, slugify, get_project_dir
def matching(word, title_words):
    """True when *word* occurs as a substring of some title word.

    The empty word matches every title.
    """
    if word == "":
        return True
    return any(word in tw for tw in title_words)
def get_topics():
topics = []
for line in nullstrip(open("outline.txt")):
line = line.rstrip().rstrip(" *")
sline = line.lstrip()
topics.append((sline, len(line)-len(sline)))
return topics
def topic_and_file(words, exists=True):
search_words = [word.lower() for word in words]
topics = get_topics()
slugs = [slugify(topic[0].strip()) for topic in topics]
for (title, indent), slug in zip(topics, slugs):
title_words = set([word.lower() for word in title.split()])
if all(matching(word, title_words) for word in search_words):
if (exists is None) or (os.path.exists(
os.path.join("project", "nbsource", slug+".ipynb")) != exists):
print(" "*indent+title)
def orphaned_topic_files():
topics = get_topics()
slugs = [slugify(topic[0].strip()) for topic in topics]
filenames = glob(os.path.join("nbsource", "*.ipynb"))
file_slugs = [os.path.splitext(os.path.basename(f))[0] for f in filenames]
for slug in slugs:
comment = " ***" if slug not in file_slugs else ""
print(os.path.join("project", "nbsource", slug+'.ipynb'), comment)
if __name__ == "__main__":
exists = orphaned = False
if len(sys.argv) > 1 and sys.argv[1][0] == "-":
if sys.argv[1] == "-u":
exists = True
elif sys.argv[1] == "-o":
orphaned = True
elif sys.argv[1] == "-a":
exists = None
else:
import sys
sys.exit("topics.py [-o | -u | -a] [List of words]")
del sys.argv[1]
os.chdir(get_project_dir())
if orphaned:
orphaned_topic_files()
else:
topic_list = slugify(" ".join(sys.argv[1:])).split("-")
(topic_list, exists=exists) | true | true |
1c3643668c6ce97cfc07d9c375acb289e025e796 | 3,854 | py | Python | access-analyzer/iam-role-findings-resolution/iam_access_analyzer_findings_remediation/iam_access_analyzer_findings_remediation_stack.py | lulukelu/aws-iam-permissions-guardrails | cae485e3d8589c85f55c50c442ce47916345e00d | [
"Apache-2.0"
] | 88 | 2020-04-02T02:56:27.000Z | 2022-03-18T13:22:02.000Z | access-analyzer/iam-role-findings-resolution/iam_access_analyzer_findings_remediation/iam_access_analyzer_findings_remediation_stack.py | lulukelu/aws-iam-permissions-guardrails | cae485e3d8589c85f55c50c442ce47916345e00d | [
"Apache-2.0"
] | 45 | 2020-06-26T11:11:28.000Z | 2021-08-17T15:31:47.000Z | access-analyzer/iam-role-findings-resolution/iam_access_analyzer_findings_remediation/iam_access_analyzer_findings_remediation_stack.py | lulukelu/aws-iam-permissions-guardrails | cae485e3d8589c85f55c50c442ce47916345e00d | [
"Apache-2.0"
] | 32 | 2020-04-02T02:56:28.000Z | 2021-12-20T18:53:04.000Z |
import os, subprocess
from aws_cdk import (
core,
aws_accessanalyzer as accessanalyzer,
aws_iam as iam,
aws_events,
aws_events_targets,
aws_lambda,
aws_sns as sns,
aws_sns_subscriptions as subscriptions,
aws_kms as kms
)
class IamAccessAnalyzerFindingsRemediationStack(core.Stack):
    """CDK stack that notifies (by email) on active IAM Access Analyzer
    findings for IAM roles.

    Resources created:
      * KMS CMK for SNS encryption at rest
      * SNS topic with an email subscription (address is a CFN parameter)
      * Lambda handler with a boto3 layer, allowed to read/update role
        trust policies and publish to the topic
      * EventBridge rule routing ACTIVE AWS::IAM:Role findings to the Lambda
    """
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # The code that defines your stack goes here
        # Email address to notify; supplied at deploy time as a parameter.
        email_subscription_parameter=core.CfnParameter(self,
            "NotificationEmail",
            description="Email Address for Notification",
            type="String"
        )
        email_subscription=email_subscription_parameter.value_as_string
        boto3_lambda_layer=None
        # Layer is built locally by pip (see create_dependencies_layer).
        boto3_lambda_layer = self.create_dependencies_layer(
            id="boto3layer", requirements_path="./layers/boto3/requirements.txt", output_dir="./layers/boto3"
        )
        # Customer-managed key so the SNS topic is encrypted at rest.
        cmk_key=kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )
        email_topic=sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key
        )
        email_topic.add_subscription(subscriptions.EmailSubscription(email_subscription))
        # Lambda invoked by the EventBridge rule below; publishes to the topic.
        access_analyzer_event_bridge_event_handler=aws_lambda.Function(
            self,
            "access_analyzer_event_bridge_event_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="access_analyzer_event_bridge_target.lambda_handler",
            code=aws_lambda.AssetCode("./functions/"),
            environment={'SNS_TOPIC_ARN' : email_topic.topic_arn},
            layers=[boto3_lambda_layer]
        )
        # Allow the handler to inspect and update trust policies of any role
        # in this account (needed to remediate role findings).
        handler_statement = iam.PolicyStatement(
            actions=[
                "iam:GetRole",
                "iam:UpdateAssumeRolePolicy"
            ],
            effect=iam.Effect.ALLOW,
            resources=["arn:aws:iam::{}:role/*".format(core.Stack.of(self).account)]
        )
        access_analyzer_event_bridge_event_handler.add_to_role_policy(handler_statement)
        # Allow publishing the notification to the email topic only.
        notification_statement=iam.PolicyStatement(
            actions=[
                "sns:Publish",
            ],
            effect=iam.Effect.ALLOW,
            resources=[email_topic.topic_arn]
        )
        access_analyzer_event_bridge_event_handler.add_to_role_policy(notification_statement)
        # The topic is CMK-encrypted, so the publisher needs key access too.
        cmk_key.grant_encrypt_decrypt(access_analyzer_event_bridge_event_handler)
        # Route only ACTIVE findings about IAM roles to the handler.
        access_analyzer_finding_rule=aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status":["ACTIVE"], "resourceType": [ "AWS::IAM:Role" ] }
            ),
            targets=[
                aws_events_targets.LambdaFunction(access_analyzer_event_bridge_event_handler)
            ]
        )
    #https://github.com/aws-samples/aws-cdk-examples/issues/130
    def create_dependencies_layer(
        self, id: str, requirements_path: str, output_dir: str
    ) -> aws_lambda.LayerVersion:
        """Build (via pip) and return a Lambda layer from a requirements file.

        Set the SKIP_PIP environment variable to reuse a previously built
        output_dir without re-running pip.
        """
        # Install requirements for layer
        if not os.environ.get("SKIP_PIP"):
            subprocess.check_call(
                # Note: Pip will create the output dir if it does not exist
                f"pip install -r {requirements_path} -t {output_dir}/python".split()
            )
        return aws_lambda.LayerVersion(
            self, id, code=aws_lambda.Code.from_asset(output_dir)
        )
import os, subprocess
from aws_cdk import (
core,
aws_accessanalyzer as accessanalyzer,
aws_iam as iam,
aws_events,
aws_events_targets,
aws_lambda,
aws_sns as sns,
aws_sns_subscriptions as subscriptions,
aws_kms as kms
)
class IamAccessAnalyzerFindingsRemediationStack(core.Stack):
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
email_subscription_parameter=core.CfnParameter(self,
"NotificationEmail",
description="Email Address for Notification",
type="String"
)
email_subscription=email_subscription_parameter.value_as_string
boto3_lambda_layer=None
boto3_lambda_layer = self.create_dependencies_layer(
id="boto3layer", requirements_path="./layers/boto3/requirements.txt", output_dir="./layers/boto3"
)
cmk_key=kms.Key(
self,
"SNSEncryptionAtRestKey",
description="SNS Encryption at rest key",
alias="sns-encryption-at-rest",
enable_key_rotation=True,
)
email_topic=sns.Topic(
self,
"AccessAnalyzerNotificationTopic",
display_name="Access Analyzer Finding Notification Topic",
master_key=cmk_key
)
email_topic.add_subscription(subscriptions.EmailSubscription(email_subscription))
access_analyzer_event_bridge_event_handler=aws_lambda.Function(
self,
"access_analyzer_event_bridge_event_handler",
runtime=aws_lambda.Runtime.PYTHON_3_8,
handler="access_analyzer_event_bridge_target.lambda_handler",
code=aws_lambda.AssetCode("./functions/"),
environment={'SNS_TOPIC_ARN' : email_topic.topic_arn},
layers=[boto3_lambda_layer]
)
handler_statement = iam.PolicyStatement(
actions=[
"iam:GetRole",
"iam:UpdateAssumeRolePolicy"
],
effect=iam.Effect.ALLOW,
resources=["arn:aws:iam::{}:role/*".format(core.Stack.of(self).account)]
)
access_analyzer_event_bridge_event_handler.add_to_role_policy(handler_statement)
notification_statement=iam.PolicyStatement(
actions=[
"sns:Publish",
],
effect=iam.Effect.ALLOW,
resources=[email_topic.topic_arn]
)
access_analyzer_event_bridge_event_handler.add_to_role_policy(notification_statement)
cmk_key.grant_encrypt_decrypt(access_analyzer_event_bridge_event_handler)
access_analyzer_finding_rule=aws_events.Rule(
self,
"AccessAnalzyerFindingActiveEventRule",
description="Access Analyzer Finding Event Active",
enabled=True,
event_pattern=aws_events.EventPattern(
source=["aws.access-analyzer"],
detail_type=["Access Analyzer Finding"],
detail={"status":["ACTIVE"], "resourceType": [ "AWS::IAM:Role" ] }
),
targets=[
aws_events_targets.LambdaFunction(access_analyzer_event_bridge_event_handler)
]
)
def create_dependencies_layer(
self, id: str, requirements_path: str, output_dir: str
) -> aws_lambda.LayerVersion:
if not os.environ.get("SKIP_PIP"):
subprocess.check_call(
f"pip install -r {requirements_path} -t {output_dir}/python".split()
)
return aws_lambda.LayerVersion(
self, id, code=aws_lambda.Code.from_asset(output_dir)
) | true | true |
1c3643cbb1c4a08d5011d00b1f81d89b65826645 | 2,598 | py | Python | src/dbconfig.py | chof747/dbversions | e5635b1bf23d2e6f0730d9822a1226b9ff737c2a | [
"BSD-3-Clause"
] | null | null | null | src/dbconfig.py | chof747/dbversions | e5635b1bf23d2e6f0730d9822a1226b9ff737c2a | [
"BSD-3-Clause"
] | null | null | null | src/dbconfig.py | chof747/dbversions | e5635b1bf23d2e6f0730d9822a1226b9ff737c2a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Created on 15. Sep. 2016
@author: chof
'''
from dbversions import Config, DbDump, DBConfig, parseEnvironments
from dbversions.gitanalyzer import ConflictingDBScripts
import getopt
import sys
def usage():
    """Print command-line usage for the dbconfig tool to stderr.

    Previously an empty stub: it was called after a getopt error but
    printed nothing, leaving the user with no guidance.
    """
    print(
        "usage: dbconfig.py <command> [options]\n"
        "commands: snapshot | restore | switch | checkout | list | build |"
        " merge <main> <topic> | execute\n"
        "options:\n"
        "  -p, --projectpath <path>  project directory (default '.')\n"
        "  -e, --env <envs>          environments to operate on\n"
        "  -s, --script <file>       script for the execute command\n"
        "  -o, --output <path>       output path for the build command\n"
        "  -n                        checkout: new scripts only\n"
        "  -v                        increase logging verbosity",
        file=sys.stderr)
# NOTE(review): `global` at module level is a no-op — these three statements
# have no effect and could be removed.
global environment
global cfg
global dbdumps
if __name__ == '__main__':
    # Defaults for the command-line options parsed below.
    environment = None
    projectpath = '.'
    verbosity = 0
    outputPath = '.'
    newonly = False
    try:
        # First positional argument is the sub-command; the rest are options.
        command = sys.argv[1]
        optlist, args = getopt.getopt(sys.argv[2:], 'nvp:e:o:s:', ["projectpath=", "env=", "script=", "output="])
        for option, value in optlist:
            if option in ["-p", "--projectpath"]:
                projectpath = value
            elif option in ["-s", "--script"]:
                # NOTE(review): `script` is only bound when -s is given; the
                # 'execute' command below will raise NameError without it.
                script = value
            elif option in ["-o", "--output"]:
                outputPath = value
            elif option in ['-e', '--env']:
                environment = parseEnvironments(value)
            elif option in ['-n']:
                newonly = True
            elif option in ['-v']:
                verbosity = verbosity + 1
            else:
                assert False, "%s is an unhandled option" % (option)
    except getopt.GetoptError as e:
        # NOTE(review): execution continues after printing usage — with no
        # valid `command`/`optlist` this likely fails further down; a
        # sys.exit() here looks intended. Confirm before changing.
        print(e.msg)
        usage()
    # Load project configuration; -e overrides the configured environments.
    cfg = Config(projectpath)
    if environment == None:
        environment = cfg.environments
    else:
        cfg.environments = environment
    if verbosity > 0 :
        cfg.setLoggingVerbosity(verbosity)
    dbconfig = DBConfig(cfg)
    dbdumps = DbDump(cfg)
    # Dispatch on the sub-command.
    if (command == 'snapshot'):
        dbconfig.snapshot()
    elif (command == 'restore'):
        dbconfig.restore()
    elif (command == 'switch'):
        dbconfig.switch()
    elif (command == 'checkout'):
        dbconfig.checkout(newonly)
    elif (command == 'list'):
        dbconfig.list()
    elif(command == 'build'):
        dbconfig.build(outputPath)
    elif (command == 'merge'):
        # merge takes two positional branch names: main then topic.
        main = cfg.getHeadOfBranch(args.pop(0))
        topic = cfg.getHeadOfBranch(args.pop(0))
        try:
            dbconfig.merge(main, topic)
        except ConflictingDBScripts as e:
            cfg.logger.error("Conflicting DB Scripts:")
            cfg.logger.error(e.pathA)
            cfg.logger.error(e.pathB)
    elif (command == 'execute'):
        try:
            dbconfig.execute(script)
        except EnvironmentError as e:
            cfg.logger.error(e)
            pass
from dbversions import Config, DbDump, DBConfig, parseEnvironments
from dbversions.gitanalyzer import ConflictingDBScripts
import getopt
import sys
def usage():
pass
global environment
global cfg
global dbdumps
if __name__ == '__main__':
environment = None
projectpath = '.'
verbosity = 0
outputPath = '.'
newonly = False
try:
command = sys.argv[1]
optlist, args = getopt.getopt(sys.argv[2:], 'nvp:e:o:s:', ["projectpath=", "env=", "script=", "output="])
for option, value in optlist:
if option in ["-p", "--projectpath"]:
projectpath = value
elif option in ["-s", "--script"]:
script = value
elif option in ["-o", "--output"]:
outputPath = value
elif option in ['-e', '--env']:
environment = parseEnvironments(value)
elif option in ['-n']:
newonly = True
elif option in ['-v']:
verbosity = verbosity + 1
else:
assert False, "%s is an unhandled option" % (option)
except getopt.GetoptError as e:
print(e.msg)
usage()
cfg = Config(projectpath)
if environment == None:
environment = cfg.environments
else:
cfg.environments = environment
if verbosity > 0 :
cfg.setLoggingVerbosity(verbosity)
dbconfig = DBConfig(cfg)
dbdumps = DbDump(cfg)
if (command == 'snapshot'):
dbconfig.snapshot()
elif (command == 'restore'):
dbconfig.restore()
elif (command == 'switch'):
dbconfig.switch()
elif (command == 'checkout'):
dbconfig.checkout(newonly)
elif (command == 'list'):
dbconfig.list()
elif(command == 'build'):
dbconfig.build(outputPath)
elif (command == 'merge'):
main = cfg.getHeadOfBranch(args.pop(0))
topic = cfg.getHeadOfBranch(args.pop(0))
try:
dbconfig.merge(main, topic)
except ConflictingDBScripts as e:
cfg.logger.error("Conflicting DB Scripts:")
cfg.logger.error(e.pathA)
cfg.logger.error(e.pathB)
elif (command == 'execute'):
try:
dbconfig.execute(script)
except EnvironmentError as e:
cfg.logger.error(e)
pass | true | true |
1c3644ded2a9bb433761b81f3b933f36856450f3 | 4,461 | py | Python | setup.py | neillu23/espnet | 4e99009a5cf05b699ae9cb3cce445defe901438c | [
"Apache-2.0"
] | null | null | null | setup.py | neillu23/espnet | 4e99009a5cf05b699ae9cb3cce445defe901438c | [
"Apache-2.0"
] | null | null | null | setup.py | neillu23/espnet | 4e99009a5cf05b699ae9cb3cce445defe901438c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""ESPnet setup script."""
import os
from distutils.version import LooseVersion
from setuptools import find_packages
from setuptools import setup
# Dependency groups; "install" is the runtime requirement set, the rest
# become setuptools extras (see extras_require below).
requirements = {
    "install": [
        "setuptools>=38.5.1",
        "configargparse>=1.2.1",
        "typeguard>=2.7.0",
        "humanfriendly",
        "scipy>=1.4.1",
        "filelock",
        "librosa>=0.8.0",
        "jamo==0.4.1",  # For kss
        "PyYAML>=5.1.2",
        "soundfile>=0.10.2",
        "h5py>=2.10.0",
        "kaldiio>=2.17.0",
        "torch>=1.3.0",
        "torch_complex",
        "nltk>=3.4.5",
        # ASR
        "sentencepiece",
        "ctc-segmentation<1.8,>=1.6.6",
        # TTS
        "pyworld>=0.2.10",
        "espnet_tts_frontend",
        # ENH
        "ci_sdr",
        "asteroid",
        "transformers",
        "pytorch_wpe",
    ],
    # train: The modules invoked when training only.
    "train": [
        "matplotlib==3.1.0",
        "pillow>=6.1.0",
        "editdistance==0.5.2",
        "wandb",
        "tensorboard>=1.14",
    ],
    # recipe: The modules actually are not invoked in the main module of espnet,
    # but are invoked for the python scripts in each recipe
    "recipe": [
        "espnet_model_zoo",
        "gdown",
        "resampy",
        "pysptk>=0.1.17",
        "morfessor",  # for zeroth-korean
        "youtube_dl",  # for laborotv
        "nnmnkwii",
        "museval>=0.2.1",
        "pystoi>=0.2.2",
        "mir-eval>=0.6",
        "fastdtw",
        "nara_wpe>=0.0.5",
        "sacrebleu>=1.5.1",
    ],
    # all: The modules should be optionally installled due to some reason.
    #      Please consider moving them to "install" occasionally
    # NOTE(kamo): The modules in "train" and "recipe" are appended into "all"
    "all": [
        # NOTE(kamo): Append modules requiring specific pytorch version or torch>1.3.0
        "torchaudio",
        "torch_optimizer",
        "fairscale",
        "fairseq",
        "gtn",
    ],
    "setup": ["numpy", "pytest-runner"],
    "test": [
        "pytest>=3.3.0",
        "pytest-timeouts>=1.2.1",
        "pytest-pythonpath>=0.7.3",
        "pytest-cov>=2.7.1",
        "hacking>=2.0.0",
        "mock>=2.0.0",
        "pycodestyle",
        "jsondiff>=1.2.0",
        "flake8>=3.7.8",
        "flake8-docstrings>=1.3.1",
        "black",
    ],
    "doc": [
        "Sphinx==2.1.2",
        "sphinx-rtd-theme>=0.2.4",
        "sphinx-argparse>=0.2.5",
        "commonmark==0.8.1",
        "recommonmark>=0.4.0",
        "nbsphinx>=0.4.2",
        "sphinx-markdown-tables>=0.0.12",
    ],
}
# "all" additionally pulls in the train/recipe groups; tests need the
# training modules as well.
requirements["all"].extend(requirements["train"] + requirements["recipe"])
requirements["test"].extend(requirements["train"])
install_requires = requirements["install"]
setup_requires = requirements["setup"]
tests_require = requirements["test"]
# Everything except "install" and "setup" is exposed as a pip extra,
# e.g. `pip install espnet[all]`.
extras_require = {
    k: v for k, v in requirements.items() if k not in ["install", "setup"]
}
dirname = os.path.dirname(__file__)
# The package version is kept in a single data file shipped with the package.
version_file = os.path.join(dirname, "espnet", "version.txt")
with open(version_file, "r") as f:
    version = f.read().strip()
setup(
    name="espnet",
    version=version,
    url="http://github.com/espnet/espnet",
    author="Shinji Watanabe",
    author_email="shinjiw@ieee.org",
    description="ESPnet: end-to-end speech processing toolkit",
    long_description=open(os.path.join(dirname, "README.md"), encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    license="Apache Software License",
    packages=find_packages(include=["espnet*"]),
    package_data={"espnet": ["version.txt"]},
    # #448: "scripts" is inconvenient for developping because they are copied
    # scripts=get_all_scripts('espnet/bin'),
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    extras_require=extras_require,
    python_requires=">=3.7.0",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 30.346939 | 87 | 0.573638 |
import os
from distutils.version import LooseVersion
from setuptools import find_packages
from setuptools import setup
requirements = {
"install": [
"setuptools>=38.5.1",
"configargparse>=1.2.1",
"typeguard>=2.7.0",
"humanfriendly",
"scipy>=1.4.1",
"filelock",
"librosa>=0.8.0",
"jamo==0.4.1",
"PyYAML>=5.1.2",
"soundfile>=0.10.2",
"h5py>=2.10.0",
"kaldiio>=2.17.0",
"torch>=1.3.0",
"torch_complex",
"nltk>=3.4.5",
"sentencepiece",
"ctc-segmentation<1.8,>=1.6.6",
"pyworld>=0.2.10",
"espnet_tts_frontend",
"ci_sdr",
"asteroid",
"transformers",
"pytorch_wpe",
],
"train": [
"matplotlib==3.1.0",
"pillow>=6.1.0",
"editdistance==0.5.2",
"wandb",
"tensorboard>=1.14",
],
"recipe": [
"espnet_model_zoo",
"gdown",
"resampy",
"pysptk>=0.1.17",
"morfessor",
"youtube_dl",
"nnmnkwii",
"museval>=0.2.1",
"pystoi>=0.2.2",
"mir-eval>=0.6",
"fastdtw",
"nara_wpe>=0.0.5",
"sacrebleu>=1.5.1",
],
"all": [
"torchaudio",
"torch_optimizer",
"fairscale",
"fairseq",
"gtn",
],
"setup": ["numpy", "pytest-runner"],
"test": [
"pytest>=3.3.0",
"pytest-timeouts>=1.2.1",
"pytest-pythonpath>=0.7.3",
"pytest-cov>=2.7.1",
"hacking>=2.0.0",
"mock>=2.0.0",
"pycodestyle",
"jsondiff>=1.2.0",
"flake8>=3.7.8",
"flake8-docstrings>=1.3.1",
"black",
],
"doc": [
"Sphinx==2.1.2",
"sphinx-rtd-theme>=0.2.4",
"sphinx-argparse>=0.2.5",
"commonmark==0.8.1",
"recommonmark>=0.4.0",
"nbsphinx>=0.4.2",
"sphinx-markdown-tables>=0.0.12",
],
}
requirements["all"].extend(requirements["train"] + requirements["recipe"])
requirements["test"].extend(requirements["train"])
install_requires = requirements["install"]
setup_requires = requirements["setup"]
tests_require = requirements["test"]
extras_require = {
k: v for k, v in requirements.items() if k not in ["install", "setup"]
}
dirname = os.path.dirname(__file__)
version_file = os.path.join(dirname, "espnet", "version.txt")
with open(version_file, "r") as f:
version = f.read().strip()
setup(
name="espnet",
version=version,
url="http://github.com/espnet/espnet",
author="Shinji Watanabe",
author_email="shinjiw@ieee.org",
description="ESPnet: end-to-end speech processing toolkit",
long_description=open(os.path.join(dirname, "README.md"), encoding="utf-8").read(),
long_description_content_type="text/markdown",
license="Apache Software License",
packages=find_packages(include=["espnet*"]),
package_data={"espnet": ["version.txt"]},
equires,
tests_require=tests_require,
extras_require=extras_require,
python_requires=">=3.7.0",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| true | true |
1c36453888a7b74020e4d6b7d693e00a430e4cf7 | 5,311 | py | Python | watchmen/report/builder/space_filter.py | Indexical-Metrics-Measure-Advisory/watchmen-data-processor | d50b93e92868500552416997707d71720487bd77 | [
"MIT"
] | null | null | null | watchmen/report/builder/space_filter.py | Indexical-Metrics-Measure-Advisory/watchmen-data-processor | d50b93e92868500552416997707d71720487bd77 | [
"MIT"
] | null | null | null | watchmen/report/builder/space_filter.py | Indexical-Metrics-Measure-Advisory/watchmen-data-processor | d50b93e92868500552416997707d71720487bd77 | [
"MIT"
] | null | null | null | from decimal import Decimal
from typing import List
from arrow import arrow
from pypika import Schema, Table, Field
from pypika.terms import Term, Criterion, LiteralValue
from watchmen.common.parameter import Parameter, ParameterJoint
from watchmen.common.utils.data_utils import build_collection_name
from watchmen.console_space.model.console_space import SubjectDataSetFilter
from watchmen.console_space.storage.console_space_storage import load_console_space_by_subject_id
from watchmen.database.datasource.storage.data_source_storage import load_data_source_by_id
from watchmen.pipeline.utils.units_func import get_factor
from watchmen.report.builder.dialects import PrestoQuery
from watchmen.report.builder.utils import build_table_by_topic_id
from watchmen.space.space import SpaceFilter
from watchmen.space.storage.space_storage import get_filters_by_id
from watchmen.topic.storage.topic_schema_storage import get_topic_by_id
def get_topic_sub_query_with_space_filter(console_subject, current_user):
    """Build per-topic filtered sub-queries for a console subject's space.

    Loads the space's enabled filters and, for each, wraps the topic table
    in a SELECT * ... WHERE <filter> sub-query. Returns a lookup function
    topic_id -> {"alias": topic name, "query": sub-query} or None when the
    topic has no enabled filter.
    """
    console_space = load_console_space_by_subject_id(console_subject.subjectId, current_user)
    filters: List[SpaceFilter] = get_filters_by_id(console_space.spaceId, current_user)
    if filters is None:
        filters = []
    topic_sub_query = {}
    for filter in filters:
        # Disabled filters are skipped entirely (no sub-query for the topic).
        if filter.enabled:
            topic = get_topic_by_id(filter.topicId)
            table = build_table_by_topic_id(filter.topicId)
            sub_query = PrestoQuery. \
                from_(table). \
                select('*'). \
                where(build_space_filter_where(filter.joint))
            # NOTE(review): tenant scoping is currently disabled here —
            # confirm whether it should be re-enabled.
            # where(table.tenant_id_ == current_user.tenantId). \
            topic_sub_query[filter.topicId] = {"alias": topic.name, "query": sub_query}
    def get_topic_sub_query_by_topic_id(topic_id):
        # Closure over the mapping built above; None means "no filter".
        return topic_sub_query.get(topic_id, None)
    return get_topic_sub_query_by_topic_id
def build_space_filter_where(filter: ParameterJoint):
    """Recursively translate a filter joint tree into a pypika Criterion.

    A node with a jointType combines its children with AND/OR; a leaf node
    (no jointType) becomes a single criterion.
    """
    joint_type = filter.jointType
    if not joint_type:
        # Leaf: a single comparison expression.
        return build_space_filter_criterion(filter)
    if joint_type == "and":
        return Criterion.all([build_space_filter_where(child) for child in filter.filters])
    if joint_type == "or":
        return Criterion.any([build_space_filter_where(child) for child in filter.filters])
    # Unknown joint types fall through to None, matching prior behavior.
def build_space_filter_criterion(filter: SubjectDataSetFilter) -> Criterion:
    """Build a single pypika criterion from one dataset filter leaf.

    The right-hand value is coerced according to the left-hand factor type:
    numbers become Decimal (comma-split lists for in/not-in), dates and
    datetimes become Presto DATE/timestamp literals, anything else is
    passed through untouched.
    """
    from decimal import InvalidOperation  # local: keeps module imports untouched
    operator_ = filter.operator
    left = parse_space_filter_parameter(filter.left)
    right = parse_space_filter_parameter(filter.right)
    lvalue = left["value"]
    ltype = left["type"]
    rvalue = right["value"]
    if ltype == "number":
        if operator_ == "in" or operator_ == "not-in":
            # BUGFIX: the previous str.isdigit() gate rejected negative and
            # decimal literals ("-3", "12.5"), silently dropping them from
            # IN-lists. Attempt the Decimal conversion instead and skip only
            # tokens that genuinely are not numbers (preserves old leniency).
            right_value_trans_list = []
            for value_ in rvalue.split(","):
                try:
                    right_value_trans_list.append(Decimal(value_))
                except (InvalidOperation, ValueError):
                    continue
            return build_space_filter_criterion_expression(operator_, lvalue, right_value_trans_list)
        else:
            right_trans_value = Decimal(rvalue)
            return build_space_filter_criterion_expression(operator_, lvalue, right_trans_value)
    if ltype == "date":
        # Render as a Presto DATE literal, e.g. DATE '2021-01-31'.
        return build_space_filter_criterion_expression(operator_, lvalue, LiteralValue(
            "DATE \'{0}\'".format(arrow.get(rvalue).format('YYYY-MM-DD'))))
    elif ltype == "datetime":
        # Render as a Presto timestamp literal with second precision.
        return build_space_filter_criterion_expression(operator_, lvalue,
                                                       LiteralValue("timestamp \'{0}\'".format(
                                                           arrow.get(rvalue).format('YYYY-MM-DD HH:mm:ss'))))
    else:
        return build_space_filter_criterion_expression(operator_, lvalue, rvalue)
def build_space_filter_criterion_expression(operator_, left, right):
    """Map a filter operator name onto the matching pypika Term method.

    Unary operators (empty / not-empty) ignore *right*; everything else is
    a binary comparison. Unknown operators raise NotImplementedError.
    """
    unary_ops = {
        "empty": "isnull",
        "not-empty": "notnull",
    }
    binary_ops = {
        "equals": "eq",
        "not-equals": "ne",
        "more": "gt",
        "more-equals": "gte",
        "less": "lt",
        "less-equals": "lte",
        "in": "isin",
        "not-in": "notin",
    }
    if operator_ in unary_ops:
        return getattr(left, unary_ops[operator_])()
    if operator_ in binary_ops:
        return getattr(left, binary_ops[operator_])(right)
    # TODO more operator support
    raise NotImplementedError("filter operator is not supported")
def parse_space_filter_parameter(parameter: Parameter, ):
    """Resolve a filter operand into {"value": <pypika term>, "type": <type>}.

    "topic" parameters become a fully-qualified Field on the topic's table
    (catalog/schema taken from the topic's data source); "constant"
    parameters are wrapped as literal terms and typed as "text".
    Returns None implicitly for any other parameter kind.
    """
    if parameter.kind == "topic":
        topic = get_topic_by_id(parameter.topicId)
        topic_col_name = build_collection_name(topic.name)
        datasource = load_data_source_by_id(topic.dataSourceId)
        # Presto addressing: <catalog>.<schema>.<table>
        catalog_name = datasource.dataSourceCode
        schema_name = datasource.name
        schema = Schema(schema_name, LiteralValue(catalog_name))
        table = Table(topic_col_name, schema)
        factor = get_factor(parameter.factorId, topic)
        field = Field(factor.name, None, table)
        return {"value": field, "type": factor.type}
    elif parameter.kind == 'constant':
        return {"value": Term.wrap_constant(parameter.value), "type": "text"}
| 42.488 | 101 | 0.695161 | from decimal import Decimal
from typing import List
from arrow import arrow
from pypika import Schema, Table, Field
from pypika.terms import Term, Criterion, LiteralValue
from watchmen.common.parameter import Parameter, ParameterJoint
from watchmen.common.utils.data_utils import build_collection_name
from watchmen.console_space.model.console_space import SubjectDataSetFilter
from watchmen.console_space.storage.console_space_storage import load_console_space_by_subject_id
from watchmen.database.datasource.storage.data_source_storage import load_data_source_by_id
from watchmen.pipeline.utils.units_func import get_factor
from watchmen.report.builder.dialects import PrestoQuery
from watchmen.report.builder.utils import build_table_by_topic_id
from watchmen.space.space import SpaceFilter
from watchmen.space.storage.space_storage import get_filters_by_id
from watchmen.topic.storage.topic_schema_storage import get_topic_by_id
def get_topic_sub_query_with_space_filter(console_subject, current_user):
console_space = load_console_space_by_subject_id(console_subject.subjectId, current_user)
filters: List[SpaceFilter] = get_filters_by_id(console_space.spaceId, current_user)
if filters is None:
filters = []
topic_sub_query = {}
for filter in filters:
if filter.enabled:
topic = get_topic_by_id(filter.topicId)
table = build_table_by_topic_id(filter.topicId)
sub_query = PrestoQuery. \
from_(table). \
select('*'). \
where(build_space_filter_where(filter.joint))
topic_sub_query[filter.topicId] = {"alias": topic.name, "query": sub_query}
def get_topic_sub_query_by_topic_id(topic_id):
return topic_sub_query.get(topic_id, None)
return get_topic_sub_query_by_topic_id
def build_space_filter_where(filter: ParameterJoint):
if filter.jointType:
if filter.jointType == "and":
return Criterion.all([build_space_filter_where(item) for item in filter.filters])
elif filter.jointType == "or":
return Criterion.any([build_space_filter_where(item) for item in filter.filters])
else:
return build_space_filter_criterion(filter)
def build_space_filter_criterion(filter: SubjectDataSetFilter) -> Criterion:
operator_ = filter.operator
left = parse_space_filter_parameter(filter.left)
right = parse_space_filter_parameter(filter.right)
lvalue = left["value"]
ltype = left["type"]
rvalue = right["value"]
if ltype == "number":
if operator_ == "in" or operator_ == "not-in":
right_value_list = rvalue.split(",")
right_value_trans_list = []
for value_ in right_value_list:
if value_.isdigit():
right_value_trans_list.append(Decimal(value_))
return build_space_filter_criterion_expression(operator_, lvalue, right_value_trans_list)
else:
right_trans_value = Decimal(rvalue)
return build_space_filter_criterion_expression(operator_, lvalue, right_trans_value)
if ltype == "date":
return build_space_filter_criterion_expression(operator_, lvalue, LiteralValue(
"DATE \'{0}\'".format(arrow.get(rvalue).format('YYYY-MM-DD'))))
elif ltype == "datetime":
return build_space_filter_criterion_expression(operator_, lvalue,
LiteralValue("timestamp \'{0}\'".format(
arrow.get(rvalue).format('YYYY-MM-DD HH:mm:ss'))))
else:
return build_space_filter_criterion_expression(operator_, lvalue, rvalue)
def build_space_filter_criterion_expression(operator_, left, right):
if operator_ == "equals":
return left.eq(right)
elif operator_ == "not-equals":
return left.ne(right)
elif operator_ == 'empty':
return left.isnull()
elif operator_ == 'not-empty':
return left.notnull()
elif operator_ == "more":
return left.gt(right)
elif operator_ == "more-equals":
return left.gte(right)
elif operator_ == "less":
return left.lt(right)
elif operator_ == "less-equals":
return left.lte(right)
elif operator_ == 'in':
return left.isin(right)
elif operator_ == 'not-in':
return left.notin(right)
else:
raise NotImplementedError("filter operator is not supported")
def parse_space_filter_parameter(parameter: Parameter, ):
if parameter.kind == "topic":
topic = get_topic_by_id(parameter.topicId)
topic_col_name = build_collection_name(topic.name)
datasource = load_data_source_by_id(topic.dataSourceId)
catalog_name = datasource.dataSourceCode
schema_name = datasource.name
schema = Schema(schema_name, LiteralValue(catalog_name))
table = Table(topic_col_name, schema)
factor = get_factor(parameter.factorId, topic)
field = Field(factor.name, None, table)
return {"value": field, "type": factor.type}
elif parameter.kind == 'constant':
return {"value": Term.wrap_constant(parameter.value), "type": "text"}
| true | true |
1c364644e7015d895b1bddc6ef81f5f8810fc610 | 99 | py | Python | sample_problems/problems_with_solution98.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | sample_problems/problems_with_solution98.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | sample_problems/problems_with_solution98.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | # Write a Python program to get the system time.
import time

# Print the current local time framed by blank lines
# (same output as three separate print calls).
print(f"\n{time.ctime()}\n")
| 14.142857 | 49 | 0.717172 |
import time
print()
print(time.ctime())
print()
| true | true |
1c3647cc6c38c0850bbc8da9cb7d392b6885fea7 | 2,438 | py | Python | urlfilters.py | Aquei/ImgCrawler | d30acd1d30e868e3885aae35aa24efc47f416c04 | [
"Apache-2.0"
] | null | null | null | urlfilters.py | Aquei/ImgCrawler | d30acd1d30e868e3885aae35aa24efc47f416c04 | [
"Apache-2.0"
] | null | null | null | urlfilters.py | Aquei/ImgCrawler | d30acd1d30e868e3885aae35aa24efc47f416c04 | [
"Apache-2.0"
] | null | null | null | import utl
import re
from urllib.parse import urlparse, urljoin
def load_blacklist(path=None):
    """Return a zero-argument function that yields the blacklist as a tuple.

    The file is read lazily on the first call and cached for later calls.
    When *path* is None the default config location is used.
    """
    cache = []
    def get_blacklist():
        nonlocal cache
        if not cache:
            resolved = path
            if resolved is None:
                resolved = utl.default_config_path('blacklist.txt')
            cache = utl.txt2tuple(utl.load_file_fallback(resolved))
        return cache
    return get_blacklist
def load_ext(path=None):
    """Return a zero-argument function that yields allowed extensions as a tuple.

    Mirrors load_blacklist(): the file is read lazily on the first call and
    cached afterwards. When *path* is None the default config location
    ('ext.txt') is used.
    """
    ext = []
    def get_ext():
        nonlocal ext
        # Return the cached tuple on second and later calls.
        if ext:
            return ext
        file_path = path
        if file_path is None:
            file_path = utl.default_config_path('ext.txt')
        # (removed: an unused `ext_list = []` local left over from editing)
        f = utl.load_file_fallback(file_path)
        ext = utl.txt2tuple(f)
        return ext
    return get_ext
def get_scheme(url):
    """Return the URL scheme ('http', 'https', ...) or None when absent."""
    return urlparse(url).scheme or None
def pre_proc(url):
    """Rewrite known page URLs into directly downloadable URLs.

    Handles imgur image pages, imgur album pages and livedoor blog
    thumbnails. Returns None when no rewrite rule applies.
    """
    url = url.strip()
    scheme = get_scheme(url)
    # imgur single-image page -> direct download endpoint
    match = re.match(r'^https?://imgur\.com/(?P<imgur_id>[a-zA-Z0-9]+)$', url)
    if match:
        return '{0}://imgur.com/download/{1}/'.format(scheme, match.group('imgur_id'))
    # imgur album page -> zip download of the whole album
    match = re.match(r'^https?://imgur\.com/a/(?P<imgur_id>[a-zA-Z0-9]+)$', url)
    if match:
        return '{0}://s.imgur.com/a/{1}/zip'.format(scheme, match.group('imgur_id'))
    # livedoor blog thumbnail ("...-s.jpg") -> full-size image
    match = re.match(r'https?://livedoor\.blogimg\.jp/.*/(?P<file_name>[a-z0-9]+)-s\.(?P<ext>.{3,4})$', url)
    if match:
        full_name = '{0}.{1}'.format(match.group('file_name'), match.group('ext'))
        return urljoin(url, full_name)
# Resolve the config files once at import time; these module-level values
# become the argument defaults of url_filter below.
blacklist = load_blacklist()()
ext = load_ext()()
def url_filter(url, blacklist=blacklist, ext=ext):
    """Validate a URL and return the (possibly rewritten) download URL.

    Returns None when the URL is not HTTP(S), contains a blacklisted
    substring, or does not end with an allowed extension.

    :param url: candidate URL; surrounding whitespace is tolerated
    :param blacklist: substrings that disqualify a URL
    :param ext: tuple of allowed file-name suffixes
    """
    url = url.strip()
    scheme = get_scheme(url)
    # get_scheme returns None for scheme-less URLs; the original code
    # crashed here with AttributeError (None.lower()). Reject instead.
    if scheme is None or 'http' not in scheme.lower():
        print(url, 'is invalid url')
        return
    # Blacklist check: any matching substring disqualifies the URL.
    for black in blacklist:
        if black in url:
            return
    # Rewrite known page URLs (imgur etc.) into direct download URLs first.
    result = pre_proc(url)
    if result:
        return result
    # Otherwise accept the URL only if its path ends with an allowed extension.
    file_path = urlparse(url).path
    if file_path.endswith(ext):
        return url
| 17.666667 | 108 | 0.575472 | import utl
import re
from urllib.parse import urlparse, urljoin
def load_blacklist(path=None):
blacklist = []
def get_blacklist():
nonlocal blacklist
if blacklist:
return blacklist
file_path = path
if file_path is None:
file_path = utl.default_config_path('blacklist.txt')
f = utl.load_file_fallback(file_path)
blacklist = utl.txt2tuple(f)
return blacklist
return get_blacklist
def load_ext(path=None):
ext = []
def get_ext():
nonlocal ext
path2 = path
if ext:
return ext
if path is None:
path2 = utl.default_config_path('ext.txt')
ext_list = []
f = utl.load_file_fallback(path2)
ext = utl.txt2tuple(f)
return ext
return get_ext
def get_scheme(url):
scheme = urlparse(url).scheme
if scheme:
return scheme
def pre_proc(url):
url = url.strip()
scheme = get_scheme(url)
result = re.match('^https?://imgur\.com/(?P<imgur_id>[a-zA-Z0-9]+)$', url)
if result:
imgur_id = result.group('imgur_id')
return scheme + '://imgur.com/download/' + imgur_id + '/'
result = re.match('^https?://imgur\.com/a/(?P<imgur_id>[a-zA-Z0-9]+)$', url)
if result:
imgur_id = result.group('imgur_id')
return scheme + '://s.imgur.com/a/' + imgur_id + '/zip'
result = re.match('https?://livedoor\.blogimg\.jp/.*/(?P<file_name>[a-z0-9]+)-s\.(?P<ext>.{3,4})$', url)
if result:
file_name = result.group('file_name')
ext = result.group('ext')
filename = file_name + '.' + ext
return urljoin(url, filename)
blacklist = load_blacklist()()
ext = load_ext()()
def url_filter(url, blacklist=blacklist, ext=ext):
url = url.strip()
scheme = get_scheme(url).lower()
if not 'http' in scheme:
print(url, 'is invalid url')
return
for black in blacklist:
if black in url:
return
result = pre_proc(url)
if result:
return result
file_path = urlparse(url).path
if file_path.endswith(ext):
return url
| true | true |
1c3648839456bc8d70c124a29bf01dc1e4524461 | 3,465 | py | Python | otter/test/indexer/test_poller.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 20 | 2015-02-11T16:32:07.000Z | 2019-11-12T03:27:54.000Z | otter/test/indexer/test_poller.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 1,145 | 2015-01-01T00:00:47.000Z | 2022-02-11T03:40:39.000Z | otter/test/indexer/test_poller.py | codebyravi/otter | d58077ba4af24a586ae0a0becaf6da96b716a597 | [
"Apache-2.0"
] | 29 | 2015-01-08T15:00:11.000Z | 2021-02-16T16:33:53.000Z | """
Tests for :mod:`otter.indexer.poller`
"""
import mock
from zope.interface import implements
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.defer import succeed
from twisted.internet.task import Cooperator
from twisted.web.iweb import IResponse
from twisted.web.client import Agent, ResponseDone
from twisted.web.http_headers import Headers
from twisted.application.internet import TimerService
from otter.indexer.poller import FeedPollerService
from otter.test.utils import fixture
class FakeResponse(object):
    """
    A fake response implements the same interface a real
    :class:`twisted.web.client.Response` implements.
    """
    implements(IResponse)

    version = ('HTTP', 1, 1)

    def __init__(self, code, headers, body):
        # :param code: HTTP status code (e.g. 200)
        # :param headers: response Headers object
        # :param body: complete response body payload
        self.code = code
        self.phrase = 'N/A'
        self.headers = headers
        self.length = len(body)
        self._body = body

    def deliverBody(self, protocol):
        """Write the entire body to the given protocol, then signal completion."""
        protocol.dataReceived(self._body)
        protocol.connectionLost(ResponseDone())
def feed_response(fixture_name):
    """
    Build a successful (HTTP 200) fake response whose body is a fixture.

    :return: ``Deferred`` that callbacks with the contents of said fixture
    """
    body = fixture(fixture_name)
    response = FakeResponse(200, Headers({}), body)
    return succeed(response)
class FeedPollerServiceTests(SynchronousTestCase):
    """
    Tests for :class:`otter.indexer.poller.FeedPollerService`
    """
    def setUp(self):
        """
        Create a FeedPollerService with a mock agent, TimerService,
        and cooperator that do not use the real reactor
        """
        self.handler = mock.Mock()
        self.agent = mock.Mock(Agent)
        self.timer = mock.Mock(TimerService)
        # Run cooperative tasks immediately and synchronously so that no
        # real reactor is required for the test.
        self.cooperator = Cooperator(
            scheduler=lambda x: x(),
            started=True
        )

        self.poller = FeedPollerService(
            self.agent, 'http://example.com/feed',
            [self.handler],
            TimerService=self.timer,
            coiterate=self.cooperator.coiterate
        )

        # The polling callable is the second positional argument of the
        # TimerService constructor call recorded by the mock.
        self.poll = self.timer.mock_calls[0][1][1]

    def test_startService(self):
        """
        ``startService`` calls the TimerService's ``startService``
        """
        self.poller.startService()
        self.timer.return_value.startService.assert_called_once_with()

    def test_stopService(self):
        """
        ``stopService`` calls the TimerService's ``stopService``
        """
        self.poller.stopService()
        self.timer.return_value.stopService.assert_called_once_with()

    def test_poll(self):
        """
        During a polling interval, a request is made to the URL specified
        to the constructor of the FeedPollerService, and the response from the
        server is parsed into atom entries, which are then passed to the
        handler.
        """
        self.agent.request.return_value = feed_response('simple.atom')

        self.poll()

        self.agent.request.assert_called_once_with(
            'GET',
            'http://example.com/feed',
            Headers({}),
            None
        )

        self.handler.assert_called_once_with(mock.ANY)

        # The handler receives the parsed atom entry element; verify its id.
        entry = self.handler.mock_calls[0][1][0]

        self.assertEqual(
            entry.find('./{http://www.w3.org/2005/Atom}id').text,
            'urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a'
        )
| 27.943548 | 78 | 0.638384 |
import mock
from zope.interface import implements
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.defer import succeed
from twisted.internet.task import Cooperator
from twisted.web.iweb import IResponse
from twisted.web.client import Agent, ResponseDone
from twisted.web.http_headers import Headers
from twisted.application.internet import TimerService
from otter.indexer.poller import FeedPollerService
from otter.test.utils import fixture
class FakeResponse(object):
implements(IResponse)
version = ('HTTP', 1, 1)
def __init__(self, code, headers, body):
self.code = code
self.phrase = 'N/A'
self.headers = headers
self.length = len(body)
self._body = body
def deliverBody(self, protocol):
protocol.dataReceived(self._body)
protocol.connectionLost(ResponseDone())
def feed_response(fixture_name):
return succeed(FakeResponse(
200,
Headers({}),
fixture(fixture_name)))
class FeedPollerServiceTests(SynchronousTestCase):
def setUp(self):
self.handler = mock.Mock()
self.agent = mock.Mock(Agent)
self.timer = mock.Mock(TimerService)
self.cooperator = Cooperator(
scheduler=lambda x: x(),
started=True
)
self.poller = FeedPollerService(
self.agent, 'http://example.com/feed',
[self.handler],
TimerService=self.timer,
coiterate=self.cooperator.coiterate
)
self.poll = self.timer.mock_calls[0][1][1]
def test_startService(self):
self.poller.startService()
self.timer.return_value.startService.assert_called_once_with()
def test_stopService(self):
self.poller.stopService()
self.timer.return_value.stopService.assert_called_once_with()
def test_poll(self):
self.agent.request.return_value = feed_response('simple.atom')
self.poll()
self.agent.request.assert_called_once_with(
'GET',
'http://example.com/feed',
Headers({}),
None
)
self.handler.assert_called_once_with(mock.ANY)
entry = self.handler.mock_calls[0][1][0]
self.assertEqual(
entry.find('./{http://www.w3.org/2005/Atom}id').text,
'urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a'
)
| true | true |
1c3648c6c88c2f9e85435359ea2b4408c7ffda6a | 10,301 | py | Python | tests/hwsim/test_p2p_device.py | asriadi/hostap | 6a5f578cac559e25ce139ca9f27c6b998006cdec | [
"Unlicense"
] | null | null | null | tests/hwsim/test_p2p_device.py | asriadi/hostap | 6a5f578cac559e25ce139ca9f27c6b998006cdec | [
"Unlicense"
] | null | null | null | tests/hwsim/test_p2p_device.py | asriadi/hostap | 6a5f578cac559e25ce139ca9f27c6b998006cdec | [
"Unlicense"
] | null | null | null | # cfg80211 P2P Device
# Copyright (c) 2013-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
from wpasupplicant import WpaSupplicant
from p2p_utils import *
from test_nfc_p2p import set_ip_addr_info, check_ip_addr, grpform_events
from hwsim import HWSimRadio
import hostapd
import hwsim_utils
def test_p2p_device_grpform(dev, apdev):
    """P2P group formation with driver using cfg80211 P2P Device"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)
        i_res, r_res = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                             r_dev=wpas, r_intent=0)
        check_grpform_results(i_res, r_res)
        wpas.dump_monitor()
        remove_group(dev[0], wpas)
        wpas.dump_monitor()
        # The driver status of the P2P Device management interface is
        # expected to report a wdev_id entry.
        status = wpas.global_request("IFNAME=p2p-dev-" + iface + " STATUS-DRIVER")
        has_wdev = False
        for line in status.splitlines():
            if '=' not in line:
                continue
            key = line.split('=', 1)[0]
            if key == "wdev_id":
                has_wdev = True
                break
        if not has_wdev:
            raise Exception("wdev_id not found")
def test_p2p_device_grpform2(dev, apdev):
    """P2P group formation with driver using cfg80211 P2P Device (reverse)"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)
        # Same as test_p2p_device_grpform, but with the P2P Device side
        # initiating the negotiation as the intended group owner.
        res_i, res_r = go_neg_pin_authorized(i_dev=wpas, i_intent=15,
                                             r_dev=dev[0], r_intent=0)
        check_grpform_results(res_i, res_r)
        wpas.dump_monitor()
        remove_group(wpas, dev[0])
        wpas.dump_monitor()
def test_p2p_device_group_remove(dev, apdev):
    """P2P group removal via the P2P ctrl interface with driver using cfg80211 P2P Device"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)
        [i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
                                               r_dev=wpas, r_intent=0)
        check_grpform_results(i_res, r_res)
        # Issue the remove request on the interface which will be removed
        p2p_iface_wpas = WpaSupplicant(ifname=r_res['ifname'])
        res = p2p_iface_wpas.request("P2P_GROUP_REMOVE *")
        if "OK" not in res:
            raise Exception("Failed to remove P2P group")
        ev = wpas.wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
        if ev is None:
            raise Exception("Group removal event not received")
        # The global control interface must remain usable even though the
        # interface the command was issued on has just been deleted.
        if not wpas.global_ping():
            raise Exception("Could not ping global ctrl_iface after group removal")
def test_p2p_device_concurrent_scan(dev, apdev):
    """Concurrent P2P and station mode scans with driver using cfg80211 P2P Device"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)
        # Start P2P discovery first, then request a normal station-mode
        # scan shortly afterwards on the same radio.
        wpas.p2p_find()
        time.sleep(0.1)
        wpas.request("SCAN")
        if wpas.wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=15) is None:
            raise Exception("Station mode scan did not start")
def test_p2p_device_nfc_invite(dev, apdev):
    """P2P NFC invitation with driver using cfg80211 P2P Device"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)

        set_ip_addr_info(dev[0])
        logger.info("Start autonomous GO")
        dev[0].p2p_start_go()

        logger.info("Write NFC Tag on the P2P Client")
        res = wpas.global_request("P2P_LISTEN")
        if "FAIL" in res:
            raise Exception("Failed to start Listen mode")
        wpas.dump_monitor()
        pw = wpas.global_request("WPS_NFC_TOKEN NDEF").rstrip()
        if "FAIL" in pw:
            raise Exception("Failed to generate password token")
        res = wpas.global_request("P2P_SET nfc_tag 1").rstrip()
        if "FAIL" in res:
            raise Exception("Failed to enable NFC Tag for P2P static handover")
        sel = wpas.global_request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip()
        if "FAIL" in sel:
            raise Exception("Failed to generate NFC connection handover select")
        wpas.dump_monitor()

        # Feeding the handover-select payload to the GO triggers an
        # invitation to the client that wrote the tag.
        logger.info("Read NFC Tag on the GO to trigger invitation")
        res = dev[0].global_request("WPS_NFC_TAG_READ " + sel)
        if "FAIL" in res:
            raise Exception("Failed to provide NFC tag contents to wpa_supplicant")

        ev = wpas.wait_global_event(grpform_events, timeout=20)
        if ev is None:
            raise Exception("Joining the group timed out")
        res = wpas.group_form_result(ev)
        wpas.dump_monitor()
        # Verify both data connectivity and the IP address assignment
        # delivered through the NFC exchange.
        hwsim_utils.test_connectivity_p2p(dev[0], wpas)
        check_ip_addr(res)
        wpas.dump_monitor()
def test_p2p_device_misuses(dev, apdev):
    """cfg80211 P2P Device misuses"""
    hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)

        # Add a normal network profile to the P2P Device management only
        # interface to verify that it does not get used.
        id = int(wpas.global_request('IFNAME=p2p-dev-%s ADD_NETWORK' % iface).strip())
        wpas.global_request('IFNAME=p2p-dev-%s SET_NETWORK %d ssid "open"' % (iface, id))
        wpas.global_request('IFNAME=p2p-dev-%s SET_NETWORK %d key_mgmt NONE' % (iface, id))
        wpas.global_request('IFNAME=p2p-dev-%s ENABLE_NETWORK %d' % (iface, id))

        # Scan requests get ignored on p2p-dev
        wpas.global_request('IFNAME=p2p-dev-%s SCAN' % iface)

        # Station-mode association and a concurrent P2P group on the same
        # radio must both work.
        dev[0].p2p_start_go(freq=2412)
        addr = dev[0].p2p_interface_addr()
        wpas.scan_for_bss(addr, freq=2412)
        wpas.connect("open", key_mgmt="NONE", scan_freq="2412")
        hwsim_utils.test_connectivity(wpas, hapd)

        pin = wpas.wps_read_pin()
        dev[0].p2p_go_authorize_client(pin)
        res = wpas.p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60,
                                     social=True, freq=2412)
        hwsim_utils.test_connectivity_p2p(dev[0], wpas)

        # Optimize scan-after-disconnect
        wpas.group_request("SET_NETWORK 0 scan_freq 2412")
        dev[0].group_request("DISASSOCIATE " + wpas.p2p_interface_addr())
        ev = wpas.wait_group_event(["CTRL-EVENT-DISCONNECT"])
        if ev is None:
            raise Exception("Did not see disconnect event on P2P group interface")
        dev[0].remove_group()

        ev = wpas.wait_group_event(["CTRL-EVENT-SCAN-STARTED"], timeout=5)
        if ev is None:
            raise Exception("Scan not started")
        ev = wpas.wait_group_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=15)
        if ev is None:
            raise Exception("Scan not completed")
        time.sleep(1)

        # The station-mode connection must have survived the P2P group
        # teardown without any disconnection on either side.
        hwsim_utils.test_connectivity(wpas, hapd)

        ev = hapd.wait_event([ "AP-STA-DISCONNECTED" ], timeout=0.1)
        if ev is not None:
            raise Exception("Unexpected disconnection event received from hostapd")
        ev = wpas.wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=0.1)
        if ev is not None:
            raise Exception("Unexpected disconnection event received from wpa_supplicant")
        wpas.request("DISCONNECT")
        wpas.wait_disconnected()
def test_p2p_device_incorrect_command_interface(dev, apdev):
    """cfg80211 P2P Device and P2P_* command on incorrect interface"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)

        # P2P_FIND issued on the station interface (not the global/P2P
        # Device interface) should still work; the discovery result is
        # reported on the global interface, not the station one.
        dev[0].p2p_listen()
        wpas.request('P2P_FIND type=social')
        ev = wpas.wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
        if ev is None:
            raise Exception("Peer not found")
        ev = wpas.wait_event(["P2P-DEVICE-FOUND"], timeout=0.1)
        if ev is not None:
            raise Exception("Unexpected P2P-DEVICE-FOUND event on station interface")
        wpas.dump_monitor()

        pin = wpas.wps_read_pin()
        dev[0].p2p_go_neg_auth(wpas.p2p_dev_addr(), pin, "enter", go_intent=14,
                               freq=2412)
        wpas.request('P2P_STOP_FIND')
        wpas.dump_monitor()
        # P2P_CONNECT on the station interface should also be accepted.
        if "OK" not in wpas.request('P2P_CONNECT ' + dev[0].p2p_dev_addr() + ' ' + pin + ' display go_intent=1'):
            raise Exception("P2P_CONNECT failed")

        ev = wpas.wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
        if ev is None:
            raise Exception("Group formation timed out")
        wpas.group_form_result(ev)
        wpas.dump_monitor()
        ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
        if ev is None:
            raise Exception("Group formation timed out(2)")
        dev[0].group_form_result(ev)

        dev[0].remove_group()
        wpas.wait_go_ending_session()
        wpas.dump_monitor()
def test_p2p_device_incorrect_command_interface2(dev, apdev):
    """cfg80211 P2P Device and P2P_GROUP_ADD command on incorrect interface"""
    with HWSimRadio(use_p2p_device=True) as (radio, iface):
        wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
        wpas.interface_add(iface)
        # Issue P2P_GROUP_ADD on the station control interface; a group
        # should still be formed on a separate p2p-<iface>-N interface.
        reply = wpas.request('P2P_GROUP_ADD')
        if "OK" not in reply:
            raise Exception("P2P_GROUP_ADD failed")
        ev = wpas.wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
        if ev is None:
            raise Exception("Group formation timed out")
        res = wpas.group_form_result(ev)
        wpas.dump_monitor()
        logger.info("Group results: " + str(res))
        wpas.remove_group()
        expected_prefix = 'p2p-' + iface + '-'
        if not res['ifname'].startswith(expected_prefix):
            raise Exception("Unexpected group ifname: " + res['ifname'])
        wpas.dump_monitor()
| 42.742739 | 113 | 0.637026 |
import logging
logger = logging.getLogger()
import time
from wpasupplicant import WpaSupplicant
from p2p_utils import *
from test_nfc_p2p import set_ip_addr_info, check_ip_addr, grpform_events
from hwsim import HWSimRadio
import hostapd
import hwsim_utils
def test_p2p_device_grpform(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=wpas, r_intent=0)
check_grpform_results(i_res, r_res)
wpas.dump_monitor()
remove_group(dev[0], wpas)
wpas.dump_monitor()
res = wpas.global_request("IFNAME=p2p-dev-" + iface + " STATUS-DRIVER")
lines = res.splitlines()
found = False
for l in lines:
try:
[name,value] = l.split('=', 1)
if name == "wdev_id":
found = True
break
except ValueError:
pass
if not found:
raise Exception("wdev_id not found")
def test_p2p_device_grpform2(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
[i_res, r_res] = go_neg_pin_authorized(i_dev=wpas, i_intent=15,
r_dev=dev[0], r_intent=0)
check_grpform_results(i_res, r_res)
wpas.dump_monitor()
remove_group(wpas, dev[0])
wpas.dump_monitor()
def test_p2p_device_group_remove(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=wpas, r_intent=0)
check_grpform_results(i_res, r_res)
p2p_iface_wpas = WpaSupplicant(ifname=r_res['ifname'])
res = p2p_iface_wpas.request("P2P_GROUP_REMOVE *")
if "OK" not in res:
raise Exception("Failed to remove P2P group")
ev = wpas.wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
if ev is None:
raise Exception("Group removal event not received")
if not wpas.global_ping():
raise Exception("Could not ping global ctrl_iface after group removal")
def test_p2p_device_concurrent_scan(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
wpas.p2p_find()
time.sleep(0.1)
wpas.request("SCAN")
ev = wpas.wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=15)
if ev is None:
raise Exception("Station mode scan did not start")
def test_p2p_device_nfc_invite(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
set_ip_addr_info(dev[0])
logger.info("Start autonomous GO")
dev[0].p2p_start_go()
logger.info("Write NFC Tag on the P2P Client")
res = wpas.global_request("P2P_LISTEN")
if "FAIL" in res:
raise Exception("Failed to start Listen mode")
wpas.dump_monitor()
pw = wpas.global_request("WPS_NFC_TOKEN NDEF").rstrip()
if "FAIL" in pw:
raise Exception("Failed to generate password token")
res = wpas.global_request("P2P_SET nfc_tag 1").rstrip()
if "FAIL" in res:
raise Exception("Failed to enable NFC Tag for P2P static handover")
sel = wpas.global_request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip()
if "FAIL" in sel:
raise Exception("Failed to generate NFC connection handover select")
wpas.dump_monitor()
logger.info("Read NFC Tag on the GO to trigger invitation")
res = dev[0].global_request("WPS_NFC_TAG_READ " + sel)
if "FAIL" in res:
raise Exception("Failed to provide NFC tag contents to wpa_supplicant")
ev = wpas.wait_global_event(grpform_events, timeout=20)
if ev is None:
raise Exception("Joining the group timed out")
res = wpas.group_form_result(ev)
wpas.dump_monitor()
hwsim_utils.test_connectivity_p2p(dev[0], wpas)
check_ip_addr(res)
wpas.dump_monitor()
def test_p2p_device_misuses(dev, apdev):
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
id = int(wpas.global_request('IFNAME=p2p-dev-%s ADD_NETWORK' % iface).strip())
wpas.global_request('IFNAME=p2p-dev-%s SET_NETWORK %d ssid "open"' % (iface, id))
wpas.global_request('IFNAME=p2p-dev-%s SET_NETWORK %d key_mgmt NONE' % (iface, id))
wpas.global_request('IFNAME=p2p-dev-%s ENABLE_NETWORK %d' % (iface, id))
wpas.global_request('IFNAME=p2p-dev-%s SCAN' % iface)
dev[0].p2p_start_go(freq=2412)
addr = dev[0].p2p_interface_addr()
wpas.scan_for_bss(addr, freq=2412)
wpas.connect("open", key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(wpas, hapd)
pin = wpas.wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
res = wpas.p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60,
social=True, freq=2412)
hwsim_utils.test_connectivity_p2p(dev[0], wpas)
wpas.group_request("SET_NETWORK 0 scan_freq 2412")
dev[0].group_request("DISASSOCIATE " + wpas.p2p_interface_addr())
ev = wpas.wait_group_event(["CTRL-EVENT-DISCONNECT"])
if ev is None:
raise Exception("Did not see disconnect event on P2P group interface")
dev[0].remove_group()
ev = wpas.wait_group_event(["CTRL-EVENT-SCAN-STARTED"], timeout=5)
if ev is None:
raise Exception("Scan not started")
ev = wpas.wait_group_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=15)
if ev is None:
raise Exception("Scan not completed")
time.sleep(1)
hwsim_utils.test_connectivity(wpas, hapd)
ev = hapd.wait_event([ "AP-STA-DISCONNECTED" ], timeout=0.1)
if ev is not None:
raise Exception("Unexpected disconnection event received from hostapd")
ev = wpas.wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected disconnection event received from wpa_supplicant")
wpas.request("DISCONNECT")
wpas.wait_disconnected()
def test_p2p_device_incorrect_command_interface(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
dev[0].p2p_listen()
wpas.request('P2P_FIND type=social')
ev = wpas.wait_global_event(["P2P-DEVICE-FOUND"], timeout=10)
if ev is None:
raise Exception("Peer not found")
ev = wpas.wait_event(["P2P-DEVICE-FOUND"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected P2P-DEVICE-FOUND event on station interface")
wpas.dump_monitor()
pin = wpas.wps_read_pin()
dev[0].p2p_go_neg_auth(wpas.p2p_dev_addr(), pin, "enter", go_intent=14,
freq=2412)
wpas.request('P2P_STOP_FIND')
wpas.dump_monitor()
if "OK" not in wpas.request('P2P_CONNECT ' + dev[0].p2p_dev_addr() + ' ' + pin + ' display go_intent=1'):
raise Exception("P2P_CONNECT failed")
ev = wpas.wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
wpas.group_form_result(ev)
wpas.dump_monitor()
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out(2)")
dev[0].group_form_result(ev)
dev[0].remove_group()
wpas.wait_go_ending_session()
wpas.dump_monitor()
def test_p2p_device_incorrect_command_interface2(dev, apdev):
with HWSimRadio(use_p2p_device=True) as (radio, iface):
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add(iface)
if "OK" not in wpas.request('P2P_GROUP_ADD'):
raise Exception("P2P_GROUP_ADD failed")
ev = wpas.wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
res = wpas.group_form_result(ev)
wpas.dump_monitor()
logger.info("Group results: " + str(res))
wpas.remove_group()
if not res['ifname'].startswith('p2p-' + iface + '-'):
raise Exception("Unexpected group ifname: " + res['ifname'])
wpas.dump_monitor()
| true | true |
1c3648ec3279a1f0e854684b559c8178aba6aede | 468 | py | Python | data/scripts/templates/object/tangible/shipcontrol/shared_shipcontrol_falcon.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/shipcontrol/shared_shipcontrol_falcon.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/shipcontrol/shared_shipcontrol_falcon.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Falcon ship-control Tangible template object."""
	result = Tangible()
	result.template = "object/tangible/shipcontrol/shared_shipcontrol_falcon.iff"
	result.attribute_template_id = 8
	result.stfName("space/space_item","shipcontrol_falcon")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
1c36499a2f23ae8c2c7dad89df8911f1d10add28 | 1,203 | py | Python | packages/pyre/xml/AttributeDescriptor.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/xml/AttributeDescriptor.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/xml/AttributeDescriptor.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
from .Descriptor import Descriptor
class AttributeDescriptor(Descriptor):
    """
    This class serves as the resting place for element metadata provided by the user during
    DTD formation. It is used by DTD-derived metaclasses to decorate the handlers of the various
    XML elements

    This capability is not yet fully developed.
    """

    # attribute metadata
    name = None # set by Named

    # the attribute type
    # may be one of CDATA, ID, IDREFS, NMTOKEN, NMTOKENS, ENTITY, ENTITIES, NOTATION, XML
    # or a tuple of valid choices; see pyre.xml.enumerated()
    type = None

    # attribute requirements:
    # pyre.xml.IMPLIED:
    #     the attribute is optional
    # pyre.xml.REQUIRED:
    #     the attribute is required
    #     signal an error if the element does not specify a value
    # pyre.xml.FIXED:
    #     the default value is the only possible value for the attribute
    #     signal an error if the document contains anything else
    presence = None

    # the default value
    default = None # the default value for the attribute
# end of file
| 26.152174 | 96 | 0.67581 |
from .Descriptor import Descriptor
class AttributeDescriptor(Descriptor):
name = None
type = None
presence = None
default = None
| true | true |
1c364a1886e096911462c29d931172fd9f994844 | 4,677 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20200701/get_virtual_router_peering.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20200701/get_virtual_router_peering.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20200701/get_virtual_router_peering.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetVirtualRouterPeeringResult',
'AwaitableGetVirtualRouterPeeringResult',
'get_virtual_router_peering',
]
@pulumi.output_type
class GetVirtualRouterPeeringResult:
    """
    Virtual Router Peering resource.

    Output type produced by the ``getVirtualRouterPeering`` invoke; the
    values are set by the Pulumi engine and read back via the properties
    below.
    """
    def __init__(__self__, etag=None, name=None, peer_asn=None, peer_ip=None, provisioning_state=None, type=None):
        # Validate the raw values handed over by the engine before storing
        # them on the output object.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if peer_asn and not isinstance(peer_asn, int):
            raise TypeError("Expected argument 'peer_asn' to be a int")
        pulumi.set(__self__, "peer_asn", peer_asn)
        if peer_ip and not isinstance(peer_ip, str):
            raise TypeError("Expected argument 'peer_ip' to be a str")
        pulumi.set(__self__, "peer_ip", peer_ip)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the virtual router peering that is unique within a virtual router.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peerAsn")
    def peer_asn(self) -> Optional[int]:
        """
        Peer ASN.
        """
        return pulumi.get(self, "peer_asn")

    @property
    @pulumi.getter(name="peerIp")
    def peer_ip(self) -> Optional[str]:
        """
        Peer IP.
        """
        return pulumi.get(self, "peer_ip")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Peering type.
        """
        return pulumi.get(self, "type")
class AwaitableGetVirtualRouterPeeringResult(GetVirtualRouterPeeringResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes this a generator, which is what the
        # await protocol requires; the branch is never taken.
        if False:
            yield self
        return GetVirtualRouterPeeringResult(
            etag=self.etag,
            name=self.name,
            peer_asn=self.peer_asn,
            peer_ip=self.peer_ip,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_virtual_router_peering(peering_name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               virtual_router_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualRouterPeeringResult:
    """
    Use this data source to access information about an existing resource
    (a Virtual Router Peering) via the Azure NextGen provider.

    :param str peering_name: The name of the Virtual Router Peering.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_router_name: The name of the Virtual Router.
    """
    __args__ = dict()
    __args__['peeringName'] = peering_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualRouterName'] = virtual_router_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200701:getVirtualRouterPeering', __args__, opts=opts, typ=GetVirtualRouterPeeringResult).value

    return AwaitableGetVirtualRouterPeeringResult(
        etag=__ret__.etag,
        name=__ret__.name,
        peer_asn=__ret__.peer_asn,
        peer_ip=__ret__.peer_ip,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| 35.165414 | 156 | 0.647424 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetVirtualRouterPeeringResult',
'AwaitableGetVirtualRouterPeeringResult',
'get_virtual_router_peering',
]
@pulumi.output_type
class GetVirtualRouterPeeringResult:
    """Output type holding the fields of a Virtual Router Peering lookup."""
    def __init__(__self__, etag=None, name=None, peer_asn=None, peer_ip=None, provisioning_state=None, type=None):
        # Each argument is type-checked defensively, then stored with
        # pulumi.set so the @pulumi.getter properties below can read it back.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if peer_asn and not isinstance(peer_asn, int):
            raise TypeError("Expected argument 'peer_asn' to be a int")
        pulumi.set(__self__, "peer_asn", peer_asn)
        if peer_ip and not isinstance(peer_ip, str):
            raise TypeError("Expected argument 'peer_ip' to be a str")
        pulumi.set(__self__, "peer_ip", peer_ip)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def etag(self) -> str:
        """Gets the 'etag' output property."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Gets the 'name' output property."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="peerAsn")
    def peer_asn(self) -> Optional[int]:
        """Gets the 'peerAsn' output property."""
        return pulumi.get(self, "peer_asn")
    @property
    @pulumi.getter(name="peerIp")
    def peer_ip(self) -> Optional[str]:
        """Gets the 'peerIp' output property."""
        return pulumi.get(self, "peer_ip")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the resource."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """Peering type."""
        return pulumi.get(self, "type")
class AwaitableGetVirtualRouterPeeringResult(GetVirtualRouterPeeringResult):
    """Awaitable wrapper so the lookup result can be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Unreachable ``yield`` makes this a generator, satisfying the await
        # protocol; it resolves immediately with the populated result.
        if False:
            yield self
        return GetVirtualRouterPeeringResult(
            etag=self.etag,
            name=self.name,
            peer_asn=self.peer_asn,
            peer_ip=self.peer_ip,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_virtual_router_peering(peering_name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               virtual_router_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualRouterPeeringResult:
    """
    Use this data source to access information about an existing resource.

    :param str peering_name: The name of the Virtual Router Peering.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_router_name: The name of the Virtual Router.
    """
    __args__ = dict()
    __args__['peeringName'] = peering_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualRouterName'] = virtual_router_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this provider package's own version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200701:getVirtualRouterPeering', __args__, opts=opts, typ=GetVirtualRouterPeeringResult).value
    return AwaitableGetVirtualRouterPeeringResult(
        etag=__ret__.etag,
        name=__ret__.name,
        peer_asn=__ret__.peer_asn,
        peer_ip=__ret__.peer_ip,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| true | true |
1c364a6388c8dc792e21bc5b830da7ebd076b86f | 659 | py | Python | var/spack/repos/builtin/packages/py-scinum/package.py | BenWibking/spack | 49b3b43a4a9375210b578635d9240875a5f3106b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-scinum/package.py | BenWibking/spack | 49b3b43a4a9375210b578635d9240875a5f3106b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-scinum/package.py | joequant/spack | e028ee0d5903045e1cdeb57550cbff61f2ffb2fa | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyScinum(PythonPackage):
    """Scientific numbers with multiple uncertainties and
    correlation-aware, gaussian propagation and numpy"""
    homepage = "https://github.com/riga/scinum"
    pypi = "scinum/scinum-1.2.0.tar.gz"
    version('1.2.0', sha256='31802d9b580f3a89c0876f34432851bc4def9cb2844d6f3c8e044480f2dd2f91')
    # Build/runtime requirements for the Spack package.
    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
| 32.95 | 95 | 0.728376 |
from spack import *
class PyScinum(PythonPackage):
    """Spack package recipe for the 'scinum' PyPI distribution."""
    homepage = "https://github.com/riga/scinum"
    pypi = "scinum/scinum-1.2.0.tar.gz"
    version('1.2.0', sha256='31802d9b580f3a89c0876f34432851bc4def9cb2844d6f3c8e044480f2dd2f91')
    # Build/runtime requirements for the Spack package.
    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
| true | true |
1c364b1d9d88e31d1351040522f94d18b6188909 | 1,755 | py | Python | p399_evaluate_division.py | feigaochn/leetcode | abf0877fae02aa9c2549051f0b68df0ace952512 | [
"MIT"
] | null | null | null | p399_evaluate_division.py | feigaochn/leetcode | abf0877fae02aa9c2549051f0b68df0ace952512 | [
"MIT"
] | null | null | null | p399_evaluate_division.py | feigaochn/leetcode | abf0877fae02aa9c2549051f0b68df0ace952512 | [
"MIT"
] | null | null | null | class Solution:
def calcEquation(self, equations, values, queries):
"""
:type equations: List[List[str]]
:type values: List[float]
:type queries: List[List[str]]
:rtype: List[float]
"""
from collections import defaultdict
division = defaultdict(dict)
for (up, down), ratio in zip(equations, values):
division[up][down] = ratio
if abs(ratio) > 1e-3:
division[down][up] = 1.0 / ratio
result = []
for up, down in queries:
if up not in division or down not in division:
result.append(-1.0)
elif up in division and down in division[up]:
result.append(division[up][down])
else:
# bfs
frontier = set(division[up].keys())
while frontier:
mid = frontier.pop()
for mid2 in division[mid]:
if mid2 not in division[up]:
division[up][mid2] = division[up][mid] * division[
mid][mid2]
frontier.add(mid2)
if mid2 == down:
frontier.clear()
break
result.append(division[up][down]
if down in division[up] else -1.0)
return result
fn = Solution().calcEquation
# Exercise the solver on the two sample inputs from the problem statement;
# printed output is unchanged from the original ad-hoc calls.
_demo_inputs = [
    ([["a", "b"], ["b", "c"]], [2.0, 3.0],
     [["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"]]),
    ([["a", "b"], ["c", "d"]], [1.0, 1.0],
     [["a", "c"], ["b", "d"], ["b", "a"], ["d", "c"]]),
]
for _equations, _values, _queries in _demo_inputs:
    print(fn(_equations, _values, _queries))
| 34.411765 | 78 | 0.426781 | class Solution:
    def calcEquation(self, equations, values, queries):
        """Evaluate division queries given known variable ratios.

        Builds a ratio graph from `equations`/`values`, then answers each
        query either directly or by expanding reachable ratios; -1.0 marks
        an unresolvable query.
        """
        from collections import defaultdict
        # division[a][b] holds the known ratio a / b.
        division = defaultdict(dict)
        for (up, down), ratio in zip(equations, values):
            division[up][down] = ratio
            if abs(ratio) > 1e-3:  # skip the inverse edge for near-zero ratios
                division[down][up] = 1.0 / ratio
        result = []
        for up, down in queries:
            if up not in division or down not in division:
                result.append(-1.0)
            elif up in division and down in division[up]:
                result.append(division[up][down])
            else:
                # Expand ratios reachable from `up`, caching them in
                # division[up] as the frontier is processed.
                # NOTE(review): inserting into division[up] while iterating
                # division[mid] can raise "dict changed size during
                # iteration" if a node ends up as its own neighbour
                # (e.g. an equation like ["a", "a"]) -- confirm inputs.
                frontier = set(division[up].keys())
                while frontier:
                    mid = frontier.pop()
                    for mid2 in division[mid]:
                        if mid2 not in division[up]:
                            division[up][mid2] = division[up][mid] * division[
                                mid][mid2]
                            frontier.add(mid2)
                            if mid2 == down:
                                frontier.clear()
                                break
                result.append(division[up][down]
                              if down in division[up] else -1.0)
        return result
# Ad-hoc demo: run the solver on the two sample inputs and print the answers.
fn = Solution().calcEquation
print(
    fn(equations=[["a", "b"], ["b", "c"]],
       values=[2.0, 3.0],
       queries=[["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"]]))
print(
    fn([["a", "b"], ["c", "d"]], [1.0, 1.0],
        [["a", "c"], ["b", "d"], ["b", "a"], ["d", "c"]]))
| true | true |
1c364c02c70f8f02149d219d56573d7fd037e68c | 484 | py | Python | setup.py | openprocurement/robot_tests.broker.25h8 | 619ffd180a8f051ef46d62767d54f4796baa122c | [
"Apache-2.0"
] | null | null | null | setup.py | openprocurement/robot_tests.broker.25h8 | 619ffd180a8f051ef46d62767d54f4796baa122c | [
"Apache-2.0"
] | 1 | 2017-12-18T13:44:01.000Z | 2017-12-18T13:44:01.000Z | setup.py | openprocurement/robot_tests.broker.25h8 | 619ffd180a8f051ef46d62767d54f4796baa122c | [
"Apache-2.0"
] | 3 | 2018-06-11T10:30:05.000Z | 2019-08-07T07:55:40.000Z | from setuptools import setup
# Package name is derived from the broker identifier so each broker plugin
# follows the same robot_tests.broker.<name> layout.
broker_name = '25h8'
pkg_name = 'robot_tests.broker.{}'.format(broker_name)
description = '{} broker for OpenProcurement Robot tests'.format(broker_name)
setup(name=pkg_name,
      version='0.0.dev1',
      description=description,
      author='',
      author_email='',
      url='https://github.com/openprocurement/{}'.format(pkg_name),
      packages=[pkg_name],
      # The package sources live in the repository root.
      package_dir={pkg_name: '.'},
      # Ship the Robot Framework suites alongside the Python code.
      package_data={pkg_name: ['*.robot']}
      )
| 28.470588 | 77 | 0.661157 | from setuptools import setup
# Package name follows the robot_tests.broker.<name> convention.
broker_name = '25h8'
pkg_name = 'robot_tests.broker.{}'.format(broker_name)
description = '{} broker for OpenProcurement Robot tests'.format(broker_name)
setup(name=pkg_name,
      version='0.0.dev1',
      description=description,
      author='',
      author_email='',
      url='https://github.com/openprocurement/{}'.format(pkg_name),
      packages=[pkg_name],
      # Sources live in the repository root; include the .robot suites.
      package_dir={pkg_name: '.'},
      package_data={pkg_name: ['*.robot']}
      )
| true | true |
1c364e9bbe94cf32b79e21647cfb3f8d4c807887 | 836 | py | Python | glance/image_cache/cleaner.py | citrix-openstack-build/glance | 5048ceea989d93c8819d2dc6377803fc74620d14 | [
"Apache-2.0"
] | 1 | 2018-05-03T03:52:39.000Z | 2018-05-03T03:52:39.000Z | glance/image_cache/cleaner.py | citrix-openstack-build/glance | 5048ceea989d93c8819d2dc6377803fc74620d14 | [
"Apache-2.0"
] | null | null | null | glance/image_cache/cleaner.py | citrix-openstack-build/glance | 5048ceea989d93c8819d2dc6377803fc74620d14 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cleans up any invalid cache entries
"""
from glance.image_cache import base
class Cleaner(base.CacheApp):
    """Cache application that removes invalid image cache entries."""
    def run(self):
        # Delegate the actual cleanup to the cache implementation.
        self.cache.clean()
| 27.866667 | 78 | 0.7189 |
from glance.image_cache import base
class Cleaner(base.CacheApp):
    """Cache application that removes invalid image cache entries."""
    def run(self):
        # Delegate the actual cleanup to the cache implementation.
        self.cache.clean()
| true | true |
1c36506797ede50a7b230fbe77edfc0534f3d72f | 6,058 | py | Python | site/mysite/facerecognition/tools.py | ditastaszewski/C17705971-DT228-FYP | 5e4be0aad9b6d86180c01a3291952e3de0bec156 | [
"MIT"
] | null | null | null | site/mysite/facerecognition/tools.py | ditastaszewski/C17705971-DT228-FYP | 5e4be0aad9b6d86180c01a3291952e3de0bec156 | [
"MIT"
] | null | null | null | site/mysite/facerecognition/tools.py | ditastaszewski/C17705971-DT228-FYP | 5e4be0aad9b6d86180c01a3291952e3de0bec156 | [
"MIT"
] | null | null | null | import cv2
import matplotlib.pyplot as plot
from matplotlib import pyplot as plt
from matplotlib import image as image
import easygui
import numpy as np
import glob
import tensorflow as tf
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input
from keras import optimizers
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
import PIL
from PIL import ImageFont, ImageDraw, Image
from django.conf import settings
staticPath = settings.STATICFILES_DIRS[0]
faceLabels = ['face', 'non-face']
characterLabels = ['Flandre', 'Marisa', 'Reimu', 'Remilia', 'Sakuya']
def loadModel(modelPath, classes):
    """Build a ResNet50-backed classifier and load trained weights.

    :param modelPath: path to an .hdf5 weights file matching this topology
    :param classes: number of output classes for the final Dense layer
    :return: compiled keras Sequential model with weights loaded
    """
    model = Sequential()
    # ResNet50 feature extractor with global average pooling, followed by a
    # softmax head sized to the requested number of classes.
    model.add(ResNet50(include_top=False, pooling='avg'))
    model.add(Dense(classes, activation='softmax'))
    optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.load_weights(modelPath)
    return model
def getFacePrediction(img, labels):
    """Classify a cropped image region as face (1) vs non-face (0).

    `labels` is accepted for symmetry with the other prediction helpers;
    the binary decision only compares the two class scores.
    """
    resized = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    batch = resized.reshape((1,) + resized.shape)
    predictions = faceModel.predict(batch, steps=1)
    face_score, non_face_score = predictions[0][0], predictions[0][1]
    return 1 if face_score > non_face_score else 0
def getCharacterPrediction(img, labels):
    """Predict the character for a cropped face image.

    :return: (label, confidence) where confidence is the best class score
             scaled to a percentage.
    """
    resized = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    batch = resized.reshape((1,) + resized.shape)
    scores = touhouModel.predict(batch, steps=1)[0]
    best_index = np.argmax(scores)
    confidence = np.amax(scores) * 100
    return labels[best_index], confidence
def verbosePredictions(predictions, labels):
    """Render class scores of batch row 0 as '<label>-<pct>% ' segments.

    Each score is shown with two decimals and a trailing space, matching
    the original "%s-%.2f%% " formatting (including the final space).
    """
    return "".join(f"{labels[i]}-{score * 100:.2f}% "
                   for i, score in enumerate(predictions[0]))
def getFaces(inpath, outpath, classifyCharacters):
    """Detect anime faces in the image at `inpath`, annotate them, and write
    the marked-up image to `outpath`.

    When `classifyCharacters` is true each confirmed face is labelled with
    the predicted character name and confidence; otherwise faces get a
    plain green rectangle.

    :return: (number of confirmed faces, per-character hit counts)
    """
    #Get the width and height
    img = cv2.imread(inpath)
    markedImg = img.copy()
    height = img.shape[0]  # NOTE: height/width are computed but unused below
    width = img.shape[1]
    potentialFaces = 0
    actualFaces = 0
    #rectangle width
    rW = 2
    # LBP cascade tuned for anime faces produces candidate regions; each is
    # then confirmed by the CNN face/non-face classifier.
    faceCascade = cv2.CascadeClassifier(staticPath + "/code/lbpcascade_animeface.xml")
    faces = faceCascade.detectMultiScale(img,
                                     # detector options
                                     scaleFactor = 1.01,
                                     minNeighbors = 3,
                                     minSize = (32, 32))
    charactersFound = dict.fromkeys(characterLabels, 0)
    for (x, y, w, h) in faces:
        potentialFaces += 1
        #cv2.rectangle(markedImg, (x,y), (x + w, y + h), (0,0,255), rW)
        prediction = 0
        #print(potentialFaces)
        prediction = getFacePrediction(img[y:y+h, x:x+w], faceLabels)
        #cv2.rectangle(markedImg, (lx,ly), (rx, ry), (255,0,0), rW)
        if prediction == 1:
            #print("detected")
            outputImg = img.copy()
            actualFaces += 1
            #See which charcter it is if we are going to classify the characters
            if classifyCharacters:
                character, characterPrediction = getCharacterPrediction(outputImg[y:y+h, x:x+w], characterLabels)
                resultString = "%s-%.2f%%" % (character, characterPrediction)
                #Increment the counter for how many times the character was found in the image
                charactersFound[character] += 1
                # Shrink the font until the label fits inside the face box.
                fontSize = 40
                font = ImageFont.truetype("arial.ttf", fontSize)
                while font.getsize(resultString)[0] > w:
                    fontSize -= 1
                    font = ImageFont.truetype("arial.ttf", fontSize)
                fW, fH = font.getsize(resultString)[0], font.getsize(resultString)[1]
                # Darken a strip at the bottom of the box (halve V in HSV) so
                # the label text stays readable.
                markedImgHSV = cv2.cvtColor(markedImg, cv2.COLOR_BGR2HSV)
                markedImgHSV[y+h-fH:y+h,x:x+w,2] = markedImgHSV[y+h-fH:y+h,x:x+w,2] * 0.5
                markedImg = cv2.cvtColor(markedImgHSV, cv2.COLOR_HSV2BGR)
                # White outer + black inner rectangle for a visible border.
                cv2.rectangle(markedImg, (x,y), (x+w, y+h), (255,255,255), rW, lineType=cv2.LINE_AA)
                cv2.rectangle(markedImg, (x,y), (x+w, y+h), (0,0,0), rW - 1, lineType=cv2.LINE_AA)
                # Round-trip through PIL to draw TrueType text.
                tempImg = Image.fromarray(markedImg)
                draw = ImageDraw.Draw(tempImg)
                draw.text((x+rW, y+h-rW), resultString, font=font, anchor='lb')
                #draw.text((x+rW+1, y+rW+1), str(potentialFaces), font=font, anchor='lt')
                markedImg = np.asarray(tempImg)
            else:
                cv2.rectangle(markedImg, (x,y), (x+w, y+h), (0,255,0), rW * 2, lineType=cv2.LINE_AA)
            #cv2.putText(markedImg, str(potentialFaces), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    cv2.imwrite(outpath, markedImg)
    return actualFaces, charactersFound
faceModel = loadModel(staticPath + "/code/faceModel.hdf5", 2)
touhouModel = loadModel(staticPath + "/code/touhouModel.hdf5", 5) | 38.100629 | 113 | 0.622483 | import cv2
import matplotlib.pyplot as plot
from matplotlib import pyplot as plt
from matplotlib import image as image
import easygui
import numpy as np
import glob
import tensorflow as tf
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input
from keras import optimizers
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
import PIL
from PIL import ImageFont, ImageDraw, Image
from django.conf import settings
staticPath = settings.STATICFILES_DIRS[0]
faceLabels = ['face', 'non-face']
characterLabels = ['Flandre', 'Marisa', 'Reimu', 'Remilia', 'Sakuya']
def loadModel(modelPath, classes):
    """Build a ResNet50-backed classifier (avg-pooled backbone + softmax
    Dense head for `classes` outputs) and load weights from `modelPath`."""
    model = Sequential()
    model.add(ResNet50(include_top = False, pooling = 'avg'))
    model.add(Dense(classes, activation = 'softmax'))
    sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)
    model.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics = ['accuracy'])
    model.load_weights(modelPath)
    return model
def getFacePrediction(img, labels):
    """Return 1 if the face class scores higher than non-face, else 0."""
    # Resize to the network's 224x224 input and add a batch dimension.
    img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)
    img = img.reshape((1,) + img.shape)
    predictions = faceModel.predict(img, steps=1)
    if predictions[0][0] > predictions[0][1]:
        return 1
    else:
        return 0
def getCharacterPrediction(img, labels):
    """Return (best label, confidence-percentage) for a cropped face."""
    # Resize to the network's 224x224 input and add a batch dimension.
    img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)
    img = img.reshape((1,) + img.shape)
    predictions = touhouModel.predict(img, steps=1)
    highestPrediction = np.amax(predictions[0])
    predictionPercentage = highestPrediction * 100
    predictionIndex = np.argmax(predictions[0])
    character = labels[predictionIndex]
    return character, predictionPercentage
def verbosePredictions(predictions, labels):
    """Format row 0 of `predictions` as '<label>-<pct>% ' segments."""
    predictString = ""
    for i in range(0, len(predictions[0])):
        # "%%" renders a literal percent sign; each segment ends with a space.
        predictString += "%s-%.2f%% " % (labels[i], predictions[0][i] * 100)
    return predictString
def getFaces(inpath, outpath, classifyCharacters):
    """Detect anime faces in `inpath`, annotate them, save to `outpath`.

    :return: (number of confirmed faces, per-character hit counts)
    """
    img = cv2.imread(inpath)
    markedImg = img.copy()
    height = img.shape[0]  # NOTE: height/width are unused below
    width = img.shape[1]
    potentialFaces = 0
    actualFaces = 0
    rW = 2  # rectangle line width
    # LBP cascade proposes candidate regions; the CNN confirms each one.
    faceCascade = cv2.CascadeClassifier(staticPath + "/code/lbpcascade_animeface.xml")
    faces = faceCascade.detectMultiScale(img,
                                     scaleFactor = 1.01,
                                     minNeighbors = 3,
                                     minSize = (32, 32))
    charactersFound = dict.fromkeys(characterLabels, 0)
    for (x, y, w, h) in faces:
        potentialFaces += 1
        prediction = 0
        prediction = getFacePrediction(img[y:y+h, x:x+w], faceLabels)
        if prediction == 1:
            outputImg = img.copy()
            actualFaces += 1
            if classifyCharacters:
                character, characterPrediction = getCharacterPrediction(outputImg[y:y+h, x:x+w], characterLabels)
                resultString = "%s-%.2f%%" % (character, characterPrediction)
                charactersFound[character] += 1
                # Shrink the font until the label fits the face box width.
                fontSize = 40
                font = ImageFont.truetype("arial.ttf", fontSize)
                while font.getsize(resultString)[0] > w:
                    fontSize -= 1
                    font = ImageFont.truetype("arial.ttf", fontSize)
                fW, fH = font.getsize(resultString)[0], font.getsize(resultString)[1]
                # Darken the label strip (halve V in HSV) for readability.
                markedImgHSV = cv2.cvtColor(markedImg, cv2.COLOR_BGR2HSV)
                markedImgHSV[y+h-fH:y+h,x:x+w,2] = markedImgHSV[y+h-fH:y+h,x:x+w,2] * 0.5
                markedImg = cv2.cvtColor(markedImgHSV, cv2.COLOR_HSV2BGR)
                cv2.rectangle(markedImg, (x,y), (x+w, y+h), (255,255,255), rW, lineType=cv2.LINE_AA)
                cv2.rectangle(markedImg, (x,y), (x+w, y+h), (0,0,0), rW - 1, lineType=cv2.LINE_AA)
                # PIL round-trip to draw TrueType text on the image.
                tempImg = Image.fromarray(markedImg)
                draw = ImageDraw.Draw(tempImg)
                draw.text((x+rW, y+h-rW), resultString, font=font, anchor='lb')
                markedImg = np.asarray(tempImg)
            else:
                cv2.rectangle(markedImg, (x,y), (x+w, y+h), (0,255,0), rW * 2, lineType=cv2.LINE_AA)
    cv2.imwrite(outpath, markedImg)
    return actualFaces, charactersFound
faceModel = loadModel(staticPath + "/code/faceModel.hdf5", 2)
touhouModel = loadModel(staticPath + "/code/touhouModel.hdf5", 5) | true | true |
1c3651fde848b19dc6ea7ba74c0eea8e504854f8 | 768 | py | Python | resources/_buggycode-peer-review/cs08-01-ykk/buggy.py | kyliehuch/w21 | 26f3e0af4dbed4e3b3e892c931dce57a3362a054 | [
"MIT"
] | null | null | null | resources/_buggycode-peer-review/cs08-01-ykk/buggy.py | kyliehuch/w21 | 26f3e0af4dbed4e3b3e892c931dce57a3362a054 | [
"MIT"
] | 16 | 2021-02-05T22:26:21.000Z | 2021-03-08T17:51:43.000Z | resources/_buggycode-peer-review/cs08-01-ykk/buggy.py | kyliehuch/w21 | 26f3e0af4dbed4e3b3e892c931dce57a3362a054 | [
"MIT"
] | 15 | 2021-02-04T17:16:50.000Z | 2021-02-19T17:50:14.000Z | ''' Code truncated to focus on the most important part '''
def exact_change(user_total):
    """Print the change for `user_total` cents in nickels and pennies.

    Prints 'no change' for non-positive amounts, otherwise one line per
    denomination used (singular/plural chosen to match the count).

    :param user_total: amount of change in cents
    :return: the number of nickels dispensed

    BUG FIX: the original assigned the nickel count to a misspelled local
    'nickles', so the initialized 'nickels' was always returned as 0.
    """
    nickels = 0
    if user_total <= 0:
        print('no change')
    if user_total >= 5:
        nickels = user_total // 5
        user_total = user_total % 5
        print(nickels, 'nickel' if nickels == 1 else 'nickels')
    if user_total >= 1:
        print(user_total, 'penny' if user_total == 1 else 'pennies')
    return nickels
if __name__ == '__main__':
    # Read the change amount (in cents) from stdin and report the coins.
    exact_change(int(input()))
| 26.482759 | 58 | 0.55599 | def exact_change(user_total):
    nickels = 0
    if user_total <= 0:
        print('no change')
    if user_total >= 5:
        if (user_total//5) == 1:
            # NOTE(review): 'nickles' is misspelled -- it never updates the
            # 'nickels' initialized above, so the function always returns 0.
            nickles = user_total//5
            user_total = user_total%5
            print(nickles, 'nickel')
        elif (user_total//5) > 1:
            # NOTE(review): same misspelling as above ('nickles' vs 'nickels').
            nickles = user_total//5
            user_total = user_total%5
            print(nickles, 'nickels')
    if user_total >= 1:
        if user_total == 1:
            print(user_total, 'penny')
        elif user_total > 1:
            print(user_total, 'pennies')
    return nickels
if __name__ == '__main__':
    # Read the change amount (in cents) from stdin and report the coins.
    input_val = int(input())
    exact_change(input_val)
| true | true |
1c3652c55ecdecf5b744bdddc6db9d0876006d9d | 898 | py | Python | temboo/core/Library/Xively/Devices/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Xively/Devices/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Xively/Devices/__init__.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | from temboo.Library.Xively.Devices.ActivateDevice import ActivateDevice, ActivateDeviceInputSet, ActivateDeviceResultSet, ActivateDeviceChoreographyExecution
from temboo.Library.Xively.Devices.CreateDevice import CreateDevice, CreateDeviceInputSet, CreateDeviceResultSet, CreateDeviceChoreographyExecution
from temboo.Library.Xively.Devices.DeleteDevice import DeleteDevice, DeleteDeviceInputSet, DeleteDeviceResultSet, DeleteDeviceChoreographyExecution
from temboo.Library.Xively.Devices.ListAllDevices import ListAllDevices, ListAllDevicesInputSet, ListAllDevicesResultSet, ListAllDevicesChoreographyExecution
from temboo.Library.Xively.Devices.ReadDevice import ReadDevice, ReadDeviceInputSet, ReadDeviceResultSet, ReadDeviceChoreographyExecution
from temboo.Library.Xively.Devices.UpdateDevice import UpdateDevice, UpdateDeviceInputSet, UpdateDeviceResultSet, UpdateDeviceChoreographyExecution
| 128.285714 | 157 | 0.906459 | from temboo.Library.Xively.Devices.ActivateDevice import ActivateDevice, ActivateDeviceInputSet, ActivateDeviceResultSet, ActivateDeviceChoreographyExecution
from temboo.Library.Xively.Devices.CreateDevice import CreateDevice, CreateDeviceInputSet, CreateDeviceResultSet, CreateDeviceChoreographyExecution
from temboo.Library.Xively.Devices.DeleteDevice import DeleteDevice, DeleteDeviceInputSet, DeleteDeviceResultSet, DeleteDeviceChoreographyExecution
from temboo.Library.Xively.Devices.ListAllDevices import ListAllDevices, ListAllDevicesInputSet, ListAllDevicesResultSet, ListAllDevicesChoreographyExecution
from temboo.Library.Xively.Devices.ReadDevice import ReadDevice, ReadDeviceInputSet, ReadDeviceResultSet, ReadDeviceChoreographyExecution
from temboo.Library.Xively.Devices.UpdateDevice import UpdateDevice, UpdateDeviceInputSet, UpdateDeviceResultSet, UpdateDeviceChoreographyExecution
| true | true |
1c3652cae9464671978b45d983a6501876dc365f | 5,289 | py | Python | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/TestSuite_Main_Optimized.py | CStudios15/o3de | 9dc85000a3ec1a6c6633d718f5c455ab11a46818 | [
"Apache-2.0",
"MIT"
] | 8 | 2021-08-31T02:14:19.000Z | 2021-12-28T19:20:59.000Z | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/TestSuite_Main_Optimized.py | RoddieKieley/o3de | e804fd2a4241b039a42d9fa54eaae17dc94a7a92 | [
"Apache-2.0",
"MIT"
] | 8 | 2021-07-12T13:55:00.000Z | 2021-10-04T14:53:21.000Z | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/TestSuite_Main_Optimized.py | RoddieKieley/o3de | e804fd2a4241b039a42d9fa54eaae17dc94a7a92 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-16T05:06:18.000Z | 2021-09-16T05:06:18.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import pytest
import ly_test_tools.environment.file_system as file_system
import ly_test_tools._internal.pytest_plugin as internal_plugin
from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorParallelTest, EditorTestSuite
@pytest.mark.SUITE_periodic
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(EditorTestSuite):
    """Periodic Landscape Canvas Editor test suite.

    Each nested class binds one EditorScripts module (as ``test_module``)
    to the shared/single Editor test machinery.
    """
    class test_LandscapeCanvas_SlotConnections_UpdateComponentReferences(EditorSharedTest):
        from .EditorScripts import SlotConnections_UpdateComponentReferences as test_module
    class test_LandscapeCanvas_GradientMixer_NodeConstruction(EditorSharedTest):
        from .EditorScripts import GradientMixer_NodeConstruction as test_module
    class test_LandscapeCanvas_AreaNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import AreaNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_AreaNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_AreaNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_LayerExtenderNodes_ComponentEntitySync(EditorSharedTest):
        from .EditorScripts import LayerExtenderNodes_ComponentEntitySync as test_module
    class test_LandscapeCanvas_Edit_DisabledNodeDuplication(EditorSharedTest):
        from .EditorScripts import Edit_DisabledNodeDuplication as test_module
    class test_LandscapeCanvas_Edit_UndoNodeDelete_SliceEntity(EditorSharedTest):
        from .EditorScripts import Edit_UndoNodeDelete_SliceEntity as test_module
    class test_LandscapeCanvas_NewGraph_CreatedSuccessfully(EditorSharedTest):
        from .EditorScripts import NewGraph_CreatedSuccessfully as test_module
    class test_LandscapeCanvas_Component_AddedRemoved(EditorSharedTest):
        from .EditorScripts import Component_AddedRemoved as test_module
    class test_LandscapeCanvas_GraphClosed_OnLevelChange(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnLevelChange as test_module
    # Known failure tracked upstream; xfail stays until the issue is closed.
    @pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/2201")
    class test_LandscapeCanvas_GraphClosed_OnEntityDelete(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnEntityDelete as test_module
    class test_LandscapeCanvas_GraphClosed_TabbedGraphClosesIndependently(EditorSharedTest):
        from .EditorScripts import GraphClosed_TabbedGraph as test_module
    class test_LandscapeCanvas_Slice_CreateInstantiate(EditorSingleTest):
        from .EditorScripts import Slice_CreateInstantiate as test_module
        # Custom teardown to remove the slice asset created during the test.
        def teardown(self, request, workspace, editor, editor_test_results, launcher_platform):
            file_system.delete([os.path.join(workspace.paths.engine_root(), "AutomatedTesting", "slices",
                                             "TestSlice.slice")], True, True)
    class test_LandscapeCanvas_GradientModifierNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientModifierNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_GradientNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import GradientNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_GradientNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityRemovedOnNodeDelete as test_module
    # Skipped on debug builds; see the linked upstream issue.
    @pytest.mark.skipif("debug" == os.path.basename(internal_plugin.build_directory),
                        reason="https://github.com/o3de/o3de/issues/4872")
    class test_LandscapeCanvas_GraphUpdates_UpdateComponents(EditorSharedTest):
        from .EditorScripts import GraphUpdates_UpdateComponents as test_module
    class test_LandscapeCanvas_ComponentUpdates_UpdateGraph(EditorSharedTest):
        from .EditorScripts import ComponentUpdates_UpdateGraph as test_module
    class test_LandscapeCanvas_LayerBlender_NodeConstruction(EditorSharedTest):
        from .EditorScripts import LayerBlender_NodeConstruction as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityRemovedOnNodeDelete as test_module
import os
import pytest
import ly_test_tools.environment.file_system as file_system
import ly_test_tools._internal.pytest_plugin as internal_plugin
from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorParallelTest, EditorTestSuite
@pytest.mark.SUITE_periodic
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(EditorTestSuite):
    """Periodic Landscape Canvas Editor test suite; each nested class binds
    one EditorScripts module (as ``test_module``) to the Editor test
    machinery."""
    class test_LandscapeCanvas_SlotConnections_UpdateComponentReferences(EditorSharedTest):
        from .EditorScripts import SlotConnections_UpdateComponentReferences as test_module
    class test_LandscapeCanvas_GradientMixer_NodeConstruction(EditorSharedTest):
        from .EditorScripts import GradientMixer_NodeConstruction as test_module
    class test_LandscapeCanvas_AreaNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import AreaNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_AreaNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_AreaNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import AreaNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_LayerExtenderNodes_ComponentEntitySync(EditorSharedTest):
        from .EditorScripts import LayerExtenderNodes_ComponentEntitySync as test_module
    class test_LandscapeCanvas_Edit_DisabledNodeDuplication(EditorSharedTest):
        from .EditorScripts import Edit_DisabledNodeDuplication as test_module
    class test_LandscapeCanvas_Edit_UndoNodeDelete_SliceEntity(EditorSharedTest):
        from .EditorScripts import Edit_UndoNodeDelete_SliceEntity as test_module
    class test_LandscapeCanvas_NewGraph_CreatedSuccessfully(EditorSharedTest):
        from .EditorScripts import NewGraph_CreatedSuccessfully as test_module
    class test_LandscapeCanvas_Component_AddedRemoved(EditorSharedTest):
        from .EditorScripts import Component_AddedRemoved as test_module
    class test_LandscapeCanvas_GraphClosed_OnLevelChange(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnLevelChange as test_module
    @pytest.mark.xfail(reason="https://github.com/o3de/o3de/issues/2201")
    class test_LandscapeCanvas_GraphClosed_OnEntityDelete(EditorSharedTest):
        from .EditorScripts import GraphClosed_OnEntityDelete as test_module
    class test_LandscapeCanvas_GraphClosed_TabbedGraphClosesIndependently(EditorSharedTest):
        from .EditorScripts import GraphClosed_TabbedGraph as test_module
    class test_LandscapeCanvas_Slice_CreateInstantiate(EditorSingleTest):
        from .EditorScripts import Slice_CreateInstantiate as test_module
        # Custom teardown: remove the slice asset created during the test.
        def teardown(self, request, workspace, editor, editor_test_results, launcher_platform):
            file_system.delete([os.path.join(workspace.paths.engine_root(), "AutomatedTesting", "slices",
                                             "TestSlice.slice")], True, True)
    class test_LandscapeCanvas_GradientModifierNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientModifierNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientModifierNodes_EntityRemovedOnNodeDelete as test_module
    class test_LandscapeCanvas_GradientNodes_DependentComponentsAdded(EditorSharedTest):
        from .EditorScripts import GradientNodes_DependentComponentsAdded as test_module
    class test_LandscapeCanvas_GradientNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityCreatedOnNodeAdd as test_module
    class test_LandscapeCanvas_GradientNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
        from .EditorScripts import GradientNodes_EntityRemovedOnNodeDelete as test_module
    @pytest.mark.skipif("debug" == os.path.basename(internal_plugin.build_directory),
                        reason="https://github.com/o3de/o3de/issues/4872")
    class test_LandscapeCanvas_GraphUpdates_UpdateComponents(EditorSharedTest):
        from .EditorScripts import GraphUpdates_UpdateComponents as test_module
    class test_LandscapeCanvas_ComponentUpdates_UpdateGraph(EditorSharedTest):
        from .EditorScripts import ComponentUpdates_UpdateGraph as test_module
    class test_LandscapeCanvas_LayerBlender_NodeConstruction(EditorSharedTest):
        from .EditorScripts import LayerBlender_NodeConstruction as test_module
    class test_LandscapeCanvas_ShapeNodes_EntityCreatedOnNodeAdd(EditorSharedTest):
        from .EditorScripts import ShapeNodes_EntityCreatedOnNodeAdd as test_module
class test_LandscapeCanvas_ShapeNodes_EntityRemovedOnNodeDelete(EditorSharedTest):
from .EditorScripts import ShapeNodes_EntityRemovedOnNodeDelete as test_module | true | true |
1c3653f0edbfcb961f80eb43f0d07900b176a568 | 3,024 | py | Python | genomicpipeline/main.py | ManuelArcieri/GenomicPipeline | 3bf29341484e26edbebcc2c23ff42e2d35457a48 | [
"MIT"
] | null | null | null | genomicpipeline/main.py | ManuelArcieri/GenomicPipeline | 3bf29341484e26edbebcc2c23ff42e2d35457a48 | [
"MIT"
] | null | null | null | genomicpipeline/main.py | ManuelArcieri/GenomicPipeline | 3bf29341484e26edbebcc2c23ff42e2d35457a48 | [
"MIT"
] | null | null | null | import os.path
import subprocess
from pipeline import Pipeline
from sys import argv, stderr
VERSION = '0.1.12'
def _load_pipeline(path):
    """Load a pipeline definition from *path*, aborting with exit code 2 when missing.

    Centralises the "check file exists / load" boilerplate that was repeated
    in every subcommand branch of main().
    """
    if not os.path.isfile(path):
        print(f'The provided file does not exist: {path}', file = stderr)
        exit(2)
    return Pipeline.load_from_toml_file(path)


def main():
    """Dispatch the `gep` command line interface.

    Subcommands: run, step, status, `requeue failed`, upgrade; anything else
    (including no arguments or help flags) prints the usage text.
    """
    if len(argv) <= 1 or argv[1].lower() in ('help', '-h', '--help'):
        print_help()
    elif len(argv) == 3 and argv[1] == 'run':  # gep run pipeline.toml
        pipeline = _load_pipeline(argv[2])
        pipeline.run_entire_pipeline()
        pipeline.save_to_toml_file(argv[2])
        print(f'All jobs have been queued.\nType "gep status {argv[2]}" to display the current status of the jobs.')
    elif len(argv) == 3 and argv[1] == 'step':  # gep step pipeline.toml
        pipeline = _load_pipeline(argv[2])
        pipeline.run_next_step()
        pipeline.save_to_toml_file(argv[2])
        print(f'All jobs of the next step have been queued.\nType "gep status {argv[2]}" to display the current status of the jobs.')
    elif len(argv) == 3 and argv[1] == 'status':  # gep status pipeline.toml
        pipeline = _load_pipeline(argv[2])
        pipeline.update_jobs_status()
        pipeline.save_to_toml_file(argv[2])
        pipeline.print_jobs_table()
    elif len(argv) == 4 and argv[1] == 'requeue' and argv[2] == 'failed':  # gep requeue failed pipeline.toml
        pipeline = _load_pipeline(argv[3])
        pipeline.update_jobs_status()
        pipeline.requeue_failed_jobs()
        pipeline.update_jobs_status()
        pipeline.save_to_toml_file(argv[3])
    elif len(argv) == 2 and argv[1] == 'upgrade':  # gep upgrade
        # Re-runs the installer script shipped with the checkout.
        process = subprocess.Popen('bash "$HOME"/GenomicPipeline/install.sh', shell = True)
        process.wait()
    else:
        print_help()
def print_help():
    """Print the program banner, project URL and subcommand reference to stdout."""
    banner = (
        f'Genomic Pipeline {VERSION}',
        'https://github.com/ManuelArcieri/GenomicPipeline',
        '\nUsage:\n',
        '- gep run [PIPELINE FILE]',
        '  Runs all the steps of the specified pipeline (.toml) file\n',
        '- gep step [PIPELINE FILE]',
        '  Runs a single step of the specified pipeline (.toml) file\n',
        '- gep status [PIPELINE FILE]',
        '  Prints the status of all jobs of the specified pipeline (.toml) file\n',
        '- gep upgrade',
        '  Upgrades Genomic Pipeline to its latest version\n',
    )
    for text in banner:
        print(text)


if __name__ == '__main__':
    main()
| 37.8 | 137 | 0.602513 | import os.path
import subprocess
from pipeline import Pipeline
from sys import argv, stderr
VERSION = '0.1.12'
def main():
if len(argv) <= 1 or argv[1].lower() in ('help', '-h', '--help'):
print_help()
elif len(argv) == 3 and argv[1] == 'run':
if os.path.isfile(argv[2]):
pipeline = Pipeline.load_from_toml_file(argv[2])
pipeline.run_entire_pipeline()
pipeline.save_to_toml_file(argv[2])
print(f'All jobs have been queued.\nType "gep status {argv[2]}" to display the current status of the jobs.')
else:
print(f'The provided file does not exist: {argv[2]}', file = stderr)
exit(2)
elif len(argv) == 3 and argv[1] == 'step':
if os.path.isfile(argv[2]):
pipeline = Pipeline.load_from_toml_file(argv[2])
pipeline.run_next_step()
pipeline.save_to_toml_file(argv[2])
print(f'All jobs of the next step have been queued.\nType "gep status {argv[2]}" to display the current status of the jobs.')
else:
print(f'The provided file does not exist: {argv[2]}', file = stderr)
exit(2)
elif len(argv) == 3 and argv[1] == 'status':
if os.path.isfile(argv[2]):
pipeline = Pipeline.load_from_toml_file(argv[2])
pipeline.update_jobs_status()
pipeline.save_to_toml_file(argv[2])
pipeline.print_jobs_table()
else:
print(f'The provided file does not exist: {argv[2]}', file = stderr)
exit(2)
elif len(argv) == 4 and argv[1] == 'requeue' and argv[2] == 'failed':
if os.path.isfile(argv[3]):
pipeline = Pipeline.load_from_toml_file(argv[3])
pipeline.update_jobs_status()
pipeline.requeue_failed_jobs()
pipeline.update_jobs_status()
pipeline.save_to_toml_file(argv[3])
else:
print(f'The provided file does not exist: {argv[3]}', file = stderr)
exit(2)
elif len(argv) == 2 and argv[1] == 'upgrade':
cmd = 'bash "$HOME"/GenomicPipeline/install.sh'
process = subprocess.Popen(cmd, shell = True)
process.wait()
else:
print_help()
def print_help():
print(f'Genomic Pipeline {VERSION}')
print('https://github.com/ManuelArcieri/GenomicPipeline')
print('\nUsage:\n')
print('- gep run [PIPELINE FILE]')
print(' Runs all the steps of the specified pipeline (.toml) file\n')
print('- gep step [PIPELINE FILE]')
print(' Runs a single step of the specified pipeline (.toml) file\n')
print('- gep status [PIPELINE FILE]')
print(' Prints the status of all jobs of the specified pipeline (.toml) file\n')
print('- gep upgrade')
print(' Upgrades Genomic Pipeline to its latest version\n')
if __name__ == '__main__':
main()
| true | true |
1c36540741d6bd3c7d4cefad1ccd0ce5ff628364 | 3,681 | py | Python | syloga/transform/basic.py | xaedes/python-symbolic-logic-to-gate | a0dc9be9e04290008cf709fac789d224ab8c14b0 | [
"MIT"
] | null | null | null | syloga/transform/basic.py | xaedes/python-symbolic-logic-to-gate | a0dc9be9e04290008cf709fac789d224ab8c14b0 | [
"MIT"
] | null | null | null | syloga/transform/basic.py | xaedes/python-symbolic-logic-to-gate | a0dc9be9e04290008cf709fac789d224ab8c14b0 | [
"MIT"
] | null | null | null |
from syloga.ast.containers import Tuple
from syloga.ast.core import Expression
from syloga.ast.core import BooleanValue
from syloga.ast.traversal import iter_args
from syloga.utils.functional import identity
from syloga.utils.predicates import is_mappable_collection
def to_expr(argument):
    """Convert a plain Python value into its AST equivalent.

    Booleans become BooleanValue nodes, tuples become Tuple nodes, mappable
    collections are converted element-wise, and anything else is returned
    unchanged.  (The previous body assigned ``recurse = expr`` -- ``expr`` is
    undefined in this module, so every call raised NameError; the recursion
    target is this function itself.)
    """
    # Exact type checks on purpose: a subclass (e.g. an already-converted
    # AST Tuple, if it derives from tuple) must not be re-wrapped.
    if type(argument) == bool:
        return BooleanValue(argument)
    elif type(argument) == tuple:
        return Tuple(*argument)
    return map_collection_or_pass(to_expr, argument)
def map_args(func, expression):
    """Lazily apply *func* to every argument of *expression*."""
    return (func(argument) for argument in iter_args(expression))
def map_collection(func, expression):
    """Rebuild *expression*'s own container type from its func-mapped arguments."""
    mapped = map_args(func, expression)
    container = type(expression)
    return container(mapped)
def map_collection_or_pass(func, expression):
    """Map over *expression* when it is a mappable collection; otherwise pass it through."""
    if not is_mappable_collection(expression):
        return expression
    return map_collection(func, expression)
def map_expression_args(function, expression, recurse_collection=True):
    """Apply *function* to the arguments of *expression*.

    Expression nodes are rebuilt via their ``func`` with mapped args; plain
    mappable collections are mapped when *recurse_collection* is true;
    everything else is returned unchanged.  (Commented-out debug try/except
    scaffolding removed.)
    """
    if isinstance(expression, Expression):
        return expression.func(*map(function, expression.args))
    elif recurse_collection:
        return map_collection_or_pass(function, expression)
    else:
        return expression
def replace(expression, needle, replacement, pre_recurse=identity, post_recurse=identity, result=identity, *args, **kwargs):
    """Recursively replace sub-expressions matched by *needle*.

    *needle* is either an Expression class (matched with isinstance) or a
    predicate callable.  *replacement* is either an Expression class (its
    constructor is applied to the recursively processed arguments) or a
    callable taking ``(expression, recurse)``.  Lists are normalised to
    Tuple nodes; *pre_recurse*/*post_recurse*/*result* are hooks applied
    around each recursion step and on every returned value.
    """
    if isinstance(replacement, type):
        # Bind the class under a fresh name: the previous in-place rebinding
        # (``replacement = lambda ...: replacement(...)``) made the lambda
        # call itself, recursing forever whenever a class was supplied.
        replacement_class = replacement
        replacement = lambda expr, rec: replacement_class(*map(rec, expr.args))

    def recurse(expr):
        return post_recurse(
            replace(
                pre_recurse(expr),
                needle,
                *args,
                replacement=replacement,
                pre_recurse=pre_recurse,
                post_recurse=post_recurse,
                result=result,
                **kwargs
            )
        )

    if isinstance(needle, type) and isinstance(expression, needle):
        return result(replacement(expression, recurse))
    elif not isinstance(needle, type) and callable(needle) and needle(expression):
        return result(replacement(expression, recurse))
    elif isinstance(expression, list):
        # Lists are converted to the AST Tuple container.
        return result(Tuple(*map(recurse, expression)))
    else:
        return result(map_expression_args(recurse, expression, recurse_collection=True))
def replace_with_context(expression, needle, replacement, context=None, new_context=None, pre_recurse=identity, post_recurse=identity, result=identity, *args, **kwargs):
    """Context-aware variant of :func:`replace`.

    Identical matching rules, but *needle* and *replacement* additionally
    receive a *context* value that is rebuilt on every descent via
    ``new_context(parent_expression, parent_context)``.  The default
    *new_context* propagates the parent expression itself as the child
    context.
    """
    if new_context is None: new_context = lambda x, c: x
    if isinstance(replacement, type):
        # Same self-referential-lambda fix as in replace(): keep the class
        # reachable under its own name before shadowing ``replacement``.
        replacement_class = replacement
        replacement = lambda expr, rec, ctx: replacement_class(*map(rec, expr.args))

    def recurse(expr):
        return post_recurse(
            replace_with_context(
                pre_recurse(expr),
                needle,
                *args,
                context=new_context(expression, context),
                replacement=replacement,
                pre_recurse=pre_recurse,
                post_recurse=post_recurse,
                result=result,
                **kwargs
            )
        )

    if isinstance(needle, type) and isinstance(expression, needle):
        return result(replacement(expression, recurse, context))
    elif not isinstance(needle, type) and callable(needle) and needle(expression, context):
        return result(replacement(expression, recurse, context))
    elif isinstance(expression, list):
        return result(Tuple(*map(recurse, expression)))
    else:
        return result(map_expression_args(recurse, expression, recurse_collection=True))
| 33.770642 | 169 | 0.64602 |
from syloga.ast.containers import Tuple
from syloga.ast.core import Expression
from syloga.ast.core import BooleanValue
from syloga.ast.traversal import iter_args
from syloga.utils.functional import identity
from syloga.utils.predicates import is_mappable_collection
def to_expr(argument):
recurse = expr
result = lambda x:x
if type(argument) == bool:
return result(BooleanValue(argument))
elif type(argument) == tuple:
return result(Tuple(*argument))
return result(map_collection_or_pass(recurse, argument))
def map_args(func, expression):
return map(func, iter_args(expression))
def map_collection(func, expression):
return type(expression)(map_args(func, expression))
def map_collection_or_pass(func, expression):
if is_mappable_collection(expression):
return map_collection(func, expression)
else:
return expression
def map_expression_args(function, expression, recurse_collection=True):
if isinstance(expression, Expression):
return expression.func(*map(function, expression.args))
elif recurse_collection:
return map_collection_or_pass(function, expression)
else:
return expression
def replace(expression, needle, replacement, pre_recurse=identity, post_recurse=identity, result=identity, *args, **kwargs):
def recurse(expr):
return post_recurse(
replace(
pre_recurse(expr),
needle,
*args,
replacement=replacement,
pre_recurse=pre_recurse,
post_recurse=post_recurse,
result=result,
**kwargs
)
)
if type(replacement) == type:
replacement = lambda expr, recurse: replacement(*map(recurse,expr.args))
if (type(needle) == type) and isinstance(expression, needle):
return result(replacement(expression, recurse))
elif (type(needle) != type) and callable(needle) and needle(expression):
return result(replacement(expression, recurse))
elif isinstance(expression, list):
return result(Tuple(*map(recurse,expression)))
else:
return result(map_expression_args(recurse, expression, recurse_collection=True))
def replace_with_context(expression, needle, replacement, context=None, new_context=None, pre_recurse=identity, post_recurse=identity, result=identity, *args, **kwargs):
if new_context is None: new_context = lambda x,c: x
def recurse(expr):
return post_recurse(
replace_with_context(
pre_recurse(expr),
needle,
*args,
context=new_context(expression, context),
replacement=replacement,
pre_recurse=pre_recurse,
post_recurse=post_recurse,
result=result,
**kwargs
)
)
if type(replacement) == type:
replacement = lambda expr, recurse, context: replacement(*map(recurse,expr.args))
if (type(needle) == type) and isinstance(expression, needle):
return result(replacement(expression, recurse, context))
elif (type(needle) != type) and callable(needle) and needle(expression, context):
return result(replacement(expression, recurse, context))
elif isinstance(expression, list):
return result(Tuple(*map(recurse,expression)))
else:
return result(map_expression_args(recurse, expression, recurse_collection=True))
| true | true |
1c36542e427a102c4c0111ca410cff035926df71 | 162 | py | Python | plot_data.py | achilleas-k/brian-scripts | 4d2d8c9a53e7202b60c78716e8b1a9d521293c54 | [
"Apache-2.0"
] | null | null | null | plot_data.py | achilleas-k/brian-scripts | 4d2d8c9a53e7202b60c78716e8b1a9d521293c54 | [
"Apache-2.0"
] | null | null | null | plot_data.py | achilleas-k/brian-scripts | 4d2d8c9a53e7202b60c78716e8b1a9d521293c54 | [
"Apache-2.0"
] | null | null | null | from sys import argv
import pickle
import matplotlib.pyplot as plt
# Plot the (s, j, m) grid pickled by the sweep script named on the command line.
filename = argv[1]
# Pickle streams are binary data: open in "rb" (text mode breaks under
# Python 3) and let the context manager close the handle.
with open(filename, 'rb') as handle:
    (s, j, m) = pickle.load(handle)
plt.contourf(s, j, m)
plt.show()
| 14.727273 | 41 | 0.716049 | from sys import argv
import pickle
import matplotlib.pyplot as plt
filename = argv[1]
(s,j,m) = pickle.load(open(filename,'r'))
plt.contourf(s,j,m)
plt.show()
| true | true |
1c3655008cca7501ece7e023b88f9937a5b0209b | 1,546 | py | Python | xldiff/cli.py | zondo/pyxldiff | ed5f04d50255f2c98abd0e778100fe0348ba4295 | [
"MIT"
] | null | null | null | xldiff/cli.py | zondo/pyxldiff | ed5f04d50255f2c98abd0e778100fe0348ba4295 | [
"MIT"
] | null | null | null | xldiff/cli.py | zondo/pyxldiff | ed5f04d50255f2c98abd0e778100fe0348ba4295 | [
"MIT"
] | null | null | null | """Command line interface.
"""
import sys
import docopt
from . import __progname__, __version__
from .source import text_source
from .diff import diff_lines
USAGE = """
Usage: {prog} [options] FILE [FILE]
Description:
Excel spreadsheet diff program. With one arg, just shows a text
representation of the spreadsheet. With two args, shows a diff
between the text representations of both of them.
Output options:
-o FILE Write to file instead of stdout
Other options:
-t, --trace Print traceback on error
-h, --help This help message
"""
def main(args=None):
    """CLI entry point: parse arguments with docopt and dispatch to run().

    Errors abort with a one-line message unless --trace is set, in which
    case the traceback propagates.
    """
    if args is None:
        # Resolve at call time: a def-time default of sys.argv[1:] would
        # freeze the argument vector at import.
        args = sys.argv[1:]
    usage = USAGE.format(prog=__progname__)
    version = f"{__progname__} {__version__}"
    opts = docopt.docopt(usage, argv=args, version=version)

    try:
        run(opts)
    except Exception as exc:
        if opts["--trace"]:
            raise
        sys.exit("%s: error: %s" % (__progname__, str(exc)))
def run(opts):
"Run the program."
files = opts["FILE"]
outfile = opts["-o"]
if len(files) == 1:
# One arg -- show text contents.
path = files[0]
output = text_source(path).lines()
elif len(files) == 2:
# Two args -- show diff.
file1, file2 = files
src1 = text_source(file1)
src2 = text_source(file2)
output = diff_lines(src1, src2)
if outfile:
f = open(outfile, "w")
else:
f = sys.stdout
for line in output:
f.write(line + "\n")
if __name__ == "__main__":
main()
| 22.085714 | 68 | 0.598965 |
import sys
import docopt
from . import __progname__, __version__
from .source import text_source
from .diff import diff_lines
USAGE = """
Usage: {prog} [options] FILE [FILE]
Description:
Excel spreadsheet diff program. With one arg, just shows a text
representation of the spreadsheet. With two args, shows a diff
between the text representations of both of them.
Output options:
-o FILE Write to file instead of stdout
Other options:
-t, --trace Print traceback on error
-h, --help This help message
"""
def main(args=sys.argv[1:]):
usage = USAGE.format(prog=__progname__)
version = f"{__progname__} {__version__}"
opts = docopt.docopt(usage, argv=args, version=version)
try:
run(opts)
except Exception as exc:
if opts["--trace"]:
raise
else:
sys.exit("%s: error: %s" % (__progname__, str(exc)))
def run(opts):
files = opts["FILE"]
outfile = opts["-o"]
if len(files) == 1:
path = files[0]
output = text_source(path).lines()
elif len(files) == 2:
file1, file2 = files
src1 = text_source(file1)
src2 = text_source(file2)
output = diff_lines(src1, src2)
if outfile:
f = open(outfile, "w")
else:
f = sys.stdout
for line in output:
f.write(line + "\n")
if __name__ == "__main__":
main()
| true | true |
1c36559d4793679bfd1640b6b5b9c3a0c38be73a | 534 | py | Python | extension/backbones/simple_backbone.py | CFM-MSG/SDN | f309602dc2bb73117355003f3744f8e5450dbccc | [
"MIT"
] | null | null | null | extension/backbones/simple_backbone.py | CFM-MSG/SDN | f309602dc2bb73117355003f3744f8e5450dbccc | [
"MIT"
] | null | null | null | extension/backbones/simple_backbone.py | CFM-MSG/SDN | f309602dc2bb73117355003f3744f8e5450dbccc | [
"MIT"
] | null | null | null | import torch
class simple_backbone(torch.nn.Module):
    """Minimal convolutional feature extractor.

    Two 3x3 same-padding conv+ReLU stages (1 -> 64 -> 128 channels)
    followed by 2x2 max pooling, so a (N, 1, H, W) batch maps to
    (N, 128, H/2, W/2).
    """

    def __init__(self) -> None:
        super().__init__()
        stages = [
            torch.nn.Conv2d(1, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 128, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2, 2),
        ]
        # Same Sequential container (hence identical parameter names
        # conv.0 / conv.2) as the original inline construction.
        self.conv = torch.nn.Sequential(*stages)

    def forward(self, images):
        """Apply the stack to *images* and return the pooled feature map."""
        return self.conv(images)
| 38.142857 | 75 | 0.43633 | import torch
class simple_backbone(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Sequential(torch.nn.Conv2d(1, 64, 3, 1, 1),
torch.nn.ReLU(),
torch.nn.Conv2d(64, 128, 3, 1, 1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2))
def forward(self, images):
output = self.conv(images)
return output
| true | true |
1c3655ffb6eccc5b12bc1dbcde5f0c55de50bfd8 | 3,796 | py | Python | sumochip/webapp.py | laurivosandi/sumochip | b1dad5c8d066a52fc2f0d2cfcd81f84c6826534a | [
"MIT"
] | 19 | 2016-06-12T13:29:42.000Z | 2017-05-19T16:47:16.000Z | sumochip/webapp.py | eik-robo/sumochip | b1dad5c8d066a52fc2f0d2cfcd81f84c6826534a | [
"MIT"
] | 20 | 2016-06-12T12:40:49.000Z | 2016-12-27T17:34:55.000Z | sumochip/webapp.py | laurivosandi/sumochip | b1dad5c8d066a52fc2f0d2cfcd81f84c6826534a | [
"MIT"
] | 15 | 2016-05-19T08:07:52.000Z | 2021-07-14T06:33:18.000Z | from __future__ import print_function
from flask import Flask, render_template
from sumorobot import Sumorobot, SensorThread, lock
from flask_sockets import Sockets
from threading import Thread
from time import sleep
import imp
import json
import os
codeTemplate = """
from threading import Thread
from time import sleep
class AutonomousThread(Thread):
def __init__(self, sumorobot):
Thread.__init__(self)
self.sumorobot = sumorobot
def run(self):
self.running = True
print("Starting AutonomousThread")
while self.running:
self.step()
sleep(0.01)
print("AutonomousThread was stopped")
self.sumorobot.stop()
def step(self):
sumorobot = self.sumorobot
isEnemy = sumorobot.isEnemy
isLine = sumorobot.isLine
"""
# Shared robot handle and user-code state, mutated by the websocket handler.
sumorobot = Sumorobot()
codeThread = None    # running AutonomousThread instance, if any
codeText = ""        # raw user code as last saved or loaded
codeBytecode = None  # compiled wrapped user code, ready for exec()
app = Flask(__name__)
try:
    # Derive a stable per-device secret from the machine id when available.
    with open("/etc/machine-id", "r") as fh:
        app.config['SECRET_KEY'] = fh.read()
except OSError:
    # Narrowed from a bare except (which also swallowed KeyboardInterrupt):
    # only a missing/unreadable machine-id file triggers the fallback key.
    app.config['SECRET_KEY'] = 'secret!'
@app.route('/')
def index():
    # Serve the single-page controller UI from templates/index.html.
    print("HTTP request")
    return render_template('index.html')
@sockets.route('/')
def command(ws):
    """Websocket command loop for the robot controller.

    Commands '0'..'4' drive the motors, 'sensors' attaches a sensor stream,
    'getSavedCode'/'executeCode'/'stopCode' manage user code, and any other
    payload is treated as user code to save and compile.
    """
    global codeThread
    global codeText
    global codeBytecode
    while not ws.closed:
        command = ws.receive()
        if command:
            print('Command: ' + command)
            if command == '0':
                print("Stop")
                sumorobot.stop()
            elif command == '1':
                print("Forward")
                sumorobot.forward()
            elif command == '2':
                print("Back")
                sumorobot.back()
            elif command == '3':
                print("Right")
                sumorobot.right()
            elif command == '4':
                print("Left")
                sumorobot.left()
            elif command == 'sensors':
                print("keegi kysib sensoreid")
                # NOTE(review): the SensorThread handle is never stored or
                # joined -- presumably it starts itself; confirm in sumorobot.
                sensors = SensorThread(ws, sumorobot)
            elif command == 'getSavedCode':
                with open("code.txt", "r") as fh:
                    code = fh.read()
                print(code)
                ws.send(json.dumps({'savedCode':code}))
                codeText = code
                # Wrap the user code inside the AutonomousThread template,
                # indented to sit inside its step() method.
                fullCodeText = codeTemplate + "".join((" "*8 + line + "\n" for line in codeText.split("\n")))
                print(fullCodeText)
                # Bug fix: compile the wrapped template (as the save branch
                # below does), not the bare user snippet.
                codeBytecode = compile(fullCodeText, "<SumorobotCode>", "exec")
            elif command == 'executeCode':
                if codeBytecode is None:
                    # Nothing has been saved or loaded yet; exec(None)
                    # would raise TypeError.
                    continue
                if codeThread:
                    codeThread.running = False
                slave = {}
                exec(codeBytecode, slave)
                codeThread = slave["AutonomousThread"](sumorobot)
                codeThread.daemon = True
                codeThread.start()
                sumorobot.sensor_power = True
            elif command == 'stopCode':
                if codeThread:
                    codeThread.running = False
                print("code execution stopped")
                sumorobot.sensor_power = False
            else:
                print("Code to be saved:")
                print(command)
                with open("code.txt", "w") as fh:
                    fh.write(str(command))
                codeText = str(command)
                fullCodeText = codeTemplate + "".join((" "*8 + line + "\n" for line in codeText.split("\n")))
                print(fullCodeText)
                codeBytecode = compile(fullCodeText, "<SumorobotCode>", "exec")
                print('Saved')
def main():
    # lock() comes from the sumorobot module -- presumably a single-instance
    # guard acquired before binding the server; confirm its semantics there.
    lock()
    from gevent import pywsgi
    from geventwebsocket.handler import WebSocketHandler
    ip, port = ('0.0.0.0', 5001)
    if os.getuid() == 0:
        # Running as root: bind the privileged standard HTTP port instead.
        port = 80
    # gevent WSGI server with websocket upgrade support for @sockets routes.
    server = pywsgi.WSGIServer((ip, port), app, handler_class=WebSocketHandler)
    print("Starting server at http://{}:{}".format(ip, port))
    server.serve_forever()
if __name__ == '__main__':
    main()
| 29.889764 | 109 | 0.570074 | from __future__ import print_function
from flask import Flask, render_template
from sumorobot import Sumorobot, SensorThread, lock
from flask_sockets import Sockets
from threading import Thread
from time import sleep
import imp
import json
import os
codeTemplate = """
from threading import Thread
from time import sleep
class AutonomousThread(Thread):
def __init__(self, sumorobot):
Thread.__init__(self)
self.sumorobot = sumorobot
def run(self):
self.running = True
print("Starting AutonomousThread")
while self.running:
self.step()
sleep(0.01)
print("AutonomousThread was stopped")
self.sumorobot.stop()
def step(self):
sumorobot = self.sumorobot
isEnemy = sumorobot.isEnemy
isLine = sumorobot.isLine
"""
sumorobot = Sumorobot()
codeThread = None
codeText = ""
codeBytecode = None
app = Flask(__name__)
try:
with open("/etc/machine-id", "r") as fh:
app.config['SECRET_KEY'] = fh.read()
except:
app.config['SECRET_KEY'] = 'secret!'
sockets = Sockets(app)
@app.route('/')
def index():
print("HTTP request")
return render_template('index.html')
@sockets.route('/')
def command(ws):
global codeThread
global codeText
global codeBytecode
while not ws.closed:
command = ws.receive()
if command:
print('Command: ' + command)
if command == '0':
print("Stop")
sumorobot.stop()
elif command == '1':
print("Forward")
sumorobot.forward()
elif command == '2':
print("Back")
sumorobot.back()
elif command == '3':
print("Right")
sumorobot.right()
elif command == '4':
print("Left")
sumorobot.left()
elif command == 'sensors':
print("keegi kysib sensoreid")
sensors = SensorThread(ws, sumorobot)
elif command == 'getSavedCode':
with open("code.txt", "r") as fh:
code = fh.read()
print(code)
ws.send(json.dumps({'savedCode':code}))
codeText = code
fullCodeText = codeTemplate + "".join((" "*8 + line + "\n" for line in codeText.split("\n")))
print(fullCodeText)
codeBytecode = compile(codeText, "<SumorobotCode>", "exec")
elif command == 'executeCode':
if codeThread:
codeThread.running = False
slave = {}
exec(codeBytecode, slave)
codeThread = slave["AutonomousThread"](sumorobot)
codeThread.daemon = True
codeThread.start()
sumorobot.sensor_power = True
elif command == 'stopCode':
if codeThread:
codeThread.running = False
print("code execution stopped")
sumorobot.sensor_power = False
else:
print("Code to be saved:")
print(command)
with open("code.txt", "w") as fh:
fh.write(str(command))
codeText = str(command)
fullCodeText = codeTemplate + "".join((" "*8 + line + "\n" for line in codeText.split("\n")))
print(fullCodeText)
codeBytecode = compile(fullCodeText, "<SumorobotCode>", "exec")
print('Saved')
def main():
lock()
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
ip, port = ('0.0.0.0', 5001)
if os.getuid() == 0:
port = 80
server = pywsgi.WSGIServer((ip, port), app, handler_class=WebSocketHandler)
print("Starting server at http://{}:{}".format(ip, port))
server.serve_forever()
if __name__ == '__main__':
main()
| true | true |
1c36561dd62255f753f2832ed72553d544dcd801 | 1,262 | py | Python | web_scr_13_task.py | Vishalm20/IMDB-Scraper | 5755ff4898a213126450e9a4ec1d3e153d0a5af5 | [
"Apache-2.0"
] | 1 | 2021-05-25T20:00:30.000Z | 2021-05-25T20:00:30.000Z | web_scr_13_task.py | vishalmajumdar95/IMDB-Scraper | 5755ff4898a213126450e9a4ec1d3e153d0a5af5 | [
"Apache-2.0"
] | null | null | null | web_scr_13_task.py | vishalmajumdar95/IMDB-Scraper | 5755ff4898a213126450e9a4ec1d3e153d0a5af5 | [
"Apache-2.0"
] | null | null | null | import json
from pprint import pprint
# Load the previously scraped movie names and cast lists at import time.
# Context managers replace the old open()/close() pairs (the cast file was
# never closed at all).
with open('imdb_list_5.json', 'r') as movie_file:
    moviedettail_file = json.load(movie_file)
main_data_list = []
with open('imdb_list_12.json', 'r') as cast_file:
    movie_cast = json.load(cast_file)
def get_movie_list_details():
    """Pair each movie name with its cast list and dump the result to imdb_list_13.json."""
    for name, cast in zip(moviedettail_file, movie_cast):
        # Build a fresh dict per movie instead of mutating and copying a
        # single shared one.
        main_data_list.append({'movie Name': name, 'cast': cast})
    with open('imdb_list_13.json', 'w') as out_file:
        json.dump(main_data_list, out_file, indent=4)
get_movie_list_details()
# import os,json,time,random
# from pprint import pprint
# from web_scr_1_task import *
# from web_scr_12_task import *
# def cach():
# movies=scrape_top_list()
# all_data=[]
# for i in movies:
# url=i["movie_links"][-10:-1]
# with open(url+".json","r") as file:
# data=json.load(file)
# j_url=url+".json"
# if not(os.path.exists(j_url)):
# print ("hi")
# url2="https://www.imdb.com/title/"+url
# data["cast"]=scrape_movie_cast(url2)
# with open(j_url,"w+") as file:
# d_data=json.dumps(data)
# file.write(d_data)
# all_data.append(data)
# return all_data
# pprint(cach()) | 29.348837 | 52 | 0.616482 | import json
from pprint import pprint
file=open('imdb_list_5.json','r')
moviedettail_file=json.load(file)
file.close()
main_data_list=[]
dic={}
moviecast_file=open('imdb_list_12.json','r')
movie_cast=json.load(moviecast_file)
def get_movie_list_details():
for i,j in zip(moviedettail_file,movie_cast):
dic['movie Name']=i
dic['cast']=j
main_data_list.append(dic.copy())
f=open('imdb_list_13.json','w')
json.dump(main_data_list,f,indent=4)
f.close()
get_movie_list_details()
| true | true |
1c3656c9c2faa6b8f28d336ad6dbc3d834307ef8 | 7,851 | py | Python | tests/units/transformers/test_relevant_feature_augmenter.py | pfornia/tsfresh | 9550f84b8a920cfe53d9b6ca47eedeca619725cf | [
"MIT"
] | 1 | 2021-03-16T15:08:04.000Z | 2021-03-16T15:08:04.000Z | tests/units/transformers/test_relevant_feature_augmenter.py | pfornia/tsfresh | 9550f84b8a920cfe53d9b6ca47eedeca619725cf | [
"MIT"
] | null | null | null | tests/units/transformers/test_relevant_feature_augmenter.py | pfornia/tsfresh | 9550f84b8a920cfe53d9b6ca47eedeca619725cf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from tests.fixtures import DataTestCase
import mock
from tsfresh.feature_extraction import MinimalFCParameters
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
from tests.fixtures import warning_free
class RelevantFeatureAugmenterTestCase(DataTestCase):
    """Unit tests for tsfresh's RelevantFeatureAugmenter sklearn transformer."""
    def setUp(self):
        # Shared fixture: a small multi-kind time-series frame plus a
        # minimal feature-calculator spec for kinds "a" and "b".
        self.test_df = self.create_test_data_sample()
        fc_parameters = {"length": None}
        self.kind_to_fc_parameters = {"a": fc_parameters.copy(),
                                      "b": fc_parameters.copy()}
    def test_not_fitted(self):
        # transform() before fit() must fail loudly.
        augmenter = RelevantFeatureAugmenter()
        X = pd.DataFrame()
        self.assertRaises(RuntimeError, augmenter.transform, X)
    def test_no_timeseries(self):
        # fit()/fit_transform() without a timeseries container must fail.
        augmenter = RelevantFeatureAugmenter()
        X = pd.DataFrame()
        y = pd.Series(dtype="float64")
        self.assertRaises(RuntimeError, augmenter.fit, X, y)
        self.assertRaises(RuntimeError, augmenter.fit_transform, X, y)
    def test_nothing_relevant(self):
        # With only two samples no feature can be relevant: the augmenter
        # must return an empty-column frame with the original index.
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             column_value="val", column_id="id", column_sort="sort",
                                             column_kind="kind")
        y = pd.Series({10: 1, 500: 0})
        X = pd.DataFrame(index=[10, 500])
        augmenter.set_timeseries_container(self.test_df)
        augmenter.fit(X, y)
        transformed_X = augmenter.transform(X.copy())
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(list(transformed_X.columns), [])
        self.assertEqual(list(transformed_X.index), list(X.index))
        self.assertEqual(list(fit_transformed_X.columns), [])
        self.assertEqual(list(fit_transformed_X.index), list(X.index))
    def test_filter_only_tsfresh_features_true(self):
        """
        The boolean flag `filter_only_tsfresh_features` makes sure that only the time series based features are
        filtered. This unit tests checks that
        """
        # Pre-existing (non-tsfresh) columns must survive untouched.
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             filter_only_tsfresh_features=True,
                                             column_value="val", column_id="id", column_sort="sort", column_kind="kind")
        y = pd.Series({10: 1, 500: 0})
        X = pd.DataFrame(index=[10, 500])
        X["pre_feature"] = 0
        augmenter.set_timeseries_container(self.test_df)
        augmenter.fit(X, y)
        transformed_X = augmenter.transform(X.copy())
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(sum(["pre_feature" == column for column in transformed_X.columns]), 1)
        self.assertEqual(sum(["pre_feature" == column for column in fit_transformed_X.columns]), 1)
    def test_filter_only_tsfresh_features_false(self):
        """
        The boolean flag `filter_only_tsfresh_features` makes sure that only the time series based features are
        filtered. This unit tests checks that
        """
        # With the flag off, an irrelevant pre-existing column ("pre_drop")
        # is filtered away while a target-correlated one ("pre_keep") stays.
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             filter_only_tsfresh_features=False,
                                             column_value="val", column_id="id", column_sort="sort", column_kind="kind")
        df, y = self.create_test_data_sample_with_target()
        X = pd.DataFrame(index=np.unique(df.id))
        X["pre_drop"] = 0
        X["pre_keep"] = y
        augmenter.set_timeseries_container(df)
        augmenter.fit(X, y)
        transformed_X = augmenter.transform(X.copy())
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(sum(["pre_keep" == column for column in transformed_X.columns]), 1)
        self.assertEqual(sum(["pre_drop" == column for column in transformed_X.columns]), 0)
        self.assertEqual(sum(["pre_keep" == column for column in fit_transformed_X.columns]), 1)
        self.assertEqual(sum(["pre_drop" == column for column in fit_transformed_X.columns]), 0)
    @mock.patch('tsfresh.transformers.feature_selector.calculate_relevance_table')
    def test_does_impute(self, calculate_relevance_table_mock):
        # The relevance table must be fed imputed (NaN-free) features:
        # patch the selector and inspect the frame it receives.
        df = pd.DataFrame([[1, 1, 1], [2, 1, 1]], columns=['id', 'time', 'value'])
        X = pd.DataFrame(index=[1])
        y = pd.Series([0, 1])
        fc_parameters = {"autocorrelation": [{'lag': 2}]}
        calculate_relevance_table_mock.return_value = pd.DataFrame(columns=['feature', 'p_value', 'relevant'])
        augmenter = RelevantFeatureAugmenter(column_id='id', column_sort='time', default_fc_parameters=fc_parameters)
        augmenter.set_timeseries_container(df)
        with warning_free():
            augmenter.fit(X, y)
        assert calculate_relevance_table_mock.call_count == 1
        assert not calculate_relevance_table_mock.call_args[0][0].isnull().any().any()
    def test_no_ids_present(self):
        # An X whose index has no overlap with the timeseries ids must raise
        # an explanatory AttributeError on both fit and fit_transform.
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             filter_only_tsfresh_features=False,
                                             column_value="val", column_id="id", column_sort="sort", column_kind="kind")
        df, y = self.create_test_data_sample_with_target()
        X_with_wrong_ids = pd.DataFrame(index=[-999])
        augmenter.set_timeseries_container(df)
        self.assertRaisesRegex(AttributeError, r"The ids of the time series container",
                               augmenter.fit, X_with_wrong_ids, y)
        self.assertRaisesRegex(AttributeError, r"The ids of the time series container",
                               augmenter.fit_transform, X_with_wrong_ids, y)
    def test_multiclass_selection(self):
        # Multiclass mode with n_significant=3 over the multiclass fixture
        # is expected to keep exactly 4 features.
        augmenter = RelevantFeatureAugmenter(
            column_value="val",
            column_id="id",
            column_sort="sort",
            column_kind="kind",
            multiclass=True,
            n_significant=3,
        )
        df, y = self.create_test_data_sample_with_multiclass_target()
        X = pd.DataFrame(index=np.unique(df.id))
        augmenter.set_timeseries_container(df)
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(len(fit_transformed_X.columns), 4)
def test_relevant_augmentor_cross_validated():
    """
    Regression test for issue 537: a RelevantFeatureAugmenter inside a
    scikit-learn Pipeline must survive cloning during cross-validation.
    """
    n_samples = 16  # must be divisible by 4 for the value construction below
    sample_ids = range(n_samples)
    quarter = n_samples // 4
    values = ([0, 1] * quarter + [1, 2] * quarter          # class 0
              + [10, 11] * quarter + [12, 14] * quarter)   # class 1
    df_ts = pd.DataFrame({
        "time": [10, 11] * n_samples,
        "id": np.repeat(sample_ids, 2),
        "value": values,
    })
    y = pd.Series(data=[0] * (n_samples // 2) + [1] * (n_samples // 2),
                  index=sample_ids)
    X = pd.DataFrame(index=sample_ids)
    augmenter = RelevantFeatureAugmenter(
        column_id="id", column_sort="time", timeseries_container=df_ts,
        default_fc_parameters=MinimalFCParameters(),
        disable_progressbar=True, show_warnings=False, fdr_level=0.90,
    )
    pipeline = Pipeline([
        ("augmenter", augmenter),
        ("classifier", RandomForestClassifier(random_state=1)),
    ])
    # the two classes are trivially separable, so both folds must score 1.0
    scores = model_selection.cross_val_score(pipeline, X, y, cv=2)
    assert (scores == np.array([1, 1])).all()
| 43.860335 | 120 | 0.638263 |
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from tests.fixtures import DataTestCase
import mock
from tsfresh.feature_extraction import MinimalFCParameters
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
from tests.fixtures import warning_free
class RelevantFeatureAugmenterTestCase(DataTestCase):
    """Unit tests for ``RelevantFeatureAugmenter`` covering fitting,
    transforming, feature filtering and error handling."""
    def setUp(self):
        """Create a shared test sample and a per-kind feature-calculator map."""
        self.test_df = self.create_test_data_sample()
        fc_parameters = {"length": None}
        # extract only the "length" feature for both time series kinds
        self.kind_to_fc_parameters = {"a": fc_parameters.copy(),
                                      "b": fc_parameters.copy()}
    def test_not_fitted(self):
        """Transforming before fitting must raise a RuntimeError."""
        augmenter = RelevantFeatureAugmenter()
        X = pd.DataFrame()
        self.assertRaises(RuntimeError, augmenter.transform, X)
    def test_no_timeseries(self):
        """Fitting without a time series container must raise a RuntimeError."""
        augmenter = RelevantFeatureAugmenter()
        X = pd.DataFrame()
        y = pd.Series(dtype="float64")
        self.assertRaises(RuntimeError, augmenter.fit, X, y)
        self.assertRaises(RuntimeError, augmenter.fit_transform, X, y)
    def test_nothing_relevant(self):
        """If no extracted feature is relevant, the output keeps the input
        index but contains no columns."""
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             column_value="val", column_id="id", column_sort="sort",
                                             column_kind="kind")
        y = pd.Series({10: 1, 500: 0})
        X = pd.DataFrame(index=[10, 500])
        augmenter.set_timeseries_container(self.test_df)
        augmenter.fit(X, y)
        transformed_X = augmenter.transform(X.copy())
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(list(transformed_X.columns), [])
        self.assertEqual(list(transformed_X.index), list(X.index))
        self.assertEqual(list(fit_transformed_X.columns), [])
        self.assertEqual(list(fit_transformed_X.index), list(X.index))
    def test_filter_only_tsfresh_features_true(self):
        """With ``filter_only_tsfresh_features=True`` only extracted features
        are filtered; the pre-existing column must survive unchanged."""
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             filter_only_tsfresh_features=True,
                                             column_value="val", column_id="id", column_sort="sort", column_kind="kind")
        y = pd.Series({10: 1, 500: 0})
        X = pd.DataFrame(index=[10, 500])
        X["pre_feature"] = 0
        augmenter.set_timeseries_container(self.test_df)
        augmenter.fit(X, y)
        transformed_X = augmenter.transform(X.copy())
        fit_transformed_X = augmenter.fit_transform(X, y)
        # the column must appear exactly once in both outputs
        self.assertEqual(sum(["pre_feature" == column for column in transformed_X.columns]), 1)
        self.assertEqual(sum(["pre_feature" == column for column in fit_transformed_X.columns]), 1)
    def test_filter_only_tsfresh_features_false(self):
        """With ``filter_only_tsfresh_features=False`` pre-existing columns
        are filtered too: the irrelevant one is dropped, the relevant kept."""
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             filter_only_tsfresh_features=False,
                                             column_value="val", column_id="id", column_sort="sort", column_kind="kind")
        df, y = self.create_test_data_sample_with_target()
        X = pd.DataFrame(index=np.unique(df.id))
        X["pre_drop"] = 0
        X["pre_keep"] = y
        augmenter.set_timeseries_container(df)
        augmenter.fit(X, y)
        transformed_X = augmenter.transform(X.copy())
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(sum(["pre_keep" == column for column in transformed_X.columns]), 1)
        self.assertEqual(sum(["pre_drop" == column for column in transformed_X.columns]), 0)
        self.assertEqual(sum(["pre_keep" == column for column in fit_transformed_X.columns]), 1)
        self.assertEqual(sum(["pre_drop" == column for column in fit_transformed_X.columns]), 0)
    @mock.patch('tsfresh.transformers.feature_selector.calculate_relevance_table')
    def test_does_impute(self, calculate_relevance_table_mock):
        """The mocked relevance-table computation must be called exactly once
        and must never receive NaN values (i.e. imputation ran first)."""
        df = pd.DataFrame([[1, 1, 1], [2, 1, 1]], columns=['id', 'time', 'value'])
        X = pd.DataFrame(index=[1])
        y = pd.Series([0, 1])
        # assumes a lag-2 autocorrelation on this very short series yields
        # NaN before imputation -- TODO confirm against the extractor
        fc_parameters = {"autocorrelation": [{'lag': 2}]}
        calculate_relevance_table_mock.return_value = pd.DataFrame(columns=['feature', 'p_value', 'relevant'])
        augmenter = RelevantFeatureAugmenter(column_id='id', column_sort='time', default_fc_parameters=fc_parameters)
        augmenter.set_timeseries_container(df)
        with warning_free():
            augmenter.fit(X, y)
        assert calculate_relevance_table_mock.call_count == 1
        # first positional argument is the (already imputed) feature matrix
        assert not calculate_relevance_table_mock.call_args[0][0].isnull().any().any()
    def test_no_ids_present(self):
        """Ids absent from the time series container raise AttributeError."""
        augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
                                             filter_only_tsfresh_features=False,
                                             column_value="val", column_id="id", column_sort="sort", column_kind="kind")
        df, y = self.create_test_data_sample_with_target()
        X_with_wrong_ids = pd.DataFrame(index=[-999])
        augmenter.set_timeseries_container(df)
        self.assertRaisesRegex(AttributeError, r"The ids of the time series container",
                               augmenter.fit, X_with_wrong_ids, y)
        self.assertRaisesRegex(AttributeError, r"The ids of the time series container",
                               augmenter.fit_transform, X_with_wrong_ids, y)
    def test_multiclass_selection(self):
        """Multiclass selection with ``n_significant=3`` keeps exactly four
        features on the multiclass fixture."""
        augmenter = RelevantFeatureAugmenter(
            column_value="val",
            column_id="id",
            column_sort="sort",
            column_kind="kind",
            multiclass=True,
            n_significant=3,
        )
        df, y = self.create_test_data_sample_with_multiclass_target()
        X = pd.DataFrame(index=np.unique(df.id))
        augmenter.set_timeseries_container(df)
        fit_transformed_X = augmenter.fit_transform(X, y)
        self.assertEqual(len(fit_transformed_X.columns), 4)
def test_relevant_augmentor_cross_validated():
    """Regression test for issue 537: a RelevantFeatureAugmenter inside a
    Pipeline must survive cloning during cross-validation."""
    n = 16  # number of samples, must be divisible by 4 for the values below
    index = range(n)
    # two time stamps per id; the first half of the values (class 0) is an
    # order of magnitude smaller than the second half (class 1)
    df_ts = pd.DataFrame({"time": [10, 11] * n, "id": np.repeat(index, 2),
                          "value": [0, 1] * (n // 4) + [1, 2] * (n // 4) +
                          [10, 11] * (n // 4) + [12, 14] * (n // 4)})
    y = pd.Series(data=[0] * (n // 2) + [1] * (n // 2), index=index)
    X = pd.DataFrame(index=index)
    augmenter = RelevantFeatureAugmenter(column_id='id', column_sort='time', timeseries_container=df_ts,
                                         default_fc_parameters=MinimalFCParameters(),
                                         disable_progressbar=True, show_warnings=False, fdr_level=0.90)
    pipeline = Pipeline([('augmenter', augmenter),
                         ('classifier', RandomForestClassifier(random_state=1))])
    # the classes are trivially separable, so both CV folds must score 1.0
    scores = model_selection.cross_val_score(pipeline, X, y, cv=2)
    assert (scores == np.array([1, 1])).all()
| true | true |
1c365743040750360d8b79dfa0adc356f0359a7c | 1,317 | py | Python | proteus/tests/cylinder2D/sbm_method/pressureInitial_n.py | acatwithacomputer/proteus | 80dfad95da6ab4d18a88a035f55c26b03540a864 | [
"MIT"
] | null | null | null | proteus/tests/cylinder2D/sbm_method/pressureInitial_n.py | acatwithacomputer/proteus | 80dfad95da6ab4d18a88a035f55c26b03540a864 | [
"MIT"
] | 13 | 2018-02-08T23:22:59.000Z | 2020-12-06T19:40:32.000Z | proteus/tests/cylinder2D/sbm_method/pressureInitial_n.py | acatwithacomputer/proteus | 80dfad95da6ab4d18a88a035f55c26b03540a864 | [
"MIT"
] | 1 | 2020-02-17T03:25:34.000Z | 2020-02-17T03:25:34.000Z | from __future__ import absolute_import
from proteus import *
from proteus.default_n import *
try:
from .pressureInitial_p import *
except:
from pressureInitial_p import *
triangleOptions = triangleOptions
femSpaces = {0:pbasis}
stepController=FixedStep
#numericalFluxType = NumericalFlux.ConstantAdvection_Diffusion_SIPG_exterior #weak boundary conditions (upwind ?)
matrix = LinearAlgebraTools.SparseMatrix
if useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
else:
multilevelLinearSolver = KSP_petsc4py
levelLinearSolver = KSP_petsc4py
parallelPartitioningType = parallelPartitioningType
nLayersOfOverlapForParallel = nLayersOfOverlapForParallel
nonlinearSmoother = None
linearSmoother = None
numericalFluxType = NumericalFlux.ConstantAdvection_exterior
linear_solver_options_prefix = 'pinit_'
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
#linear solve rtolerance
linTolFac = 0.0
l_atol_res = 0.01*phi_nl_atol_res
tolFac = 0.0
nl_atol_res = phi_nl_atol_res
nonlinearSolverConvergenceTest = 'r'
levelNonlinearSolverConvergenceTest = 'r'
linearSolverConvergenceTest = 'r-true'
maxLineSearches=0
periodicDirichletConditions=None
conservativeFlux=None
"""Numerics options for the initial pressure solve (pressureInitial model)."""
from __future__ import absolute_import
from proteus import *
from proteus.default_n import *
try:
    from .pressureInitial_p import *
except:
    from pressureInitial_p import *
triangleOptions = triangleOptions
femSpaces = {0:pbasis}
stepController=FixedStep
# FIX: this line was truncated to the bare token "atrix", which would raise a
# NameError at import time; restore the sparse-matrix selection.
matrix = LinearAlgebraTools.SparseMatrix
if useSuperlu:
    multilevelLinearSolver = LinearSolvers.LU
    levelLinearSolver = LinearSolvers.LU
else:
    multilevelLinearSolver = KSP_petsc4py
    levelLinearSolver = KSP_petsc4py
parallelPartitioningType = parallelPartitioningType
nLayersOfOverlapForParallel = nLayersOfOverlapForParallel
nonlinearSmoother = None
linearSmoother = None
numericalFluxType = NumericalFlux.ConstantAdvection_exterior
linear_solver_options_prefix = 'pinit_'
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
# linear/nonlinear tolerances tied to the physics-level phi_nl_atol_res
linTolFac = 0.0
l_atol_res = 0.01*phi_nl_atol_res
tolFac = 0.0
nl_atol_res = phi_nl_atol_res
nonlinearSolverConvergenceTest = 'r'
levelNonlinearSolverConvergenceTest = 'r'
linearSolverConvergenceTest = 'r-true'
maxLineSearches=0
periodicDirichletConditions=None
conservativeFlux=None
| true | true |
1c3657b1d9b17cec3143d974c079361403a78ce5 | 20,204 | py | Python | maint_tools/test_docstrings.py | xieliaing/scikit-learn | 9b210ae8ffdc40e210f30f24656779ac690b899a | [
"BSD-3-Clause"
] | null | null | null | maint_tools/test_docstrings.py | xieliaing/scikit-learn | 9b210ae8ffdc40e210f30f24656779ac690b899a | [
"BSD-3-Clause"
] | null | null | null | maint_tools/test_docstrings.py | xieliaing/scikit-learn | 9b210ae8ffdc40e210f30f24656779ac690b899a | [
"BSD-3-Clause"
] | null | null | null | import re
from inspect import signature
import pkgutil
import inspect
import importlib
from typing import Optional
import pytest
from sklearn.utils import all_estimators
import sklearn
numpydoc_validation = pytest.importorskip("numpydoc.validate")
# Estimator classes whose docstrings do not yet pass numpydoc validation.
# Their tests are marked xfail; remove an entry once its docstring is fixed
# so the check starts enforcing it.
DOCSTRING_IGNORE_LIST = [
    "KNNImputer",
    "LabelPropagation",
    "LabelSpreading",
    "LocallyLinearEmbedding",
    "MultiTaskElasticNet",
    "MultiTaskElasticNetCV",
    "MultiTaskLassoCV",
    "OrthogonalMatchingPursuitCV",
    "PassiveAggressiveRegressor",
    "SpectralBiclustering",
    "SpectralCoclustering",
    "SpectralEmbedding",
    "StackingRegressor",
]
FUNCTION_DOCSTRING_IGNORE_LIST = [
"sklearn._config.config_context",
"sklearn._config.get_config",
"sklearn.base.clone",
"sklearn.cluster._affinity_propagation.affinity_propagation",
"sklearn.cluster._agglomerative.linkage_tree",
"sklearn.cluster._kmeans.k_means",
"sklearn.cluster._kmeans.kmeans_plusplus",
"sklearn.cluster._mean_shift.estimate_bandwidth",
"sklearn.cluster._mean_shift.get_bin_seeds",
"sklearn.cluster._mean_shift.mean_shift",
"sklearn.cluster._optics.cluster_optics_dbscan",
"sklearn.cluster._optics.cluster_optics_xi",
"sklearn.cluster._optics.compute_optics_graph",
"sklearn.cluster._spectral.spectral_clustering",
"sklearn.compose._column_transformer.make_column_transformer",
"sklearn.covariance._empirical_covariance.empirical_covariance",
"sklearn.covariance._empirical_covariance.log_likelihood",
"sklearn.covariance._graph_lasso.graphical_lasso",
"sklearn.covariance._robust_covariance.fast_mcd",
"sklearn.covariance._shrunk_covariance.ledoit_wolf",
"sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage",
"sklearn.covariance._shrunk_covariance.shrunk_covariance",
"sklearn.datasets._base.get_data_home",
"sklearn.datasets._base.load_boston",
"sklearn.datasets._base.load_breast_cancer",
"sklearn.datasets._base.load_diabetes",
"sklearn.datasets._base.load_digits",
"sklearn.datasets._base.load_files",
"sklearn.datasets._base.load_iris",
"sklearn.datasets._base.load_linnerud",
"sklearn.datasets._base.load_sample_image",
"sklearn.datasets._base.load_wine",
"sklearn.datasets._california_housing.fetch_california_housing",
"sklearn.datasets._covtype.fetch_covtype",
"sklearn.datasets._kddcup99.fetch_kddcup99",
"sklearn.datasets._lfw.fetch_lfw_pairs",
"sklearn.datasets._lfw.fetch_lfw_people",
"sklearn.datasets._olivetti_faces.fetch_olivetti_faces",
"sklearn.datasets._openml.fetch_openml",
"sklearn.datasets._rcv1.fetch_rcv1",
"sklearn.datasets._samples_generator.make_biclusters",
"sklearn.datasets._samples_generator.make_blobs",
"sklearn.datasets._samples_generator.make_checkerboard",
"sklearn.datasets._samples_generator.make_classification",
"sklearn.datasets._samples_generator.make_gaussian_quantiles",
"sklearn.datasets._samples_generator.make_hastie_10_2",
"sklearn.datasets._samples_generator.make_multilabel_classification",
"sklearn.datasets._samples_generator.make_regression",
"sklearn.datasets._samples_generator.make_sparse_coded_signal",
"sklearn.datasets._samples_generator.make_sparse_spd_matrix",
"sklearn.datasets._samples_generator.make_spd_matrix",
"sklearn.datasets._species_distributions.fetch_species_distributions",
"sklearn.datasets._svmlight_format_io.dump_svmlight_file",
"sklearn.datasets._svmlight_format_io.load_svmlight_file",
"sklearn.datasets._svmlight_format_io.load_svmlight_files",
"sklearn.datasets._twenty_newsgroups.fetch_20newsgroups",
"sklearn.decomposition._dict_learning.dict_learning",
"sklearn.decomposition._dict_learning.dict_learning_online",
"sklearn.decomposition._dict_learning.sparse_encode",
"sklearn.decomposition._fastica.fastica",
"sklearn.decomposition._nmf.non_negative_factorization",
"sklearn.externals._packaging.version.parse",
"sklearn.feature_extraction.image.extract_patches_2d",
"sklearn.feature_extraction.image.grid_to_graph",
"sklearn.feature_extraction.image.img_to_graph",
"sklearn.feature_extraction.text.strip_accents_ascii",
"sklearn.feature_extraction.text.strip_accents_unicode",
"sklearn.feature_extraction.text.strip_tags",
"sklearn.feature_selection._univariate_selection.chi2",
"sklearn.feature_selection._univariate_selection.f_oneway",
"sklearn.feature_selection._univariate_selection.r_regression",
"sklearn.inspection._partial_dependence.partial_dependence",
"sklearn.inspection._plot.partial_dependence.plot_partial_dependence",
"sklearn.isotonic.isotonic_regression",
"sklearn.linear_model._least_angle.lars_path",
"sklearn.linear_model._least_angle.lars_path_gram",
"sklearn.linear_model._omp.orthogonal_mp",
"sklearn.linear_model._omp.orthogonal_mp_gram",
"sklearn.linear_model._ridge.ridge_regression",
"sklearn.manifold._locally_linear.locally_linear_embedding",
"sklearn.manifold._t_sne.trustworthiness",
"sklearn.metrics._classification.accuracy_score",
"sklearn.metrics._classification.balanced_accuracy_score",
"sklearn.metrics._classification.brier_score_loss",
"sklearn.metrics._classification.classification_report",
"sklearn.metrics._classification.cohen_kappa_score",
"sklearn.metrics._classification.confusion_matrix",
"sklearn.metrics._classification.f1_score",
"sklearn.metrics._classification.fbeta_score",
"sklearn.metrics._classification.hamming_loss",
"sklearn.metrics._classification.hinge_loss",
"sklearn.metrics._classification.jaccard_score",
"sklearn.metrics._classification.log_loss",
"sklearn.metrics._classification.precision_recall_fscore_support",
"sklearn.metrics._classification.precision_score",
"sklearn.metrics._classification.recall_score",
"sklearn.metrics._classification.zero_one_loss",
"sklearn.metrics._plot.confusion_matrix.plot_confusion_matrix",
"sklearn.metrics._plot.det_curve.plot_det_curve",
"sklearn.metrics._plot.precision_recall_curve.plot_precision_recall_curve",
"sklearn.metrics._plot.roc_curve.plot_roc_curve",
"sklearn.metrics._ranking.auc",
"sklearn.metrics._ranking.average_precision_score",
"sklearn.metrics._ranking.coverage_error",
"sklearn.metrics._ranking.dcg_score",
"sklearn.metrics._ranking.label_ranking_average_precision_score",
"sklearn.metrics._ranking.label_ranking_loss",
"sklearn.metrics._ranking.ndcg_score",
"sklearn.metrics._ranking.precision_recall_curve",
"sklearn.metrics._ranking.roc_auc_score",
"sklearn.metrics._ranking.roc_curve",
"sklearn.metrics._ranking.top_k_accuracy_score",
"sklearn.metrics._regression.max_error",
"sklearn.metrics._regression.mean_absolute_error",
"sklearn.metrics._regression.mean_pinball_loss",
"sklearn.metrics._scorer.make_scorer",
"sklearn.metrics.cluster._bicluster.consensus_score",
"sklearn.metrics.cluster._supervised.adjusted_mutual_info_score",
"sklearn.metrics.cluster._supervised.adjusted_rand_score",
"sklearn.metrics.cluster._supervised.completeness_score",
"sklearn.metrics.cluster._supervised.entropy",
"sklearn.metrics.cluster._supervised.fowlkes_mallows_score",
"sklearn.metrics.cluster._supervised.homogeneity_completeness_v_measure",
"sklearn.metrics.cluster._supervised.homogeneity_score",
"sklearn.metrics.cluster._supervised.mutual_info_score",
"sklearn.metrics.cluster._supervised.normalized_mutual_info_score",
"sklearn.metrics.cluster._supervised.pair_confusion_matrix",
"sklearn.metrics.cluster._supervised.rand_score",
"sklearn.metrics.cluster._supervised.v_measure_score",
"sklearn.metrics.cluster._unsupervised.davies_bouldin_score",
"sklearn.metrics.cluster._unsupervised.silhouette_samples",
"sklearn.metrics.cluster._unsupervised.silhouette_score",
"sklearn.metrics.pairwise.additive_chi2_kernel",
"sklearn.metrics.pairwise.check_paired_arrays",
"sklearn.metrics.pairwise.check_pairwise_arrays",
"sklearn.metrics.pairwise.chi2_kernel",
"sklearn.metrics.pairwise.cosine_distances",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.metrics.pairwise.distance_metrics",
"sklearn.metrics.pairwise.euclidean_distances",
"sklearn.metrics.pairwise.haversine_distances",
"sklearn.metrics.pairwise.kernel_metrics",
"sklearn.metrics.pairwise.laplacian_kernel",
"sklearn.metrics.pairwise.linear_kernel",
"sklearn.metrics.pairwise.manhattan_distances",
"sklearn.metrics.pairwise.nan_euclidean_distances",
"sklearn.metrics.pairwise.paired_cosine_distances",
"sklearn.metrics.pairwise.paired_distances",
"sklearn.metrics.pairwise.paired_euclidean_distances",
"sklearn.metrics.pairwise.paired_manhattan_distances",
"sklearn.metrics.pairwise.pairwise_distances_argmin",
"sklearn.metrics.pairwise.pairwise_distances_argmin_min",
"sklearn.metrics.pairwise.pairwise_distances_chunked",
"sklearn.metrics.pairwise.pairwise_kernels",
"sklearn.metrics.pairwise.polynomial_kernel",
"sklearn.metrics.pairwise.rbf_kernel",
"sklearn.metrics.pairwise.sigmoid_kernel",
"sklearn.model_selection._split.check_cv",
"sklearn.model_selection._split.train_test_split",
"sklearn.model_selection._validation.cross_val_predict",
"sklearn.model_selection._validation.cross_val_score",
"sklearn.model_selection._validation.cross_validate",
"sklearn.model_selection._validation.learning_curve",
"sklearn.model_selection._validation.permutation_test_score",
"sklearn.model_selection._validation.validation_curve",
"sklearn.neighbors._graph.kneighbors_graph",
"sklearn.neighbors._graph.radius_neighbors_graph",
"sklearn.pipeline.make_union",
"sklearn.preprocessing._data.binarize",
"sklearn.preprocessing._data.maxabs_scale",
"sklearn.preprocessing._data.normalize",
"sklearn.preprocessing._data.power_transform",
"sklearn.preprocessing._data.quantile_transform",
"sklearn.preprocessing._data.robust_scale",
"sklearn.preprocessing._data.scale",
"sklearn.preprocessing._label.label_binarize",
"sklearn.random_projection.johnson_lindenstrauss_min_dim",
"sklearn.svm._bounds.l1_min_c",
"sklearn.tree._export.plot_tree",
"sklearn.utils.axis0_safe_slice",
"sklearn.utils.check_pandas_support",
"sklearn.utils.extmath.cartesian",
"sklearn.utils.extmath.density",
"sklearn.utils.extmath.fast_logdet",
"sklearn.utils.extmath.randomized_range_finder",
"sklearn.utils.extmath.randomized_svd",
"sklearn.utils.extmath.safe_sparse_dot",
"sklearn.utils.extmath.squared_norm",
"sklearn.utils.extmath.stable_cumsum",
"sklearn.utils.extmath.svd_flip",
"sklearn.utils.extmath.weighted_mode",
"sklearn.utils.fixes.delayed",
"sklearn.utils.fixes.linspace",
# To be fixed in upstream issue:
# https://github.com/joblib/threadpoolctl/issues/108
"sklearn.utils.fixes.threadpool_info",
"sklearn.utils.fixes.threadpool_limits",
"sklearn.utils.gen_batches",
"sklearn.utils.gen_even_slices",
"sklearn.utils.get_chunk_n_rows",
"sklearn.utils.graph.graph_shortest_path",
"sklearn.utils.graph.single_source_shortest_path_length",
"sklearn.utils.is_scalar_nan",
"sklearn.utils.metaestimators.available_if",
"sklearn.utils.metaestimators.if_delegate_has_method",
"sklearn.utils.multiclass.check_classification_targets",
"sklearn.utils.multiclass.class_distribution",
"sklearn.utils.multiclass.type_of_target",
"sklearn.utils.multiclass.unique_labels",
"sklearn.utils.resample",
"sklearn.utils.safe_mask",
"sklearn.utils.safe_sqr",
"sklearn.utils.shuffle",
"sklearn.utils.sparsefuncs.count_nonzero",
"sklearn.utils.sparsefuncs.csc_median_axis_0",
"sklearn.utils.sparsefuncs.incr_mean_variance_axis",
"sklearn.utils.sparsefuncs.inplace_swap_column",
"sklearn.utils.sparsefuncs.inplace_swap_row",
"sklearn.utils.sparsefuncs.inplace_swap_row_csc",
"sklearn.utils.sparsefuncs.inplace_swap_row_csr",
"sklearn.utils.sparsefuncs.mean_variance_axis",
"sklearn.utils.sparsefuncs.min_max_axis",
"sklearn.utils.tosequence",
"sklearn.utils.validation.as_float_array",
"sklearn.utils.validation.assert_all_finite",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.validation.check_memory",
"sklearn.utils.validation.check_random_state",
"sklearn.utils.validation.column_or_1d",
"sklearn.utils.validation.has_fit_parameter",
"sklearn.utils.validation.indexable",
]
FUNCTION_DOCSTRING_IGNORE_LIST = set(FUNCTION_DOCSTRING_IGNORE_LIST)
def get_all_methods():
    """Yield ``(Estimator, method_name)`` pairs for every public estimator.

    ``method_name`` ranges over each public callable attribute or property
    of the estimator class; ``None`` is yielded once per estimator to denote
    the class-level docstring itself.
    """
    for name, Estimator in all_estimators():
        if name.startswith("_"):
            # private classes are not part of the public API
            continue
        candidates = []
        for attr_name in dir(Estimator):
            if attr_name.startswith("_"):
                continue
            attr_value = getattr(Estimator, attr_name)
            if hasattr(attr_value, "__call__") or isinstance(attr_value, property):
                candidates.append(attr_name)
        candidates.append(None)
        # sort via str() so that the None sentinel is orderable
        for method in sorted(candidates, key=str):
            yield Estimator, method
def _is_checked_function(item):
    """Return True when *item* is a public sklearn function to validate."""
    if not inspect.isfunction(item):
        return False
    if item.__name__.startswith("_"):
        # private helpers are not validated
        return False
    module = item.__module__
    # only functions defined inside sklearn, excluding the estimator checks
    return module.startswith("sklearn.") and not module.endswith("estimator_checks")
def get_all_functions_names():
    """Return the sorted, fully-qualified names of all public sklearn functions."""
    ignored_modules = {
        "tests",
        "externals",
        "setup",
        "conftest",
        "experimental",
        "estimator_checks",
    }
    discovered = set()
    for _, module_name, _ in pkgutil.walk_packages(
        path=sklearn.__path__, prefix="sklearn."
    ):
        # skip private modules and anything under an ignored package
        parts = module_name.split(".")
        if "._" in module_name or any(part in ignored_modules for part in parts):
            continue
        module = importlib.import_module(module_name)
        for _, func in inspect.getmembers(module, _is_checked_function):
            discovered.add(f"{func.__module__}.{func.__name__}")
    return sorted(discovered)
def filter_errors(errors, method, Estimator=None):
    """
    Ignore some errors based on the method type.
    These rules are specific for scikit-learn.

    - RT02, GL01 and GL02 are always dropped (they conflict with the
      scikit-learn conventions for Returns sections and short property docs).
    - PR02 (unknown parameters) is dropped for properties, which are sometimes
      used for ducktyping, e.g. ``SGDClassifier.predict_proba``.
    - ES01 / SA01 / EX01 (extended summary, see also, examples) are only
      enforced on the top-level class docstring, i.e. when ``method`` is None.
    """
    always_ignored = {"RT02", "GL01", "GL02"}
    class_level_only = {"EX01", "SA01", "ES01"}
    for code, message in errors:
        if code in always_ignored:
            continue
        if code == "PR02" and Estimator is not None and method is not None:
            if isinstance(getattr(Estimator, method), property):
                continue
        if method is not None and code in class_level_only:
            continue
        yield code, message
def repr_errors(res, estimator=None, method: Optional[str] = None) -> str:
    """Pretty print original docstring and the obtained errors

    Parameters
    ----------
    res : dict
        result of numpydoc.validate.validate
    estimator : {estimator, None}
        estimator object or None
    method : str
        if estimator is not None, either the method name or None.

    Returns
    -------
    str
        String representation of the error.

    Raises
    ------
    ValueError
        If both `estimator` and `method` are None.
    """
    if method is None:
        # NOTE: check `estimator is None` first -- `hasattr(None, "__init__")`
        # is True, so checking for `__init__` before the None guard (as the
        # previous version did) made the ValueError below unreachable.
        if estimator is None:
            raise ValueError("At least one of estimator, method should be provided")
        if hasattr(estimator, "__init__"):
            method = "__init__"
        else:
            raise NotImplementedError

    if estimator is not None:
        obj = getattr(estimator, method)
        try:
            obj_signature = signature(obj)
        except TypeError:
            # In particular we can't parse the signature of properties
            obj_signature = (
                "\nParsing of the method signature failed, "
                "possibly because this is a property."
            )
        obj_name = estimator.__name__ + "." + method
    else:
        obj_signature = ""
        obj_name = method

    msg = "\n\n" + "\n\n".join(
        [
            str(res["file"]),
            obj_name + str(obj_signature),
            res["docstring"],
            "# Errors",
            "\n".join(
                " - {}: {}".format(code, message) for code, message in res["errors"]
            ),
        ]
    )
    return msg
@pytest.mark.parametrize("function_name", get_all_functions_names())
def test_function_docstring(function_name, request):
    """Check function docstrings using numpydoc."""
    if function_name in FUNCTION_DOCSTRING_IGNORE_LIST:
        # known offender: keep it visible as an xfail rather than skipping
        request.applymarker(
            pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
        )
    result = numpydoc_validation.validate(function_name)
    remaining_errors = list(filter_errors(result["errors"], method="function"))
    result["errors"] = remaining_errors
    if remaining_errors:
        raise ValueError(repr_errors(result, method=f"Tested function: {function_name}"))
@pytest.mark.parametrize("Estimator, method", get_all_methods())
def test_docstring(Estimator, method, request):
    """Validate the docstring of an estimator class or method with numpydoc."""
    # build "module.Class" or "module.Class.method" for numpydoc
    parts = [Estimator.__module__, Estimator.__name__]
    if method is not None:
        parts.append(method)
    import_path = ".".join(parts)
    if Estimator.__name__ in DOCSTRING_IGNORE_LIST:
        # known offender: keep it visible as an xfail rather than skipping
        request.applymarker(
            pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
        )
    result = numpydoc_validation.validate(import_path)
    result["errors"] = list(filter_errors(result["errors"], method, Estimator=Estimator))
    if result["errors"]:
        raise ValueError(repr_errors(result, Estimator, method))
if __name__ == "__main__":
    # Command-line entry point: validate a single import path, e.g.
    #   python test_docstrings.py sklearn.linear_model.LinearRegression.fit
    import sys
    import argparse
    parser = argparse.ArgumentParser(description="Validate docstring with numpydoc.")
    parser.add_argument("import_path", help="Import path to validate")
    args = parser.parse_args()
    res = numpydoc_validation.validate(args.import_path)
    import_path_sections = args.import_path.split(".")
    # When applied to classes, detect the class method; for functions
    # method = None.
    # TODO: this detection can be improved. Currently we assume we have a
    # class method if the second-to-last path element is in CamelCase.
    if len(import_path_sections) >= 2 and re.match(
        r"(?:[A-Z][a-z]*)+", import_path_sections[-2]
    ):
        method = import_path_sections[-1]
    else:
        method = None
    res["errors"] = list(filter_errors(res["errors"], method))
    if res["errors"]:
        # print the annotated report and exit non-zero for CI usage
        msg = repr_errors(res, method=args.import_path)
        print(msg)
        sys.exit(1)
    else:
        print("All docstring checks passed for {}!".format(args.import_path))
| 40.488978 | 85 | 0.725896 | import re
from inspect import signature
import pkgutil
import inspect
import importlib
from typing import Optional
import pytest
from sklearn.utils import all_estimators
import sklearn
numpydoc_validation = pytest.importorskip("numpydoc.validate")
DOCSTRING_IGNORE_LIST = [
"KNNImputer",
"LabelPropagation",
"LabelSpreading",
"LocallyLinearEmbedding",
"MultiTaskElasticNet",
"MultiTaskElasticNetCV",
"MultiTaskLassoCV",
"OrthogonalMatchingPursuitCV",
"PassiveAggressiveRegressor",
"SpectralBiclustering",
"SpectralCoclustering",
"SpectralEmbedding",
"StackingRegressor",
]
FUNCTION_DOCSTRING_IGNORE_LIST = [
"sklearn._config.config_context",
"sklearn._config.get_config",
"sklearn.base.clone",
"sklearn.cluster._affinity_propagation.affinity_propagation",
"sklearn.cluster._agglomerative.linkage_tree",
"sklearn.cluster._kmeans.k_means",
"sklearn.cluster._kmeans.kmeans_plusplus",
"sklearn.cluster._mean_shift.estimate_bandwidth",
"sklearn.cluster._mean_shift.get_bin_seeds",
"sklearn.cluster._mean_shift.mean_shift",
"sklearn.cluster._optics.cluster_optics_dbscan",
"sklearn.cluster._optics.cluster_optics_xi",
"sklearn.cluster._optics.compute_optics_graph",
"sklearn.cluster._spectral.spectral_clustering",
"sklearn.compose._column_transformer.make_column_transformer",
"sklearn.covariance._empirical_covariance.empirical_covariance",
"sklearn.covariance._empirical_covariance.log_likelihood",
"sklearn.covariance._graph_lasso.graphical_lasso",
"sklearn.covariance._robust_covariance.fast_mcd",
"sklearn.covariance._shrunk_covariance.ledoit_wolf",
"sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage",
"sklearn.covariance._shrunk_covariance.shrunk_covariance",
"sklearn.datasets._base.get_data_home",
"sklearn.datasets._base.load_boston",
"sklearn.datasets._base.load_breast_cancer",
"sklearn.datasets._base.load_diabetes",
"sklearn.datasets._base.load_digits",
"sklearn.datasets._base.load_files",
"sklearn.datasets._base.load_iris",
"sklearn.datasets._base.load_linnerud",
"sklearn.datasets._base.load_sample_image",
"sklearn.datasets._base.load_wine",
"sklearn.datasets._california_housing.fetch_california_housing",
"sklearn.datasets._covtype.fetch_covtype",
"sklearn.datasets._kddcup99.fetch_kddcup99",
"sklearn.datasets._lfw.fetch_lfw_pairs",
"sklearn.datasets._lfw.fetch_lfw_people",
"sklearn.datasets._olivetti_faces.fetch_olivetti_faces",
"sklearn.datasets._openml.fetch_openml",
"sklearn.datasets._rcv1.fetch_rcv1",
"sklearn.datasets._samples_generator.make_biclusters",
"sklearn.datasets._samples_generator.make_blobs",
"sklearn.datasets._samples_generator.make_checkerboard",
"sklearn.datasets._samples_generator.make_classification",
"sklearn.datasets._samples_generator.make_gaussian_quantiles",
"sklearn.datasets._samples_generator.make_hastie_10_2",
"sklearn.datasets._samples_generator.make_multilabel_classification",
"sklearn.datasets._samples_generator.make_regression",
"sklearn.datasets._samples_generator.make_sparse_coded_signal",
"sklearn.datasets._samples_generator.make_sparse_spd_matrix",
"sklearn.datasets._samples_generator.make_spd_matrix",
"sklearn.datasets._species_distributions.fetch_species_distributions",
"sklearn.datasets._svmlight_format_io.dump_svmlight_file",
"sklearn.datasets._svmlight_format_io.load_svmlight_file",
"sklearn.datasets._svmlight_format_io.load_svmlight_files",
"sklearn.datasets._twenty_newsgroups.fetch_20newsgroups",
"sklearn.decomposition._dict_learning.dict_learning",
"sklearn.decomposition._dict_learning.dict_learning_online",
"sklearn.decomposition._dict_learning.sparse_encode",
"sklearn.decomposition._fastica.fastica",
"sklearn.decomposition._nmf.non_negative_factorization",
"sklearn.externals._packaging.version.parse",
"sklearn.feature_extraction.image.extract_patches_2d",
"sklearn.feature_extraction.image.grid_to_graph",
"sklearn.feature_extraction.image.img_to_graph",
"sklearn.feature_extraction.text.strip_accents_ascii",
"sklearn.feature_extraction.text.strip_accents_unicode",
"sklearn.feature_extraction.text.strip_tags",
"sklearn.feature_selection._univariate_selection.chi2",
"sklearn.feature_selection._univariate_selection.f_oneway",
"sklearn.feature_selection._univariate_selection.r_regression",
"sklearn.inspection._partial_dependence.partial_dependence",
"sklearn.inspection._plot.partial_dependence.plot_partial_dependence",
"sklearn.isotonic.isotonic_regression",
"sklearn.linear_model._least_angle.lars_path",
"sklearn.linear_model._least_angle.lars_path_gram",
"sklearn.linear_model._omp.orthogonal_mp",
"sklearn.linear_model._omp.orthogonal_mp_gram",
"sklearn.linear_model._ridge.ridge_regression",
"sklearn.manifold._locally_linear.locally_linear_embedding",
"sklearn.manifold._t_sne.trustworthiness",
"sklearn.metrics._classification.accuracy_score",
"sklearn.metrics._classification.balanced_accuracy_score",
"sklearn.metrics._classification.brier_score_loss",
"sklearn.metrics._classification.classification_report",
"sklearn.metrics._classification.cohen_kappa_score",
"sklearn.metrics._classification.confusion_matrix",
"sklearn.metrics._classification.f1_score",
"sklearn.metrics._classification.fbeta_score",
"sklearn.metrics._classification.hamming_loss",
"sklearn.metrics._classification.hinge_loss",
"sklearn.metrics._classification.jaccard_score",
"sklearn.metrics._classification.log_loss",
"sklearn.metrics._classification.precision_recall_fscore_support",
"sklearn.metrics._classification.precision_score",
"sklearn.metrics._classification.recall_score",
"sklearn.metrics._classification.zero_one_loss",
"sklearn.metrics._plot.confusion_matrix.plot_confusion_matrix",
"sklearn.metrics._plot.det_curve.plot_det_curve",
"sklearn.metrics._plot.precision_recall_curve.plot_precision_recall_curve",
"sklearn.metrics._plot.roc_curve.plot_roc_curve",
"sklearn.metrics._ranking.auc",
"sklearn.metrics._ranking.average_precision_score",
"sklearn.metrics._ranking.coverage_error",
"sklearn.metrics._ranking.dcg_score",
"sklearn.metrics._ranking.label_ranking_average_precision_score",
"sklearn.metrics._ranking.label_ranking_loss",
"sklearn.metrics._ranking.ndcg_score",
"sklearn.metrics._ranking.precision_recall_curve",
"sklearn.metrics._ranking.roc_auc_score",
"sklearn.metrics._ranking.roc_curve",
"sklearn.metrics._ranking.top_k_accuracy_score",
"sklearn.metrics._regression.max_error",
"sklearn.metrics._regression.mean_absolute_error",
"sklearn.metrics._regression.mean_pinball_loss",
"sklearn.metrics._scorer.make_scorer",
"sklearn.metrics.cluster._bicluster.consensus_score",
"sklearn.metrics.cluster._supervised.adjusted_mutual_info_score",
"sklearn.metrics.cluster._supervised.adjusted_rand_score",
"sklearn.metrics.cluster._supervised.completeness_score",
"sklearn.metrics.cluster._supervised.entropy",
"sklearn.metrics.cluster._supervised.fowlkes_mallows_score",
"sklearn.metrics.cluster._supervised.homogeneity_completeness_v_measure",
"sklearn.metrics.cluster._supervised.homogeneity_score",
"sklearn.metrics.cluster._supervised.mutual_info_score",
"sklearn.metrics.cluster._supervised.normalized_mutual_info_score",
"sklearn.metrics.cluster._supervised.pair_confusion_matrix",
"sklearn.metrics.cluster._supervised.rand_score",
"sklearn.metrics.cluster._supervised.v_measure_score",
"sklearn.metrics.cluster._unsupervised.davies_bouldin_score",
"sklearn.metrics.cluster._unsupervised.silhouette_samples",
"sklearn.metrics.cluster._unsupervised.silhouette_score",
"sklearn.metrics.pairwise.additive_chi2_kernel",
"sklearn.metrics.pairwise.check_paired_arrays",
"sklearn.metrics.pairwise.check_pairwise_arrays",
"sklearn.metrics.pairwise.chi2_kernel",
"sklearn.metrics.pairwise.cosine_distances",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.metrics.pairwise.distance_metrics",
"sklearn.metrics.pairwise.euclidean_distances",
"sklearn.metrics.pairwise.haversine_distances",
"sklearn.metrics.pairwise.kernel_metrics",
"sklearn.metrics.pairwise.laplacian_kernel",
"sklearn.metrics.pairwise.linear_kernel",
"sklearn.metrics.pairwise.manhattan_distances",
"sklearn.metrics.pairwise.nan_euclidean_distances",
"sklearn.metrics.pairwise.paired_cosine_distances",
"sklearn.metrics.pairwise.paired_distances",
"sklearn.metrics.pairwise.paired_euclidean_distances",
"sklearn.metrics.pairwise.paired_manhattan_distances",
"sklearn.metrics.pairwise.pairwise_distances_argmin",
"sklearn.metrics.pairwise.pairwise_distances_argmin_min",
"sklearn.metrics.pairwise.pairwise_distances_chunked",
"sklearn.metrics.pairwise.pairwise_kernels",
"sklearn.metrics.pairwise.polynomial_kernel",
"sklearn.metrics.pairwise.rbf_kernel",
"sklearn.metrics.pairwise.sigmoid_kernel",
"sklearn.model_selection._split.check_cv",
"sklearn.model_selection._split.train_test_split",
"sklearn.model_selection._validation.cross_val_predict",
"sklearn.model_selection._validation.cross_val_score",
"sklearn.model_selection._validation.cross_validate",
"sklearn.model_selection._validation.learning_curve",
"sklearn.model_selection._validation.permutation_test_score",
"sklearn.model_selection._validation.validation_curve",
"sklearn.neighbors._graph.kneighbors_graph",
"sklearn.neighbors._graph.radius_neighbors_graph",
"sklearn.pipeline.make_union",
"sklearn.preprocessing._data.binarize",
"sklearn.preprocessing._data.maxabs_scale",
"sklearn.preprocessing._data.normalize",
"sklearn.preprocessing._data.power_transform",
"sklearn.preprocessing._data.quantile_transform",
"sklearn.preprocessing._data.robust_scale",
"sklearn.preprocessing._data.scale",
"sklearn.preprocessing._label.label_binarize",
"sklearn.random_projection.johnson_lindenstrauss_min_dim",
"sklearn.svm._bounds.l1_min_c",
"sklearn.tree._export.plot_tree",
"sklearn.utils.axis0_safe_slice",
"sklearn.utils.check_pandas_support",
"sklearn.utils.extmath.cartesian",
"sklearn.utils.extmath.density",
"sklearn.utils.extmath.fast_logdet",
"sklearn.utils.extmath.randomized_range_finder",
"sklearn.utils.extmath.randomized_svd",
"sklearn.utils.extmath.safe_sparse_dot",
"sklearn.utils.extmath.squared_norm",
"sklearn.utils.extmath.stable_cumsum",
"sklearn.utils.extmath.svd_flip",
"sklearn.utils.extmath.weighted_mode",
"sklearn.utils.fixes.delayed",
"sklearn.utils.fixes.linspace",
"sklearn.utils.fixes.threadpool_info",
"sklearn.utils.fixes.threadpool_limits",
"sklearn.utils.gen_batches",
"sklearn.utils.gen_even_slices",
"sklearn.utils.get_chunk_n_rows",
"sklearn.utils.graph.graph_shortest_path",
"sklearn.utils.graph.single_source_shortest_path_length",
"sklearn.utils.is_scalar_nan",
"sklearn.utils.metaestimators.available_if",
"sklearn.utils.metaestimators.if_delegate_has_method",
"sklearn.utils.multiclass.check_classification_targets",
"sklearn.utils.multiclass.class_distribution",
"sklearn.utils.multiclass.type_of_target",
"sklearn.utils.multiclass.unique_labels",
"sklearn.utils.resample",
"sklearn.utils.safe_mask",
"sklearn.utils.safe_sqr",
"sklearn.utils.shuffle",
"sklearn.utils.sparsefuncs.count_nonzero",
"sklearn.utils.sparsefuncs.csc_median_axis_0",
"sklearn.utils.sparsefuncs.incr_mean_variance_axis",
"sklearn.utils.sparsefuncs.inplace_swap_column",
"sklearn.utils.sparsefuncs.inplace_swap_row",
"sklearn.utils.sparsefuncs.inplace_swap_row_csc",
"sklearn.utils.sparsefuncs.inplace_swap_row_csr",
"sklearn.utils.sparsefuncs.mean_variance_axis",
"sklearn.utils.sparsefuncs.min_max_axis",
"sklearn.utils.tosequence",
"sklearn.utils.validation.as_float_array",
"sklearn.utils.validation.assert_all_finite",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.validation.check_memory",
"sklearn.utils.validation.check_random_state",
"sklearn.utils.validation.column_or_1d",
"sklearn.utils.validation.has_fit_parameter",
"sklearn.utils.validation.indexable",
]
FUNCTION_DOCSTRING_IGNORE_LIST = set(FUNCTION_DOCSTRING_IGNORE_LIST)
def get_all_methods():
    """Yield ``(Estimator, method_name)`` pairs for all public estimators.

    ``method_name`` is additionally ``None`` once per estimator so the
    class-level docstring gets validated too.
    """
    estimators = all_estimators()
    for name, Estimator in estimators:
        if name.startswith("_"):
            # Skip private estimator classes.
            continue
        methods = []
        # NOTE: the original inner loop reused ``name``, shadowing the
        # estimator name above; use a distinct variable instead.
        for attr_name in dir(Estimator):
            if attr_name.startswith("_"):
                continue
            attr = getattr(Estimator, attr_name)
            # ``callable`` is the idiomatic form of hasattr(x, "__call__").
            if callable(attr) or isinstance(attr, property):
                methods.append(attr_name)
        methods.append(None)
        # Sort for deterministic parametrization; str() lets None participate.
        for method in sorted(methods, key=lambda x: str(x)):
            yield Estimator, method
def _is_checked_function(item):
if not inspect.isfunction(item):
return False
if item.__name__.startswith("_"):
return False
mod = item.__module__
if not mod.startswith("sklearn.") or mod.endswith("estimator_checks"):
return False
return True
def get_all_functions_names():
    """Return sorted full names of all public sklearn functions to validate."""
    # Sub-packages whose contents are exempt from docstring validation.
    modules_to_ignore = {
        "tests",
        "externals",
        "setup",
        "conftest",
        "experimental",
        "estimator_checks",
    }
    all_functions_names = set()
    for module_finder, module_name, ispkg in pkgutil.walk_packages(
        path=sklearn.__path__, prefix="sklearn."
    ):
        module_parts = module_name.split(".")
        if (
            any(part in modules_to_ignore for part in module_parts)
            or "._" in module_name
        ):
            # Skip ignored sub-packages and private modules.
            continue
        module = importlib.import_module(module_name)
        functions = inspect.getmembers(module, _is_checked_function)
        for name, func in functions:
            # Use the defining module so re-exported functions are not
            # validated twice under different paths.
            full_name = f"{func.__module__}.{func.__name__}"
            all_functions_names.add(full_name)
    return sorted(all_functions_names)
def filter_errors(errors, method, Estimator=None):
    """Yield (code, message) pairs, dropping error codes we do not enforce.

    - RT02 / GL01 / GL02 are always ignored (allows short property docs).
    - PR02 is ignored on properties (used for ducktyping, e.g.
      SGDClassifier.predict_proba).
    - ES01 / SA01 / EX01 are only enforced on class-level docstrings.
    """
    always_skipped = {"RT02", "GL01", "GL02"}
    class_level_only = {"EX01", "SA01", "ES01"}
    for code, message in errors:
        if code in always_skipped:
            continue
        if code == "PR02" and Estimator is not None and method is not None:
            if isinstance(getattr(Estimator, method), property):
                continue
        if method is not None and code in class_level_only:
            continue
        yield code, message
def repr_errors(res, estimator=None, method: Optional[str] = None) -> str:
    """Build a readable report of numpydoc validation errors.

    Parameters
    ----------
    res : dict
        Result of ``numpydoc.validate`` with keys ``"file"``,
        ``"docstring"`` and ``"errors"``.
    estimator : type, optional
        Estimator class whose docstring was validated.
    method : str, optional
        Name of the validated method; defaults to ``__init__`` when an
        estimator is provided.

    Returns
    -------
    str
        Human-readable error report.
    """
    if method is None:
        # Check for a missing estimator *before* probing its attributes:
        # hasattr(None, "__init__") is True, so the original ordering made
        # the ValueError below unreachable.
        if estimator is None:
            raise ValueError("At least one of estimator, method should be provided")
        if hasattr(estimator, "__init__"):
            method = "__init__"
        else:
            raise NotImplementedError
    if estimator is not None:
        obj = getattr(estimator, method)
        try:
            obj_signature = signature(obj)
        except TypeError:
            # Properties (and some descriptors) have no retrievable signature.
            obj_signature = (
                "\nParsing of the method signature failed, "
                "possibly because this is a property."
            )
        obj_name = estimator.__name__ + "." + method
    else:
        obj_signature = ""
        obj_name = method
    msg = "\n\n" + "\n\n".join(
        [
            str(res["file"]),
            obj_name + str(obj_signature),
            res["docstring"],
            "# Errors",
            "\n".join(
                " - {}: {}".format(code, message) for code, message in res["errors"]
            ),
        ]
    )
    return msg
@pytest.mark.parametrize("function_name", get_all_functions_names())
def test_function_docstring(function_name, request):
if function_name in FUNCTION_DOCSTRING_IGNORE_LIST:
request.applymarker(
pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
)
res = numpydoc_validation.validate(function_name)
res["errors"] = list(filter_errors(res["errors"], method="function"))
if res["errors"]:
msg = repr_errors(res, method=f"Tested function: {function_name}")
raise ValueError(msg)
@pytest.mark.parametrize("Estimator, method", get_all_methods())
def test_docstring(Estimator, method, request):
base_import_path = Estimator.__module__
import_path = [base_import_path, Estimator.__name__]
if method is not None:
import_path.append(method)
import_path = ".".join(import_path)
if Estimator.__name__ in DOCSTRING_IGNORE_LIST:
request.applymarker(
pytest.mark.xfail(run=False, reason="TODO pass numpydoc validation")
)
res = numpydoc_validation.validate(import_path)
res["errors"] = list(filter_errors(res["errors"], method, Estimator=Estimator))
if res["errors"]:
msg = repr_errors(res, Estimator, method)
raise ValueError(msg)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(description="Validate docstring with numpydoc.")
parser.add_argument("import_path", help="Import path to validate")
args = parser.parse_args()
res = numpydoc_validation.validate(args.import_path)
import_path_sections = args.import_path.split(".")
[A-Z][a-z]*)+", import_path_sections[-2]
):
method = import_path_sections[-1]
else:
method = None
res["errors"] = list(filter_errors(res["errors"], method))
if res["errors"]:
msg = repr_errors(res, method=args.import_path)
print(msg)
sys.exit(1)
else:
print("All docstring checks passed for {}!".format(args.import_path))
| true | true |
1c365878a4155067c9c49a7af13aa77faccf9450 | 590 | py | Python | feed/migrations/0007_auto_20200818_1706.py | wivn/feed-reader | 1b4524fcdfc79391a5cf982ce9c5681e600f4303 | [
"MIT"
] | null | null | null | feed/migrations/0007_auto_20200818_1706.py | wivn/feed-reader | 1b4524fcdfc79391a5cf982ce9c5681e600f4303 | [
"MIT"
] | null | null | null | feed/migrations/0007_auto_20200818_1706.py | wivn/feed-reader | 1b4524fcdfc79391a5cf982ce9c5681e600f4303 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-08-18 17:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make ``Subscription.user`` nullable."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('feed', '0006_subscription_user'),
    ]
    operations = [
        migrations.AlterField(
            model_name='subscription',
            name='user',
            # null=True lets existing subscription rows have no owner;
            # deleting a user still cascades to their subscriptions.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 26.818182 | 121 | 0.677966 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('feed', '0006_subscription_user'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true | true |
1c36595fca969f527c397bc1ab6b9c325648d142 | 1,790 | py | Python | deployment-manager/config.py | alessandro11/celero-devops | 5c180044145defa04ebe0dd4ba8b69ee50194317 | [
"MIT"
] | 1 | 2021-01-21T03:40:07.000Z | 2021-01-21T03:40:07.000Z | deployment-manager/config.py | alessandro11/celero-devops | 5c180044145defa04ebe0dd4ba8b69ee50194317 | [
"MIT"
] | null | null | null | deployment-manager/config.py | alessandro11/celero-devops | 5c180044145defa04ebe0dd4ba8b69ee50194317 | [
"MIT"
] | null | null | null | # Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This template creates a Runtime Configurator with the associated resources.
"""
def generate_config(context):
    """Entry point for the deployment resources.

    Rewrites each service image to the project's Container Registry path
    (``gcr.io/<project>/<image>``) and exposes the deployment properties
    as template outputs. No resources are created by this template.

    Parameters
    ----------
    context
        Deployment Manager context providing ``properties`` and ``env``.

    Returns
    -------
    dict
        ``{'resources': [...], 'outputs': [...]}`` as expected by
        Google Cloud Deployment Manager.
    """
    resources = []
    properties = context.properties
    # Fall back to the deployment's project when none is given explicitly.
    project_id = properties.get('projectId', context.env['project'])
    # The unused ``name`` lookup from the original was removed; collapse the
    # three copy-pasted prefix statements into one loop.
    for service in ('postgres', 'worker', 'webserver'):
        properties[service]['image'] = 'gcr.io/{}/{}'.format(
            project_id, properties[service]['image'])
    outputs = [
        {'name': 'region', 'value': properties['region']},
        {'name': 'zone', 'value': properties['zone']},
        {'name': 'postgres', 'value': properties['postgres']},
        {'name': 'worker', 'value': properties['worker']},
        {'name': 'webserver', 'value': properties['webserver']}
    ]
    return {'resources': resources, 'outputs': outputs}
| 41.627907 | 77 | 0.618436 |
def generate_config(context):
resources = []
properties = context.properties
project_id = properties.get('projectId', context.env['project'])
name = properties.get('config', context.env['name'])
properties['postgres']['image'] = 'gcr.io/{}/{}'.format(project_id, \
properties['postgres']['image'])
properties['worker']['image'] = 'gcr.io/{}/{}'.format(project_id, \
properties['worker']['image'])
properties['webserver']['image'] = 'gcr.io/{}/{}'.format(project_id, \
properties['webserver']['image'])
outputs = [
{ 'name': 'region', 'value': properties['region'] },
{ 'name': 'zone', 'value': properties['zone'] },
{ 'name': 'postgres', 'value': properties['postgres'] },
{ 'name': 'worker', 'value': properties['worker'] },
{ 'name': 'webserver', 'value': properties['webserver'] }
]
return {'resources': resources, 'outputs': outputs}
| true | true |
1c365a56869fe64d5538bf619c64fc0207b4e3a6 | 401 | py | Python | chrome/common/extensions/docs/server2/test_branch_utility.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-02-03T05:19:48.000Z | 2021-11-15T15:07:21.000Z | chrome/common/extensions/docs/server2/test_branch_utility.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/common/extensions/docs/server2/test_branch_utility.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from branch_utility import BranchUtility
class TestBranchUtility(object):
    """Stand-in for BranchUtility returning plausible data.

    Avoids any dependency on omahaproxy by echoing the channel name
    back as the branch.
    """
    def GetBranchForChannel(self, channel_name):
        """Return the channel name itself as the fake branch."""
        return channel_name
| 30.846154 | 77 | 0.780549 |
from branch_utility import BranchUtility
class TestBranchUtility(object):
def GetBranchForChannel(self, channel_name):
return channel_name
| true | true |
1c365a9df09866636f3a6bfa4ef78be8dd8ff624 | 4,887 | py | Python | official/modeling/multitask/task_sampler.py | KiryanovKD/models | e17080247e3c9b3301680f61b8f4815c22509e7e | [
"Apache-2.0"
] | 4 | 2019-11-02T14:47:46.000Z | 2022-01-14T10:43:02.000Z | official/modeling/multitask/task_sampler.py | KiryanovKD/models | e17080247e3c9b3301680f61b8f4815c22509e7e | [
"Apache-2.0"
] | 6 | 2021-10-05T18:53:55.000Z | 2022-03-29T21:37:00.000Z | official/modeling/multitask/task_sampler.py | KiryanovKD/models | e17080247e3c9b3301680f61b8f4815c22509e7e | [
"Apache-2.0"
] | 2 | 2021-11-30T21:50:03.000Z | 2022-03-27T01:27:31.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to sample tasks for interleaved optimization."""
import abc
from typing import Union, Dict, Text
import tensorflow as tf
from official.modeling.multitask import configs
class TaskSampler(tf.Module, metaclass=abc.ABCMeta):
  """An abstract class defining task sampling API for interleaving trainer."""
  def __init__(self, task_weights: Dict[Text, Union[float, int]]):
    # Mapping from task name to its (unnormalized) sampling weight.
    self._task_weights = task_weights
  @property
  def task_weights(self):
    """Returns the task-name -> weight mapping given at construction."""
    return self._task_weights
  @abc.abstractmethod
  def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
    """Compute cumulative distribution to sample tasks.
    It calculates the cumulative distribution of the multinomial task
    distribution with respect to which to be sampled against.
    Args:
      global_step: A tensor indicating current progress of training.
    Returns:
      A float tensor with shape (#(task), 1) that represents the cumulative
      sampling distribution.
    """
    pass
class UniformTaskSampler(TaskSampler):
  """Samples every task with equal probability."""
  def __init__(self, task_weights: Dict[Text, Union[float, int]]):
    super(UniformTaskSampler, self).__init__(task_weights=task_weights)
    num_tasks = len(self._task_weights)
    # Static uniform multinomial: each task has probability 1 / num_tasks,
    # so the cumulative distribution can be precomputed once.
    self._uniform_cumulative = tf.math.cumsum(
        tf.constant([1.0 / num_tasks] * num_tasks, dtype=tf.float32))
  def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
    """Returns the fixed uniform CDF; the training step is ignored."""
    del global_step
    return self._uniform_cumulative
class ProportionalTaskSampler(TaskSampler):
  """Sample tasks proportional to task weights.

  Each weight is raised to the power ``alpha`` before normalization, so
  ``alpha=1.0`` samples exactly proportionally to the given weights.
  """
  def __init__(self,
               task_weights: Dict[Text, Union[float, int]],
               alpha: float = 1.0):
    super(ProportionalTaskSampler, self).__init__(task_weights=task_weights)
    self._alpha = tf.cast(alpha, dtype=tf.float32)
    task_weight_dict_ordered_list = tf.constant(
        [weight for _, weight in self._task_weights.items()], dtype=tf.float32)
    task_sizes = tf.math.pow(task_weight_dict_ordered_list, self._alpha)
    task_distribution = task_sizes / tf.reduce_sum(task_sizes)
    # The distribution is fixed, so precompute its cumulative sum once.
    # (Private attribute renamed from the misspelled
    # ``_porportional_cumulative``; both uses are inside this class.)
    self._proportional_cumulative = tf.math.cumsum(task_distribution)
  def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
    """Returns the fixed proportional CDF; the training step is ignored."""
    del global_step
    return self._proportional_cumulative
class AnnealingTaskSampler(TaskSampler):
  """Sample tasks according to task weights as well as training progress.
  See http://proceedings.mlr.press/v97/stickland19a/stickland19a.pdf
  """
  def __init__(self,
               task_weights: Dict[Text, Union[float, int]],
               steps_per_epoch: int,
               total_steps: int):
    """Initializes the annealing sampler.

    Args:
      task_weights: Mapping from task name to sampling weight.
      steps_per_epoch: Number of training steps per epoch.
      total_steps: Total number of training steps.
    """
    super(AnnealingTaskSampler, self).__init__(task_weights=task_weights)
    self._steps_per_epoch = tf.cast(steps_per_epoch, dtype=tf.float32)
    self._total_epochs = tf.cast(
        total_steps / self._steps_per_epoch, dtype=tf.float32)
  def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
    # Epoch index derived from the global step (floor division).
    cur_epoch = tf.math.floor(
        tf.cast(global_step, dtype=tf.float32) / self._steps_per_epoch)
    # alpha decreases linearly with the epoch: ~1.0 at cur_epoch == 1 down
    # to ~0.2 at cur_epoch == total_epochs, flattening the weight
    # distribution over training. The 1e-10 guards against division by
    # zero when total_epochs == 1.
    alpha = 1.0 - 0.8 * (cur_epoch - 1) / (self._total_epochs - 1 + 1e-10)
    task_weight_dict_ordered_list = [
        weight for _, weight in self._task_weights.items()
    ]
    task_sizes = tf.math.pow(
        tf.constant(task_weight_dict_ordered_list, dtype=tf.float32),
        tf.cast(alpha, dtype=tf.float32))
    dynamic_task_distribution = task_sizes / tf.reduce_sum(task_sizes)
    return tf.math.cumsum(dynamic_task_distribution)
def get_task_sampler(config: configs.TaskSamplingConfig,
                     task_weights: Dict[Text, float]) -> TaskSampler:
  """Builds a TaskSampler from its configuration and the per-task weights."""
  oneof_config = config.get()
  sampler_type = config.type
  if sampler_type == 'uniform':
    return UniformTaskSampler(task_weights=task_weights)
  if sampler_type == 'proportional':
    return ProportionalTaskSampler(
        task_weights=task_weights, alpha=oneof_config.alpha)
  if sampler_type == 'annealing':
    return AnnealingTaskSampler(
        task_weights=task_weights,
        steps_per_epoch=oneof_config.steps_per_epoch,
        total_steps=oneof_config.total_steps)
  raise RuntimeError('Task sampler type not supported')
| 37.883721 | 79 | 0.727849 |
import abc
from typing import Union, Dict, Text
import tensorflow as tf
from official.modeling.multitask import configs
class TaskSampler(tf.Module, metaclass=abc.ABCMeta):
def __init__(self, task_weights: Dict[Text, Union[float, int]]):
self._task_weights = task_weights
@property
def task_weights(self):
return self._task_weights
@abc.abstractmethod
def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
pass
class UniformTaskSampler(TaskSampler):
def __init__(self, task_weights: Dict[Text, Union[float, int]]):
super(UniformTaskSampler, self).__init__(task_weights=task_weights)
self._uniform_cumulative = tf.math.cumsum(
tf.constant(
[1.0 / len(self._task_weights)] * len(self._task_weights),
dtype=tf.float32))
def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
del global_step
return self._uniform_cumulative
class ProportionalTaskSampler(TaskSampler):
def __init__(self,
task_weights: Dict[Text, Union[float, int]],
alpha: float = 1.0):
super(ProportionalTaskSampler, self).__init__(task_weights=task_weights)
self._alpha = tf.cast(alpha, dtype=tf.float32)
task_weight_dict_ordered_list = tf.constant(
[weight for _, weight in self._task_weights.items()], dtype=tf.float32)
task_sizes = tf.math.pow(task_weight_dict_ordered_list, self._alpha)
task_distribution = task_sizes / tf.reduce_sum(task_sizes)
self._porportional_cumulative = tf.math.cumsum(task_distribution)
def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
del global_step
return self._porportional_cumulative
class AnnealingTaskSampler(TaskSampler):
def __init__(self,
task_weights: Dict[Text, Union[float, int]],
steps_per_epoch: int,
total_steps: int):
super(AnnealingTaskSampler, self).__init__(task_weights=task_weights)
self._steps_per_epoch = tf.cast(steps_per_epoch, dtype=tf.float32)
self._total_epochs = tf.cast(
total_steps / self._steps_per_epoch, dtype=tf.float32)
def task_cumulative_distribution(self, global_step: tf.Tensor) -> tf.Tensor:
cur_epoch = tf.math.floor(
tf.cast(global_step, dtype=tf.float32) / self._steps_per_epoch)
alpha = 1.0 - 0.8 * (cur_epoch - 1) / (self._total_epochs - 1 + 1e-10)
task_weight_dict_ordered_list = [
weight for _, weight in self._task_weights.items()
]
task_sizes = tf.math.pow(
tf.constant(task_weight_dict_ordered_list, dtype=tf.float32),
tf.cast(alpha, dtype=tf.float32))
dynamic_task_distribution = task_sizes / tf.reduce_sum(task_sizes)
return tf.math.cumsum(dynamic_task_distribution)
def get_task_sampler(config: configs.TaskSamplingConfig,
task_weights: Dict[Text, float]) -> TaskSampler:
oneof_config = config.get()
if config.type == 'uniform':
return UniformTaskSampler(task_weights=task_weights)
elif config.type == 'proportional':
return ProportionalTaskSampler(
task_weights=task_weights, alpha=oneof_config.alpha)
elif config.type == 'annealing':
return AnnealingTaskSampler(
task_weights=task_weights,
steps_per_epoch=oneof_config.steps_per_epoch,
total_steps=oneof_config.total_steps)
else:
raise RuntimeError('Task sampler type not supported')
| true | true |
1c365c914308d1ed9d75b4e5de173d4ff5ba4097 | 33,893 | py | Python | apps/life_sci/python/dgllife/utils/splitters.py | szha/dgl | 00efec60a0e1a250dd95fceec1d8e5209b8e01ea | [
"Apache-2.0"
] | 1 | 2021-01-02T03:34:21.000Z | 2021-01-02T03:34:21.000Z | apps/life_sci/python/dgllife/utils/splitters.py | sherry-1001/dgl | 60d2e7d3c928d43bbb18e7ab17c066451c49f649 | [
"Apache-2.0"
] | null | null | null | apps/life_sci/python/dgllife/utils/splitters.py | sherry-1001/dgl | 60d2e7d3c928d43bbb18e7ab17c066451c49f649 | [
"Apache-2.0"
] | null | null | null | """Various methods for splitting chemical datasets.
We mostly adapt them from deepchem
(https://github.com/deepchem/deepchem/blob/master/deepchem/splits/splitters.py).
"""
# pylint: disable= no-member, arguments-differ, invalid-name
# pylint: disable=E0611
from collections import defaultdict
from functools import partial
from itertools import accumulate, chain
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem.rdmolops import FastFindRings
from rdkit.Chem.Scaffolds import MurckoScaffold
import dgl.backend as F
import numpy as np
from dgl.data.utils import split_dataset, Subset
__all__ = ['ConsecutiveSplitter',
'RandomSplitter',
'MolecularWeightSplitter',
'ScaffoldSplitter',
'SingleTaskStratifiedSplitter']
def base_k_fold_split(split_method, dataset, k, log):
    """Generate (train_set, val_set) pairs for k-fold cross validation.

    Parameters
    ----------
    split_method : callable
        Any train/val/test splitting callable accepting ``frac_train``,
        ``frac_val`` and ``frac_test`` keyword arguments.
    dataset
        ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
    k : int
        Number of folds, at least 2.
    log : bool
        Whether to print a message when each fold is prepared.

    Returns
    -------
    list of 2-tuples
        One (train_set, val_set) pair per fold.
    """
    assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(k)
    fold_fraction = 1. / k
    folds = []
    for fold_id in range(k):
        if log:
            print('Processing fold {:d}/{:d}'.format(fold_id + 1, k))
        # Reuse the three-way split: the middle (validation) chunk is fold i,
        # and the two surrounding chunks are merged back into the training set.
        left, val_subset, right = split_method(
            dataset,
            frac_train=fold_id * fold_fraction,
            frac_val=fold_fraction,
            frac_test=1. - (fold_id + 1) * fold_fraction)
        merged = np.concatenate([left.indices, right.indices]).astype(np.int64)
        folds.append((Subset(dataset, merged), val_subset))
    return folds
def train_val_test_sanity_check(frac_train, frac_val, frac_test):
    """Assert that the train/val/test fractions sum (approximately) to 1.

    Parameters
    ----------
    frac_train : float
        Fraction of the dataset to use for training.
    frac_val : float
        Fraction of the dataset to use for validation.
    frac_test : float
        Fraction of the dataset to use for test.
    """
    fraction_sum = frac_train + frac_val + frac_test
    assert np.allclose(fraction_sum, 1.), \
        'Expect the sum of fractions for training, validation and ' \
        'test to be 1, got {:.4f}'.format(fraction_sum)
def indices_split(dataset, frac_train, frac_val, frac_test, indices):
    """Reorder the dataset by ``indices`` and cut it into three consecutive subsets.

    Parameters
    ----------
    dataset
        ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
    frac_train : float
        Fraction of data to use for training.
    frac_val : float
        Fraction of data to use for validation.
    frac_test : float
        Fraction of data to use for test.
    indices : list or ndarray
        Order in which datapoints are consumed by the three chunks.

    Returns
    -------
    list of length 3
        Training, validation and test :class:`Subset` instances.
    """
    fracs = np.array([frac_train, frac_val, frac_test])
    assert np.allclose(np.sum(fracs), 1.), \
        'Expect frac_list sum to 1, got {:.4f}'.format(np.sum(fracs))
    total = len(dataset)
    # Chunk sizes; the last chunk absorbs any rounding remainder so that the
    # three chunks always cover the whole dataset.
    sizes = (total * fracs).astype(int)
    sizes[-1] = total - np.sum(sizes[:-1])
    subsets = []
    for end, size in zip(accumulate(sizes), sizes):
        subsets.append(Subset(dataset, list(indices[end - size:end])))
    return subsets
def count_and_log(message, i, total, log_every_n):
    """Print ``'<message> <i+1>/<total>'`` every ``log_every_n`` items.

    Parameters
    ----------
    message : str
        Message prefix to print.
    i : int
        Current zero-based index.
    total : int
        Total count.
    log_every_n : None or int
        Logging period; None disables printing entirely.
    """
    if log_every_n is None:
        return
    if (i + 1) % log_every_n == 0:
        print('{} {:d}/{:d}'.format(message, i + 1, total))
def prepare_mols(dataset, mols, sanitize, log_every_n=1000):
    """Return RDKit molecule objects aligned one-to-one with ``dataset.smiles``.

    Parameters
    ----------
    dataset
        ``len(dataset)`` gives the size and ``dataset.smiles[i]`` the SMILES
        string of the ith datapoint.
    mols : None or list of rdkit.Chem.rdchem.Mol
        Pre-computed molecules; if given they are validated for length and
        returned as-is, otherwise molecules are built from ``dataset.smiles``.
    sanitize : bool
        Whether RDKit sanitization is performed when ``mols`` is None.
    log_every_n : None or int
        Progress-logging period; None disables logging. Default to 1000.

    Returns
    -------
    mols : list of rdkit.Chem.rdchem.Mol
        ``mols[i]`` corresponds to ``dataset.smiles[i]``.
    """
    if mols is not None:
        # Pre-computed molecules must line up with the dataset one-to-one.
        assert len(mols) == len(dataset), \
            'Expect mols to be of the same size as that of the dataset, ' \
            'got {:d} and {:d}'.format(len(mols), len(dataset))
        return mols
    if log_every_n is not None:
        print('Start initializing RDKit molecule instances...')
    mols = []
    for i, smiles in enumerate(dataset.smiles):
        count_and_log('Creating RDKit molecule instance',
                      i, len(dataset.smiles), log_every_n)
        mols.append(Chem.MolFromSmiles(smiles, sanitize=sanitize))
    return mols
class ConsecutiveSplitter(object):
    """Deterministic splitter that cuts the dataset in its original order.

    No permutation is applied, so repeated calls yield identical splits.
    """

    @staticmethod
    def train_val_test_split(dataset, frac_train=0.8, frac_val=0.1, frac_test=0.1):
        """Cut the dataset, without shuffling, into train/val/test chunks.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
        frac_train : float
            Fraction of data for training. Default to 0.8.
        frac_val : float
            Fraction of data for validation. Default to 0.1.
        frac_test : float
            Fraction of data for test. Default to 0.1.

        Returns
        -------
        list of length 3
            Training, validation and test :class:`Subset` instances.
        """
        return split_dataset(dataset,
                             frac_list=[frac_train, frac_val, frac_test],
                             shuffle=False)

    @staticmethod
    def k_fold_split(dataset, k=5, log=True):
        """Prepare k (train_set, val_set) pairs from consecutive chunks.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
        k : int
            Number of folds, at least 2. Default to 5.
        log : bool
            Whether to print a message per fold.

        Returns
        -------
        list of 2-tuples
            One (train_set, val_set) pair per fold.
        """
        return base_k_fold_split(ConsecutiveSplitter.train_val_test_split,
                                 dataset, k, log)
class RandomSplitter(object):
    """Splitter that shuffles the dataset before cutting it into subsets."""

    @staticmethod
    def train_val_test_split(dataset, frac_train=0.8, frac_val=0.1,
                             frac_test=0.1, random_state=None):
        """Shuffle the dataset, then cut it into train/val/test chunks.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
        frac_train : float
            Fraction of data for training. Default to 0.8.
        frac_val : float
            Fraction of data for validation. Default to 0.1.
        frac_test : float
            Fraction of data for test. Default to 0.1.
        random_state : None, int or array_like, optional
            Seed for the permutation; pass an int for reproducible splits.

        Returns
        -------
        list of length 3
            Training, validation and test subsets.
        """
        return split_dataset(dataset,
                             frac_list=[frac_train, frac_val, frac_test],
                             shuffle=True, random_state=random_state)

    @staticmethod
    def k_fold_split(dataset, k=5, random_state=None, log=True):
        """Shuffle once, then prepare k (train_set, val_set) pairs.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
        k : int
            Number of folds, at least 2. Default to 5.
        random_state : None, int or array_like, optional
            Seed for the permutation.
        log : bool
            Whether to print a message per fold. Default to True.

        Returns
        -------
        list of 2-tuples
            One (train_set, val_set) pair per fold.
        """
        # A single up-front permutation guarantees each datapoint lands in the
        # validation subset of exactly one fold.
        shuffled = np.random.RandomState(seed=random_state).permutation(len(dataset))
        return base_k_fold_split(partial(indices_split, indices=shuffled),
                                 dataset, k, log)
# pylint: disable=I1101
class MolecularWeightSplitter(object):
    """Splitter that orders molecules by molecular weight before cutting."""

    @staticmethod
    def molecular_weight_indices(molecules, log_every_n):
        """Return an argsort of the molecules by exact molecular weight.

        Parameters
        ----------
        molecules : list of rdkit.Chem.rdchem.Mol
            RDKit molecule instances.
        log_every_n : None or int
            Progress-logging period; None disables logging.

        Returns
        -------
        ndarray
            Indices that order the molecules by increasing weight.
        """
        if log_every_n is not None:
            print('Start computing molecular weights.')
        weights = []
        for i, mol in enumerate(molecules):
            count_and_log('Computing molecular weight for compound',
                          i, len(molecules), log_every_n)
            weights.append(rdMolDescriptors.CalcExactMolWt(mol))
        return np.argsort(weights)

    @staticmethod
    def train_val_test_split(dataset, mols=None, sanitize=True, frac_train=0.8,
                             frac_val=0.1, frac_test=0.1, log_every_n=1000):
        """Sort molecules by weight, then cut into train/val/test chunks.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset.smiles[i]`` the
            SMILES of the ith datapoint.
        mols : None or list of rdkit.Chem.rdchem.Mol
            Optional pre-computed molecules aligned with ``dataset.smiles``.
        sanitize : bool
            RDKit sanitization flag used when ``mols`` is None. Default True.
        frac_train : float
            Fraction of data for training. Default to 0.8.
        frac_val : float
            Fraction of data for validation. Default to 0.1.
        frac_test : float
            Fraction of data for test. Default to 0.1.
        log_every_n : None or int
            Progress-logging period; None disables logging. Default to 1000.

        Returns
        -------
        list of length 3
            Training, validation and test :class:`Subset` instances.
        """
        # Validate the fractions first: molecule preparation and descriptor
        # computation are expensive.
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        molecules = prepare_mols(dataset, mols, sanitize, log_every_n)
        order = MolecularWeightSplitter.molecular_weight_indices(molecules, log_every_n)
        return indices_split(dataset, frac_train, frac_val, frac_test, order)

    @staticmethod
    def k_fold_split(dataset, mols=None, sanitize=True, k=5, log_every_n=1000):
        """Sort molecules by weight, then prepare k consecutive folds.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset.smiles[i]`` the
            SMILES of the ith datapoint.
        mols : None or list of rdkit.Chem.rdchem.Mol
            Optional pre-computed molecules aligned with ``dataset.smiles``.
        sanitize : bool
            RDKit sanitization flag used when ``mols`` is None. Default True.
        k : int
            Number of folds, at least 2. Default to 5.
        log_every_n : None or int
            Progress-logging period; None disables logging. Default to 1000.

        Returns
        -------
        list of 2-tuples
            One (train_set, val_set) pair per fold.
        """
        molecules = prepare_mols(dataset, mols, sanitize, log_every_n)
        order = MolecularWeightSplitter.molecular_weight_indices(molecules, log_every_n)
        return base_k_fold_split(partial(indices_split, indices=order),
                                 dataset, k, log=(log_every_n is not None))
class ScaffoldSplitter(object):
    """Group molecules based on their Bemis-Murcko scaffolds and then split the groups.

    Group molecules so that all molecules in a group have a same scaffold (see reference).
    The dataset is then split at the level of groups.

    References
    ----------
    Bemis, G. W.; Murcko, M. A. "The Properties of Known Drugs.
    1. Molecular Frameworks." J. Med. Chem. 39:2887-93 (1996).
    """

    @staticmethod
    def get_ordered_scaffold_sets(molecules, include_chirality, log_every_n):
        """Group molecule indices by scaffold, ordered by decreasing group size.

        Parameters
        ----------
        molecules : list of rdkit.Chem.rdchem.Mol
            RDKit molecule instances.
        include_chirality : bool
            Whether to consider chirality in computing scaffolds.
        log_every_n : None or int
            Progress-logging period; None disables logging.

        Returns
        -------
        scaffold_sets : list
            Each element is a list of int, the indices of compounds sharing a scaffold.
        """
        if log_every_n is not None:
            print('Start computing Bemis-Murcko scaffolds.')
        scaffolds = defaultdict(list)
        for i, mol in enumerate(molecules):
            count_and_log('Computing Bemis-Murcko for compound',
                          i, len(molecules), log_every_n)
            # For mols that have not been sanitized, we need to compute their
            # ring information before extracting the scaffold.
            try:
                FastFindRings(mol)
                mol_scaffold = MurckoScaffold.MurckoScaffoldSmiles(
                    mol=mol, includeChirality=include_chirality)
                # Group molecules that have the same scaffold
                scaffolds[mol_scaffold].append(i)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit are no longer swallowed; RDKit failures are still
                # reported and the molecule is skipped.
                print('Failed to compute the scaffold for molecule {:d} '
                      'and it will be excluded.'.format(i + 1))
        # Order groups by decreasing size, breaking ties by the index of the
        # first compound in each group.
        scaffold_sets = [
            scaffold_set for (scaffold, scaffold_set) in sorted(
                scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
        ]
        return scaffold_sets

    @staticmethod
    def train_val_test_split(dataset, mols=None, sanitize=True, include_chirality=False,
                             frac_train=0.8, frac_val=0.1, frac_test=0.1, log_every_n=1000):
        """Split into training, validation and test subsets based on scaffolds.

        Molecules with a same scaffold end up collectively in exactly one of
        the three subsets, so the training/validation subsets tend to be
        slightly smaller and the test subset slightly larger than requested.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset.smiles[i]`` the
            SMILES of the ith datapoint.
        mols : None or list of rdkit.Chem.rdchem.Mol
            Optional pre-computed molecules aligned with ``dataset.smiles``.
        sanitize : bool
            RDKit sanitization flag used when ``mols`` is None. Default True.
        include_chirality : bool
            Whether to consider chirality in computing scaffolds. Default False.
        frac_train : float
            Fraction of data for training. Default to 0.8.
        frac_val : float
            Fraction of data for validation. Default to 0.1.
        frac_test : float
            Fraction of data for test. Default to 0.1.
        log_every_n : None or int
            Progress-logging period; None disables logging. Default to 1000.

        Returns
        -------
        list of length 3
            Training, validation and test :class:`Subset` instances.
        """
        # Perform sanity check first as molecule related computation can take
        # a long time.
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        molecules = prepare_mols(dataset, mols, sanitize)
        scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(
            molecules, include_chirality, log_every_n)
        train_indices, val_indices, test_indices = [], [], []
        train_cutoff = int(frac_train * len(molecules))
        val_cutoff = int((frac_train + frac_val) * len(molecules))
        for group_indices in scaffold_sets:
            # Keep each scaffold group intact: place the whole group in the
            # first subset that still has room for it.
            if len(train_indices) + len(group_indices) > train_cutoff:
                if len(train_indices) + len(val_indices) + len(group_indices) > val_cutoff:
                    test_indices.extend(group_indices)
                else:
                    val_indices.extend(group_indices)
            else:
                train_indices.extend(group_indices)
        return [Subset(dataset, train_indices),
                Subset(dataset, val_indices),
                Subset(dataset, test_indices)]

    @staticmethod
    def k_fold_split(dataset, mols=None, sanitize=True,
                     include_chirality=False, k=5, log_every_n=1000):
        """Scaffold-aware split for k-fold cross validation.

        Each molecule appears in the validation set of exactly one fold, and
        molecules sharing a scaffold stay together in either the training or
        validation set of every fold. Folds can be imbalanced depending on
        the scaffold distribution.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset.smiles[i]`` the
            SMILES of the ith datapoint.
        mols : None or list of rdkit.Chem.rdchem.Mol
            Optional pre-computed molecules aligned with ``dataset.smiles``.
        sanitize : bool
            RDKit sanitization flag used when ``mols`` is None. Default True.
        include_chirality : bool
            Whether to consider chirality in computing scaffolds. Default False.
        k : int
            Number of folds, at least 2. Default to 5.
        log_every_n : None or int
            Progress-logging period; None disables logging. Default to 1000.

        Returns
        -------
        list of 2-tuples
            One (train_set, val_set) pair per fold.
        """
        assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(k)
        molecules = prepare_mols(dataset, mols, sanitize)
        scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(
            molecules, include_chirality, log_every_n)
        # Greedily assign each scaffold group to the currently smallest bucket
        # so the k partitions stay as balanced as the groups allow.
        index_buckets = [[] for _ in range(k)]
        for group_indices in scaffold_sets:
            bucket_chosen = int(np.argmin([len(bucket) for bucket in index_buckets]))
            index_buckets[bucket_chosen].extend(group_indices)
        all_folds = []
        for i in range(k):
            if log_every_n is not None:
                print('Processing fold {:d}/{:d}'.format(i + 1, k))
            train_indices = list(chain.from_iterable(index_buckets[:i] + index_buckets[i + 1:]))
            val_indices = index_buckets[i]
            all_folds.append((Subset(dataset, train_indices), Subset(dataset, val_indices)))
        return all_folds
class SingleTaskStratifiedSplitter(object):
    """Split the dataset by stratification on a single task.

    Molecules are sorted by their label values for one task and consecutive
    buckets of datapoints are repeatedly distributed across the subsets, so
    every subset covers the full label range.
    """

    @staticmethod
    def train_val_test_split(dataset, labels, task_id, frac_train=0.8, frac_val=0.1,
                             frac_test=0.1, bucket_size=10, random_state=None):
        """Stratified train/val/test split on the labels of one task.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
        labels : tensor of shape (N, T)
            Dataset labels for all tasks; N datapoints, T tasks.
        task_id : int
            Index of the task to stratify on.
        frac_train : float
            Fraction of data for training. Default to 0.8.
        frac_val : float
            Fraction of data for validation. Default to 0.1.
        frac_test : float
            Fraction of data for test. Default to 0.1.
        bucket_size : int
            Size of each bucket of consecutively-labeled datapoints. Default 10.
        random_state : None, int or array_like, optional
            Seed for shuffling within buckets; pass an int for reproducibility.

        Returns
        -------
        list of length 3
            Training, validation and test :class:`Subset` instances.
        """
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        # Use a dedicated generator instead of reseeding NumPy's global RNG,
        # so this call no longer perturbs unrelated random-number consumers.
        # For an int seed, RandomState draws the exact same stream the old
        # ``np.random.seed`` + ``np.random.permutation`` code produced.
        rng = np.random if random_state is None else np.random.RandomState(random_state)
        if not isinstance(labels, np.ndarray):
            labels = F.asnumpy(labels)
        task_labels = labels[:, task_id]
        sorted_indices = np.argsort(task_labels)
        train_bucket_cutoff = int(np.round(frac_train * bucket_size))
        val_bucket_cutoff = int(np.round(frac_val * bucket_size)) + train_bucket_cutoff
        train_indices, val_indices, test_indices = [], [], []
        while sorted_indices.shape[0] >= bucket_size:
            current_batch, sorted_indices = np.split(sorted_indices, [bucket_size])
            # Shuffle within the bucket, then deal its members out to the
            # three subsets according to the per-bucket cutoffs.
            shuffled = rng.permutation(range(bucket_size))
            train_indices.extend(
                current_batch[shuffled[:train_bucket_cutoff]].tolist())
            val_indices.extend(
                current_batch[shuffled[train_bucket_cutoff:val_bucket_cutoff]].tolist())
            test_indices.extend(
                current_batch[shuffled[val_bucket_cutoff:]].tolist())
        # Place the remaining (fewer than bucket_size) samples in the training set.
        train_indices.extend(sorted_indices.tolist())
        return [Subset(dataset, train_indices),
                Subset(dataset, val_indices),
                Subset(dataset, test_indices)]

    @staticmethod
    def k_fold_split(dataset, labels, task_id, k=5, log=True):
        """Sort datapoints by one task's labels, then prepare k folds.

        Parameters
        ----------
        dataset
            ``len(dataset)`` gives the size and ``dataset[i]`` the ith datapoint.
        labels : tensor of shape (N, T)
            Dataset labels for all tasks; N datapoints, T tasks.
        task_id : int
            Index of the task to stratify on.
        k : int
            Number of folds, at least 2. Default to 5.
        log : bool
            Whether to print a message per fold.

        Returns
        -------
        list of 2-tuples
            One (train_set, val_set) pair per fold.
        """
        if not isinstance(labels, np.ndarray):
            labels = F.asnumpy(labels)
        task_labels = labels[:, task_id]
        sorted_indices = np.argsort(task_labels).tolist()
        return base_k_fold_split(partial(indices_split, indices=sorted_indices), dataset, k, log)
| 45.432976 | 97 | 0.632609 |
from collections import defaultdict
from functools import partial
from itertools import accumulate, chain
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem.rdmolops import FastFindRings
from rdkit.Chem.Scaffolds import MurckoScaffold
import dgl.backend as F
import numpy as np
from dgl.data.utils import split_dataset, Subset
__all__ = ['ConsecutiveSplitter',
'RandomSplitter',
'MolecularWeightSplitter',
'ScaffoldSplitter',
'SingleTaskStratifiedSplitter']
def base_k_fold_split(split_method, dataset, k, log):
    """Split ``dataset`` into k (train_set, val_set) pairs for cross validation.

    ``split_method`` is any train/val/test splitting callable accepting
    ``frac_train``, ``frac_val`` and ``frac_test`` keyword arguments; fold i
    uses the i-th consecutive 1/k chunk for validation and merges the rest
    back for training. ``log`` enables a progress message per fold.
    """
    assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(k)
    all_folds = []
    frac_per_part = 1. / k
    for i in range(k):
        if log:
            print('Processing fold {:d}/{:d}'.format(i + 1, k))
        # Reuse the three-way split: the "val" chunk is fold i; the two chunks
        # around it are merged back into this fold's training set below.
        train_set1, val_set, train_set2 = split_method(dataset,
                                                       frac_train=i * frac_per_part,
                                                       frac_val=frac_per_part,
                                                       frac_test=1. - (i + 1) * frac_per_part)
        train_set = Subset(dataset, np.concatenate(
            [train_set1.indices, train_set2.indices]).astype(np.int64))
        all_folds.append((train_set, val_set))
    return all_folds
def train_val_test_sanity_check(frac_train, frac_val, frac_test):
    """Assert that the train/val/test fractions sum (approximately) to 1."""
    total_fraction = frac_train + frac_val + frac_test
    assert np.allclose(total_fraction, 1.), \
        'Expect the sum of fractions for training, validation and ' \
        'test to be 1, got {:.4f}'.format(total_fraction)
def indices_split(dataset, frac_train, frac_val, frac_test, indices):
    """Reorder datapoints by ``indices`` and cut them into three consecutive
    :class:`Subset` objects for training, validation and test.
    """
    frac_list = np.array([frac_train, frac_val, frac_test])
    assert np.allclose(np.sum(frac_list), 1.), \
        'Expect frac_list sum to 1, got {:.4f}'.format(np.sum(frac_list))
    num_data = len(dataset)
    lengths = (num_data * frac_list).astype(int)
    # The last subset absorbs the rounding remainder so the three subsets
    # always cover the whole dataset.
    lengths[-1] = num_data - np.sum(lengths[:-1])
    return [Subset(dataset, list(indices[offset - length:offset]))
            for offset, length in zip(accumulate(lengths), lengths)]
def count_and_log(message, i, total, log_every_n):
    """Print ``'<message> <i+1>/<total>'`` every ``log_every_n`` items;
    ``log_every_n`` of None disables printing."""
    if (log_every_n is not None) and ((i + 1) % log_every_n == 0):
        print('{} {:d}/{:d}'.format(message, i + 1, total))
def prepare_mols(dataset, mols, sanitize, log_every_n=1000):
    """Return RDKit molecules aligned one-to-one with ``dataset.smiles``.

    If ``mols`` is provided it is length-checked and returned as-is;
    otherwise molecules are built from ``dataset.smiles`` with
    ``Chem.MolFromSmiles`` (``sanitize`` controls RDKit sanitization).
    """
    if mols is not None:
        # Pre-computed molecules must line up with the dataset one-to-one.
        assert len(mols) == len(dataset), \
            'Expect mols to be of the same size as that of the dataset, ' \
            'got {:d} and {:d}'.format(len(mols), len(dataset))
    else:
        if log_every_n is not None:
            print('Start initializing RDKit molecule instances...')
        mols = []
        for i, s in enumerate(dataset.smiles):
            count_and_log('Creating RDKit molecule instance',
                          i, len(dataset.smiles), log_every_n)
            mols.append(Chem.MolFromSmiles(s, sanitize=sanitize))
    return mols
class ConsecutiveSplitter(object):
    """Deterministic splitter that cuts the dataset in its original order."""
    @staticmethod
    def train_val_test_split(dataset, frac_train=0.8, frac_val=0.1, frac_test=0.1):
        """Cut the dataset, without shuffling, into train/val/test Subsets."""
        return split_dataset(dataset, frac_list=[frac_train, frac_val, frac_test], shuffle=False)
    @staticmethod
    def k_fold_split(dataset, k=5, log=True):
        """Prepare k (train_set, val_set) folds from consecutive chunks."""
        return base_k_fold_split(ConsecutiveSplitter.train_val_test_split, dataset, k, log)
class RandomSplitter(object):
    """Splitter that shuffles the dataset before cutting it into subsets."""
    @staticmethod
    def train_val_test_split(dataset, frac_train=0.8, frac_val=0.1,
                             frac_test=0.1, random_state=None):
        """Shuffle the dataset, then cut it into train/val/test Subsets.

        ``random_state`` seeds the permutation for reproducible splits.
        """
        return split_dataset(dataset, frac_list=[frac_train, frac_val, frac_test],
                             shuffle=True, random_state=random_state)
    @staticmethod
    def k_fold_split(dataset, k=5, random_state=None, log=True):
        """Shuffle once, then prepare k (train_set, val_set) folds."""
        # A single up-front permutation guarantees each datapoint lands in the
        # validation subset of exactly one fold.
        indices = np.random.RandomState(seed=random_state).permutation(len(dataset))
        return base_k_fold_split(partial(indices_split, indices=indices), dataset, k, log)
class MolecularWeightSplitter(object):
    """Splitter that orders molecules by molecular weight before cutting."""
    @staticmethod
    def molecular_weight_indices(molecules, log_every_n):
        """Return an argsort of the molecules by exact molecular weight."""
        if log_every_n is not None:
            print('Start computing molecular weights.')
        mws = []
        for i, mol in enumerate(molecules):
            count_and_log('Computing molecular weight for compound',
                          i, len(molecules), log_every_n)
            mws.append(rdMolDescriptors.CalcExactMolWt(mol))
        return np.argsort(mws)
    @staticmethod
    def train_val_test_split(dataset, mols=None, sanitize=True, frac_train=0.8,
                             frac_val=0.1, frac_test=0.1, log_every_n=1000):
        """Sort molecules by weight, then cut into train/val/test Subsets."""
        # Validate fractions up front: molecule preparation is expensive.
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        molecules = prepare_mols(dataset, mols, sanitize, log_every_n)
        sorted_indices = MolecularWeightSplitter.molecular_weight_indices(molecules, log_every_n)
        return indices_split(dataset, frac_train, frac_val, frac_test, sorted_indices)
    @staticmethod
    def k_fold_split(dataset, mols=None, sanitize=True, k=5, log_every_n=1000):
        """Sort molecules by weight, then prepare k consecutive folds."""
        molecules = prepare_mols(dataset, mols, sanitize, log_every_n)
        sorted_indices = MolecularWeightSplitter.molecular_weight_indices(molecules, log_every_n)
        return base_k_fold_split(partial(indices_split, indices=sorted_indices), dataset, k,
                                 log=(log_every_n is not None))
class ScaffoldSplitter(object):
    """Split so that molecules sharing a Bemis-Murcko scaffold never straddle
    a subset boundary (a stricter generalization test than random splits)."""

    @staticmethod
    def get_ordered_scaffold_sets(molecules, include_chirality, log_every_n):
        """Group molecule indices by Bemis-Murcko scaffold.

        Returns a list of index lists ordered by decreasing group size (ties
        broken by the smallest member index) so the largest scaffold families
        are assigned first.  Molecules whose scaffold cannot be computed are
        excluded with a warning.
        """
        if log_every_n is not None:
            print('Start computing Bemis-Murcko scaffolds.')
        scaffolds = defaultdict(list)
        for i, mol in enumerate(molecules):
            count_and_log('Computing Bemis-Murcko for compound',
                          i, len(molecules), log_every_n)
            try:
                # Scaffold extraction needs up-to-date ring information.
                FastFindRings(mol)
                mol_scaffold = MurckoScaffold.MurckoScaffoldSmiles(
                    mol=mol, includeChirality=include_chirality)
                scaffolds[mol_scaffold].append(i)
            except Exception:
                # Fix: was a bare ``except:``, which would also swallow
                # KeyboardInterrupt/SystemExit; only real errors are expected
                # here (RDKit sanitization/scaffold failures).
                print('Failed to compute the scaffold for molecule {:d} '
                      'and it will be excluded.'.format(i + 1))
        scaffold_sets = [
            scaffold_set for (scaffold, scaffold_set) in sorted(
                scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
        ]
        return scaffold_sets

    @staticmethod
    def train_val_test_split(dataset, mols=None, sanitize=True, include_chirality=False,
                             frac_train=0.8, frac_val=0.1, frac_test=0.1, log_every_n=1000):
        """Greedily assign whole scaffold groups to train, then val, then test."""
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        molecules = prepare_mols(dataset, mols, sanitize)
        scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(
            molecules, include_chirality, log_every_n)
        train_indices, val_indices, test_indices = [], [], []
        train_cutoff = int(frac_train * len(molecules))
        val_cutoff = int((frac_train + frac_val) * len(molecules))
        # A group that would overflow train spills into val; one that would
        # overflow val spills into test.  Groups are never broken apart.
        for group_indices in scaffold_sets:
            if len(train_indices) + len(group_indices) > train_cutoff:
                if len(train_indices) + len(val_indices) + len(group_indices) > val_cutoff:
                    test_indices.extend(group_indices)
                else:
                    val_indices.extend(group_indices)
            else:
                train_indices.extend(group_indices)
        return [Subset(dataset, train_indices),
                Subset(dataset, val_indices),
                Subset(dataset, test_indices)]

    @staticmethod
    def k_fold_split(dataset, mols=None, sanitize=True,
                     include_chirality=False, k=5, log_every_n=1000):
        """Return ``k`` (train, val) folds, balancing scaffold groups across folds."""
        assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(k)
        molecules = prepare_mols(dataset, mols, sanitize)
        scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(
            molecules, include_chirality, log_every_n)
        # Greedy bin packing: each scaffold group goes into the currently
        # smallest bucket, keeping fold sizes roughly equal.
        index_buckets = [[] for _ in range(k)]
        for group_indices in scaffold_sets:
            bucket_chosen = int(np.argmin([len(bucket) for bucket in index_buckets]))
            index_buckets[bucket_chosen].extend(group_indices)
        all_folds = []
        for i in range(k):
            if log_every_n is not None:
                print('Processing fold {:d}/{:d}'.format(i + 1, k))
            train_indices = list(chain.from_iterable(index_buckets[:i] + index_buckets[i + 1:]))
            val_indices = index_buckets[i]
            all_folds.append((Subset(dataset, train_indices), Subset(dataset, val_indices)))
        return all_folds
class SingleTaskStratifiedSplitter(object):
    """Stratified splitting on the labels of one task.

    Datapoints are sorted by the chosen task's label and processed in
    fixed-size buckets so every subset samples the full label range.
    """

    @staticmethod
    def train_val_test_split(dataset, labels, task_id, frac_train=0.8, frac_val=0.1,
                             frac_test=0.1, bucket_size=10, random_state=None):
        """Split ``dataset`` stratified on ``labels[:, task_id]``.

        Each bucket of ``bucket_size`` label-sorted datapoints is shuffled
        and distributed across train/val/test according to the fractions.
        """
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        if random_state is not None:
            # NOTE(review): seeds numpy's *global* RNG as a side effect.
            np.random.seed(random_state)
        if not isinstance(labels, np.ndarray):
            labels = F.asnumpy(labels)
        task_labels = labels[:, task_id]
        sorted_indices = np.argsort(task_labels)
        # Per-bucket cut points for the train and val shares.
        train_bucket_cutoff = int(np.round(frac_train * bucket_size))
        val_bucket_cutoff = int(np.round(frac_val * bucket_size)) + train_bucket_cutoff
        train_indices, val_indices, test_indices = [], [], []
        while sorted_indices.shape[0] >= bucket_size:
            current_batch, sorted_indices = np.split(sorted_indices, [bucket_size])
            shuffled = np.random.permutation(range(bucket_size))
            train_indices.extend(
                current_batch[shuffled[:train_bucket_cutoff]].tolist())
            val_indices.extend(
                current_batch[shuffled[train_bucket_cutoff:val_bucket_cutoff]].tolist())
            test_indices.extend(
                current_batch[shuffled[val_bucket_cutoff:]].tolist())
        # Leftover datapoints (fewer than one full bucket) all go to train.
        train_indices.extend(sorted_indices.tolist())
        return [Subset(dataset, train_indices),
                Subset(dataset, val_indices),
                Subset(dataset, test_indices)]

    @staticmethod
    def k_fold_split(dataset, labels, task_id, k=5, log=True):
        """Return ``k`` (train, val) folds over the label-sorted ordering."""
        if not isinstance(labels, np.ndarray):
            labels = F.asnumpy(labels)
        task_labels = labels[:, task_id]
        sorted_indices = np.argsort(task_labels).tolist()
        return base_k_fold_split(partial(indices_split, indices=sorted_indices), dataset, k, log)
| true | true |
1c365e88faf97a6f3ac4c02769055545fd519d4d | 13,628 | py | Python | src/train-random-forest.py | peferso/pegaso-training | e1c99be63b58053d0de7f6a6e392bf08c42c7337 | [
"MIT"
] | null | null | null | src/train-random-forest.py | peferso/pegaso-training | e1c99be63b58053d0de7f6a6e392bf08c42c7337 | [
"MIT"
] | null | null | null | src/train-random-forest.py | peferso/pegaso-training | e1c99be63b58053d0de7f6a6e392bf08c42c7337 | [
"MIT"
] | null | null | null | import os
import pymysql
import datetime
import pandas as pd
import numpy as np
import time
import logging
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import joblib
from multiprocessing import Pool
import pickle
from random import randint
def fetch_database_data():
    """Load raw car listings for well-represented brands from MySQL.

    Connects with the DBHOST/DBUSER/DBPASS environment variables and keeps
    only brands having more than 1000 listings.  Returns a DataFrame with
    exactly the columns consumed downstream by ``build_features``.
    """
    time_start = time.time()
    print('Start')
    connection = pymysql.connect(host=os.environ['DBHOST'],
                                 user=os.environ['DBUSER'],
                                 passwd=os.environ['DBPASS'],
                                 db="pegaso_db",
                                 charset='utf8')
    try:
        # Fix: without ``AS model`` the column comes back named
        # ``LTRIM(model)``, so the reindex below filled 'model' with NaN.
        sql_query = pd.read_sql_query("""SELECT
        brand, LTRIM(model) AS model, price_c, kilometers, power,
        doors, professional_vendor, automatic_gearbox, year, batch_ts
    FROM
        raw_data
    WHERE
        brand IN (SELECT nb.brand FROM brands_count nb WHERE nb.num_cars>1000)
    ;""", connection)
    finally:
        # Fix: the connection was previously never closed (leaked socket).
        connection.close()
    dfrd = pd.DataFrame(sql_query,
                        columns=['brand',                # one-hot encoded later
                                 'model',
                                 'price_c',              # regression target
                                 'kilometers',
                                 'power',
                                 'doors',
                                 'professional_vendor',
                                 'automatic_gearbox',
                                 'year',
                                 'batch_ts'])            # scrape timestamp
    time_end = time.time()
    print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return dfrd
def build_features(df):
    """Derive model-ready features from the raw listings frame.

    Adds the car age in years, drops columns the model does not use, removes
    rows with missing values, builds a constant per-row baseline price and
    one-hot encodes the remaining categorical column ('brand').

    Returns:
        (features, df_baseline) where ``features`` is the one-hot encoded
        frame (still containing 'price_c') and ``df_baseline`` pairs the
        baseline prediction with the true price for baseline-error reporting.
    """
    time_start = time.time()
    print('Start')
    print('Compute new variable \'years\'...')
    # Vectorised replacement of the per-row iterrows loop: car age at scrape time.
    df['years'] = pd.to_datetime(df['batch_ts']).dt.year - df['year'].astype(int)
    print('Compute new variable \'years\'. Done.')
    print('Dropping useless columns...')
    df = df.drop(columns=['batch_ts', 'year', 'professional_vendor',
                          'automatic_gearbox', 'model'])
    print('Dropping useless columns. Done.')
    print('Dropping rows with \'nans\'...')
    df = df.dropna()
    print('Dropping rows with \'nans\'. Done.')
    print('Getting average price of each car based on brand...')
    # The per-brand mean baseline was disabled upstream (too slow); the old
    # loop appended the constant 1 per row and its ETA arithmetic could divide
    # by zero, so it is reduced to the equivalent constant list.
    l_avprice = [1] * df.shape[0]
    print('Getting average price of each car based on brand. Done.')
    df_baseline = pd.DataFrame(data={'av_price': l_avprice, 'price_c': df['price_c']})
    # The original ``df.sample(frac=1).reset_index(drop=True)`` discarded its
    # result, shuffling nothing.  It is removed rather than "fixed" because an
    # actual shuffle here would desynchronise ``df`` from ``df_baseline``
    # (train_test_split shuffles downstream anyway).
    # One-hot encode the remaining categorical column(s).
    features = pd.get_dummies(df)
    time_end = time.time()
    print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return features, df_baseline
def convert_to_arrays(features):
    """Separate the target from the predictors and convert both to numpy.

    Returns:
        (feature_list, features, labels): predictor column names, the
        predictor matrix, and the 'price_c' target vector.
    """
    labels = np.array(features['price_c'])
    # Everything except the target becomes the predictor matrix.
    predictors = features.drop('price_c', axis=1)
    feature_list = list(predictors.columns)
    return feature_list, np.array(predictors), labels
def initial_checks(data_folder):
    """Ensure ``data_folder`` exists and report its current contents."""
    time_start = time.time()
    print('Start')
    if os.path.exists(data_folder):
        print('Folder \'' + data_folder + '\' exists: not creating.')
    else:
        logging.warning('Folder ' + data_folder + 'does not exist: creating...')
        os.makedirs(data_folder)
    print('Folder \'' + data_folder + '\' contains the following files:')
    file_count = 0
    for entry in os.listdir(data_folder):
        file_count += 1
        print('File ' + str(file_count) + ': \'' + str(entry) + '\'')
    time_end = time.time()
    print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
def write_report(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms, report_file):
    """Write one CSV summary row per evaluated hyper-parameter combination."""
    rows = list(zip(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms))
    report = pd.DataFrame(rows,
                          columns=['estimators', 'max_features', 'average_accuracy',
                                   'average_mape', 'folds', 'train_time'])
    report.to_csv(report_file, index=None, header=True)
class Model:
    """Data plus hyper-parameters for one cross-validation fold.

    Instances are shipped to worker processes by ``Pool.map``, so everything
    ``train_a_random_forest`` needs is carried on the object.
    """

    def __init__(self, features, labels, id):
        logging.basicConfig(
            format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
            level=logging.INFO)
        # Hyper-parameters (replaced per grid point via set_hyperparameters).
        self.n_estimators = 5
        self.random_state = None
        self.max_features = 0.75
        self.criterion = 'squared_error'
        # Full dataset available to this fold.
        self.features = features
        self.labels = labels
        # Train split, filled by split_train_and_test_sets.
        self.train_features = None
        self.train_labels = None
        self.train_indx = None
        # Test split, filled by split_train_and_test_sets.
        self.test_features = None
        self.test_labels = None
        self.test_indx = None
        self.id = id

    def split_train_and_test_sets(self):
        """Draw a fresh 80/20 train/test partition with a random seed."""
        seed = randint(0, 42)
        split = train_test_split(self.features,
                                 self.labels,
                                 np.arange(self.features.shape[0]),
                                 test_size=0.20,
                                 random_state=seed)
        (self.train_features, self.test_features,
         self.train_labels, self.test_labels,
         self.train_indx, self.test_indx) = split

    def set_hyperparameters(self, n_estimators, random_state, max_features, criterion):
        """Record the hyper-parameters used by the next training run."""
        self.n_estimators = n_estimators
        self.random_state = random_state
        self.max_features = max_features
        self.criterion = criterion

    def train_a_random_forest(self):
        """Train one forest on a fresh split and return its test-set MAPE (%)."""
        self.split_train_and_test_sets()
        forest = RandomForestRegressor(n_estimators=self.n_estimators,
                                       criterion=self.criterion,
                                       max_features=self.max_features,
                                       random_state=self.random_state)
        forest.fit(self.train_features, self.train_labels)
        predictions = forest.predict(self.test_features)
        forest = None  # release the fitted model before returning to the pool
        percentage_errors = abs((predictions - self.test_labels) / self.test_labels * 100.0)
        return round(np.mean(percentage_errors), 2)
def cross_validation_training(model):
    """Train ``model`` once and return its test MAPE (the ``Pool.map`` callable)."""
    return model.train_a_random_forest()
# --- Script configuration and data preparation -------------------------------
THIS_SCRIPT_PATH = os.environ['PEGASO_TRAIN_DIR']
execution_timestamp = datetime.datetime.now()
model_folder = 'models'
# Timestamped artefact prefix; ':', '.' and ' ' are replaced so the name is
# filesystem-safe on every platform.
model_file = model_folder + '/rf_' + str(execution_timestamp).replace(':', '-').replace('.', '').replace(' ', '_')
report_file = model_file + '.csv'

initial_checks(model_folder)

df = fetch_database_data()
features, df_baseline = build_features(df)
feature_list, features, labels = convert_to_arrays(features)

# Persist the feature names (text for humans, pickle for the inference code).
# ``with`` replaces the manual open/close pairs so the handles cannot leak.
with open(model_file + '-feature_list.txt', 'w') as f:
    f.write('\n'.join(feature_list))
with open(model_file + '-feature_list.list', 'wb') as f:
    pickle.dump(feature_list, f)
# Hold out 30% of the data as a final evaluation set: it is used neither for
# training nor for hyper-parameter tuning.
features, eval_features, labels, eval_labels, indx, eval_indx = train_test_split(
    features, labels, np.arange(features.shape[0]),
    test_size=0.30, random_state=42)
print(' * Size of features set: ' + str(features.shape[0]))
print(' * Size of evaluation set: ' + str(eval_features.shape[0]))
time_start = time.time()
print('Computing baseline mean absolute error (MAE) and mean absolute percentage error (MAPE) on evaluation set...')
baseline_mae = 0.0
baseline_mape = 0.0
# Average the constant-baseline errors over the evaluation rows.  Fix: the
# original divided by df_baseline.shape[0] (the whole dataset) while summing
# only over eval rows, shrinking both metrics by the held-out fraction.
# (The unused ``it`` counter is also removed.)
for i in eval_indx:
    baseline_mae += abs(df_baseline.iloc[i, 0] - df_baseline.iloc[i, 1]) / len(eval_indx)
    baseline_mape += abs(df_baseline.iloc[i, 0] / df_baseline.iloc[i, 1] - 1.0) * 100.0 / len(eval_indx)
print('Computing baseline mean absolute error (MAE) and mean absolute percentage error (MAPE) on evaluation set. Done.')
print(' * baseline Mean Absolute Error (MAE): ' + str(round(baseline_mae, 2)) + ' Euros.')
print(' * baseline Mean Absolute Percentage Error (MAPE): ' + str(round(baseline_mape, 2)) + ' %.')
print(' * baseline Accuracy: ' + str(round(100 - baseline_mape, 2)) + ' %.')
# --- Hyper-parameter grid ----------------------------------------------------
n_est_list = range(1, 501, 1)
max_features_list = [x / 100.0 for x in range(10, 105, 5)]
criterion = 'squared_error'
random_state = None
mape_min = 100.0
# Per-grid-point bookkeeping for the final report (the original also created
# unused ``times`` and ``interations_remaining`` variables; both removed).
rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms = ([] for _ in range(6))
folds = 16
cpu_cores = 4
# One Model per fold; each draws its own random train/test split per run.
models = []
for fold_id in range(1, folds + 1):
    models.append(Model(features, labels, fold_id))
print('Computing grid of parameters:')
# Write the cross-validation CSV header once; the grid loop appends rows.
with open(model_file + '-grid_cross_val_data.csv', 'w') as f:
    print('n_estimators', 'max_features', *['mape_fold' + str(i) for i in range(1, folds + 1)],
          'average_mape', 'stderr_mape', sep=',', end='\n', file=f)
# Exhaustive grid search: for every (n_estimators, max_features) pair, run
# ``folds`` independent train/test splits in parallel and record the mean
# test MAPE across the folds.
for n_estimators in n_est_list:
    for max_features in max_features_list:
        ti = time.time()
        for model in models:
            model.set_hyperparameters(n_estimators, random_state, max_features, criterion)
        # One task per fold, executed on at most cpu_cores worker processes.
        p = Pool(processes=cpu_cores)
        result = p.map(cross_validation_training, models)
        p.close()
        p.join()
        tf = time.time()
        # Mean and spread of the per-fold MAPEs for this grid point.
        mape = np.average(np.array(result))
        mape_var = np.std(np.array(result))
        # Append the raw fold results for this grid point to the CSV log.
        f = open(model_file + '-grid_cross_val_data.csv', 'a')
        print(n_estimators, max_features, *result, mape, mape_var, sep=',', end='\n', file=f)
        f.close()
        # Track the best (lowest mean MAPE) combination seen so far.
        if mape < mape_min:
            mape_min = mape
            n_estimators_min = n_estimators
            max_features_min = max_features
        rep_est.append(n_estimators)
        rep_mft.append(max_features)
        mape_list.append(round(mape, 4))
        prec_list.append(round(100.0 - mape, 4))
        folds_list.append(folds)
        rep_tms.append(tf - ti)
        # NOTE(review): if the very first grid point had mape >= 100,
        # n_estimators_min would be unbound here -- confirm mape < 100 always.
        print('\tacc.', round(100.0 - mape, 2), n_estimators, max_features,
              ' - mape ', mape, mape_var,
              ' --- max acc.', round(100.0 - mape_min, 2), n_estimators_min, max_features_min)
# Fix: ``write_report`` takes (prec_list, mape_list) in that order; the
# original call swapped them, so the 'average_accuracy' CSV column held MAPE
# values and 'average_mape' held accuracies.
write_report(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms, report_file)
# --- Retrain the best configuration and evaluate on the held-out set --------
print('\nTraining the best model.\n')
rf = RandomForestRegressor(n_estimators=n_estimators_min, criterion=criterion,
                           max_features=max_features_min, random_state=random_state)
print('Training begins...')
time_start = time.time()
# Fix: the original called ``rf.fit(self.train_features, self.train_labels)``,
# which raises NameError at module level ('self' does not exist here).  The
# best model is trained on the full non-evaluation training data.
rf.fit(features, labels)
time_end = time.time()
print('Training ends. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
print('Predicting evaluation data...')
predictions = rf.predict(eval_features)
print('Predicting test data. Done.')
print('Computing MAE and MAPE on test set...')
mae = round(np.mean(abs(predictions - eval_labels)), 2)
mape = round(np.mean(abs((predictions - eval_labels) / eval_labels * 100.0)), 2)
print('Computing MAE and MAPE on test set. Done.')
print(' * Mean Absolute Error (MAE): ' + str(round(mae, 2)) + ' Euros.')
print(' * Mean Absolute Percentage Error (MAPE): ' + str(round(mape, 2)) + ' %.')
print(' * Accuracy: ' + str(round(100 - mape, 2)) + ' %.')
print('Export the model...')
joblib.dump(rf, model_file + ".joblib", compress=0)
print('Export the model. Done.')
| 40.802395 | 136 | 0.58211 | import os
import pymysql
import datetime
import pandas as pd
import numpy as np
import time
import logging
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import joblib
from multiprocessing import Pool
import pickle
from random import randint
def fetch_database_data():
    """Load raw car listings for well-represented brands from MySQL.

    Connects with the DBHOST/DBUSER/DBPASS environment variables and keeps
    only brands having more than 1000 listings.
    """
    time_start = time.time()
    print('Start')
    connection = pymysql.connect(host=os.environ['DBHOST'],
                                 user=os.environ['DBUSER'],
                                 passwd=os.environ['DBPASS'],
                                 db="pegaso_db",
                                 charset='utf8')
    try:
        # Fix: without ``AS model`` the column comes back named
        # ``LTRIM(model)``, so the reindex below filled 'model' with NaN.
        sql_query = pd.read_sql_query("""SELECT
        brand, LTRIM(model) AS model, price_c, kilometers, power,
        doors, professional_vendor, automatic_gearbox, year, batch_ts
    FROM
        raw_data
    WHERE
        brand IN (SELECT nb.brand FROM brands_count nb WHERE nb.num_cars>1000)
    ;""", connection)
    finally:
        # Fix: the connection was previously never closed (leaked socket).
        connection.close()
    dfrd = pd.DataFrame(sql_query,
                        columns=['brand',
                                 'model',
                                 'price_c',
                                 'kilometers',
                                 'power',
                                 'doors',
                                 'professional_vendor',
                                 'automatic_gearbox',
                                 'year',
                                 'batch_ts'])
    time_end = time.time()
    print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return dfrd
def build_features(df):
    """Derive model-ready features from the raw listings frame.

    Adds the car age in years, drops unused columns, removes rows with
    missing values, builds a constant per-row baseline price and one-hot
    encodes the remaining categorical column ('brand').

    Returns:
        (features, df_baseline): one-hot encoded frame (still containing
        'price_c') and the baseline-vs-true-price frame.
    """
    time_start = time.time()
    print('Start')
    print('Compute new variable \'years\'...')
    # Vectorised replacement of the per-row iterrows loop: car age at scrape time.
    df['years'] = pd.to_datetime(df['batch_ts']).dt.year - df['year'].astype(int)
    print('Compute new variable \'years\'. Done.')
    print('Dropping useless columns...')
    df = df.drop(columns=['batch_ts', 'year', 'professional_vendor',
                          'automatic_gearbox', 'model'])
    print('Dropping useless columns. Done.')
    print('Dropping rows with \'nans\'...')
    df = df.dropna()
    print('Dropping rows with \'nans\'. Done.')
    print('Getting average price of each car based on brand...')
    # The per-brand mean baseline was disabled upstream (too slow); the old
    # loop appended the constant 1 per row and its ETA arithmetic could divide
    # by zero, so it is reduced to the equivalent constant list.
    l_avprice = [1] * df.shape[0]
    print('Getting average price of each car based on brand. Done.')
    df_baseline = pd.DataFrame(data={'av_price': l_avprice, 'price_c': df['price_c']})
    # The original ``df.sample(frac=1).reset_index(drop=True)`` discarded its
    # result, shuffling nothing.  It is removed rather than "fixed" because an
    # actual shuffle here would desynchronise ``df`` from ``df_baseline``
    # (train_test_split shuffles downstream anyway).
    features = pd.get_dummies(df)
    time_end = time.time()
    print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
    return features, df_baseline
def convert_to_arrays(features):
labels = np.array(features['price_c'])
features = features.drop('price_c', axis=1)
feature_list = list(features.columns)
features = np.array(features)
return feature_list, features, labels
def initial_checks(data_folder):
time_start = time.time()
print('Start')
if not os.path.exists(data_folder):
logging.warning('Folder ' + data_folder + 'does not exist: creating...')
os.makedirs(data_folder)
else:
print('Folder \'' + data_folder + '\' exists: not creating.')
print('Folder \'' + data_folder + '\' contains the following files:')
ic = 0
for i in os.listdir(data_folder):
ic += 1
print('File ' + str(ic) + ': \'' + str(i) + '\'')
time_end = time.time()
print('End. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
def write_report(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms, report_file):
dfreport = pd.DataFrame(list(zip(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms)),
columns=['estimators', 'max_features', 'average_accuracy', 'average_mape', 'folds',
'train_time'])
dfreport.to_csv(report_file, index=None, header=True)
class Model:
def __init__(self, features, labels, id):
logging.basicConfig(
format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
level=logging.INFO)
self.n_estimators = 5
self.random_state = None
self.max_features = 0.75
self.criterion = 'squared_error'
self.features = features
self.labels = labels
self.train_features = None
self.train_labels = None
self.train_indx = None
self.test_features = None
self.test_labels = None
self.test_indx = None
self.id = id
def split_train_and_test_sets(self):
random_state = randint(0, 42)
self.train_features, self.test_features, self.train_labels, self.test_labels, self.train_indx, self.test_indx = \
train_test_split(self.features,
self.labels,
np.arange(self.features.shape[0]),
test_size=0.20,
random_state=random_state)
def set_hyperparameters(self, n_estimators, random_state, max_features, criterion):
self.n_estimators = n_estimators
self.random_state = random_state
self.max_features = max_features
self.criterion = criterion
def train_a_random_forest(self):
self.split_train_and_test_sets()
rf = RandomForestRegressor(n_estimators=self.n_estimators,
criterion=self.criterion,
max_features=self.max_features,
random_state=self.random_state)
rf.fit(self.train_features, self.train_labels)
predictions = rf.predict(self.test_features)
rf = None
mape = round(np.mean(abs((predictions - self.test_labels) / self.test_labels * 100.0)), 2)
return mape
def cross_validation_training(model):
return model.train_a_random_forest()
THIS_SCRIPT_PATH = os.environ['PEGASO_TRAIN_DIR']
execution_timestamp = datetime.datetime.now()
model_folder = 'models'
model_file = model_folder + '/rf_' + str(execution_timestamp).replace(':', '-').replace('.', '').replace(' ', '_')
report_file = model_file + '.csv'
initial_checks(model_folder)
df = fetch_database_data()
features, df_baseline = build_features(df)
feature_list, features, labels = convert_to_arrays(features)
f = open(model_file + '-feature_list.txt', 'w')
s1 = '\n'.join(feature_list)
f.write(s1)
f.close()
f = open(model_file + '-feature_list.list', 'wb')
pickle.dump(feature_list, f)
f.close()
features, eval_features, labels, eval_labels, indx, eval_indx = train_test_split(features, labels,
np.arange(
features.shape[
0]),
test_size=0.30,
random_state=42)
print(' * Size of features set: ' + str(features.shape[0]))
print(' * Size of evaluation set: ' + str(eval_features.shape[0]))
time_start = time.time()
print('Computing baseline mean absolute error (MAE) and mean absolute percentage error (MAPE) on evaluation set...')
it = 0
baseline_mae = 0.0
baseline_mape = 0.0
for i in eval_indx:
baseline_mae += abs(df_baseline.iloc[i, 0] - df_baseline.iloc[i, 1]) / df_baseline.shape[0]
baseline_mape += abs(df_baseline.iloc[i, 0] / df_baseline.iloc[i, 1] - 1.0) * 100.0 / df_baseline.shape[0]
it += 1
print('Computing baseline mean absolute error (MAE) and mean absolute percentage error (MAPE) on evaluation set. Done.')
print(' * baseline Mean Absolute Error (MAE): ' + str(round(baseline_mae, 2)) + ' Euros.')
print(' * baseline Mean Absolute Percentage Error (MAPE): ' + str(round(baseline_mape, 2)) + ' %.')
print(' * baseline Accuracy: ' + str(round(100 - baseline_mape, 2)) + ' %.')
n_est_list = range(1, 501, 1)
max_features_list = [x / 100.0 for x in range(10, 105, 5)]
criterion = 'squared_error'
random_state = None
mape_min = 100.0
times, rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms = ([] for i in range(7))
interations_remaining = len(n_est_list) * len(max_features_list)
folds = 16
cpu_cores = 4
models = []
for i in range(1, folds + 1):
model = Model(features, labels, i)
models.append(model)
print('Computing grid of parameters:')
f = open(model_file + '-grid_cross_val_data.csv', 'w')
print('n_estimators', 'max_features', *['mape_fold' + str(i) for i in range(1, folds + 1)]
, 'average_mape', 'stderr_mape', sep=',', end='\n', file=f)
f.close()
for n_estimators in n_est_list:
for max_features in max_features_list:
ti = time.time()
for model in models:
model.set_hyperparameters(n_estimators, random_state, max_features, criterion)
p = Pool(processes=cpu_cores)
result = p.map(cross_validation_training, models)
p.close()
p.join()
tf = time.time()
mape = np.average(np.array(result))
mape_var = np.std(np.array(result))
f = open(model_file + '-grid_cross_val_data.csv', 'a')
print(n_estimators, max_features, *result, mape, mape_var, sep=',', end='\n', file=f)
f.close()
if mape < mape_min:
mape_min = mape
n_estimators_min = n_estimators
max_features_min = max_features
rep_est.append(n_estimators)
rep_mft.append(max_features)
mape_list.append(round(mape, 4))
prec_list.append(round(100.0 - mape, 4))
folds_list.append(folds)
rep_tms.append(tf - ti)
print('\tacc.', round(100.0 - mape, 2), n_estimators, max_features,
' - mape ', mape, mape_var,
' --- max acc.', round(100.0 - mape_min, 2), n_estimators_min, max_features_min)
# Fix: ``write_report`` takes (prec_list, mape_list) in that order; the
# original call swapped them, mislabeling the accuracy and MAPE CSV columns.
write_report(rep_est, rep_mft, prec_list, mape_list, folds_list, rep_tms, report_file)
# --- Retrain the best configuration and evaluate on the held-out set --------
print('\nTraining the best model.\n')
rf = RandomForestRegressor(n_estimators=n_estimators_min, criterion=criterion,
                           max_features=max_features_min, random_state=random_state)
print('Training begins...')
time_start = time.time()
# Fix: the original called ``rf.fit(self.train_features, self.train_labels)``,
# which raises NameError at module level ('self' does not exist here).  The
# best model is trained on the full non-evaluation training data.
rf.fit(features, labels)
time_end = time.time()
print('Training ends. Elapsed time: ' + str(time_end - time_start) + ' seconds.')
print('Predicting evaluation data...')
predictions = rf.predict(eval_features)
print('Predicting test data. Done.')
print('Computing MAE and MAPE on test set...')
mae = round(np.mean(abs(predictions - eval_labels)), 2)
mape = round(np.mean(abs((predictions - eval_labels) / eval_labels * 100.0)), 2)
print('Computing MAE and MAPE on test set. Done.')
print(' * Mean Absolute Error (MAE): ' + str(round(mae, 2)) + ' Euros.')
print(' * Mean Absolute Percentage Error (MAPE): ' + str(round(mape, 2)) + ' %.')
print(' * Accuracy: ' + str(round(100 - mape, 2)) + ' %.')
print('Export the model...')
joblib.dump(rf, model_file + ".joblib", compress=0)
print('Export the model. Done.')
| true | true |
1c3660c30c47af362807bbbd60269eaac231b6dc | 4,251 | py | Python | benchmark/startQiskit_QC933.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC933.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC933.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=41
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle O_f^± for boolean function ``f`` on ``n`` qubits.

    Every basis state |x> with f(x) == '1' receives a -1 phase, implemented
    by conjugating a multi-controlled U1(pi) (a multi-controlled Z) with X
    gates that map |x> to the all-ones state.
    """
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map |rep> onto |11...1> so the controlled phase fires only for it.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # Multi-controlled U1(pi) == CZ generalization (see issue #127).
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            # NOTE(review): for n == 1 no phase is ever applied -- confirm
            # single-qubit oracles are never requested.
            # Undo the basis-mapping X gates.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    return oracle
def make_circuit(n: int, f) -> QuantumCircuit:
    """Assemble the auto-generated Grover-style circuit for the oracle of ``f``.

    Applies an initial Hadamard layer, one round of the Zf oracle followed by
    a mutated diffusion-like gate sequence, then measures every qubit.
    The gate list is machine-generated and order-dependent: do not reorder.
    """
    input_qubit = QuantumRegister(n, "qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Uniform superposition over all five qubits.
    prog.h(input_qubit[0])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    prog.h(input_qubit[4])
    Zf = build_oracle(n, f)
    # Theoretical optimal Grover iteration count.  NOTE(review): computed but
    # unused -- the loop below always runs exactly once.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        # Diffusion-like block (auto-mutated sequence of H/X/CX/CZ/Y gates).
        prog.h(input_qubit[0])
        prog.h(input_qubit[1])
        prog.h(input_qubit[4])
        prog.h(input_qubit[2])
        prog.h(input_qubit[3])
        prog.cx(input_qubit[1], input_qubit[0])
        prog.x(input_qubit[0])
        prog.cx(input_qubit[1], input_qubit[0])
        prog.x(input_qubit[1])
        prog.x(input_qubit[4])
        prog.cx(input_qubit[0], input_qubit[2])
        prog.x(input_qubit[2])
        prog.h(input_qubit[2])
        prog.cz(input_qubit[0], input_qubit[2])
        prog.h(input_qubit[2])
        prog.cx(input_qubit[0], input_qubit[3])
        prog.x(input_qubit[3])
        prog.cx(input_qubit[0], input_qubit[3])
        # Multi-controlled phase about the all-zeros state.
        if n >= 2:
            prog.mcu1(pi, input_qubit[1:], input_qubit[0])
        prog.x(input_qubit[0])
        prog.x(input_qubit[1])
        prog.x(input_qubit[2])
        prog.y(input_qubit[1])
        prog.x(input_qubit[3])
        prog.h(input_qubit[0])
        prog.h(input_qubit[2])
        prog.cz(input_qubit[4], input_qubit[2])
        prog.h(input_qubit[2])
        prog.h(input_qubit[1])
        prog.h(input_qubit[2])
        prog.h(input_qubit[3])
        prog.h(input_qubit[0])
        prog.h(input_qubit[1])
        prog.h(input_qubit[2])
        prog.h(input_qubit[3])
    # Measure every qubit into its classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle marks the all-zeros bitstring.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5, f)
    # Execute on the least-busy operational real IBMQ backend with >= 2 qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Depth and diagram are reported for a FakeVigo transpilation, not for
    # the real device the counts came from.
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
    writefile = open("../data/startQiskit_QC933.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 31.488889 | 165 | 0.612562 |
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[4])
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[4])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[1])
prog.x(input_qubit[4])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.h(input_qubit[2])
prog.cz(input_qubit[0],input_qubit[2])
prog.h(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0])
prog.x(input_qubit[1])
prog.x(input_qubit[2])
prog.y(input_qubit[1])
prog.x(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[2])
prog.cz(input_qubit[4],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC933.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
1c3660ca9c9f57d0cda70b102246a24f2e77cacb | 12,256 | py | Python | temp.py | AlphaPlusTT/nerf-w | c56589df46b80077eb9e0bfb29b023490b0a7fa1 | [
"MIT"
] | 1 | 2022-03-29T09:59:36.000Z | 2022-03-29T09:59:36.000Z | temp.py | zhoufan1908/nerf-w | c56589df46b80077eb9e0bfb29b023490b0a7fa1 | [
"MIT"
] | null | null | null | temp.py | zhoufan1908/nerf-w | c56589df46b80077eb9e0bfb29b023490b0a7fa1 | [
"MIT"
] | 1 | 2022-03-29T09:59:27.000Z | 2022-03-29T09:59:27.000Z | import torch
from torch.utils.data import Dataset
import glob
import numpy as np
import os
import pandas as pd
import pickle
from PIL import Image
from torchvision import transforms as T
from datasets.ray_utils import *
from datasets.colmap_utils import \
read_cameras_binary, read_images_binary, read_points3d_binary
class PhototourismDataset(Dataset):
    """Phototourism scene dataset for NeRF-W style training.

    Reads a COLMAP sparse reconstruction (``dense/sparse/{images,cameras,
    points3D}.bin``) plus a ``*.tsv`` train/test split file under
    ``root_dir`` and yields per-pixel rays, RGB targets and a per-image
    appearance id (``ts``).
    """

    def __init__(self, root_dir, split='train', img_downscale=1, val_num=1, use_cache=False):
        """
        Parameters
        ----------
        root_dir : str
            Scene root directory (contains the tsv file and ``dense/``).
        split : str
            'train', 'val', 'test_train', or anything else for the
            parametric test path (whose attributes are set in eval.py).
        img_downscale : int
            How much to downsample the training images; values of 1 or 2
            are recommended. ATTENTION! A value of 1 consumes a lot of
            CPU memory (about 40G for brandenburg gate).
        val_num : int
            Number of val images (used for multi-GPU: every GPU
            validates the same image).
        use_cache : bool
            Use rays/ids precomputed during data preparation (useful to
            accelerate data loading, especially for multi-GPU).
        """
        self.root_dir = root_dir
        self.split = split
        assert img_downscale >= 1, 'image can only be downsampled, please set img_downscale>=1!'
        self.img_downscale = img_downscale
        if split == 'val':  # image downscale=1 will cause OOM in val mode
            self.img_downscale = max(2, self.img_downscale)
        self.val_num = max(1, val_num)  # at least 1
        self.use_cache = use_cache
        self.define_transforms()

        self.read_meta()
        self.white_back = False

    def _read_img(self, id_):
        """Load image ``id_``, optionally downscale, and flatten.

        Returns
        -------
        tuple
            (tensor of shape (h*w, 3) with RGB values in [0, 1],
            final width, final height) after the optional
            ``img_downscale`` resize.
        """
        img = Image.open(os.path.join(self.root_dir, 'dense/images',
                                      self.image_paths[id_])).convert('RGB')
        img_w, img_h = img.size
        if self.img_downscale > 1:
            img_w = img_w // self.img_downscale
            img_h = img_h // self.img_downscale
            img = img.resize((img_w, img_h), Image.LANCZOS)
        img = self.transform(img)  # (3, h, w)
        img = img.view(3, -1).permute(1, 0)  # (h*w, 3) RGB
        return img, img_w, img_h

    def read_meta(self):
        """Read the split tsv, COLMAP metadata and (optionally) cached rays."""
        # Read all entries in the tsv first (split into train/test later).
        tsv = glob.glob(os.path.join(self.root_dir, '*.tsv'))[0]
        self.scene_name = os.path.basename(tsv)[:-4]
        self.files = pd.read_csv(tsv, sep='\t')
        self.files = self.files[~self.files['id'].isnull()]  # remove data without id
        self.files.reset_index(inplace=True, drop=True)

        # Step 1. load image paths
        # Attention! The 'id' column in the tsv is BROKEN, don't use it!!!!
        # Instead, read the id from images.bin using the image file name.
        if self.use_cache:
            with open(os.path.join(self.root_dir, 'cache/img_ids.pkl'), 'rb') as f:
                self.img_ids = pickle.load(f)
            with open(os.path.join(self.root_dir, 'cache/image_paths.pkl'), 'rb') as f:
                self.image_paths = pickle.load(f)
        else:
            imdata = read_images_binary(os.path.join(self.root_dir, 'dense/sparse/images.bin'))
            img_path_to_id = {v.name: v.id for v in imdata.values()}
            self.img_ids = []
            self.image_paths = {}  # {id: filename}
            for filename in list(self.files['filename']):
                id_ = img_path_to_id[filename]
                self.image_paths[id_] = filename
                self.img_ids += [id_]

        # Step 2: read and rescale camera intrinsics
        if self.use_cache:
            with open(os.path.join(self.root_dir, f'cache/Ks{self.img_downscale}.pkl'), 'rb') as f:
                self.Ks = pickle.load(f)
        else:
            self.Ks = {}  # {id: K}
            camdata = read_cameras_binary(os.path.join(self.root_dir, 'dense/sparse/cameras.bin'))
            for id_ in self.img_ids:
                K = np.zeros((3, 3), dtype=np.float32)
                cam = camdata[id_]
                # The code treats cam.params as (fx, fy, cx, cy) with the
                # principal point at the image center, so width = 2*cx and
                # height = 2*cy.
                img_w, img_h = int(cam.params[2] * 2), int(cam.params[3] * 2)
                img_w_, img_h_ = img_w // self.img_downscale, img_h // self.img_downscale
                K[0, 0] = cam.params[0] * img_w_ / img_w  # fx
                K[1, 1] = cam.params[1] * img_h_ / img_h  # fy
                K[0, 2] = cam.params[2] * img_w_ / img_w  # cx
                K[1, 2] = cam.params[3] * img_h_ / img_h  # cy
                K[2, 2] = 1
                self.Ks[id_] = K

        # Step 3: read c2w poses (of the images in the tsv file only) and
        # correct the axis convention
        if self.use_cache:
            self.poses = np.load(os.path.join(self.root_dir, 'cache/poses.npy'))
        else:
            w2c_mats = []
            bottom = np.array([0, 0, 0, 1.]).reshape(1, 4)
            for id_ in self.img_ids:
                im = imdata[id_]
                R = im.qvec2rotmat()
                t = im.tvec.reshape(3, 1)
                w2c_mats += [np.concatenate([np.concatenate([R, t], 1), bottom], 0)]
            w2c_mats = np.stack(w2c_mats, 0)  # (N_images, 4, 4)
            self.poses = np.linalg.inv(w2c_mats)[:, :3]  # (N_images, 3, 4)
            # Original poses have rotation in "right down front" form;
            # change to "right up back"
            self.poses[..., 1:3] *= -1

        # Step 4: correct scale
        if self.use_cache:
            self.xyz_world = np.load(os.path.join(self.root_dir, 'cache/xyz_world.npy'))
            with open(os.path.join(self.root_dir, 'cache/nears.pkl'), 'rb') as f:
                self.nears = pickle.load(f)
            with open(os.path.join(self.root_dir, 'cache/fars.pkl'), 'rb') as f:
                self.fars = pickle.load(f)
        else:
            pts3d = read_points3d_binary(os.path.join(self.root_dir, 'dense/sparse/points3D.bin'))
            self.xyz_world = np.array([pts3d[p_id].xyz for p_id in pts3d])
            xyz_world_h = np.concatenate([self.xyz_world, np.ones((len(self.xyz_world), 1))], -1)
            # Compute near and far bounds for each image individually
            self.nears, self.fars = {}, {}  # {id_: distance}
            for i, id_ in enumerate(self.img_ids):
                xyz_cam_i = (xyz_world_h @ w2c_mats[i].T)[:, :3]  # xyz in the ith cam coordinates
                xyz_cam_i = xyz_cam_i[xyz_cam_i[:, 2] > 0]  # keep points in front of the cam
                # Robust bounds: 0.1 / 99.9 percentiles of the point depths.
                self.nears[id_] = np.percentile(xyz_cam_i[:, 2], 0.1)
                self.fars[id_] = np.percentile(xyz_cam_i[:, 2], 99.9)

            max_far = np.fromiter(self.fars.values(), np.float32).max()
            scale_factor = max_far / 5  # so that the max far is scaled to 5
            self.poses[..., 3] /= scale_factor
            for k in self.nears:
                self.nears[k] /= scale_factor
            for k in self.fars:
                self.fars[k] /= scale_factor
            self.xyz_world /= scale_factor
        self.poses_dict = {id_: self.poses[i] for i, id_ in enumerate(self.img_ids)}

        # Step 5. split the img_ids (the number of images is verified to
        # match that in the paper)
        self.img_ids_train = [id_ for i, id_ in enumerate(self.img_ids)
                              if self.files.loc[i, 'split'] == 'train']
        self.img_ids_test = [id_ for i, id_ in enumerate(self.img_ids)
                             if self.files.loc[i, 'split'] == 'test']
        self.N_images_train = len(self.img_ids_train)
        self.N_images_test = len(self.img_ids_test)

        if self.split == 'train':  # create a buffer of all rays and rgb data
            if self.use_cache:
                all_rays = np.load(os.path.join(self.root_dir,
                                                f'cache/rays{self.img_downscale}.npy'))
                self.all_rays = torch.from_numpy(all_rays)
                all_rgbs = np.load(os.path.join(self.root_dir,
                                                f'cache/rgbs{self.img_downscale}.npy'))
                self.all_rgbs = torch.from_numpy(all_rgbs)
            else:
                self.all_rays = []
                self.all_rgbs = []
                for id_ in self.img_ids_train:
                    c2w = torch.FloatTensor(self.poses_dict[id_])

                    img, img_w, img_h = self._read_img(id_)
                    self.all_rgbs += [img]

                    directions = get_ray_directions(img_h, img_w, self.Ks[id_])
                    rays_o, rays_d = get_rays(directions, c2w)
                    rays_t = id_ * torch.ones(len(rays_o), 1)  # appearance id per ray

                    self.all_rays += [torch.cat([rays_o, rays_d,
                                                 self.nears[id_] * torch.ones_like(rays_o[:, :1]),
                                                 self.fars[id_] * torch.ones_like(rays_o[:, :1]),
                                                 rays_t],
                                                1)]  # (h*w, 9): origin, dir, near, far, t

                self.all_rays = torch.cat(self.all_rays, 0)  # (sum_i h_i*w_i, 9)
                self.all_rgbs = torch.cat(self.all_rgbs, 0)  # (sum_i h_i*w_i, 3)

        elif self.split in ['val', 'test_train']:  # use the first image as val image (also in train)
            self.val_id = self.img_ids_train[0]

        else:  # for testing, create a parametric rendering path
            # test poses and appearance index are defined in eval.py
            pass

    def define_transforms(self):
        # PIL image -> float tensor in [0, 1]
        self.transform = T.ToTensor()

    def __len__(self):
        if self.split == 'train':
            return len(self.all_rays)
        if self.split == 'test_train':
            return self.N_images_train
        if self.split == 'val':
            return self.val_num
        # test split: poses_test is assigned externally (eval.py)
        return len(self.poses_test)

    def __getitem__(self, idx):
        if self.split == 'train':  # use data from the buffers
            sample = {'rays': self.all_rays[idx, :8],
                      'ts': self.all_rays[idx, 8].long(),
                      'rgbs': self.all_rgbs[idx]}

        elif self.split in ['val', 'test_train']:
            sample = {}
            if self.split == 'val':
                id_ = self.val_id
            else:
                id_ = self.img_ids_train[idx]
            sample['c2w'] = c2w = torch.FloatTensor(self.poses_dict[id_])

            img, img_w, img_h = self._read_img(id_)
            sample['rgbs'] = img

            directions = get_ray_directions(img_h, img_w, self.Ks[id_])
            rays_o, rays_d = get_rays(directions, c2w)
            rays = torch.cat([rays_o, rays_d,
                              self.nears[id_] * torch.ones_like(rays_o[:, :1]),
                              self.fars[id_] * torch.ones_like(rays_o[:, :1])],
                             1)  # (h*w, 8)
            sample['rays'] = rays
            sample['ts'] = id_ * torch.ones(len(rays), dtype=torch.long)
            sample['img_wh'] = torch.LongTensor([img_w, img_h])

        else:  # test split; test_* attributes are assigned externally (eval.py)
            sample = {}
            sample['c2w'] = c2w = torch.FloatTensor(self.poses_test[idx])
            directions = get_ray_directions(self.test_img_h, self.test_img_w, self.test_K)
            rays_o, rays_d = get_rays(directions, c2w)
            near, far = 0, 5  # the scene was rescaled so that max far = 5
            rays = torch.cat([rays_o, rays_d,
                              near * torch.ones_like(rays_o[:, :1]),
                              far * torch.ones_like(rays_o[:, :1])],
                             1)
            sample['rays'] = rays
            sample['ts'] = self.test_appearance_idx * torch.ones(len(rays), dtype=torch.long)
            sample['img_wh'] = torch.LongTensor([self.test_img_w, self.test_img_h])

        return sample
if __name__ == '__main__':
    # Smoke test: construct the dataset from a hard-coded local scene path
    # (exercises read_meta and the COLMAP parsing).
    data = PhototourismDataset('/home/zed/data/nerf/brandenburg_gate')
| 43.003509 | 103 | 0.532311 | import torch
from torch.utils.data import Dataset
import glob
import numpy as np
import os
import pandas as pd
import pickle
from PIL import Image
from torchvision import transforms as T
from datasets.ray_utils import *
from datasets.colmap_utils import \
read_cameras_binary, read_images_binary, read_points3d_binary
class PhototourismDataset(Dataset):
def __init__(self, root_dir, split='train', img_downscale=1, val_num=1, use_cache=False):
self.root_dir = root_dir
self.split = split
assert img_downscale >= 1, 'image can only be downsampled, please set img_downscale>=1!'
self.img_downscale = img_downscale
if split == 'val':
self.img_downscale = max(2, self.img_downscale)
self.val_num = max(1, val_num)
self.use_cache = use_cache
self.define_transforms()
self.read_meta()
self.white_back = False
def read_meta(self):
tsv = glob.glob(os.path.join(self.root_dir, '*.tsv'))[0]
self.scene_name = os.path.basename(tsv)[:-4]
self.files = pd.read_csv(tsv, sep='\t')
self.files = self.files[~self.files['id'].isnull()]
self.files.reset_index(inplace=True, drop=True)
# Instead, read the id from images.bin using image file name!
if self.use_cache:
with open(os.path.join(self.root_dir, f'cache/img_ids.pkl'), 'rb') as f:
self.img_ids = pickle.load(f)
with open(os.path.join(self.root_dir, f'cache/image_paths.pkl'), 'rb') as f:
self.image_paths = pickle.load(f)
else:
imdata = read_images_binary(os.path.join(self.root_dir, 'dense/sparse/images.bin'))
img_path_to_id = {}
for v in imdata.values():
img_path_to_id[v.name] = v.id
self.img_ids = []
self.image_paths = {} # {id: filename}
for filename in list(self.files['filename']):
id_ = img_path_to_id[filename]
self.image_paths[id_] = filename
self.img_ids += [id_]
pass
# Step 2: read and rescale camera intrinsics
if self.use_cache:
with open(os.path.join(self.root_dir, f'cache/Ks{self.img_downscale}.pkl'), 'rb') as f:
self.Ks = pickle.load(f)
else:
self.Ks = {} # {id: K}
camdata = read_cameras_binary(os.path.join(self.root_dir, 'dense/sparse/cameras.bin'))
for id_ in self.img_ids:
K = np.zeros((3, 3), dtype=np.float32)
cam = camdata[id_]
img_w, img_h = int(cam.params[2] * 2), int(cam.params[3] * 2)
img_w_, img_h_ = img_w // self.img_downscale, img_h // self.img_downscale
K[0, 0] = cam.params[0] * img_w_ / img_w # fx
K[1, 1] = cam.params[1] * img_h_ / img_h # fy
K[0, 2] = cam.params[2] * img_w_ / img_w # cx
K[1, 2] = cam.params[3] * img_h_ / img_h # cy
K[2, 2] = 1
# print(K)
pass
self.Ks[id_] = K
# Step 3: read c2w poses (of the images in tsv file only) and correct the order
if self.use_cache:
self.poses = np.load(os.path.join(self.root_dir, 'cache/poses.npy'))
else:
w2c_mats = []
bottom = np.array([0, 0, 0, 1.]).reshape(1, 4)
for id_ in self.img_ids:
im = imdata[id_]
R = im.qvec2rotmat()
t = im.tvec.reshape(3, 1)
w2c_mats += [np.concatenate([np.concatenate([R, t], 1), bottom], 0)]
w2c_mats = np.stack(w2c_mats, 0) # (N_images, 4, 4)
self.poses = np.linalg.inv(w2c_mats)[:, :3] # (N_images, 3, 4)
# Original poses has rotation in form "right down front", change to "right up back"
self.poses[..., 1:3] *= -1
# Step 4: correct scale
if self.use_cache:
self.xyz_world = np.load(os.path.join(self.root_dir, 'cache/xyz_world.npy'))
with open(os.path.join(self.root_dir, f'cache/nears.pkl'), 'rb') as f:
self.nears = pickle.load(f)
with open(os.path.join(self.root_dir, f'cache/fars.pkl'), 'rb') as f:
self.fars = pickle.load(f)
else:
pts3d = read_points3d_binary(os.path.join(self.root_dir, 'dense/sparse/points3D.bin'))
self.xyz_world = np.array([pts3d[p_id].xyz for p_id in pts3d])
xyz_world_h = np.concatenate([self.xyz_world, np.ones((len(self.xyz_world), 1))], -1)
# Compute near and far bounds for each image individually
self.nears, self.fars = {}, {} # {id_: distance}
for i, id_ in enumerate(self.img_ids):
xyz_cam_i = (xyz_world_h @ w2c_mats[i].T)[:, :3] # xyz in the ith cam coordinate
xyz_cam_i = xyz_cam_i[xyz_cam_i[:, 2] > 0] # filter out points that lie behind the cam
self.nears[id_] = np.percentile(xyz_cam_i[:, 2], 0.1)
self.fars[id_] = np.percentile(xyz_cam_i[:, 2], 99.9)
max_far = np.fromiter(self.fars.values(), np.float32).max()
scale_factor = max_far / 5 # so that the max far is scaled to 5
self.poses[..., 3] /= scale_factor
for k in self.nears:
self.nears[k] /= scale_factor
for k in self.fars:
self.fars[k] /= scale_factor
self.xyz_world /= scale_factor
self.poses_dict = {id_: self.poses[i] for i, id_ in enumerate(self.img_ids)}
# Step 5. split the img_ids (the number of images is verfied to match that in the paper)
self.img_ids_train = [id_ for i, id_ in enumerate(self.img_ids)
if self.files.loc[i, 'split'] == 'train']
self.img_ids_test = [id_ for i, id_ in enumerate(self.img_ids)
if self.files.loc[i, 'split'] == 'test']
self.N_images_train = len(self.img_ids_train)
self.N_images_test = len(self.img_ids_test)
if self.split == 'train': # create buffer of all rays and rgb data
if self.use_cache:
all_rays = np.load(os.path.join(self.root_dir,
f'cache/rays{self.img_downscale}.npy'))
self.all_rays = torch.from_numpy(all_rays)
all_rgbs = np.load(os.path.join(self.root_dir,
f'cache/rgbs{self.img_downscale}.npy'))
self.all_rgbs = torch.from_numpy(all_rgbs)
else:
self.all_rays = []
self.all_rgbs = []
for id_ in self.img_ids_train:
c2w = torch.FloatTensor(self.poses_dict[id_])
img = Image.open(os.path.join(self.root_dir, 'dense/images',
self.image_paths[id_])).convert('RGB')
img_w, img_h = img.size
if self.img_downscale > 1:
img_w = img_w // self.img_downscale
img_h = img_h // self.img_downscale
img = img.resize((img_w, img_h), Image.LANCZOS)
img = self.transform(img) # (3, h, w)
img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB
self.all_rgbs += [img]
directions = get_ray_directions(img_h, img_w, self.Ks[id_])
rays_o, rays_d = get_rays(directions, c2w)
rays_t = id_ * torch.ones(len(rays_o), 1)
self.all_rays += [torch.cat([rays_o, rays_d,
self.nears[id_] * torch.ones_like(rays_o[:, :1]),
self.fars[id_] * torch.ones_like(rays_o[:, :1]),
rays_t],
1)] # (h*w, 8)
self.all_rays = torch.cat(self.all_rays, 0) # ((N_images-1)*h*w, 8)
self.all_rgbs = torch.cat(self.all_rgbs, 0) # ((N_images-1)*h*w, 3)
elif self.split in ['val', 'test_train']: # use the first image as val image (also in train)
self.val_id = self.img_ids_train[0]
else: # for testing, create a parametric rendering path
# test poses and appearance index are defined in eval.py
pass
def define_transforms(self):
self.transform = T.ToTensor()
def __len__(self):
if self.split == 'train':
return len(self.all_rays)
if self.split == 'test_train':
return self.N_images_train
if self.split == 'val':
return self.val_num
return len(self.poses_test)
def __getitem__(self, idx):
if self.split == 'train': # use data in the buffers
sample = {'rays': self.all_rays[idx, :8],
'ts': self.all_rays[idx, 8].long(),
'rgbs': self.all_rgbs[idx]}
elif self.split in ['val', 'test_train']:
sample = {}
if self.split == 'val':
id_ = self.val_id
else:
id_ = self.img_ids_train[idx]
sample['c2w'] = c2w = torch.FloatTensor(self.poses_dict[id_])
img = Image.open(os.path.join(self.root_dir, 'dense/images',
self.image_paths[id_])).convert('RGB')
img_w, img_h = img.size
if self.img_downscale > 1:
img_w = img_w // self.img_downscale
img_h = img_h // self.img_downscale
img = img.resize((img_w, img_h), Image.LANCZOS)
img = self.transform(img) # (3, h, w)
img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB
sample['rgbs'] = img
directions = get_ray_directions(img_h, img_w, self.Ks[id_])
rays_o, rays_d = get_rays(directions, c2w)
rays = torch.cat([rays_o, rays_d,
self.nears[id_] * torch.ones_like(rays_o[:, :1]),
self.fars[id_] * torch.ones_like(rays_o[:, :1])],
1) # (h*w, 8)
sample['rays'] = rays
sample['ts'] = id_ * torch.ones(len(rays), dtype=torch.long)
sample['img_wh'] = torch.LongTensor([img_w, img_h])
else:
sample = {}
sample['c2w'] = c2w = torch.FloatTensor(self.poses_test[idx])
directions = get_ray_directions(self.test_img_h, self.test_img_w, self.test_K)
rays_o, rays_d = get_rays(directions, c2w)
near, far = 0, 5
rays = torch.cat([rays_o, rays_d,
near * torch.ones_like(rays_o[:, :1]),
far * torch.ones_like(rays_o[:, :1])],
1)
sample['rays'] = rays
sample['ts'] = self.test_appearance_idx * torch.ones(len(rays), dtype=torch.long)
sample['img_wh'] = torch.LongTensor([self.test_img_w, self.test_img_h])
return sample
if __name__ == '__main__':
data = PhototourismDataset('/home/zed/data/nerf/brandenburg_gate')
| true | true |
1c3660fdd92880dd4c39bcd9c439b34832b57eba | 11,979 | py | Python | Fitter.py | artiste-qb-net/Quantum_Edward | 89d3a7d40177065eaa34fabd4b4c255b8ef51881 | [
"MIT"
] | 51 | 2018-05-05T02:38:25.000Z | 2021-11-25T19:41:56.000Z | Fitter.py | artiste-qb-net/Quantum_Edward | 89d3a7d40177065eaa34fabd4b4c255b8ef51881 | [
"MIT"
] | 11 | 2018-05-09T03:35:06.000Z | 2018-07-15T10:38:11.000Z | Fitter.py | artiste-qb-net/Quantum_Edward | 89d3a7d40177065eaa34fabd4b4c255b8ef51881 | [
"MIT"
] | 12 | 2018-06-15T09:55:06.000Z | 2021-02-27T00:09:45.000Z | import numpy as np
import numpy.random as npr
import scipy.stats as ss
import utilities as ut
from TimeStep import *
from Plotter import *
class Fitter:
    """
    Read docstrings for Model class first.

    This class implements BBVI (Black Box Variational Inference, see ref
    below) for a Model object 'model', estimating the values of the hidden
    variables list1_angs that best fit the training data
    (y_nsam_nb, x_nsam_na).

    In BBVI, one maximizes the ELBO with respect to a parameter lambda.
    Here lambda = (list1_conc0, list1_conc1) -- the two Beta-distribution
    concentration lists -- and z = list1_z = list1_angs/dpi. The angles in
    list1_angs lie in the interval [0, dpi], so the entries of list1_z lie
    in [0, 1].

    References
    ----------
    R. Ranganath, S. Gerrish, D. M. Blei, "Black Box Variational
    Inference", https://arxiv.org/abs/1401.0118
    """

    def __init__(self, model, y_nsam_nb, x_nsam_na, nsamgrad,
                 nt, eta, t_step_meth):
        """
        Constructor.

        Parameters
        ----------
        model : Model
        y_nsam_nb : np.array
            Array of zeros and ones with shape=(nsam, nb) containing nsam
            samples of the y output.
        x_nsam_na : np.array
            Array of zeros and ones with shape=(nsam, na) containing nsam
            samples of the x input.
        nsamgrad : int
            Number of samples used when averaging the gradient of ELBO.
        nt : int
            Maximum number of time steps (aka iterations). The ELBO value
            changes (increases or stays the same) with each iteration.
        eta : float
            Positive scaling parameter (proportionality factor) for delta
            lambda. Passed to the TimeStep class.
        t_step_meth : str
            Label of the method used to calculate delta lambda. Passed to
            the TimeStep class.

        Returns
        -------
        None
        """
        self.mod = model
        self.y_nsam_nb = y_nsam_nb
        self.x_nsam_na = x_nsam_na
        self.nsamgrad = nsamgrad
        self.nt = nt
        self.eta = eta
        self.t_step_meth = t_step_meth

        assert self.mod.na == x_nsam_na.shape[1]
        assert self.mod.nb == y_nsam_nb.shape[1]
        assert self.y_nsam_nb.shape[0] == self.x_nsam_na.shape[0]

        # the following will be filled by do_fit()
        self.fin_t = None
        self.fin_list1_conc0 = None
        self.fin_list1_conc1 = None
        len1 = self.mod.len1
        # traces over time: lambda, delta lambda, and ELBO per iteration
        self.conc_nt_2_len1 = np.zeros((nt, 2, len1), dtype=float)
        self.delta_conc_nt_2_len1 = np.zeros((nt, 2, len1), dtype=float)
        self.elbo_nt_len1 = np.zeros((nt, len1), dtype=float)

    def get_delbo_and_grad_delbo(self, list1_z, list1_conc0, list1_conc1):
        """
        delbo = density of ELBO, grad = gradient. Private auxiliary
        function used by do_fit(). Inside do_fit(), ELBO is obtained from
        delbo by taking the expected value over z ~ q(z | lambda).

        Parameters
        ----------
        list1_z : list[np.array]
        list1_conc0 : list[np.array]
        list1_conc1 : list[np.array]

        Returns
        -------
        tuple[list[np.array], list[np.array], list[np.array]]
            (delbo, grad wrt conc0 of delbo, grad wrt conc1 of delbo),
            one array per entry of list1_z.
        """
        nsam = self.y_nsam_nb.shape[0]
        len1 = self.mod.len1

        # grad0, grad1 of log q(z | lambda=conc0, conc1):
        # ut.grad_log_beta_prob returns a (grad0, grad1) pair per entry.
        grad_pairs = [ut.grad_log_beta_prob(list1_z[k],
                                            list1_conc0[k],
                                            list1_conc1[k])
                      for k in range(len1)]
        list1_g0 = [pair[0] for pair in grad_pairs]
        list1_g1 = [pair[1] for pair in grad_pairs]

        # sum over samples of log p(y | x, z=angs/dpi)
        x_nsam = ut.bin_vec_to_dec(self.x_nsam_na, nsam=nsam)
        y_nsam = ut.bin_vec_to_dec(self.y_nsam_nb, nsam=nsam)
        list1_angs = [list1_z[k]*ut.dpi for k in range(len1)]
        # log_py is a scalar; the 1e-8 guards against log(0)
        log_py = np.sum(np.log(1e-8 + np.array(
            [self.mod.prob_y_given_x_and_angs_prior(y_nsam[sam],
                x_nsam[sam], list1_angs) for sam in range(nsam)]
        )))

        # log_px is a scalar too
        log_px = np.sum(np.log(1e-8 + np.array(
            [self.mod.prob_x(x_nsam[sam], list1_angs) for sam in range(nsam)]
        )))

        # log p(z), under the prior concentrations
        list1_log_pz = [ut.log_beta_prob(list1_z[k],
                                         self.mod.list1_conc0_prior[k],
                                         self.mod.list1_conc1_prior[k])
                        for k in range(len1)]

        # log q(z | lambda), under the current concentrations
        list1_log_qz = [ut.log_beta_prob(list1_z[k],
                                         list1_conc0[k],
                                         list1_conc1[k])
                        for k in range(len1)]

        # log p(y, x, z) - log q(z | lambda)
        list1_delbo = [log_py + log_px + list1_log_pz[k] - list1_log_qz[k]
                       for k in range(len1)]
        # score-function (REINFORCE) estimator: grad log q times delbo
        list1_grad0_delbo = [np.multiply(list1_g0[k], list1_delbo[k])
                             for k in range(len1)]
        list1_grad1_delbo = [np.multiply(list1_g1[k], list1_delbo[k])
                             for k in range(len1)]
        return list1_delbo, list1_grad0_delbo, list1_grad1_delbo

    def do_fit(self):
        """
        Attempt to maximize the ELBO over lambda. Does at most nt
        iterations (lambda changes, time steps), but may stop earlier when
        a convergence condition is met. The final iteration time is stored
        in self.fin_t.

        Stores final values for time and lambda (lambda =
        concentrations 0, 1):

        self.fin_t
        self.fin_list1_conc0
        self.fin_list1_conc1

        Also stores traces (time series) for lambda, delta lambda between
        consecutive steps, and the ELBO value:

        self.conc_nt_2_len1
        self.delta_conc_nt_2_len1
        self.elbo_nt_len1

        Returns
        -------
        None
        """
        len1 = self.mod.len1

        # starting values: all concentrations = 1 (uniform Beta)
        shapes = self.mod.shapes1
        list1_conc0 = ut.new_uniform_array_list(1., shapes)
        list1_conc1 = ut.new_uniform_array_list(1., shapes)

        step = TimeStep(self.t_step_meth, self.eta, self.mod.len1)
        for t in range(self.nt):
            list1_elbo = ut.new_uniform_array_list(0., shapes)
            list1_grad0_elbo = ut.new_uniform_array_list(0., shapes)
            list1_grad1_elbo = ut.new_uniform_array_list(0., shapes)
            # Monte Carlo average of delbo and its gradients over
            # nsamgrad draws of z ~ Beta(conc0, conc1)
            for s in range(self.nsamgrad):
                list1_z = [ss.beta.rvs(list1_conc0[k], list1_conc1[k])
                           for k in range(len1)]
                x0, x1, x2 =\
                    self.get_delbo_and_grad_delbo(list1_z,
                                                  list1_conc0,
                                                  list1_conc1)
                for k in range(len1):
                    list1_elbo[k] += x0[k]/self.nsamgrad
                    list1_grad0_elbo[k] += x1[k]/self.nsamgrad
                    list1_grad1_elbo[k] += x2[k]/self.nsamgrad
            g0 = list1_grad0_elbo
            g1 = list1_grad1_elbo
            for k in range(len1):
                delta_conc = step.get_delta_conc(g0[k], g1[k], t, k)
                # Clip concentrations to (1e-5, 15) to keep the Beta
                # distributions well-defined; record the *true* delta
                # after clipping.
                old_conc0 = np.copy(list1_conc0[k])
                list1_conc0[k] += delta_conc[0]
                list1_conc0[k] = np.clip(list1_conc0[k], 1e-5, 15)
                true_delta_conc0 = list1_conc0[k] - old_conc0

                old_conc1 = np.copy(list1_conc1[k])
                list1_conc1[k] += delta_conc[1]
                list1_conc1[k] = np.clip(list1_conc1[k], 1e-5, 15)
                true_delta_conc1 = list1_conc1[k] - old_conc1

                self.conc_nt_2_len1[t, 0, k] = np.sum(list1_conc0[k])
                self.conc_nt_2_len1[t, 1, k] = np.sum(list1_conc1[k])
                self.delta_conc_nt_2_len1[t, 0, k] = np.sum(true_delta_conc0)
                self.delta_conc_nt_2_len1[t, 1, k] = np.sum(true_delta_conc1)
            self.elbo_nt_len1[t, :] = \
                ut.av_each_elem_in_array_list(list1_elbo)
            # NOTE(review): this convergence test uses signed deltas, not
            # magnitudes, so a large negative step also counts as
            # "converged" -- confirm intent (np.abs may be meant here).
            if np.all(self.delta_conc_nt_2_len1[t, :, :] < 0.001):
                break
        self.fin_t = t
        self.fin_list1_conc0 = list1_conc0
        self.fin_list1_conc1 = list1_conc1

    def print_fit_values_at_fin_t(self):
        """
        Print to screen a summary of values at the final time fin_t of a
        do_fit() run.

        Recall z = ang/dpi with ang in [0, dpi], so z in [0, 1]. For each
        z this prints its estimate (Beta mean), the std of that estimate,
        and the fractional error (z_estimate - z_prior)/z_prior, where
        z_prior = angs_prior/dpi. angs_prior are the prior angles assumed
        by the model; for training data generated by Model.gen_toy_data(),
        these are the true values used to generate the synthetic data.

        Returns
        -------
        None
        """
        len1 = self.mod.len1
        list1_conc0 = self.fin_list1_conc0
        list1_conc1 = self.fin_list1_conc1
        list1_zpred = [ss.beta.mean(list1_conc0[k], list1_conc1[k])
                       for k in range(len1)]
        list1_std_zpred = [ss.beta.std(list1_conc0[k], list1_conc1[k])
                           for k in range(len1)]
        print('fin_t=', self.fin_t, "\n")
        for k in range(len1):
            print("list1_z[" + str(k) + "]:")
            print("estimate:\n" + str(list1_zpred[k]))
            print("st.dev.:\n" + str(list1_std_zpred[k]))
            zprior = self.mod.list1_angs_prior[k]/ut.dpi
            print("frac. error = (est-prior)/prior:\n" +
                  str((list1_zpred[k] - zprior)/zprior) + "\n")

    def plot_fit_traces(self):
        """
        Call Plotter to plot the traces (time series) collected during a
        do_fit() run: lambda, delta lambda and ELBO.

        Returns
        -------
        None
        """
        Plotter.plot_conc_traces(self.fin_t,
                                 self.conc_nt_2_len1,
                                 self.delta_conc_nt_2_len1)
        Plotter.plot_elbo_traces(self.fin_t,
                                 self.elbo_nt_len1)
if __name__ == "__main__":
    from NbTrolsModel import *
    from NoNbTrolsModel import *

    def main():
        # Smoke test with ridiculously small numbers,
        # just to make sure everything runs without crashing.
        npr.seed(1234)
        na = 2  # number of alpha qubits
        nb = 2  # number of beta qubits
        mod = NbTrolsModel(nb, na)
        # mod = NoNbTrolsModel(nb, na)  # alternative model
        nsam = 20  # number of samples
        y_nsam_nb, x_nsam_na = mod.gen_toy_data(nsam)
        nsamgrad = 10  # number of samples for gradient estimate
        nt = 20  # number of iterations
        # Alternative time-step methods (the 'naive' ones are very
        # sensitive to eta):
        # t_step_meth, eta = 'naive', .0003
        # t_step_meth, eta = 'naive_t', .0003
        # t_step_meth, eta = 'mag1_grad', .2
        t_step_meth, eta = 'ada_grad', .1
        ff = Fitter(mod, y_nsam_nb, x_nsam_na,
                    nsamgrad, nt, eta, t_step_meth)
        ff.do_fit()
        ff.print_fit_values_at_fin_t()
        ff.plot_fit_traces()
    main()
| 35.440828 | 77 | 0.552634 | import numpy as np
import numpy.random as npr
import scipy.stats as ss
import utilities as ut
from TimeStep import *
from Plotter import *
class Fitter:
def __init__(self, model, y_nsam_nb, x_nsam_na, nsamgrad,
nt, eta, t_step_meth):
self.mod = model
self.y_nsam_nb = y_nsam_nb
self.x_nsam_na = x_nsam_na
self.nsamgrad = nsamgrad
self.nt = nt
self.eta = eta
self.t_step_meth = t_step_meth
assert self.mod.na == x_nsam_na.shape[1]
assert self.mod.nb == y_nsam_nb.shape[1]
assert self.y_nsam_nb.shape[0] == self.x_nsam_na.shape[0]
self.fin_t = None
self.fin_list1_conc0 = None
self.fin_list1_conc1 = None
len1 = self.mod.len1
self.conc_nt_2_len1 = np.zeros((nt, 2, len1), dtype=float)
self.delta_conc_nt_2_len1 = np.zeros((nt, 2, len1), dtype=float)
self.elbo_nt_len1 = np.zeros((nt, len1), dtype=float)
def get_delbo_and_grad_delbo(self, list1_z, list1_conc0, list1_conc1):
nsam = self.y_nsam_nb.shape[0]
len1 = self.mod.len1
xx = [ut.grad_log_beta_prob(list1_z[k],
list1_conc0[k],
list1_conc1[k])
for k in range(len1)]
# list1_g0, list1_g1 = zip(xx)
def my_zip(a):
return [[a[j][k] for j in range(len(a))]
for k in range(len(a[0]))]
# print('---------xx')
# for j in range(2):
# print(j, xx[j])
# print('---------zip(zz)')
# for j in range(2):
# tempo = list(zip(xx))
# print(j, tempo[j])
# print('---------my_zip(zz)')
# for j in range(2):
# tempo = my_zip(xx)
# print(j, tempo[j])
list1_g0, list1_g1 = my_zip(xx)
# sum_sam (log p(y| x, z = angs/dpi))
x_nsam = ut.bin_vec_to_dec(self.x_nsam_na, nsam=nsam)
y_nsam = ut.bin_vec_to_dec(self.y_nsam_nb, nsam=nsam)
list1_angs = [list1_z[k]*ut.dpi for k in range(len1)]
# log_py is a constant with shape 1
log_py = np.sum(np.log(1e-8 + np.array(
[self.mod.prob_y_given_x_and_angs_prior(y_nsam[sam],
x_nsam[sam], list1_angs) for sam in range(nsam)]
)))
# log_px is a constant with shape 1
log_px = np.sum(np.log(1e-8 + np.array(
[self.mod.prob_x(x_nsam[sam], list1_angs) for sam in range(nsam)]
)))
# log p(z)
list1_log_pz = [ut.log_beta_prob(list1_z[k],
self.mod.list1_conc0_prior[k],
self.mod.list1_conc1_prior[k])
for k in range(len1)]
# log q(z| lambda)
list1_log_qz = [ut.log_beta_prob(list1_z[k],
list1_conc0[k],
list1_conc1[k])
for k in range(len1)]
# log p(y, x, z) - log q(z | lambda)
list1_delbo = [log_py + log_px + list1_log_pz[k] - list1_log_qz[k]
for k in range(len1)]
# print("//", len1, "log_py=", log_py, list1_delbo)
list1_grad0_delbo = [np.multiply(list1_g0[k], list1_delbo[k])
for k in range(len1)]
list1_grad1_delbo = [np.multiply(list1_g1[k], list1_delbo[k])
for k in range(len1)]
return list1_delbo, list1_grad0_delbo, list1_grad1_delbo
def do_fit(self):
len1 = self.mod.len1
# starting values
shapes = self.mod.shapes1
list1_conc0 = ut.new_uniform_array_list(1., shapes)
list1_conc1 = ut.new_uniform_array_list(1., shapes)
step = TimeStep(self.t_step_meth, self.eta, self.mod.len1)
for t in range(self.nt):
list1_elbo = ut.new_uniform_array_list(0., shapes)
list1_grad0_elbo = ut.new_uniform_array_list(0., shapes)
list1_grad1_elbo = ut.new_uniform_array_list(0., shapes)
for s in range(self.nsamgrad):
list1_z = [ss.beta.rvs(list1_conc0[k], list1_conc1[k])
for k in range(len1)]
x0, x1, x2 =\
self.get_delbo_and_grad_delbo(list1_z,
list1_conc0,
list1_conc1)
for k in range(len1):
list1_elbo[k] += x0[k]/self.nsamgrad
list1_grad0_elbo[k] += x1[k]/self.nsamgrad
list1_grad1_elbo[k] += x2[k]/self.nsamgrad
g0 = list1_grad0_elbo
g1 = list1_grad1_elbo
for k in range(len1):
delta_conc = step.get_delta_conc(g0[k], g1[k], t, k)
old_conc0 = np.copy(list1_conc0[k])
list1_conc0[k] += delta_conc[0]
list1_conc0[k] = np.clip(list1_conc0[k], 1e-5, 15)
true_delta_conc0 = list1_conc0[k] - old_conc0
old_conc1 = np.copy(list1_conc1[k])
list1_conc1[k] += delta_conc[1]
list1_conc1[k] = np.clip(list1_conc1[k], 1e-5, 15)
true_delta_conc1 = list1_conc1[k] - old_conc1
self.conc_nt_2_len1[t, 0, k] = np.sum(list1_conc0[k])
self.conc_nt_2_len1[t, 1, k] = np.sum(list1_conc1[k])
self.delta_conc_nt_2_len1[t, 0, k] = np.sum(true_delta_conc0)
self.delta_conc_nt_2_len1[t, 1, k] = np.sum(true_delta_conc1)
self.elbo_nt_len1[t, :] = \
ut.av_each_elem_in_array_list(list1_elbo)
if np.all(self.delta_conc_nt_2_len1[t, :, :] < 0.001):
break
self.fin_t = t
self.fin_list1_conc0 = list1_conc0
self.fin_list1_conc1 = list1_conc1
def print_fit_values_at_fin_t(self):
len1 = self.mod.len1
list1_conc0 = self.fin_list1_conc0
list1_conc1 = self.fin_list1_conc1
list1_zpred = [ss.beta.mean(list1_conc0[k], list1_conc1[k])
for k in range(len1)]
list1_std_zpred = [ss.beta.std(list1_conc0[k], list1_conc1[k])
for k in range(len1)]
print('fin_t=', self.fin_t, "\n")
for k in range(len1):
print("list1_z[" + str(k) + "]:")
print("estimate:\n" + str(list1_zpred[k]))
print("st.dev.:\n" + str(list1_std_zpred[k]))
zprior = self.mod.list1_angs_prior[k]/ut.dpi
print("frac. error = (est-prior)/prior:\n" +
str((list1_zpred[k] - zprior)/zprior) + "\n")
def plot_fit_traces(self):
Plotter.plot_conc_traces(self.fin_t,
self.conc_nt_2_len1,
self.delta_conc_nt_2_len1)
Plotter.plot_elbo_traces(self.fin_t,
self.elbo_nt_len1)
if __name__ == "__main__":
    from NbTrolsModel import *
    from NoNbTrolsModel import *

    def main():
        """Smoke test: fit a tiny model so the whole pipeline runs end to end."""
        # Ridiculously small numbers,
        # just to make sure it runs without crashing
        npr.seed(1234)
        na = 2  # number of alpha qubits
        nb = 2  # number of beta qubits
        mod = NbTrolsModel(nb, na)
        # mod = NoNbTrolsModel(nb, na)
        nsam = 20  # number of samples
        y_nsam_nb, x_nsam_na = mod.gen_toy_data(nsam)
        nsamgrad = 10  # number of samples for grad estimate
        nt = 20  # number of iterations
        # t_step_meth, eta = 'naive', .0003
        # bug fix: this tuple used to be bound to `p_meth`, leaving
        # `t_step_meth` below undefined (NameError at runtime).
        t_step_meth, eta = 'ada_grad', .1
        ff = Fitter(mod, y_nsam_nb, x_nsam_na,
                    nsamgrad, nt, eta, t_step_meth)
        ff.do_fit()
        ff.print_fit_values_at_fin_t()
        ff.plot_fit_traces()
    main()
| true | true |
1c36613f37c713ea81efeb03508af68023b16c80 | 14,481 | py | Python | lcd_lms.py | fvzeppelin/lcd_lms | db4984e50f5cf415fb221e5c44bc5a775e1e64e0 | [
"MIT"
] | null | null | null | lcd_lms.py | fvzeppelin/lcd_lms | db4984e50f5cf415fb221e5c44bc5a775e1e64e0 | [
"MIT"
] | null | null | null | lcd_lms.py | fvzeppelin/lcd_lms | db4984e50f5cf415fb221e5c44bc5a775e1e64e0 | [
"MIT"
] | null | null | null | #! /usr/local/bin/python3
import re
import time
import sys
import logging
import argparse
import configparser
from socket import gethostname
from os.path import basename
import lcd
import lms
MAIN_LOOP_DELAY = 0.3
def config_init():
    """Parse command line (and optional config file), then set up logging."""
    global verbose
    verbose = False
    parse_arguments()
    # debug-level logging only when -v/--verbose (or the config file) asks for it
    level = logging.DEBUG if verbose else logging.WARNING
    logging.basicConfig(format='%(levelname)s: %(asctime)s %(message)s', level=level)
    logging.debug('lmshost:lmsport=' + LMS + ':' + LMSPORT)
    logging.debug('lcdhost:lcdport=' + LCDD + ':' + LCDPORT)
    logging.debug('player=' + PLAYER)
    logging.debug('verbose=' + str(verbose))
def parse_config(configfile):
    """Override connection/key settings from an INI file's [LCD_LMS] section.

    Any option missing from the file leaves the corresponding global
    (set earlier by parse_arguments) untouched.  'lms' and 'lcd' values
    are host:port pairs.
    """
    global verbose
    global LCDD
    global LCDPORT
    global LMS
    global LMSPORT
    global PLAYER
    global STOP_KEY
    global PAUSE_KEY
    config = configparser.ConfigParser()
    config.read(configfile)
    if (config.has_section('LCD_LMS')):
        if (config.has_option('LCD_LMS','lms')):
            LMS = config['LCD_LMS']['lms'].split(':')[0]
            LMSPORT = config['LCD_LMS']['lms'].split(':')[1]
        if (config.has_option('LCD_LMS','lcd')):
            LCDD = config['LCD_LMS']['lcd'].split(':')[0]
            LCDPORT = config['LCD_LMS']['lcd'].split(':')[1]
        if (config.has_option('LCD_LMS','player')):
            PLAYER = config['LCD_LMS']['player']
        # bug fix: the 'verbose' option used to be checked and read twice
        if (config.has_option('LCD_LMS','verbose')):
            verbose = config['LCD_LMS'].getboolean('verbose')
        if (config.has_option('LCD_LMS','stop key')):
            STOP_KEY = config['LCD_LMS']['stop key']
        if (config.has_option('LCD_LMS','pause key')):
            PAUSE_KEY = config['LCD_LMS']['pause key']
def parse_arguments():
    """Populate the connection/key globals from the command line.

    Defaults: player = this host's name, LCDd = localhost:13666,
    LMS = localhost:9090, stop/pause keys Enter/Down.  A config file
    given with -c/--config is parsed last and overrides these values.
    """
    global verbose
    global LCDD
    global LCDPORT
    global LMS
    global LMSPORT
    global PLAYER
    global STOP_KEY
    global PAUSE_KEY
    STOP_KEY = 'Enter'
    PAUSE_KEY = 'Down'
    parser = argparse.ArgumentParser(description='Glue the Logitech Media Server CLI to LCDd')
    parser.add_argument('-p', '--player', default=gethostname(), help='the client\'s player name')
    # NOTE(review): the dest names contain ':' (not valid identifiers); they
    # are only ever read back through vars(args), where any string key works.
    parser.add_argument('-d', '--lcdproc', default='localhost:13666', dest='lcdhost:lcdport', help='specify the LCDproc server')
    parser.add_argument('-l', '--lms', default='localhost:9090', dest='lmshost:lmsport', help='specify the LMS server')
    parser.add_argument('-c', '--config', help='specify a config file')
    parser.add_argument('-v', '--verbose', action='store_true', help='output debugging information')
    args = vars(parser.parse_args())
    if (args.get('verbose') == True):
        verbose = True
    LCDD = args.get('lcdhost:lcdport').split(':')[0]
    LCDPORT = args.get('lcdhost:lcdport').split(':')[1]
    LMS = args.get('lmshost:lmsport').split(':')[0]
    LMSPORT = args.get('lmshost:lmsport').split(':')[1]
    PLAYER = args.get('player')
    if (args.get('config') != None):
        parse_config(args.get('config'))
def centre(w, t):
    """Centre string *t* in a field of *w* characters (w may be a str).

    Strings longer than the field are returned unchanged; extra padding
    goes on the right when the split is uneven.
    """
    field = int(w)
    if len(t) > field:
        return t
    pad = field - len(t)
    left = pad // 2
    return ' ' * left + t + ' ' * (pad - left)
def trim(s):
    """Strip surrounding whitespace from *s* and drop all double quotes."""
    cleaned = re.sub('^\s+|\s+$', '', s)
    return cleaned.replace('"', '')
def set_title(ltitle):
    """Store the trimmed track title in the module-global `title`."""
    global title
    title = trim(ltitle)
def set_album(lalbum):
    """Store the trimmed album name in the module-global `album`."""
    global album
    album = trim(lalbum)
def multiline(s):
    """Word-wrap *s* into consecutive `width`-character centred rows.

    The rows are simply concatenated; the LCD vertical scroller renders
    each `width`-character slice as one display line.  A word longer than
    the display width gets a row of its own (uncut and unpadded).
    """
    t = ''        # accumulated, already-centred rows
    l = ''        # row currently being filled
    length = 0    # printable length of l
    for w in s.split():
        n = len(w)
        if (n + length < int(width)):
            # word still fits on the current row
            if (length > 0):
                l = l + ' ' + w
                length += n + 1
            else:
                l = w
                length = n
        else:
            # flush the current row and start a new one with this word
            t = t + centre(width, l)
            l = w
            length = n
    return (t + centre(width, l))
def two_lines(l1, l2):
    """Try to lay out two strings over the three track-info rows.

    Used when only two of title/artist/album are non-empty.  Returns 1 if
    a layout was applied, 0 if the caller should fall back to the default
    three-row layout.  Over-wide strings are word-wrapped (multiline) into
    a vertical scroller spanning two or three rows.
    """
    # blank the album row; the layouts below reuse the title/artist widgets
    myLcd.send_receive('widget_set ' + PLAYER + ' album 1 3 ' + width + ' 3 h 3 \"\"')
    if (len(l1) >= int(width) and len(l2) >= int(width)):
        # both too long: merge them into one three-row vertical scroller
        s = multiline(l1 + ' ' + l2)
        myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 2 h 3 \"\"')
        myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 3 v 8 \"' + s + '\"')
        return 1
    if (len(l1) >= int(width) and len(l2) == 0):
        # only l1, and it is long: give it all three rows
        t = multiline(l1)
        myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 2 h 3 \"\"')
        myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 3 v 8 \"' + t + '\"')
        return 1
    if (len(l1) >= int(width)):
        # long l1 scrolling over rows 1-2, short l2 centred on row 3
        t = multiline(l1)
        myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 2 v 8 \"' + t + '\"')
        a = centre(width, l2)
        myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 3 ' + width + ' 3 h 3 \"' + a + '\"')
        return 1
    if (len(l2) >= int(width)):
        # short l1 on row 1, long l2 scrolling over rows 2-3
        t = centre(width, l1)
        myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 1 h 3 \"' + t + '\"')
        a = multiline(l2)
        myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 3 v 8 \"' + a + '\"')
        return 1
    return 0
def set_artist(artist):
    """Render the title/artist/album rows; called last of the three setters."""
    artist = trim(artist)
    # when one of the three fields is empty, try a two-field layout first
    if (title == '' and two_lines(album, artist)):
        return
    if (artist == '' and two_lines(title, album)):
        return
    if (album == '' and two_lines(title, artist)):
        return
    # default layout: one centred, horizontally scrolling row per field
    t = centre(width, title)
    a = centre(width, artist)
    l = centre(width, album)
    myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 1 h 3 \"' + t + '\"')
    myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 2 h 3 \"' + a + '\"')
    myLcd.send_receive('widget_set ' + PLAYER + ' album 1 3 ' + width + ' 3 h 3 \"' + l + '\"')
def set_status(status):
    """Show *status* centred in the 10-character status widget (row 4)."""
    myLcd.send_receive(f'widget_set {PLAYER} status 6 4 "{centre(10, status)}"')
def set_progress(current_track_id, total_tracks):
    """Show "track/total" (max 6 chars) in the progress widget; blank when empty."""
    label = ''
    if total_tracks > 0:
        label = ('%d/%d' % (current_track_id + 1, total_tracks))[:6]
    myLcd.send_receive(f'widget_set {PLAYER} progress 15 4 "{label}"')
def set_elapsed_time():
    """Display time remaining (or elapsed, when the duration is unknown).

    current_duration is 0 for radio streams, so the subtraction then just
    yields the negated elapsed time; abs() makes it count upwards.
    """
    total = abs(current_duration - elapsed_time)
    hours = int(total // 3600)
    minutes = int(total % 3600 // 60)
    seconds = int(total % 60)
    if hours > 0:
        stamp = str(hours) + ':' + str(minutes).zfill(2) + ':' + str(seconds).zfill(2)
    else:
        stamp = str(minutes) + ':' + str(seconds).zfill(2)
    set_status(stamp)
def set_time():
    """Re-anchor start_time to the elapsed time reported by LMS, then redraw."""
    global start_time
    start_time = time.time() - int(float(elapsed_time))
    set_elapsed_time()
def set_volume(vol):
    """Write the volume (two digits, 100 shown as 99) to the volume widget."""
    shown = '99' if vol == '100' else vol
    myLcd.send_receive(f'widget_set {PLAYER} volume 1 4 {shown.zfill(2)}')
def set_playing(lplaying):
    """Record the playing state and adjust screen priority/backlight.

    On pause/stop the elapsed time is folded into current_duration so the
    remaining-time countdown picks up where it left off on the next play.
    """
    global elapsed_time
    global start_time
    global current_duration
    global playing
    playing = lplaying
    if (playing):
        start_time = time.time()
        myLcd.send_receive('screen_set ' + PLAYER + ' priority foreground backlight on')
        # refresh the counters shown in the progress widget
        myLms.send_player('playlist tracks ?', player_id)
        myLms.send_player('playlist index ?', player_id)
    else:
        myLcd.send_receive('screen_set ' + PLAYER + ' priority background backlight off')
        if (current_duration > 0):
            current_duration -= elapsed_time
        elapsed_time = 0
def set_stopped():
    """Clear all track info and show the stopped state."""
    set_title('')
    set_album('')
    set_artist('')
    set_status('stop')
    set_playing(False)
def playlist(s):
    """Handle an LMS "playlist ..." notification (already split into words).

    s[0] is the sub-command; numeric arguments arrive as strings.
    NOTE(review): parsed track counts/indices are only displayed, never
    stored back into the total_tracks/current_track_id globals, which keep
    their initial values -- confirm this is intended.
    """
    cmd = s[0]
    if (cmd == 'clear'):
        set_stopped()
        set_progress(-1, 0)
    elif (cmd == 'stop'):
        set_stopped()
    elif (cmd == 'pause'):
        # 'pause 0' means resume
        if (s[-1] == '0'):
            set_playing(True)
        else:
            set_playing(False)
    elif (cmd == 'tracks'):
        # reply to 'playlist tracks ?': playlist length
        set_progress(current_track_id, int(s[1]))
    elif (cmd == 'index'):
        # reply to 'playlist index ?': current track index
        set_progress(int(s[1]), total_tracks)
    elif (cmd == 'loadtracks'):
        myLms.send_player('playlist tracks ?', player_id)
    elif (cmd == 'addtracks'):
        myLms.send_player('playlist tracks ?', player_id)
    elif (cmd == 'load_done'):
        myLms.send_player('playlist tracks ?', player_id)
    elif (cmd == 'delete'):
        myLms.send_player('playlist tracks ?', player_id)
        myLms.send_player('playlist index ?', player_id)
    elif (cmd == 'newsong'):
        try:
            # for normal tracks the last word is the playlist index;
            # radio streams omit it, which lands us in the except branch
            id = int(s[-1])
            if (playing and (id == current_track_id)):
                return
            myLms.send_player('duration ?', player_id)
            myLms.send_player('album ?', player_id)
            set_progress(id, total_tracks)
        except Exception as e:
            set_album('')
        set_playing(True)
        myLms.send_player('title ?', player_id)
        myLms.send_player('artist ?', player_id)
def mixer(cmd, vol):
    """Handle an LMS "mixer" notification; only the volume command matters."""
    if cmd != 'volume':
        return
    if vol[:1] in ('-', '+'):
        # relative change: ask the server for the resulting absolute volume
        myLms.send_player('mixer volume ?', player_id)
    else:
        set_volume(vol)
def mode(cmd):
    """React to an LMS "mode" notification ('stop' | 'pause' | 'play')."""
    if cmd in ('stop', 'pause'):
        set_playing(False)
        set_status(cmd)
    elif cmd == 'play':
        set_playing(True)
        set_status(cmd)
        myLms.send_player('playlist tracks ?', player_id)
def lms_response(response):
    """Dispatch one line of LMS CLI output to the matching handler.

    Lines addressed to our player arrive prefixed with the player id;
    that prefix is stripped before dispatching on the first word.
    """
    global current_duration
    global elapsed_time
    # bug fix: ignore blank lines -- the caller splits the stream on '\n',
    # so a trailing newline used to yield '' and an IndexError on s[0]
    if (response.strip() == ''):
        return
    # bug fix: the player id was interpolated into the pattern verbatim;
    # escape it so regex metacharacters (e.g. '.' in IP-based ids) stay literal
    m = re.match(re.escape(player_id) + ' (.+)', response)
    if (m != None):
        response = m.group(1)
    s = response.split()
    if (s[0] == 'playlist'):
        del s[0]
        playlist(s)
    elif (s[0] == 'mixer'):
        del s[0]
        mixer(s[0], s[1])
    elif (s[0] == 'mode'):
        mode(s[1])
    elif (s[0] == 'time'):
        # elapsed seconds within the current track
        elapsed_time = int(float(s[1]))
        set_time()
    elif (s[0] == 'pause'):
        set_playing(False)
    elif (s[0] == 'play'):
        set_playing(True)
    elif (s[0] == 'artist'):
        del s[0]
        set_artist(' '.join(s))
    elif (s[0] == 'album'):
        del s[0]
        set_album(' '.join(s))
    elif (s[0] == 'title'):
        del s[0]
        set_title(' '.join(s))
    elif (s[0] == 'duration'):
        current_duration = int(float(s[1]))
        set_elapsed_time()
def set_clock_widget(w, l, s):
    """Centre *s* and write it to widget *w* on line *l* of the CLOCK screen."""
    myLcd.send_receive(f'widget_set CLOCK {w} 1 {l} "{centre(width, s)}"')
def lms_init():
    """Connect to LMS, resolve PLAYER to its id and subscribe to events.

    Exits the process if the configured player name is not known to the
    server.  After subscribing, queries the current state once so the
    display starts out populated.
    """
    global myLms
    global player_id
    myLms = lms.Lms(LMS, LMSPORT)
    player_id = ''
    # scan the server's player list for our configured player name
    pcount = int(myLms.send_receive('player count'))
    for i in range(pcount):
        p = myLms.send_receive('player name ' + str(i))
        if (p == PLAYER):
            player_id = myLms.send_receive('player id ' + str(i))
            break
    if (player_id == ''):
        sys.exit('unable to find player ' + PLAYER)
    logging.info('player_id: ' + player_id)
    sub = "subscribe playlist,mixer,time,mode,play,pause,title,album,artist"
    if (listen):
        sub = "listen 1"
    myLms.send_receive(sub, True)
    # prime the display with the current player state
    myLms.send_player('mixer volume ?', player_id)
    myLms.send_player('mode ?', player_id)
    myLms.send_player('time ?', player_id)
    myLms.send_player('duration ?', player_id)
    myLms.send_player('playlist index ?', player_id)
    myLms.send_player('title ?', player_id)
    myLms.send_player('album ?', player_id)
    myLms.send_player('artist ?', player_id)
def lcd_init():
    """Connect to LCDd, read the display geometry and create all widgets.

    Creates one 'playback' screen (title/album/artist scrollers plus
    volume/status/progress strings) and one low-priority CLOCK screen.
    """
    global myLcd
    global width   # display width in characters (kept as a string)
    global lines   # display height in characters (kept as a string)
    myLcd = lcd.Lcd(LCDD, LCDPORT)
    lcdresponse = myLcd.send_receive('hello')
    # NOTE(review): slicing group()[4:] assumes exactly one space after
    # 'wid'/'hgt' in LCDd's hello response -- fragile; confirm against LCDd
    width = re.search('wid\s+(\d+)', lcdresponse).group()[4:]
    lines = re.search('hgt\s+(\d+)', lcdresponse).group()[4:]
    myLcd.send_receive('client_set name ' + basename(sys.argv[0]))
    myLcd.send_receive('screen_add ' + PLAYER)
    myLcd.send_receive('screen_set ' + PLAYER + ' priority foreground name playback heartbeat off')
    myLcd.send_receive('widget_add ' + PLAYER + ' title scroller')
    myLcd.send_receive('widget_add ' + PLAYER + ' album scroller')
    myLcd.send_receive('widget_add ' + PLAYER + ' artist scroller')
    myLcd.send_receive('widget_add ' + PLAYER + ' volume string')
    myLcd.send_receive('widget_add ' + PLAYER + ' status string')
    myLcd.send_receive('widget_add ' + PLAYER + ' progress string')
    # keys we want LCDd to forward to us
    myLcd.send_receive('client_add_key ' + STOP_KEY)
    myLcd.send_receive('client_add_key ' + PAUSE_KEY)
    myLcd.send_receive('screen_add CLOCK')
    myLcd.send_receive('screen_set CLOCK priority info heartbeat off backlight off duration 1000')
    myLcd.send_receive('widget_add CLOCK time string')
    myLcd.send_receive('widget_add CLOCK day string')
    myLcd.send_receive('widget_add CLOCK date string')
# ---- module-level playback state shared with the handler functions ----
start_time = time.time()   # wall-clock moment the current track started
total_tracks = 0           # playlist length (0 = empty)
current_track_id = -1      # zero-based index of the current track
elapsed_time = 0           # seconds played of the current track
current_duration = 0       # track length in seconds (0 = unknown/stream)
title = ''
artist = ''
album = ''
playing = False
listen = False             # True: use LMS "listen 1" instead of "subscribe"
config_init()
lms_init()
lcd_init()
# Main loop: pump LMS notifications and LCDd key events, update the
# elapsed-time display and the clock screen, then sleep out the period.
while True:
    loop_start = time.time_ns()
    response = myLms.check_queue()
    if (response != None):
        response = response.split("\n")
        for r in response:
            lms_response(r)
    response = myLcd.check_queue()
    if (response != None):
        if (response == 'key ' + STOP_KEY + '\n'):
            # STOP key: skip to next track while playing, clear playlist when idle
            if (playing):
                myLms.send_player('playlist index +1', player_id)
            else:
                myLms.send_player('playlist clear', player_id)
        elif (response == 'key ' + PAUSE_KEY + '\n'):
            # PAUSE key toggles: "pause 1" pauses, "pause 0" resumes
            p = 0
            if (playing):
                p = 1
            # bug fix: p is an int, so 'pause ' + p raised a TypeError
            myLms.send_player('pause ' + str(p), player_id)
    if (playing):
        elapsed_time = time.time() - start_time
        set_elapsed_time()
    set_clock_widget('day', 1, time.strftime('%A', time.localtime()))
    set_clock_widget('time', 2, time.strftime('%R', time.localtime()))
    set_clock_widget('date', 3, time.strftime('%d', time.localtime()) + ' ' + time.strftime('%b', time.localtime()) + ' ' + time.strftime('%Y', time.localtime()))
    # sleep out the rest of the MAIN_LOOP_DELAY period
    # (loop_start - now is negative, so delta = period - elapsed)
    delta = float(int(loop_start - time.time_ns())) / 1000000000.0 + MAIN_LOOP_DELAY
    logging.debug('main loop remaining time (delta in s): ' + str(delta))
    if (delta > 0):
        time.sleep(delta)
| 34.560859 | 162 | 0.575444 |
import re
import time
import sys
import logging
import argparse
import configparser
from socket import gethostname
from os.path import basename
import lcd
import lms
MAIN_LOOP_DELAY = 0.3
def config_init():
global verbose
verbose = False
parse_arguments()
if (verbose == True):
logging.basicConfig(format='%(levelname)s: %(asctime)s %(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(levelname)s: %(asctime)s %(message)s', level=logging.WARNING)
logging.debug('lmshost:lmsport=' + LMS + ':' + LMSPORT)
logging.debug('lcdhost:lcdport=' + LCDD + ':' + LCDPORT)
logging.debug('player=' + PLAYER)
logging.debug('verbose=' + str(verbose))
def parse_config(configfile):
    """Override connection/key settings from an INI file's [LCD_LMS] section.

    Any option missing from the file leaves the corresponding global
    (set earlier by parse_arguments) untouched.  'lms' and 'lcd' values
    are host:port pairs.
    """
    global verbose
    global LCDD
    global LCDPORT
    global LMS
    global LMSPORT
    global PLAYER
    global STOP_KEY
    global PAUSE_KEY
    config = configparser.ConfigParser()
    config.read(configfile)
    if (config.has_section('LCD_LMS')):
        if (config.has_option('LCD_LMS','lms')):
            LMS = config['LCD_LMS']['lms'].split(':')[0]
            LMSPORT = config['LCD_LMS']['lms'].split(':')[1]
        if (config.has_option('LCD_LMS','lcd')):
            LCDD = config['LCD_LMS']['lcd'].split(':')[0]
            LCDPORT = config['LCD_LMS']['lcd'].split(':')[1]
        if (config.has_option('LCD_LMS','player')):
            PLAYER = config['LCD_LMS']['player']
        # bug fix: the 'verbose' option used to be checked and read twice
        if (config.has_option('LCD_LMS','verbose')):
            verbose = config['LCD_LMS'].getboolean('verbose')
        if (config.has_option('LCD_LMS','stop key')):
            STOP_KEY = config['LCD_LMS']['stop key']
        if (config.has_option('LCD_LMS','pause key')):
            PAUSE_KEY = config['LCD_LMS']['pause key']
def parse_arguments():
global verbose
global LCDD
global LCDPORT
global LMS
global LMSPORT
global PLAYER
global STOP_KEY
global PAUSE_KEY
STOP_KEY = 'Enter'
PAUSE_KEY = 'Down'
parser = argparse.ArgumentParser(description='Glue the Logitech Media Server CLI to LCDd')
parser.add_argument('-p', '--player', default=gethostname(), help='the client\'s player name')
parser.add_argument('-d', '--lcdproc', default='localhost:13666', dest='lcdhost:lcdport', help='specify the LCDproc server')
parser.add_argument('-l', '--lms', default='localhost:9090', dest='lmshost:lmsport', help='specify the LMS server')
parser.add_argument('-c', '--config', help='specify a config file')
parser.add_argument('-v', '--verbose', action='store_true', help='output debugging information')
args = vars(parser.parse_args())
if (args.get('verbose') == True):
verbose = True
LCDD = args.get('lcdhost:lcdport').split(':')[0]
LCDPORT = args.get('lcdhost:lcdport').split(':')[1]
LMS = args.get('lmshost:lmsport').split(':')[0]
LMSPORT = args.get('lmshost:lmsport').split(':')[1]
PLAYER = args.get('player')
if (args.get('config') != None):
parse_config(args.get('config'))
def centre(w, t):
l = len(t)
if (l > int(w)):
return t
a = int((int(w) - l) / 2)
b = int(w) - l - a
return (' ' * a) + t + (' ' * b)
def trim(s):
s = re.sub('^\s+|\s+$', '', s)
s = re.sub('"', '', s)
return s
def set_title(ltitle):
global title
title = trim(ltitle)
def set_album(lalbum):
global album
album = trim(lalbum)
def multiline(s):
t = ''
l = ''
length = 0
for w in s.split():
n = len(w)
if (n + length < int(width)):
if (length > 0):
l = l + ' ' + w
length += n + 1
else:
l = w
length = n
else:
t = t + centre(width, l)
l = w
length = n
return (t + centre(width, l))
def two_lines(l1, l2):
myLcd.send_receive('widget_set ' + PLAYER + ' album 1 3 ' + width + ' 3 h 3 \"\"')
if (len(l1) >= int(width) and len(l2) >= int(width)):
s = multiline(l1 + ' ' + l2)
myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 2 h 3 \"\"')
myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 3 v 8 \"' + s + '\"')
return 1
if (len(l1) >= int(width) and len(l2) == 0):
t = multiline(l1)
myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 2 h 3 \"\"')
myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 3 v 8 \"' + t + '\"')
return 1
if (len(l1) >= int(width)):
t = multiline(l1)
myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 2 v 8 \"' + t + '\"')
a = centre(width, l2)
myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 3 ' + width + ' 3 h 3 \"' + a + '\"')
return 1
if (len(l2) >= int(width)):
t = centre(width, l1)
myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 1 h 3 \"' + t + '\"')
a = multiline(l2)
myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 3 v 8 \"' + a + '\"')
return 1
return 0
def set_artist(artist):
artist = trim(artist)
if (title == '' and two_lines(album, artist)):
return
if (artist == '' and two_lines(title, album)):
return
if (album == '' and two_lines(title, artist)):
return
t = centre(width, title)
a = centre(width, artist)
l = centre(width, album)
myLcd.send_receive('widget_set ' + PLAYER + ' title 1 1 ' + width + ' 1 h 3 \"' + t + '\"')
myLcd.send_receive('widget_set ' + PLAYER + ' artist 1 2 ' + width + ' 2 h 3 \"' + a + '\"')
myLcd.send_receive('widget_set ' + PLAYER + ' album 1 3 ' + width + ' 3 h 3 \"' + l + '\"')
def set_status(status):
state = centre(10, status)
myLcd.send_receive('widget_set ' + PLAYER + ' status 6 4 \"' + state + '\"')
def set_progress(current_track_id, total_tracks):
p = ''
if (total_tracks > 0):
p = str(current_track_id + 1) + '/' + str(total_tracks)
p = p[:6]
myLcd.send_receive('widget_set ' + PLAYER + ' progress 15 4 \"' + p + '\"')
def set_elapsed_time():
# duration is unknown for radio stream so just show elapsed time
remain = current_duration - elapsed_time
if (remain < 0):
remain = -remain
rh = int(remain / 3600)
rm = int((remain - 3600 * rh) / 60)
rs = int(remain % 60)
t = ''
if (rh > 0):
t = str(rh) + ':' + str(rm).zfill(2) + ':' + str(rs).zfill(2)
else:
t = str(rm) + ':' + str(rs).zfill(2)
set_status(t)
def set_time():
global start_time
start_time = time.time() - int(float(elapsed_time))
set_elapsed_time()
def set_volume(vol):
if (vol == '100'):
vol = '99'
myLcd.send_receive('widget_set ' + PLAYER + ' volume 1 4 ' + vol.zfill(2))
def set_playing(lplaying):
global elapsed_time
global start_time
global current_duration
global playing
playing = lplaying
if (playing):
start_time = time.time()
myLcd.send_receive('screen_set ' + PLAYER + ' priority foreground backlight on')
myLms.send_player('playlist tracks ?', player_id)
myLms.send_player('playlist index ?', player_id)
else:
myLcd.send_receive('screen_set ' + PLAYER + ' priority background backlight off')
if (current_duration > 0):
current_duration -= elapsed_time
elapsed_time = 0
def set_stopped():
set_title('')
set_album('')
set_artist('')
set_status('stop')
set_playing(False)
def playlist(s):
cmd = s[0]
if (cmd == 'clear'):
set_stopped()
set_progress(-1, 0)
elif (cmd == 'stop'):
set_stopped()
elif (cmd == 'pause'):
if (s[-1] == '0'):
set_playing(True)
else:
set_playing(False)
elif (cmd == 'tracks'):
set_progress(current_track_id, int(s[1]))
elif (cmd == 'index'):
set_progress(int(s[1]), total_tracks)
elif (cmd == 'loadtracks'):
myLms.send_player('playlist tracks ?', player_id)
elif (cmd == 'addtracks'):
myLms.send_player('playlist tracks ?', player_id)
elif (cmd == 'load_done'):
myLms.send_player('playlist tracks ?', player_id)
elif (cmd == 'delete'):
myLms.send_player('playlist tracks ?', player_id)
myLms.send_player('playlist index ?', player_id)
elif (cmd == 'newsong'):
try:
id = int(s[-1])
if (playing and (id == current_track_id)):
return
myLms.send_player('duration ?', player_id)
myLms.send_player('album ?', player_id)
set_progress(id, total_tracks)
except Exception as e:
set_album('')
set_playing(True)
myLms.send_player('title ?', player_id)
myLms.send_player('artist ?', player_id)
def mixer(cmd, vol):
if (cmd == 'volume'):
c = vol[0:1]
if (c == '-' or c == '+'):
myLms.send_player('mixer volume ?', player_id)
else:
set_volume(vol)
def mode(cmd):
if (cmd == 'stop'):
set_playing(False)
set_status(cmd)
elif (cmd == 'pause'):
set_playing(False)
set_status(cmd)
elif (cmd == 'play'):
set_playing(True)
set_status(cmd)
myLms.send_player('playlist tracks ?', player_id)
def lms_response(response):
    """Dispatch one line of LMS CLI output to the matching handler.

    Lines addressed to our player arrive prefixed with the player id;
    that prefix is stripped before dispatching on the first word.
    """
    global current_duration
    global elapsed_time
    # bug fix: ignore blank lines -- the caller splits the stream on '\n',
    # so a trailing newline used to yield '' and an IndexError on s[0]
    if (response.strip() == ''):
        return
    # bug fix: the player id was interpolated into the pattern verbatim;
    # escape it so regex metacharacters (e.g. '.' in IP-based ids) stay literal
    m = re.match(re.escape(player_id) + ' (.+)', response)
    if (m != None):
        response = m.group(1)
    s = response.split()
    if (s[0] == 'playlist'):
        del s[0]
        playlist(s)
    elif (s[0] == 'mixer'):
        del s[0]
        mixer(s[0], s[1])
    elif (s[0] == 'mode'):
        mode(s[1])
    elif (s[0] == 'time'):
        # elapsed seconds within the current track
        elapsed_time = int(float(s[1]))
        set_time()
    elif (s[0] == 'pause'):
        set_playing(False)
    elif (s[0] == 'play'):
        set_playing(True)
    elif (s[0] == 'artist'):
        del s[0]
        set_artist(' '.join(s))
    elif (s[0] == 'album'):
        del s[0]
        set_album(' '.join(s))
    elif (s[0] == 'title'):
        del s[0]
        set_title(' '.join(s))
    elif (s[0] == 'duration'):
        current_duration = int(float(s[1]))
        set_elapsed_time()
def set_clock_widget(w, l, s):
s = centre(width, s)
myLcd.send_receive('widget_set CLOCK ' + w + ' 1 ' + str(l) + ' \"' + s + '\"')
def lms_init():
global myLms
global player_id
myLms = lms.Lms(LMS, LMSPORT)
player_id = ''
pcount = int(myLms.send_receive('player count'))
for i in range(pcount):
p = myLms.send_receive('player name ' + str(i))
if (p == PLAYER):
player_id = myLms.send_receive('player id ' + str(i))
break
if (player_id == ''):
sys.exit('unable to find player ' + PLAYER)
logging.info('player_id: ' + player_id)
sub = "subscribe playlist,mixer,time,mode,play,pause,title,album,artist"
if (listen):
sub = "listen 1"
myLms.send_receive(sub, True)
myLms.send_player('mixer volume ?', player_id)
myLms.send_player('mode ?', player_id)
myLms.send_player('time ?', player_id)
myLms.send_player('duration ?', player_id)
myLms.send_player('playlist index ?', player_id)
myLms.send_player('title ?', player_id)
myLms.send_player('album ?', player_id)
myLms.send_player('artist ?', player_id)
def lcd_init():
global myLcd
global width
global lines
myLcd = lcd.Lcd(LCDD, LCDPORT)
lcdresponse = myLcd.send_receive('hello')
width = re.search('wid\s+(\d+)', lcdresponse).group()[4:]
lines = re.search('hgt\s+(\d+)', lcdresponse).group()[4:]
myLcd.send_receive('client_set name ' + basename(sys.argv[0]))
myLcd.send_receive('screen_add ' + PLAYER)
myLcd.send_receive('screen_set ' + PLAYER + ' priority foreground name playback heartbeat off')
myLcd.send_receive('widget_add ' + PLAYER + ' title scroller')
myLcd.send_receive('widget_add ' + PLAYER + ' album scroller')
myLcd.send_receive('widget_add ' + PLAYER + ' artist scroller')
myLcd.send_receive('widget_add ' + PLAYER + ' volume string')
myLcd.send_receive('widget_add ' + PLAYER + ' status string')
myLcd.send_receive('widget_add ' + PLAYER + ' progress string')
myLcd.send_receive('client_add_key ' + STOP_KEY)
myLcd.send_receive('client_add_key ' + PAUSE_KEY)
myLcd.send_receive('screen_add CLOCK')
myLcd.send_receive('screen_set CLOCK priority info heartbeat off backlight off duration 1000')
myLcd.send_receive('widget_add CLOCK time string')
myLcd.send_receive('widget_add CLOCK day string')
myLcd.send_receive('widget_add CLOCK date string')
# ---- module-level playback state shared with the handler functions ----
start_time = time.time()   # wall-clock moment the current track started
total_tracks = 0           # playlist length (0 = empty)
current_track_id = -1      # zero-based index of the current track
elapsed_time = 0           # seconds played of the current track
current_duration = 0       # track length in seconds (0 = unknown/stream)
title = ''
artist = ''
album = ''
playing = False
listen = False             # True: use LMS "listen 1" instead of "subscribe"
config_init()
lms_init()
lcd_init()
# Main loop: pump LMS notifications and LCDd key events, update the
# elapsed-time display and the clock screen, then sleep out the period.
while True:
    loop_start = time.time_ns()
    response = myLms.check_queue()
    if (response != None):
        response = response.split("\n")
        for r in response:
            lms_response(r)
    response = myLcd.check_queue()
    if (response != None):
        if (response == 'key ' + STOP_KEY + '\n'):
            # STOP key: skip to next track while playing, clear playlist when idle
            if (playing):
                myLms.send_player('playlist index +1', player_id)
            else:
                myLms.send_player('playlist clear', player_id)
        elif (response == 'key ' + PAUSE_KEY + '\n'):
            # PAUSE key toggles: "pause 1" pauses, "pause 0" resumes
            p = 0
            if (playing):
                p = 1
            # bug fix: p is an int, so 'pause ' + p raised a TypeError
            myLms.send_player('pause ' + str(p), player_id)
    if (playing):
        elapsed_time = time.time() - start_time
        set_elapsed_time()
    set_clock_widget('day', 1, time.strftime('%A', time.localtime()))
    set_clock_widget('time', 2, time.strftime('%R', time.localtime()))
    set_clock_widget('date', 3, time.strftime('%d', time.localtime()) + ' ' + time.strftime('%b', time.localtime()) + ' ' + time.strftime('%Y', time.localtime()))
    # sleep out the rest of the MAIN_LOOP_DELAY period
    # (loop_start - now is negative, so delta = period - elapsed)
    delta = float(int(loop_start - time.time_ns())) / 1000000000.0 + MAIN_LOOP_DELAY
    logging.debug('main loop remaining time (delta in s): ' + str(delta))
    if (delta > 0):
        time.sleep(delta)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.