hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbe350f08751ce35bf7b7a760effe123edcf653f | 565 | py | Python | service/methods.py | USECAP/ci-tools | ad2300e3297266ff3ee6ed9118ccd16fc05291e3 | [
"MIT"
] | null | null | null | service/methods.py | USECAP/ci-tools | ad2300e3297266ff3ee6ed9118ccd16fc05291e3 | [
"MIT"
] | null | null | null | service/methods.py | USECAP/ci-tools | ad2300e3297266ff3ee6ed9118ccd16fc05291e3 | [
"MIT"
] | null | null | null | """Forked Methods from jsonrpcserver"""
from jsonrpcserver.methods import Methods as _Methods
from .background import BackgroundTask
class Methods(_Methods):  # pylint: disable=too-many-ancestors
    """
    Holds a list of methods.

    Forked from jsonrpcserver's Methods so that BackgroundTask classes
    can be registered alongside plain callables.
    """
    def __setitem__(self, key, value):
        """Register ``value`` under ``key``.

        Raises:
            TypeError: if ``value`` is neither callable nor a BackgroundTask subclass.
        """
        # Guard with isinstance(value, type): calling issubclass() on a
        # non-class object raises its own "arg 1 must be a class" TypeError,
        # which previously masked the intended error message below.
        is_task_class = isinstance(value, type) and issubclass(value, BackgroundTask)
        if not callable(value) and not is_task_class:
            raise TypeError('%s is not callable or a task class' % type(value))
        self._items[key] = value
method_instance = Methods() # pylint: disable=invalid-name
| 31.388889 | 79 | 0.692035 |
69657d1984ccb6f8f58878434109d7fe1ba9a712 | 2,931 | py | Python | python/oneflow/framework/docstr/tensor_ops.py | Panlichen/oneflow | ad93c69c9932e5515aa31fb7f157073708810a3d | [
"Apache-2.0"
] | null | null | null | python/oneflow/framework/docstr/tensor_ops.py | Panlichen/oneflow | ad93c69c9932e5515aa31fb7f157073708810a3d | [
"Apache-2.0"
] | null | null | null | python/oneflow/framework/docstr/tensor_ops.py | Panlichen/oneflow | ad93c69c9932e5515aa31fb7f157073708810a3d | [
"Apache-2.0"
] | 1 | 2021-12-15T02:14:49.000Z | 2021-12-15T02:14:49.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the user-facing docstring to oneflow.narrow at import time
# (docs live here rather than in the op's native definition).
add_docstr(
oneflow.narrow,
r"""
narrow(x, dim: int, start: int, length: int) -> Tensor
Returns a new tensor that is a narrowed version of `input` tensor.
The dimension `dim` is input from `start` to `start + length`.
Args:
input: the tensor to narrow.
dim: the dimension along which to narrow.
start: the starting dimension.
length: the distance to the ending dimension.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> flow.narrow(input, 0, 0, 2)
tensor([[1, 2, 3],
[4, 5, 6]], dtype=oneflow.int64)
>>> flow.narrow(input, 1, 1, 2)
tensor([[2, 3],
[5, 6],
[8, 9]], dtype=oneflow.int64)
""",
)
# Attach the user-facing docstring to oneflow.unsqueeze at import time.
add_docstr(
oneflow.unsqueeze,
r"""
unsqueeze(input, dim) -> Tensor
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dim` value within the range `[-input.ndimension() - 1, input.ndimension() + 1)`
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.ndimension() + 1``.
Args:
input (Tensor): the input tensor.
dim (int): the index at which to insert the singleton dimension
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = flow.randn(2, 3, 4)
>>> y = x.unsqueeze(2)
>>> y.shape
oneflow.Size([2, 3, 1, 4])
""",
)
# Attach the user-facing docstring to oneflow.permute at import time.
add_docstr(
oneflow.permute,
r"""
permute(input, *dims) -> Tensor
Returns a view of the original tensor with its dimensions permuted.
Args:
dims (tuple of ints): The desired ordering of dimensions
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
>>> output = flow.permute(input, (1, 0, 2, 3)).shape
>>> output
oneflow.Size([6, 2, 5, 3])
""",
)
| 27.914286 | 92 | 0.607984 |
25458c5bbde6c839aae12bcc7b3a6b1438a33b4b | 7,249 | py | Python | pirates/leveleditor/worldData/RavensCoveJailInterior.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z | pirates/leveleditor/worldData/RavensCoveJailInterior.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z | pirates/leveleditor/worldData/RavensCoveJailInterior.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z | from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
# Level-editor export for the Raven's Cove jail interior. objectStruct maps
# editor object ids (timestamp + author strings) to typed placements — player
# spawn nodes, jail cell doors, dynamic lights and a door locator — positioned
# with panda3d Point3/VBase3 values. Machine-generated data: edit with the
# level editor rather than by hand.
objectStruct = {'LevelEnvironment': {'BASE': {'AmbientColor': Vec4(1, 0.25, 0.25, 1),'Direction': Vec3(0.0, 0.0, 270.0),'FogColor': Vec4(0.27, 0.31, 0.32, 0),'FogLinearRange': (0.0, 80.0),'FogType': 2,'LightSwitch': [0, 0, 0],'SkyType': 10,'EnvEffect': 1}},'Objects': {'1271353470.51akelts0': {'Type': 'Building Interior','Name': '','Instanced': False,'Objects': {'1168049461.92akelts': {'Type': 'Player Spawn Node','Hpr': VBase3(-68.11, 0.0, 0.0),'Index': '2','Pos': Point3(-54.973, 5.818, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168049479.89akelts': {'Type': 'Player Spawn Node','Hpr': VBase3(-71.236, 0.0, 0.0),'Index': '3','Pos': Point3(-45.181, -18.273, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168299145.14akelts': {'Type': 'Player Spawn Node','Hpr': VBase3(160.095, 0.0, 0.0),'Index': '1','Pos': Point3(-5.53, 31.897, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168299176.47akelts': {'Type': 'Player Spawn Node','Hpr': VBase3(162.825, 0.0, 0.0),'Index': '0','Pos': Point3(27.059, 18.146, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168299237.44akelts': {'Type': 'Player Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Index': '5','Pos': Point3(15.187, -34.447, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168299256.56akelts': {'Type': 'Player Spawn Node','Hpr': VBase3(-1.141, 0.0, 0.0),'Index': '4','Pos': Point3(-16.913, -34.848, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168372719.98akelts': {'Type': 'Jail Cell Door','Cell Index': 0,'Hpr': Point3(0.0, 0.0, 
0.0),'Level': 1,'Pos': Point3(-48.176, 4.421, -1.442),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168372722.88akelts': {'Type': 'Jail Cell Door','Cell Index': 1,'Hpr': Point3(0.0, 0.0, 0.0),'Level': 1,'Pos': Point3(-35.803, -19.437, -1.442),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168372726.92akelts': {'Type': 'Jail Cell Door','Cell Index': 2,'Hpr': Point3(0.0, 0.0, 0.0),'Level': 1,'Pos': Point3(-9.981, -35.275, -1.442),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168372729.38akelts': {'Type': 'Jail Cell Door','Cell Index': 3,'Hpr': Point3(0.0, 0.0, 0.0),'Level': 1,'Pos': Point3(20.36, -33.659, -1.442),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168372737.22akelts': {'Type': 'Jail Cell Door','Cell Index': 4,'Hpr': Point3(0.0, 0.0, 0.0),'Level': 1,'Pos': Point3(32.105, 23.776, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1168372740.06akelts': {'Type': 'Jail Cell Door','Cell Index': 5,'Hpr': Point3(0.0, 0.0, 0.0),'Level': 1,'Pos': Point3(0.904, 34.037, -1.5),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1222897352.06akelts': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '59.9096','DropOff': '0.0000','FlickRate': '1.0000','Flickering': True,'Holiday': '','Hpr': VBase3(-180.0, -70.754, 180.0),'Intensity': '0.5783','LightType': 'POINT','Pos': Point3(-32.466, 9.879, 9.187),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.99, 0.63, 0.297, 1.0),'Model': 'models/props/light_tool_bulb'}},'1222897352.63akelts': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '76.3554','DropOff': '0.0000','FlickRate': '1.0000','Flickering': True,'Holiday': '','Hpr': VBase3(-180.0, -89.672, 
180.0),'Intensity': '0.7831','LightType': 'POINT','Pos': Point3(-1.274, -21.972, 9.335),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.98, 0.503, 0.353, 1.0),'Model': 'models/props/light_tool_bulb'}},'1222897353.14akelts': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': '1.0000','Flickering': True,'Holiday': '','Hpr': VBase3(180.0, -88.428, -180.0),'Intensity': '0.7952','LightType': 'POINT','Pos': Point3(35.93, -9.193, 7.739),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.98, 0.491, 0.216, 1.0),'Model': 'models/props/light_tool_bulb'}},'1222897353.63akelts': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': '1.0000','Flickering': True,'Holiday': '','Hpr': VBase3(-52.679, 86.667, 52.632),'Intensity': '0.9277','LightType': 'DIRECTIONAL','Pos': Point3(-3.712, -7.463, 0.964),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (1.0, 0.392, 0.2, 1.0),'Model': 'models/props/light_tool_bulb'}},'1281397139.69dxschafe': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(82.669, 0.0, 0.0),'Pos': Point3(40.946, -9.638, -1.464),'Scale': VBase3(1.0, 1.0, 1.0)}},'VisSize': '','Visual': {'Model': 'models/buildings/pir_m_int_spn_jail_destroyed'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1168049461.92akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168049461.92akelts"]','1168049479.89akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168049479.89akelts"]','1168299145.14akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168299145.14akelts"]','1168299176.47akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168299176.47akelts"]','1168299237.44akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168299237.44akelts"]','1168299256.56akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168299256.56akelts"]','1168372719.98akelts': 
'["Objects"]["1271353470.51akelts0"]["Objects"]["1168372719.98akelts"]','1168372722.88akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168372722.88akelts"]','1168372726.92akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168372726.92akelts"]','1168372729.38akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168372729.38akelts"]','1168372737.22akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168372737.22akelts"]','1168372740.06akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1168372740.06akelts"]','1222897352.06akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1222897352.06akelts"]','1222897352.63akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1222897352.63akelts"]','1222897353.14akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1222897353.14akelts"]','1222897353.63akelts': '["Objects"]["1271353470.51akelts0"]["Objects"]["1222897353.63akelts"]','1271353470.51akelts0': '["Objects"]["1271353470.51akelts0"]','1281397139.69dxschafe': '["Objects"]["1271353470.51akelts0"]["Objects"]["1281397139.69dxschafe"]'}}
# Camera/lighting state saved alongside the level by the editor.
# NOTE(review): the trailing '| ... |' columns on the next line are
# dataset-dump residue fused onto the source, not part of the code.
extraInfo = {'camPos': Point3(-28.3455, -15.8743, 9.28302),'camHpr': VBase3(-83.7155, -4.90919, 0),'focalLength': 0.773999989033,'skyState': 21,'fog': 1} | 2,416.333333 | 7,036 | 0.634432 |
cba012d869847309dc86a9d8b977564065193279 | 8,254 | py | Python | tests/service/optimizer/gpu/test_nvidia_gpu_driver.py | vinifmor/guapow | 59a9a1e6706bacbcb3d4bbc762ff9264d5e6f582 | [
"Zlib"
] | 7 | 2021-10-06T17:02:13.000Z | 2022-03-22T10:45:23.000Z | tests/service/optimizer/gpu/test_nvidia_gpu_driver.py | vinifmor/guapow | 59a9a1e6706bacbcb3d4bbc762ff9264d5e6f582 | [
"Zlib"
] | 2 | 2022-03-16T11:20:54.000Z | 2022-03-24T13:54:49.000Z | tests/service/optimizer/gpu/test_nvidia_gpu_driver.py | vinifmor/guapow | 59a9a1e6706bacbcb3d4bbc762ff9264d5e6f582 | [
"Zlib"
] | null | null | null | from unittest import IsolatedAsyncioTestCase
from unittest.mock import Mock, patch, call
from guapow import __app_name__
from guapow.service.optimizer.gpu import NvidiaGPUDriver, NvidiaPowerMode
class NvidiaGPUDriverTest(IsolatedAsyncioTestCase):
    """Unit tests for NvidiaGPUDriver.

    All external effects are mocked: ``shutil.which`` stands in for binary
    discovery and ``system.async_syscall`` for the nvidia-settings /
    nvidia-smi invocations, so no NVIDIA hardware or drivers are needed.
    """
    # can_work() requires nvidia-settings; an empty which() result means "not found".
    @patch(f'{__app_name__}.service.optimizer.gpu.shutil.which', return_value='')
    def test_can_work__false_when_nvidia_settings_is_not_installed(self, which: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        can_work, msg = driver.can_work()
        self.assertEqual(False, can_work)
        self.assertIsInstance(msg, str)
        which.assert_called_once_with('nvidia-settings')
    # First which() call finds nvidia-settings, second (nvidia-smi) comes back empty.
    @patch(f'{__app_name__}.service.optimizer.gpu.shutil.which', side_effect=['nvidia-settings', ''])
    def test_can_work__false_when_nvidia_smi_is_not_installed(self, which: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        can_work, msg = driver.can_work()
        self.assertEqual(False, can_work)
        self.assertIsInstance(msg, str)
        which.assert_has_calls([call('nvidia-settings'), call('nvidia-smi')])
    # NOTE(review): test name looks inverted -- both tools ARE found here and
    # can_work is asserted True with no message.
    @patch(f'{__app_name__}.service.optimizer.gpu.shutil.which', side_effect=['nvidia-settings', 'nvidia-smi'])
    def test_can_work__true_when_nvidia_settings_and_smi_are_not_installed(self, which: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        can_work, msg = driver.can_work()
        self.assertEqual(True, can_work)
        self.assertIsNone(msg)
        which.assert_has_calls([call('nvidia-settings'), call('nvidia-smi')])
    def test_get_default_mode__must_return_auto(self):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertEqual(NvidiaPowerMode.AUTO, driver.get_default_mode())
    def test_get_performance_mode__must_return_performance(self):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertEqual(NvidiaPowerMode.PERFORMANCE, driver.get_performance_mode())
    # get_gpus() parses nvidia-smi csv output; whitespace around indexes is stripped.
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(0, '0 \n 1 '))
    async def test_get_gpus__must_call_nvidia_smi_to_list_available_gpu_indexes(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertEqual({'0', '1'}, await driver.get_gpus())
        async_syscall.assert_called_once_with('nvidia-smi --query-gpu=index --format=csv,noheader')
    # Non-zero exit code: output is ignored even though it looks valid.
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(1, '0 \n 1 '))
    async def test_get_gpus__must_return_empty_set_when_exitcode_is_not_zero(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertEqual(set(), await driver.get_gpus())
        async_syscall.assert_called_once()
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(0, ''))
    async def test_get_gpus__must_return_empty_set_when_no_output(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertEqual(set(), await driver.get_gpus())
        async_syscall.assert_called_once()
    # get_power_mode() queries nvidia-settings per requested gpu id and must only
    # return entries for the ids asked for (gpu '2' appears in output but is filtered).
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(0, "Attribute 'GPUPowerMizerMode' (user:0[gpu:0]): 2.\nAttribute 'GPUPowerMizerMode' (user:0[gpu:1]): 1.\nAttribute 'GPUPowerMizerMode' (user:0[gpu:2]): 0 "))
    async def test_get_power_mode__return_modes_from_nvidia_settings_query_for_defined_ids(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertEqual({'0': NvidiaPowerMode.AUTO,
                          '1': NvidiaPowerMode.PERFORMANCE}, await driver.get_power_mode({'0', '1'}))  # gpu '2' mode must not be returned
        async_syscall.assert_called_once()
        self.assertTrue(async_syscall.call_args.args[0].startswith('nvidia-settings '))
        self.assertIn(' -q [gpu:0]/GpuPowerMizerMode', async_syscall.call_args.args[0])
        self.assertIn(' -q [gpu:1]/GpuPowerMizerMode', async_syscall.call_args.args[0])
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(1, "Attribute 'GPUPowerMizerMode' (user:0[gpu:0]): 2.\nAttribute 'GPUPowerMizerMode' (user:0[gpu:1]): 1."))
    async def test_get_power_mode__return_none_when_exitcode_nonzero(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        self.assertIsNone(await driver.get_power_mode({'0', '1'}))
        async_syscall.assert_called_once()
    # set_power_mode() success is read back from nvidia-settings' "assigned value" lines;
    # the user environment is forwarded but LANG is forced to en_US.UTF-8 so the
    # English output can be parsed.
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(0, "Attribute 'GPUPowerMizerMode' (user:0[gpu:0]) assigned value 1.\nAttribute 'GPUPowerMizerMode' (user:0[gpu:1]) assigned value 0.\nAttribute 'GPUPowerMizerMode' (user:0[gpu:2]) assigned value 2."))
    async def test_set_power_mode__must_change_defined_gpus_to_defined_mode(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        env = {'TEST': 1, 'LANG': 'fr.UTF-8'}
        res = await driver.set_power_mode({'0': NvidiaPowerMode.PERFORMANCE, '1': NvidiaPowerMode.ON_DEMAND}, user_environment=env)
        self.assertEqual({'0': True, '1': True}, res)
        async_syscall.assert_called_once()
        self.assertTrue(async_syscall.call_args.args[0].startswith('nvidia-settings '))
        self.assertIn('custom_env', async_syscall.call_args.kwargs)
        self.assertIn(f' -a [gpu:0]/GpuPowerMizerMode={NvidiaPowerMode.PERFORMANCE.value}', async_syscall.call_args.args[0])
        self.assertIn(f' -a [gpu:1]/GpuPowerMizerMode={NvidiaPowerMode.ON_DEMAND.value}', async_syscall.call_args.args[0])
        self.assertEqual({**env, 'LANG': 'en_US.UTF-8'}, async_syscall.call_args.kwargs['custom_env'])
    # gpu '1' requested PERFORMANCE (1) but output shows assigned value 0 -> reported False.
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(0, "Attribute 'GPUPowerMizerMode' (user:0[gpu:0]) assigned value 1.\nAttribute 'GPUPowerMizerMode' (user:0[gpu:1]) assigned value 0."))
    async def test_set_power_mode__return_not_changed_gpu_mode_as_a_false_value(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        res = await driver.set_power_mode({'0': NvidiaPowerMode.PERFORMANCE, '1': NvidiaPowerMode.PERFORMANCE})
        self.assertEqual({'0': True, '1': False}, res)
        async_syscall.assert_called_once()
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(1, "error"))
    async def test_set_power_mode__return_false_for_all_gpus_when_unknown_output(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        res = await driver.set_power_mode({'0': NvidiaPowerMode.PERFORMANCE, '1': NvidiaPowerMode.PERFORMANCE})
        self.assertEqual({'0': False, '1': False}, res)
        async_syscall.assert_called_once()
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(1, ""))
    async def test_set_power_mode__return_false_for_all_gpus_when_empty_output(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        res = await driver.set_power_mode({'0': NvidiaPowerMode.PERFORMANCE, '1': NvidiaPowerMode.PERFORMANCE})
        self.assertEqual({'0': False, '1': False}, res)
        async_syscall.assert_called_once()
    # With no user environment at all, LANG must still default to en_US.UTF-8.
    @patch(f'{__app_name__}.service.optimizer.gpu.system.async_syscall', return_value=(1, ""))
    async def test_set_power_mode__must_call_nvidia_settings_with_english_as_default_language_when_no_user_env_is_defined(self, async_syscall: Mock):
        driver = NvidiaGPUDriver(cache=False, logger=Mock())
        await driver.set_power_mode({'0': NvidiaPowerMode.PERFORMANCE, '1': NvidiaPowerMode.ON_DEMAND}, user_environment=None)
        self.assertTrue(async_syscall.call_args.args[0].startswith('nvidia-settings '))
        self.assertIn('custom_env', async_syscall.call_args.kwargs)
        self.assertIn(f' -a [gpu:0]/GpuPowerMizerMode={NvidiaPowerMode.PERFORMANCE.value}', async_syscall.call_args.args[0])
        self.assertIn(f' -a [gpu:1]/GpuPowerMizerMode={NvidiaPowerMode.ON_DEMAND.value}', async_syscall.call_args.args[0])
        self.assertEqual({'LANG': 'en_US.UTF-8'}, async_syscall.call_args.kwargs['custom_env'])
bb8fc4b1bbfed49af9f61af53ed0657a20ee3fc6 | 730 | py | Python | backend/server/apps/endpoints/urls.py | Thiesvdz/my_ml_service | 9d39d95218d84539906c12e0d400d4eb89af91d1 | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/urls.py | Thiesvdz/my_ml_service | 9d39d95218d84539906c12e0d400d4eb89af91d1 | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/urls.py | Thiesvdz/my_ml_service | 9d39d95218d84539906c12e0d400d4eb89af91d1 | [
"MIT"
] | null | null | null | from django.urls import include, re_path
from rest_framework.routers import DefaultRouter
from apps.endpoints.views import EndpointViewSet
from apps.endpoints.views import MLAlgorithmViewSet
from apps.endpoints.views import MLAlgorithmStatusViewSet
from apps.endpoints.views import MLRequestViewSet
# Wire each ViewSet into a DRF DefaultRouter. trailing_slash=False means the
# generated endpoints do NOT end with '/'; each `basename` is used to build
# the reverse()-able URL names for that resource.
router = DefaultRouter(trailing_slash=False)
router.register(r"endpoints", EndpointViewSet, basename="endpoints")
router.register(r"mlalgorithms", MLAlgorithmViewSet, basename="mlalgorithms")
router.register(r"mlalgorithmstatuses", MLAlgorithmStatusViewSet, basename="mlalgorithmstatuses")
router.register(r"mlrequests", MLRequestViewSet, basename="mlrequests")
# All router-generated routes are mounted under the /api/v1/ prefix.
# NOTE(review): the trailing '| ... |' columns on the closing line are
# dataset-dump residue fused onto the source, not code.
urlpatterns = [
    re_path(r"^api/v1/", include(router.urls)),
] | 42.941176 | 97 | 0.831507 |
5b4e774e37bdac9c3bda0c5578ee9477a69255a9 | 1,287 | py | Python | ch2/exercise_2_1_4.py | sweetpalma/clrs | baa2dfd99a435b2138f01bda5779e3cd57275a8d | [
"MIT"
] | 2 | 2019-05-30T18:29:10.000Z | 2019-06-26T17:13:14.000Z | ch2/exercise_2_1_4.py | sweetpalma/clrs | baa2dfd99a435b2138f01bda5779e3cd57275a8d | [
"MIT"
] | null | null | null | ch2/exercise_2_1_4.py | sweetpalma/clrs | baa2dfd99a435b2138f01bda5779e3cd57275a8d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Part of CLRS solutions by SweetPalma, 2019. See LICENSE for details.
import unittest
# Solution:
def binary_sum(a, b):
    """Add two n-bit binary numbers given as bit lists (most significant bit first).

    Implements CLRS exercise 2.1-4: for input arrays of n bits each, the
    result is an (n + 1)-element array whose leading element is the final
    carry bit (0 when the addition does not overflow n bits).
    """
    width = len(a)
    result = []
    carry = 0
    # Walk the bits from least to most significant, accumulating low bits.
    for i in range(width - 1, -1, -1):
        carry, bit = divmod(a[i] + b[i] + carry, 2)
        result.append(bit)
    result.append(carry)       # final carry becomes the leading bit
    result.reverse()           # restore most-significant-first order
    return result
# Test:
class TestBinarySum(unittest.TestCase):
def helper_int_to_binary(self, n):
binary_string = '{0:b}'.format(n)
binary_iter = list(binary_string)
return list(map(int, binary_iter))
def helper_test_pair(self, a, b):
binary_a = self.helper_int_to_binary(a)
binary_b = self.helper_int_to_binary(b)
if len(binary_a) == len(binary_b):
res = binary_sum(binary_a, binary_b)
exp = self.helper_int_to_binary(a + b)
self.assertSequenceEqual(res, exp)
def test_binary_sum(self):
TEST_RANGE = 100
for a in range(1, TEST_RANGE):
for b in range(1, TEST_RANGE):
self.helper_test_pair(a, b)
# Runner:
# Run the suite when this exercise file is executed directly. A placeholder
# argv[0] is supplied because unittest.main() ignores the program name; '-v'
# enables verbose output and exit=False avoids raising SystemExit afterwards.
if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored', '-v'], exit=False)
| 26.265306 | 70 | 0.58042 |
adfc05772f01957053fe239c640b679d16399f6b | 47,231 | py | Python | bin/maast.py | zjshi/Maast | 01428afa8dad042cb8fcbba024b60f176b228898 | [
"MIT"
] | null | null | null | bin/maast.py | zjshi/Maast | 01428afa8dad042cb8fcbba024b60f176b228898 | [
"MIT"
] | 8 | 2022-03-24T22:17:19.000Z | 2022-03-29T15:42:00.000Z | bin/maast.py | zjshi/Maast | 01428afa8dad042cb8fcbba024b60f176b228898 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import division
import sys, os, time, argparse
import shutil, hashlib, math, multiprocessing
import numpy as np
from operator import itemgetter
from Bio import SeqIO
from snps_io import id_genome_clusters, id_centroid
from snps_io import vcf_io, concat_alleles, gen_msa, align_assembly
from db_io import build_db
def get_data_type():
    """Return the Maast module (subcommand) given on the command line.

    Prints the top-level usage text and exits cleanly when no subcommand
    (or -h/--help) is supplied; aborts with an error message when the
    subcommand is not one of the supported modules.

    Returns:
        str: the validated subcommand name (sys.argv[1]).
    """
    # sys is imported at module level; the previous redundant local import
    # is removed.
    if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
        cmd = 'maast '
        print('usage: %s <module> [options]' % cmd)
        print('')
        print("version: 0.1.0")
        print('')
        print('description: identify and genotype core-genome snps from <module>')
        print('')
        print('modules:')
        print('    end_to_end    Run full Maast pipeline from beginning to end')
        print('    genomes       Perform multiple alignment of genomes to call core-genome SNPs')
        print('    db            Build kmer database targeting snps')
        print('    genotype      Call core-genome SNPs for single genomes and isolate sequencing data')
        print('    tree          Build SNP tree using identified genotypes')
        print('')
        print("use '%s <module> -h' for usage on a specific command" % cmd)
        print('')
        sys.exit(0)  # was quit(): sys.exit() works even when site builtins are absent
    elif sys.argv[1] not in ['end_to_end', 'genomes', 'db', 'genotype', 'tree']:
        sys.exit("\nError: invalid subcommand\n\nSupported subcommand: genomes, db, genotype, end_to_end, tree\n")
    else:
        return sys.argv[1]
def parse_args():
    """Build and parse the command-line interface for the selected Maast module.

    The module name (first positional argument, validated by get_data_type)
    decides which argument groups are registered. Returns a plain dict of the
    parsed options with the module name stored under 'data_type'.
    """
    data_type = get_data_type()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        add_help=False,
        usage=argparse.SUPPRESS
    )
    # Hidden positional holding the module name (already consumed/validated).
    parser.add_argument('data_type', help=argparse.SUPPRESS)
    if data_type == 'end_to_end':
        end2end_input = parser.add_argument_group('end2end_input')
        end2end_input.add_argument('--in-dir', type=str, metavar='PATH',required=True,
            help = """Path to directory of raw-read-files in FASTQ format (.fastq or .fq; gzipped or not)\nor whole-genome sequences in FASTA format (.fna, .fa, .fsa or .fasta). (Required)""")
    # Input/output options shared by every module.
    io = parser.add_argument_group('input/output')
    io.add_argument('--out-dir', type=str, metavar='PATH', required=True,
        help="""Directory to store output (required)""")
    if data_type in ['genomes']:
        io.add_argument('--fna-dir', type=str, metavar='PATH', required=True,
            help = """Path to directory of genomes in FASTA format (required)""")
    if data_type in ['genomes', 'end_to_end']:
        io.add_argument('--rep-fna', type=str, metavar='PATH', default=None,
            help = """Path to the reference genome serving as the template for whole genome alignment. \nIf provided, Maast will not identify and use centroid genome for reference (default None)""")
        io.add_argument('--skip-align', action='store_true', default=False,
            help = """skip whole genome sequence or short read alignment, only applicable when alignment \nhas already been done (default False)""")
        io.add_argument('--has-completeness', action='store_true', default=False,
            help = """Toggle for specifying completeness for supplied genomes sequenes. If toggled on, \nit requries to supply either --completeness or --completeness-list (default False)""")
        io.add_argument('--completeness', type=float, metavar='FLOAT', default=None,
            help = """Single completeness value for all genomes sequenes \n(i.e. all genomes have the same completeness) (default False)""")
        io.add_argument('--completeness-list', type=str, metavar='PATH', default=None,
            help = """Path to list of pairs of genome file name and completeness value, separated by tab character. \n(note: genome file names should have no duplicates, and should cover all files specified in --fna-dir) (default None)""")
        io.add_argument('--missing-ratio', type=float, metavar='FLOAT', default=0.05,
            help = """Parameter defining the missing ratio of core sites even when completeness is 1 (default 0.05)""")
        io.add_argument('--min-pid', type=float, metavar='FLOAT', default=0,
            help = """Parameter defining the minimal identity for including each aligned block, [0, 100] (default 0)""")
        io.add_argument('--min-aln-len', type=int, metavar='INT', default=10,
            help = """Parameter defining the minimal length for including each aligned block (default 10)""")
        io.add_argument('--max-pid-delta', type=float, metavar='FLOAT', default=0.1,
            help = """Parameter defining the maximum identity gap between identity of each aligned block and \nwhole-genome ANI, all alignments with identity less than ANI * (1 - delta) will be purged, [0, 1] (default 10)""")
        io.add_argument('--mem', action='store_true', default=False,
            help = """calling SNPs by genomic segment, option for memory saving (default False)""")
    if data_type in ['genomes', 'end_to_end']:
        # Genome de-replication / tag-genome selection knobs (Mash-based).
        prep = parser.add_argument_group('preprocessing')
        prep.add_argument('--keep-redundancy', action='store_true', default=False,
            help="""If toggled on, Maast will skip redundancy removal and move on with all input genomes (default=False)""")
        prep.add_argument('--skip-centroid', action='store_true', default=False,
            help="""If toggled on, Maast will not attempt to identify and use centroid genome for reference (default=False)""")
        prep.add_argument('--sketch-k', type=int, metavar='INT', default=21,
            help="""k-mer size for building Mash sketch (default=21)""")
        prep.add_argument('--sketch-size', type=int, metavar='INT', default=5000,
            help="""The number of k-mers per Mash sketch (default=5000)""")
        prep.add_argument('--precut', type=float, metavar='FLOAT', default=0.05,
            help="""Limit searches among pair of genomes with distance smaller than the provided value (default=0.05)""")
        prep.add_argument('--start-cutoff', type=float, metavar='FLOAT', default=0.02,
            help="""The cutoff from which Maast will start to search a distance cutoff, which generate \nthe good number of genome clusters and tag genomes based on a given MAF (default=0.02)""")
        prep.add_argument('--end-cutoff', type=float, metavar='FLOAT', default=0.0001,
            help="""Similiar to --start-cutoff, the cutoff at which Maast will end the search for a distance cutoff. \nThis value should be smaller than --start-cutoff (default=0.0001)""")
        prep.add_argument('--range-factor', type=float, metavar='FLOAT', default=1.2,
            help="""This factor times the minimum number of genomes needed for a given MAF will create \nthe upper bound of a range satisfying the search. It should be larger than 1 (default=1.2)""")
    if data_type in ['genomes', 'end_to_end']:
        snps = parser.add_argument_group('snp-calling')
        snps.add_argument('--max-sites', type=int, metavar='INT', default=float('inf'),
            help="""Maximum genomic sites to parse (use all); useful for testing (default=inf)""")
        snps.add_argument('--min-prev', type=float, metavar='FLOAT', default=1.0,
            help="""Minimum prevalence (default=1.0)""")
        snps.add_argument('--snp-freq', type=float, metavar='FLOAT', default=0.01,
            help="""Minimum minor allele frequency for SNP calling (default=0.01)""")
        snps.add_argument('--max-samples', type=int, metavar='INT', default=float('inf'),
            help="""Only use a subset of genomes or metagenomes for snp calling (default=inf)""")
    if data_type in ['db', 'end_to_end']:
        db = parser.add_argument_group('db-building')
        # Inputs that end_to_end derives internally are only exposed for 'db'.
        if data_type in ['db']:
            db.add_argument('--ref-genome', type=str, dest='ref_genome', required=True,
                help="""Path to reference genome sequence file (required)""")
            db.add_argument('--vcf', type=str, dest='vcf', required=True,
                help="""Path to a vcf file describing core snps/genetic variants called based on \nmultiple sequence alignments (required)""")
            db.add_argument('--msa', type=str, dest='msa', required=True,
                help="""Path to multiple sequence alignment file (required)""")
            db.add_argument('--tag-fna-list', type=str, dest='tag_list', required=True,
                help="""Path to a list of paths to the tag genomes (FASTA format) which are included \nin multiple sequence alignment file (required)""")
            db.add_argument('--fna-dir', type=str, dest='fna_dir', default=None,
                help="""Path to a list of paths to the tag genomes (FASTA format) which are included \nin multiple sequence alignment file (default=None)""")
            db.add_argument('--coords', type=str, dest='coords', default=None,
                help="""Path to core genome block coordinate file (default=None)""")
    if data_type in ['db', 'end_to_end']:
        db.add_argument('--genome-name', dest='genome_name', type=str, default='100000',
            help="""Name of the core-genome corresponding to INPUT. Should be six digits \nwith the first digit in [1, 9] (default=100000)""")
        db.add_argument('--overwrite', dest='overwrite', action='store_true', help="""Overwrite existing output files""")
        db.add_argument('--kmer-type', dest='kmer_type', default='all',
            choices=['all', 'center'],
            help="""
Choose type of kmers to be fetched
all: all elligible kmers
1) covered snp at any position
and 2) do not cover any bad sites (e.g. N or -)
and 3) were well contained on its coordinate division (default)
center: all kmers whose target snps was at their centers.""")
        db.add_argument('--snp-cover', dest='snp_type', default='all',
            choices=['all', 'l1-tags', 'l2-tags'],
            help="""
Choose object to kmerize
all: all snps from the cluster will be attempted for kmer search; most kmers (default)
l1-tags: only representative snps from all snp blocks will be attempted
l2-tags: only representative snps from representative snp blocks will be attempted; fewest kmers
* note: all kmers must uniquely match an allele and intersect >= 1 SNP""")
    if data_type in ['genotype', 'end_to_end']:
        genotype_input = parser.add_argument_group('genotype_input')
        # NOTE(review): the genome-genotyping group is registered only for the
        # standalone 'genotype' module; end_to_end already defines --min-pid etc.
        # in its input/output group above, so duplicating them here would clash.
        if data_type in ['genotype']:
            genotype_input.add_argument('--in-dir', type=str, metavar='PATH',required=True,
                help = """Path to directory of raw-read-files in FASTQ format (.fastq or .fq; gzipped or not) \nor whole-genome sequences in FASTA format (.fna, .fa, .fsa or .fasta) (required)""")
            genotype_input.add_argument('--ref-genome', type=str, dest='ref_genome', required=True,
                help="""Path to reference genome sequence file (required)""")
            genotype_input.add_argument('--db', type=str, metavar='PATH', dest='kmer_db_path', required=True,
                help = """Path to directory of raw-read-files in FASTQ format (.fastq or .fq; gzipped or not) \nor whole-genome sequences in FASTA format (.fna, .fa, .fsa or .fasta) (required)""")
            genotype_input.add_argument('--vcf', type=str, dest='vcf', required=True,
                help="""Path to a vcf file describing core snps/genetic variants called based on \nmultiple sequence alignments (required)""")
            single_genome = parser.add_argument_group('genome-genotyping')
            single_genome.add_argument('--min-pid', type=float, metavar='FLOAT', default=0,
                help = """Parameter defining the minimal identity for including each aligned block, [0, 100] (default=0)""")
            single_genome.add_argument('--min-aln-len', type=int, metavar='INT', default=10,
                help = """Parameter defining the minimal length for including each aligned block (default=10)""")
            single_genome.add_argument('--max-pid-delta', type=float, metavar='FLOAT', default=0.1,
                help = """Parameter defining the maximum identity gap between identity of each aligned block and \nwhole-genome ANI, all alignments with identity less than ANI * (1 - delta) will be purged, [0, 1] (default=0.1)""")
    if data_type in ['genotype', 'end_to_end']:
        genotype_input.add_argument('--merge-pairs', action='store_true', default=False,
            help = """Flag to merge paired raw reads files in <in-dir>; indicated by ext '_1*' and '_2*'""")
        align = parser.add_argument_group('reads-genotyping')
        align.add_argument('--mode', default='very-sensitive',
            choices=['very-fast', 'fast', 'sensitive', 'very-sensitive'],
            help = """Alignment speed/sensitivity (default=very-sensitive)""")
        align.add_argument('--max-reads', type=int, metavar='INT',
            help = """Maximum # reads to use from each FASTQ file (default=None; use all)""")
    if data_type in ['genomes', 'genotype', 'end_to_end']:
        io.add_argument('--subset-list', type=str, metavar='PATH', default=None,
            help = """Path to file contains the names of the fullset or subset of the files in the input directory. \nFiles not in the list will not be included for snp calling (default=None; use all)""")
    if data_type in ['tree']:
        tree_io = parser.add_argument_group('tree_io')
        tree_io.add_argument('--input-dir', type=str, dest='input_dir', required=True,
            help="""Input directory that should contains genotype result files generated from Maast genotype command""")
        tree_io.add_argument('--input-list', type=str, dest='input_list', default=None,
            help="""A list of input pairs. Each pair per row contains a path to a genotype result file generated \nfrom Maast genotype command and a unique name of the file. (required)
The path and name must be separated by a tab.
Example
/file/path/1 name1
/file/path/2 name2
/file/path/3 name3
...""")
        tree_io.add_argument('--min-sites', type=int, dest='min_sites_per_sample', default=1000,
            help="""Minimum SNP sites. Any allele sequence with a number of non-empty sites lower than \nthis value will not be included (default=1000)""")
        tree_io.add_argument('--max-gap-ratio', type=float, dest='max_gap_ratio', default=0.5,
            help="""Maximum ratio of gaps. Any allele sequence with a ratio of gap higher than this value \nwill not be included (default=0.5)""")
        tree_io.add_argument('--min-site-prev', type=float, dest='min_site_prev', default=0.9,
            help="""Minimum site prevalence. Any site with an actual allele presents in a fraction of sequences \nlower than this value will not be included (default=0.9)""")
        tree_io.add_argument('--min-MAF', type=float, dest='min_maf', default=0.01,
            help="""Minimum allele frequency. Any site with MAF lower than this value will not be included (default=0.01)""")
        tree_io.add_argument('--min-MAC', type=float, dest='min_mac', default=1,
            help="""Minimum allele count. Any site with MAC lower than this value will not be included (default=1)""")
        tree_io.add_argument('--min-depth', type=float, dest='min_depth', default=1,
            help="""Minimum read depth. Any site supported by a number of reads lower than this value will not be included. \nThis option is only for genotypes identified from sequencing reads. \nDefault value is 1 and any value >1 will effectively exclude all whole genome assemblies from analysis. \nCaution is advised (default=1)""")
    misc = parser.add_argument_group('misc')
    misc.add_argument("-h", "--help", action="help",
        help="""Show this help message and exit""")
    misc.add_argument('--threads', type=int, metavar='INT', default=multiprocessing.cpu_count(),
        help="""Number of CPUs to use (default=use all)""")
    args = vars(parser.parse_args())
    # Keep the module name alongside the parsed options.
    args['data_type'] = data_type
    return args
def run_command(cmd, env=None):
    """Run *cmd* through the shell and return (stdout, stderr) as text.

    Parameters:
        cmd: shell command line to execute.
        env: optional environment mapping for the child process; None means
             inherit the parent environment.

    Exits the whole program (sys.exit) with a diagnostic message when the
    command returns a non-zero status.
    """
    import subprocess as sp
    # Popen treats env=None as "inherit the parent environment", so a single
    # call covers both cases; the original if/else duplication was redundant.
    p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE, env=env)
    out, err = p.communicate()
    if p.returncode != 0:
        err_msg = "\nError: the following returned non-zero status: '%s':\n" % cmd
        err_msg += "\n%s" % err
        sys.exit(err_msg)
    else:
        return out.decode('utf-8'), err.decode('utf-8')
def parallel(function, argument_list, threads):
    """Run *function* over *argument_list* with a multiprocessing pool.

    Each element of argument_list is a list of positional arguments for one
    call. Returns the list of results in submission order. Ctrl-C terminates
    the pool and exits the program.

    Based on: https://gist.github.com/admackin/003dd646e5fadee8b8d6
    """
    import multiprocessing as mp
    import signal
    import time
    def init_worker():
        # Workers ignore SIGINT so the parent alone handles KeyboardInterrupt.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = mp.Pool(int(threads), init_worker)
    try:
        results = []
        for arguments in argument_list:
            p = pool.apply_async(function, args=arguments)
            results.append(p)
        pool.close()
        # Poll instead of blocking on get() so KeyboardInterrupt is caught
        # promptly in the parent process.
        while True:
            if all(r.ready() for r in results):
                return [r.get() for r in results]
            time.sleep(1)
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        sys.exit("\nKeyboardInterrupt")
def reformat_sequence_headers(args):
    """
    Reformat sequence headers in input genomes to prevent parsnp from crashing

    Copies every FASTA under args['fna_dir'] (and args['rep_fna'], if set)
    into <out_dir>/temp, replacing '-' with '_' in record ids and upper-casing
    the sequences, then repoints the args entries at the sanitized copies.
    """
    import Bio.SeqIO
    if 'fna_dir' in args:
        try: os.makedirs(args['out_dir']+'/temp/genomes')
        except: pass
        for file in os.listdir(args['fna_dir']):
            infile = open(args['fna_dir']+'/'+file)
            outfile = open(args['out_dir']+'/temp/genomes/'+file, 'w')
            for seq in Bio.SeqIO.parse(infile, 'fasta'):
                seq.id = seq.id.replace('-', '_')
                # Stored as a plain upper-cased string; written out verbatim below.
                seq.seq = str(seq.seq).upper()
                outfile.write('>'+seq.id+'\n'+seq.seq+'\n')
            infile.close()
            outfile.close()
        # Downstream steps read the sanitized copies, not the originals.
        args['fna_dir'] = args['out_dir']+'/temp/genomes'
    if 'rep_fna' in args and args['rep_fna'] is not None:
        infile = open(args['rep_fna'])
        outfile = open(args['out_dir']+'/temp/'+os.path.basename(args['rep_fna']), 'w')
        for seq in Bio.SeqIO.parse(infile, 'fasta'):
            seq.id = seq.id.replace('-', '_')
            seq.seq = str(seq.seq).upper()
            outfile.write('>'+seq.id+'\n'+seq.seq+'\n')
        infile.close()
        outfile.close()
        args['rep_fna'] = args['out_dir']+'/temp/'+os.path.basename(args['rep_fna'])
def locate_fpaths(args, in_dir, rep_fna=None, subset_list=None):
    """Collect FASTA paths from *in_dir* and pick a reference genome.

    Populates args['subset_map'] (allowed file names), args['fna_paths']
    (FASTA files found) and args['rep_fna'] (the user-supplied reference or,
    failing that, the largest FASTA file found).
    """
    # Default: every file in the directory is eligible...
    subset_map = dict()
    for f in os.listdir(in_dir):
        subset_map[f] = 1
    # ...unless an explicit subset list restricts the file names.
    if subset_list is not None:
        subset_map = dict()
        with open(subset_list, 'r') as fh:
            for ln in fh:
                subset_map[ln.rstrip()] = 1
    args["subset_map"] = subset_map
    ref_path = ""
    fpaths = []
    # Using the largest genome file in direcory for reference intead of randomly selecting anyone
    lg_fpath = ""
    cur_size = 0
    for f in os.listdir(in_dir):
        if f in subset_map:
            fpath = in_dir.rstrip('/')+'/'+f
            ftype = id_input_type(fpath)
            if os.path.isfile(fpath) and ftype == "fasta":
                fstats = os.stat(fpath)
                fpaths.append(fpath)
                # Track the largest FASTA seen so far as the fallback reference.
                if fstats.st_size >= cur_size:
                    cur_size = fstats.st_size
                    lg_fpath = fpath
            else:
                sys.stderr.write("skip {}: not fasta format\n".format(fpath))
        else:
            sys.stderr.write("skip {}\n".format(f))
    if rep_fna is not None: # Using speficied reference genome
        ref_path = rep_fna
    else:
        ref_path = lg_fpath
    args['rep_fna'] = ref_path
    args['fna_paths'] = fpaths
def detect_single_chrom(ref_path):
    """Return True when the FASTA file at *ref_path* contains at most one
    sequence header ('>' line), False as soon as a second one is seen.

    A file with no headers also returns True, matching the original behavior.
    """
    chrom_cnt = 0
    with open(ref_path, 'r') as fh:
        for line in fh:
            if line.startswith('>'):
                chrom_cnt += 1
                # Early exit: no need to scan the rest of a multi-record file.
                if chrom_cnt > 1:
                    return False
    return True
def register_run_id(args, in_dir, single=False):
    """Derive a run identifier from the basename of *in_dir*, store it under
    args['run_id'] and return it. A '_single' suffix is appended when
    *single* is True."""
    run_id = in_dir.rstrip('/').split('/')[-1]
    if single:
        run_id += "_single"
    args['run_id'] = run_id
    return run_id
def register_msa_id(args, ref_path, fpaths):
    """Compute a deterministic MSA identifier and store it in args['msa_id'].

    The id is the MD5 hex digest of the concatenated basenames of every
    genome path in *fpaths* followed by the basename of *ref_path*.
    """
    basenames = [p.rstrip('/').split('/')[-1] for p in fpaths]
    basenames.append(ref_path.rstrip('/').split('/')[-1])
    args['msa_id'] = hashlib.md5("".join(basenames).encode()).hexdigest()
    return args['msa_id']
def auto_min_pid_by_delta(coords_path, idt_delta):
    """Return a minimum %identity cutoff: mean aligned-block identity times
    (1 - idt_delta).

    *coords_path* is a MUMmer show-coords output; its first five lines are
    header and are skipped, and the identity sits in the 7th whitespace/pipe
    separated column. When no alignment rows exist, a mean identity of 0.7
    is assumed.
    """
    pids = []
    with open(coords_path) as f:
        for _ in range(5):
            next(f)
        for row in f:
            cols = row.replace(' | ', ' ').split()
            pids.append(float(cols[6]))
    avg_pid = sum(pids) / len(pids) if pids else 0.7
    return avg_pid * (1 - idt_delta)
def run_mummer4_single(fpath, genome_id, ref_fpath, rep_id, out_dir, skip_align, min_pid, min_aln_len, max_pid_delta, internal_thread_num):
    """Align one genome against the reference with nucmer and post-process.

    Pipeline: nucmer -> delta-filter (loose, user cutoffs) -> show-coords to
    estimate the mean identity -> delta-filter again with the identity-derived
    cutoff -> show-coords/show-snps/show-diff on the final delta. All external
    tool output is appended to <out_dir>/log. With skip_align=True an existing
    .delta file is reused instead of re-running nucmer.
    """
    print(" %s - %s" % (rep_id, genome_id))
    try: os.makedirs(out_dir)
    except: pass
    log = open(out_dir+'/log','w')
    if skip_align is True and os.path.isfile("%s/%s.delta" % (out_dir, genome_id)):
        log.write('nucmer alignment was skipped\n')
        print(' nucmer alignment skipped\n')
    else:
        command = "nucmer "
        command += "-t %s " % internal_thread_num
        command += "%s " % ref_fpath
        command += "%s " % fpath
        command += "--prefix %s/%s " % (out_dir, genome_id)
        out, err = run_command(command)
        log.write(str(out)+'\n'+str(err))
    # First filtering pass with the user-supplied identity/length thresholds.
    command = "delta-filter -q -r "
    command += "-i %s " % str(min_pid)
    command += "-l %s " % str(min_aln_len)
    command += "%s/%s.delta " % (out_dir, genome_id)
    command += "> %s/%s.filter.delta.1" % (out_dir, genome_id)
    out, err = run_command(command)
    log.write(str(out)+'\n'+str(err))
    command = "show-coords "
    command += "%s/%s.filter.delta.1 " % (out_dir, genome_id)
    command += "> %s/%s" % (out_dir, 'coords.tmp')
    out, err = run_command(command)
    log.write(str(out)+'\n'+str(err))
    # Derive a tighter identity cutoff from the mean block identity and
    # re-filter the original delta with it.
    coords_path = "{}/{}".format(out_dir, 'coords.tmp')
    min_pid_by_delta = auto_min_pid_by_delta(coords_path, max_pid_delta)
    command = "delta-filter -q -r "
    command += "-i %s " % str(min_pid_by_delta)
    command += "-l %s " % str(min_aln_len)
    command += "%s/%s.delta " % (out_dir, genome_id)
    command += "> %s/%s.filter.delta" % (out_dir, genome_id)
    out, err = run_command(command)
    # Produce the final coords/snps/diff reports consumed downstream.
    for utility in ['coords', 'snps', 'diff']:
        command = "show-%s " % utility
        command += "%s/%s.filter.delta " % (out_dir, genome_id)
        command += "> %s/%s" % (out_dir, utility)
        out, err = run_command(command)
        log.write(str(out)+'\n'+str(err))
def run_mummer4(args):
    """Align all (tag) genomes against the reference in parallel and build the
    multiple sequence alignment.

    Writes the MSA to <out_dir>/tag_msa.fna, the list of aligned genome paths
    to <out_dir>/tag_paths.list, and records msa_path/msa_type/tag_list_path
    in *args*.
    """
    fpaths = args['fna_paths']
    # Prefer de-replicated tag genomes / centroid reference when available.
    if 'tag_genome_paths' in args:
        fpaths = args['tag_genome_paths']
    ref_fpath = args['rep_fna']
    if 'tag_ref' in args:
        ref_fpath = args['tag_ref']
    register_run_id(args, args['fna_dir'])
    register_msa_id(args, ref_fpath, fpaths)
    print("reference genome path: %s" % ref_fpath)
    args['mummer4_dir'] = args['out_dir']+'/temp/mummer4/'+args['run_id']
    try: os.makedirs(args['mummer4_dir'])
    except: pass
    shutil.copy(ref_fpath, os.path.join(args['mummer4_dir'], 'reference.fna'))
    arg_list = []
    rep_id = ref_fpath.split('/')[-1].replace('.fna', '')
    print("[paired alignment]: start")
    for fpath in fpaths:
        genome_id = fpath.split('/')[-1].replace('.fna', '')
        out_dir = '%s/aln/%s' % (args['mummer4_dir'], genome_id)
        # One work item per genome; internal_thread_num=1 since parallelism
        # happens across genomes.
        arg_list.append([fpath, genome_id, ref_fpath, rep_id, out_dir, args['skip_align'], args['min_pid'], args['min_aln_len'], args['max_pid_delta'], 1])
    print("[paired alignment]: done")
    parallel(run_mummer4_single, arg_list, args['threads'])
    msa_path = gen_msa.build_msa(indir=args['mummer4_dir'], overwrite=True, subset=args["subset_map"])
    shutil.copy(os.path.join(args['mummer4_dir'], 'reference.fna'), args['out_dir'])
    args['msa_path'] = args['out_dir'] + '/tag_msa.fna'
    shutil.move(msa_path, args['msa_path'])
    args['msa_type'] = 'xmfa-mummer4'
    args['tag_list_path'] = args['out_dir'] + '/tag_paths.list'
    with open(args['tag_list_path'], 'w') as fh:
        for fpath in fpaths:
            fh.write("{}\n".format(fpath.rstrip()))
def run_mash_scketch(args):
    """Build a Mash sketch over all input genomes.

    Writes the genome list to <mash_dir>/in_fna.list, runs `mash sketch`, and
    records the sketch path in args['mash_sketch_path'].
    (Function name keeps the historical 'scketch' spelling; callers rely on it.)
    """
    ref_fpath = args['rep_fna']
    fpaths = args['fna_paths']
    register_run_id(args, args['fna_dir'])
    register_msa_id(args, ref_fpath, fpaths)
    print("reference genome path: %s" % ref_fpath)
    args['mash_dir'] = args['out_dir']+'/temp/mash/'+args['run_id']
    try: os.makedirs(args['mash_dir'])
    except: pass
    args['fna_list_path'] = args['mash_dir'] + '/in_fna.list'
    with open(args['fna_list_path'], 'w') as fh:
        for fpath in fpaths:
            fh.write("{}\n".format(fpath))
    print("[building mash sketch]: start")
    command = "mash sketch "
    command += "-k %s " % str(args['sketch_k'])
    command += "-s %s " % str(args['sketch_size'])
    command += "-p %s " % str(args['threads'])
    command += "-o %s " % (args['mash_dir']+'/mash_sketch')
    command += "-l %s " % args['fna_list_path']
    out, err = run_command(command)
    with open(args['logfile'], 'a') as logger:
        logger.write(str(out)+'\n'+str(err))
    # mash appends the .msh extension itself.
    args['mash_sketch_path'] = args['mash_dir']+'/mash_sketch.msh'
def run_mash_dist(args):
    """Compute all-vs-all Mash distances from the sketch built earlier.

    Requires args['mash_sketch_path'] to exist; writes the distance table to
    <mash_dir>/mash_dist.tsv and records it in args['mash_dist_path'].
    """
    sketch_path = args['mash_sketch_path']
    assert os.path.exists(sketch_path)
    args['mash_dist_path'] = args['mash_dir'] + '/mash_dist.tsv'
    print("[calculating mash distance]: start")
    # Sketch compared against itself => pairwise distances for all genomes.
    command = "mash dist "
    command += "-p %s " % str(args['threads'])
    command += "%s %s " % (sketch_path, sketch_path)
    command += "> %s " % args['mash_dist_path']
    out, err = run_command(command)
    with open(args['logfile'], 'a') as logger:
        logger.write(str(out)+'\n'+str(err))
def do_precut(args):
    """Filter the Mash distance table down to pairs closer than args['precut'].

    Writes the reduced table to <mash_dir>/mash_dist.cut.tsv and records it in
    args['cut_dist_path'] for the clustering step.
    """
    dist_path = args['mash_dist_path']
    assert os.path.exists(dist_path)
    args['cut_dist_path'] = args['mash_dir'] + '/mash_dist.cut.tsv'
    print("[cut mash distance: {}]: start".format(str(args['precut'])))
    # Column 3 of `mash dist` output is the distance.
    command = "awk '$3 < %s' " % str(args['precut'])
    command += "%s " % dist_path
    command += "> %s " % args['cut_dist_path']
    out, err = run_command(command)
    with open(args['logfile'], 'a') as logger:
        logger.write(str(out)+'\n'+str(err))
def id_clusters(args):
    """Cluster genomes by Mash distance and pick one tag genome per cluster.

    Searches distance cutoffs from args['start_cutoff'] (raised by 0.01 up to
    args['precut'] while the search hits its first-cut exit) and stores the
    chosen tag genomes, plus any genome outside every cluster, in
    args['tag_genome_paths'].
    """
    run_mash_scketch(args)
    run_mash_dist(args)
    s_cut = args['start_cutoff']
    e_cut = args['end_cutoff']
    r_fac = args['range_factor']
    total_n = len(args['fna_paths'])
    maf = args['snp_freq']
    # Minimum number of genomes needed to observe the requested minor allele
    # frequency at least once.
    critical_n = math.ceil(1 / maf)
    do_precut(args)
    dist_path = args['cut_dist_path']
    assert os.path.exists(dist_path)
    optimal_clusters, optimal_d, optimal_n = [], None, None
    while s_cut <= args['precut']:
        optimal_clusters, optimal_d, optimal_n, firstcut_exit = id_genome_clusters.build_genome_blocks(dist_path, total_n, critical_n, s_cut, e_cut, r_fac)
        if firstcut_exit is True:
            # Search ended at its starting cutoff; retry with a looser start.
            s_cut = s_cut + 0.01
        else:
            break
    clust_genomes = dict()
    tag_genomes = []
    for cluster in optimal_clusters:
        tag_genomes.append(cluster.tag_genome)
        for genome in cluster.genomes:
            clust_genomes[genome] = 1
    # Singletons (never clustered) each serve as their own tag genome.
    for fpath in args['fna_paths']:
        if fpath not in clust_genomes:
            tag_genomes.append(fpath)
    args['tag_genome_paths'] = tag_genomes
def id_tag_ref(args):
    """Pick the centroid of the (tag) genomes as the reference genome.

    Ensures a Mash distance table exists, then stores the centroid path in
    both args['tag_ref'] and args['rep_fna'].
    """
    if 'mash_dist_path' not in args or not os.path.exists(args['mash_dist_path']):
        run_mash_scketch(args)
        run_mash_dist(args)
    dist_path = args['mash_dist_path']
    tag_paths = args['fna_paths']
    # Restrict centroid search to tag genomes when de-replication ran.
    if 'tag_genome_paths' in args and len(args['tag_genome_paths']) > 1:
        tag_paths = args['tag_genome_paths']
    centroid = id_centroid.identify(tag_paths, dist_path)
    print(centroid)
    args['tag_ref'] = centroid
    args['rep_fna'] = centroid
def run_kmerset_validate(args):
    """Validate the candidate SNP k-mer set against the input genomes.

    Writes the genome path list to <out_dir>/check_fna_paths.list, runs the
    external `callm_db_val` tool, and records its per-kmer profile in
    args['kmer_prof_path'] for downstream filtering.
    """
    assert os.path.exists(args['kmer_set'])
    assert os.path.exists(args['tag_list'])
    args['kmer_prof_path'] = args['out_dir']+'/kmer_prof.tsv'
    args['check_fna_paths'] = args['out_dir']+'/check_fna_paths.list'
    if 'fna_paths' in args:
        with open(args['check_fna_paths'], 'w') as fh:
            for fpath in args['fna_paths']:
                fh.write("{}\n".format(fpath))
    print("[validating kmer set]: start")
    command = "callm_db_val "
    command += "-d %s " % args['kmer_set']
    command += "-n %s " % args['genome_name']
    command += "-t %s " % args['threads']
    # Validation runs against all input genomes rather than tag genomes only.
    #command += "-L %s " % args['tag_list']
    command += "-L %s " % args['check_fna_paths']
    command += "-o %s " % args['kmer_prof_path']
    out, err = run_command(command)
    with open(args['logfile'], 'a') as logger:
        logger.write(str(out)+'\n'+str(err))
def filter_kmers(args):
    """Select reliable SNP k-mers from the validation profile.

    Reads args['kmer_prof_path'] (tab-separated; columns used: 0 = SNP id,
    2-5 = four k-mer sequences, 6 = null hits, 7 = single hits,
    8 = non-single hits, 9 = id prefix, 10 = ref-allele hits,
    11 = alt-allele hits). A row is kept only when it never hits more than
    once per genome, hits exactly once in at least half of the genomes, and
    both alleles were observed. Each kept row emits its four k-mers, tagged
    with "<prefix>0<id>" (ref) or "<prefix>1<id>" (alt), into
    <out_dir>/selected_kmers.tsv (path stored in args['filtered_kmer_path']).
    """
    assert os.path.exists(args['kmer_prof_path'])
    args['filtered_kmer_path'] = args['out_dir']+'/selected_kmers.tsv'
    with open(args['filtered_kmer_path'], 'w') as fw:
        with open(args['kmer_prof_path'], 'r') as fh:
            for line in fh:
                items = line.rstrip().split('\t')
                nonsingle_hit = int(items[8])
                null_hit = int(items[6])
                single_hit = int(items[7])
                ref_hit = int(items[10])
                alt_hit = int(items[11])
                if nonsingle_hit > 0:
                    continue
                # Guard against rows with no observations at all, which would
                # otherwise raise ZeroDivisionError below.
                total_hit = single_hit + null_hit
                if total_hit == 0 or single_hit / total_hit < 0.5:
                    continue
                if ref_hit == 0 or alt_hit == 0:
                    continue
                rec1 = "{}\t{}0{}".format(items[2], items[9], items[0])
                rec2 = "{}\t{}1{}".format(items[3], items[9], items[0])
                rec3 = "{}\t{}0{}".format(items[4], items[9], items[0])
                rec4 = "{}\t{}1{}".format(items[5], items[9], items[0])
                fw.write("{}\n{}\n{}\n{}\n".format(rec1, rec2, rec3, rec4))
def run_build_db(args):
    """Compile the filtered k-mer list into a binary genotyping database.

    Runs the external `callm_db_build` tool on args['filtered_kmer_path'] and
    stores the resulting database path in args['kmer_db_path'].
    """
    assert args['filtered_kmer_path']
    args['kmer_db_path'] = args['out_dir']+'/kmer_db.bin'
    command = "callm_db_build "
    command += "%s " % args['filtered_kmer_path']
    command += "> %s " % args['kmer_db_path']
    out, err = run_command(command)
    with open(args['logfile'], 'a') as logger:
        logger.write(str(out)+'\n'+str(err))
def read_input_dir(args, in_dir, subset_list=None):
    """Scan *in_dir* and split inputs into FASTA genomes and FASTQ read files.

    Populates args['subset_map'], args['fna_paths'], args['fq_paths'] and
    args['fq_pairs'] (paired FASTQ files detected by '_1'/'_2' stems).
    Unknown or unsupported files are reported to stderr and skipped.
    """
    # Default eligibility: every file in the directory...
    subset_map = dict()
    for f in os.listdir(in_dir):
        subset_map[f] = 1
    # ...unless an explicit subset list restricts the names.
    if subset_list is not None:
        subset_map = dict()
        with open(subset_list, 'r') as fh:
            for ln in fh:
                subset_map[ln.rstrip()] = 1
    args["subset_map"] = subset_map
    fna_paths = []
    fq_paths = []
    for f in os.listdir(in_dir):
        if f in subset_map:
            fpath = in_dir.rstrip('/')+'/'+f
            print(fpath)
            if os.path.isdir(fpath):
                continue
            assert os.path.isfile(fpath)
            ftype = id_input_type(fpath)
            if ftype == "unknown":
                sys.stderr.write("skip {}: unknown input type\n".format(fpath))
            elif ftype == "not_supported":
                sys.stderr.write("skip {}: compressed fasta is not supported yet\n".format(fpath))
            elif ftype == "fasta":
                fna_paths.append(fpath)
            elif ftype in ["fastq", "fastq.gz", "fastq.lz4", "fastq.bz2"]:
                fq_paths.append(fpath)
            else:
                # id_input_type returns only the values handled above.
                assert False
        else:
            sys.stderr.write("skip {}\n".format(f))
    fq_pairs = []
    if len(fq_paths) > 1:
        fq_pairs = pair_inputs(fq_paths)
    args['fna_paths'] = fna_paths
    args['fq_paths'] = fq_paths
    args['fq_pairs'] = fq_pairs
def id_input_type(fpath):
    """Classify a file path by extension.

    Returns one of: "fasta", "fastq", "fastq.gz"/"fastq.lz4"/"fastq.bz2",
    "not_supported" (compressed FASTA), or "unknown" (optionally suffixed
    with the compression extension, e.g. "unknown.gz").
    """
    name_parts = fpath.split("/")[-1].split(".")
    compressed = name_parts[-1] in ('gz', 'lz4', 'bz2')
    # The meaningful extension sits before any compression suffix.
    ext = name_parts[-2] if compressed else name_parts[-1]
    if ext in ('fa', 'fsa', 'fna', 'fasta'):
        in_type = "fasta"
    elif ext in ('fq', 'fastq'):
        in_type = "fastq"
    else:
        in_type = "unknown"
    if compressed:
        if in_type == "fasta":
            in_type = "not_supported"
        else:
            in_type = in_type + '.' + name_parts[-1]
    return in_type
def pair_inputs(fq_paths):
    """Group FASTQ paths into read pairs.

    A file participates in a pair when its basename stem (text before the
    first '.') is exactly '<name>_1' or '<name>_2'. Returns a list of
    [forward_path, reverse_path, name] triples for every name with both
    mates present; anything else is ignored.
    """
    by_name = {}
    for path in fq_paths:
        stem = path.split("/")[-1].split(".")[0]
        pieces = stem.split("_")
        if len(pieces) != 2 or pieces[1] not in ("1", "2"):
            continue
        by_name.setdefault(pieces[0], {})[pieces[1]] = path
    return [[mates["1"], mates["2"], name]
            for name, mates in by_name.items()
            if "1" in mates and "2" in mates]
def genotype_single_genomes(args):
    """Genotype whole-genome assemblies: align each against the reference with
    MUMmer, then call per-SNP genotypes from the coords/snps reports.

    Results land in <out_dir>/gt_results/<genome>.tsv. The reference sequences
    and VCF genotypes are published as module-level globals (ref, genos) so
    the multiprocessing workers can reach them.
    """
    ref_fpath = args['ref_genome']
    fpaths = args['fna_paths']
    print("reference genome path: %s" % ref_fpath)
    args['genotype_dir'] = args['out_dir']+'/temp/genotype'
    try: os.makedirs(args['genotype_dir'])
    except: pass
    args['gt_results_dir'] = args['out_dir']+'/gt_results'
    try: os.makedirs(args['gt_results_dir'])
    except: pass
    arg_list = []
    arg_list_gt = []
    rep_id = ref_fpath.split('/')[-1].replace('.fna', '')
    # Globals shared with worker processes spawned by parallel().
    global ref
    ref = read_ref(ref_fpath)
    global genos
    genos = extract_genotypes(args['vcf'])
    print("[paired alignment]: start")
    for fpath in fpaths:
        genome_id = fpath.split('/')[-1]
        out_dir = '%s/aln/%s' % (args['genotype_dir'], genome_id)
        arg_list.append([fpath, genome_id, ref_fpath, rep_id, out_dir, False, args['min_pid'], args['min_aln_len'], args['max_pid_delta'], 1])
        # The genotyping step reads the coords/snps files the alignment
        # step writes into the same per-genome directory.
        coord_path = out_dir + '/coords'
        snp_path = out_dir + '/snps'
        output = args['gt_results_dir'] + '/' + genome_id + ".tsv"
        arg_list_gt.append([genos, ref, coord_path, snp_path, output])
    print("[paired alignment]: done")
    parallel(run_mummer4_single, arg_list, args['threads'])
    parallel(run_single_fasta_gt, arg_list_gt, args['threads'])
def read_ref(fpath):
    """Load a FASTA file into a dict mapping record id -> upper-cased
    sequence string."""
    rec_table = dict()
    for record in SeqIO.parse(fpath, "fasta"):
        rec_table[record.id] = str(record.seq).upper()
    return rec_table
def extract_genotypes(vcf_path):
    """Parse a VCF file into [chrom, pos, id, ref, alt] string records.

    Header lines (starting with '#') and records whose ALT allele is longer
    than one base are skipped; the position is round-tripped through int to
    normalize it.
    """
    genos = []
    with open(vcf_path, 'r') as fh:
        for line in fh:
            if line.startswith("#"):
                continue
            chrom, pos, gid, ref_allele, alt_allele = line.rstrip().split('\t')[:5]
            if len(alt_allele) > 1:
                continue
            genos.append([chrom, str(int(pos)), gid, ref_allele, alt_allele])
    return genos
def run_single_fasta_gt(genos, ref, coord_path, snp_path, output):
    """Call genotypes for one assembled genome from its MUMmer reports.

    genos: [chrom, pos, id, ref_allele, alt_allele] records from the VCF.
    ref: dict of reference chrom -> sequence string.
    coord_path/snp_path: show-coords / show-snps outputs for this genome.
    output: TSV destination; each row is the geno record plus ref/alt counts
    ('1','0' when the genome carries the reference-agreeing allele, '0','1'
    when it carries the variant).
    """
    # Aligned reference intervals per chromosome.
    coord_map = dict()
    with open(coord_path, 'r') as fh:
        for i in range(5):
            next(fh)
        for l in fh:
            values = l.replace(' | ', ' ').split()
            # position in coords file is 1 indexed compared to 0 indexed in vcf
            start = int(values[0]) - 1
            end = int(values[1]) - 1
            chrom = values[7]
            assert end > start
            if chrom not in coord_map:
                coord_map[chrom] = []
            coord_map[chrom].append([start, end])
    # Observed substitutions: chrom -> ref position -> [ref base, query base].
    snp_map = dict()
    with open(snp_path) as fh:
        for i in range(5):
            next(fh)
        for l in fh:
            values = l.replace(' | ', ' ').split()
            # position in snps file is 1 indexed compared to 0 indexed in vcf
            pos_r = int(values[0]) - 1
            allele_r = values[1]
            allele_a = values[2]
            chrom = values[10]
            # '.' marks an indel rather than a substitution; skip those.
            if allele_r == "." or allele_a == ".":
                continue
            if chrom not in snp_map:
                snp_map[chrom] = dict()
            snp_map[chrom][pos_r] = [allele_r, allele_a]
    gtypes = []
    for geno in genos:
        chrom = geno[0]
        pos_r = int(geno[1])
        gid = geno[2]
        allele_ma = geno[3]
        allele_mi = geno[4]
        if chrom not in coord_map:
            continue
        # NOTE(review): a site falling inside several (overlapping) aligned
        # blocks is appended once per block — there is no break; confirm
        # downstream tolerates duplicates.
        for g_range in coord_map[chrom]:
            if pos_r >= g_range[0] and pos_r <= g_range[1]:
                if chrom in snp_map and pos_r in snp_map[chrom]:
                    # Genome differs from the reference here: variant allele
                    # present when it matches the VCF minor allele.
                    if allele_mi == snp_map[chrom][pos_r][1]:
                        gtypes.append([chrom, str(pos_r), gid, allele_ma, allele_mi, '0', '1'])
                    else:
                        gtypes.append([chrom, str(pos_r), gid, allele_ma, allele_mi, '1', '0'])
                else:
                    # Genome matches the reference at this site.
                    assert chrom in ref
                    allele_r = ref[chrom][pos_r]
                    if allele_mi == allele_r:
                        gtypes.append([chrom, str(pos_r), gid, allele_ma, allele_mi, '0', '1'])
                    else:
                        gtypes.append([chrom, str(pos_r), gid, allele_ma, allele_mi, '1', '0'])
    with open(output, 'w') as fw:
        for gtype in gtypes:
            fw.write("{}\n".format("\t".join(gtype)))
def genotype_reads(args):
    """Genotype FASTQ inputs by matching reads against the SNP k-mer database.

    Runs the external `iso_gt_mtar` tool over all FASTQ files, optionally
    merges the per-mate counts of read pairs, then converts every raw count
    file into a VCF-annotated genotype table under <out_dir>/gt_results.

    Fixes over the previous revision: merged-pair inputs are opened from the
    iso_gt output directory (the bare relative name could not exist), and the
    merge accumulates the count column (items[1]) instead of re-parsing the
    SNP id column (items[0]) as a count.
    """
    fpaths = args['fq_paths']
    args['genotype_dir'] = args['out_dir']+'/temp/genotype'
    try: os.makedirs(args['genotype_dir'])
    except: pass
    args['gt_results_dir'] = args['out_dir']+'/gt_results'
    try: os.makedirs(args['gt_results_dir'])
    except: pass
    gt_paths = []
    outname = '%s/iso_gt' % args['genotype_dir']
    try: os.makedirs(outname)
    except: pass
    # Map user-facing sensitivity mode onto iso_gt_mtar's -j level.
    if args['mode'] == "very-fast":
        mode = 10
    elif args['mode'] == "fast":
        mode = 5
    elif args['mode'] == 'sensitive':
        mode = 2
    elif args['mode'] == 'very-sensitive':
        mode = 1
    else:
        assert False
    command = "iso_gt_mtar "
    command += "-d %s " % args['kmer_db_path']
    command += "-t %s " % args['threads']
    command += "-j %s " % mode
    command += "-o %s/" % outname
    command += "%{in} "
    command += "-f "
    for fpath in fpaths:
        command += "%s " % fpath
        gt_paths.append(outname + '/' + extract_fastq_path_name(fpath) + ".tsv")
    out, err = run_command(command)
    with open(args['logfile'], 'a') as logger:
        logger.write(str(out)+'\n'+str(err))
    merge_paths = []
    if args["merge_pairs"]:
        assert "fq_pairs" in args
        for fq_pair in args["fq_pairs"]:
            fq_1 = fq_pair[0]
            fq_2 = fq_pair[1]
            fq_name = fq_pair[2]
            # BUGFIX: the per-mate count files live in the iso_gt output
            # directory; the bare name alone would never resolve.
            fq_gt_1 = outname + '/' + extract_fastq_path_name(fq_1) + ".tsv"
            fq_gt_2 = outname + '/' + extract_fastq_path_name(fq_2) + ".tsv"
            fq_merge = dict()
            for fq_gt in [fq_gt_1, fq_gt_2]:
                with open(fq_gt, 'r') as fh:
                    for line in fh:
                        items = line.rstrip().split('\t')
                        # BUGFIX: items[0] is the SNP id, items[1] the count;
                        # sum the counts, keyed by SNP id.
                        if items[0] not in fq_merge:
                            fq_merge[items[0]] = int(items[1])
                        else:
                            fq_merge[items[0]] += int(items[1])
            merge_output = outname + "/" + fq_name + ".merged.tsv"
            with open(merge_output, 'w') as fw:
                for snp in fq_merge.keys():
                    fw.write("{}\t{}\n".format(snp, str(fq_merge[snp])))
            merge_paths.append(merge_output)
    # Translate every raw count file (single files and merged pairs) into a
    # genotype table annotated with the VCF records.
    arg_list = []
    for gt_path in gt_paths + merge_paths:
        fq_id = '.'.join(gt_path.split('/')[-1].split('.')[:-1])
        output = args['gt_results_dir'] + '/' + fq_id + '.reads.tsv'
        arg_list.append([args['vcf'], gt_path, output])
    parallel(run_parse_single, arg_list, args['threads'])
def extract_fastq_path_name(fpath):
    """Turn a FASTQ file path into a flat sample identifier.

    Leading '.' / '..' path components are dropped, the compression suffix
    (.gz/.lz4/.bz2) plus the .fq/.fastq extension are stripped, and every
    remaining '/' or '.' becomes '_'.  Asserts on unrecognised extensions.
    """
    parts = fpath.split('/')
    start = 0
    for idx, part in enumerate(parts):
        if part not in ('.', '..'):
            start = idx
            break
    trimmed = '/'.join(parts[start:])
    pieces = trimmed.split('.')
    last = pieces[-1]
    if last in ['gz', 'lz4', 'bz2']:
        # compressed: drop both the compression suffix and the fq extension
        kept = pieces[:-2]
    elif last in ['fq', 'fastq']:
        kept = pieces[:-1]
    else:
        assert False
    return ".".join(kept).replace('/', '_').replace('.', '_')
def run_parse_single(vcf_path, gt_path, output):
    """Join one k-mer count table with a VCF and write per-SNP allele counts.

    Each row of *gt_path* is tab-separated '<snp_key> <count>' where
    snp_key[6] encodes the allele (0 = ref, 1 = alt) and snp_key[7:] is the
    SNP id.  For every non-header VCF record whose id was observed
    (multi-base ALT records are skipped), a tab-separated row
    'chrom pos id ref alt ref_count alt_count' is written to *output*.
    """
    counts = dict()
    with open(gt_path, 'r') as fh:
        for line in fh:
            fields = line.rstrip().split('\t')
            key = fields[0]
            allele = int(key[6])
            assert allele in [0, 1]
            snp_id = key[7:]
            if snp_id not in counts:
                counts[snp_id] = [0, 0]
            counts[snp_id][allele] += int(fields[1])
    rows = []
    with open(vcf_path, 'r') as fh:
        for raw in fh:
            if raw[0] == "#":
                continue
            fields = raw.rstrip().split('\t')[:5]
            chrom = fields[0]
            pos = int(fields[1])
            snp_id = fields[2]
            allele_ref = fields[3]
            allele_alt = fields[4]
            if len(allele_alt) > 1:
                continue
            if snp_id in counts:
                pair = counts[snp_id]
                rows.append([chrom, str(pos), snp_id, allele_ref,
                             allele_alt, str(pair[0]), str(pair[1])])
    with open(output, 'w') as fw:
        for row in rows:
            fw.write("{}\n".format("\t".join(row)))
def call_snps_main(args):
    """Call core-genome SNPs from input genomes (or a precomputed MSA).

    Pipeline: locate inputs -> optionally derive the prevalence cutoff from
    genome completeness -> select tag genomes -> run mummer4 alignment ->
    parse the alignment -> call SNPs -> write VCF/coords outputs into
    ``args['out_dir']``.  Mutates ``args`` along the way.
    """
    cmdl_str = ' '.join(sys.argv[1:])
    if args['data_type'] in ['genomes', 'end_to_end']:
        locate_fpaths(args, args['fna_dir'], args['rep_fna'], args['subset_list'])
    if args['data_type'] in ['genomes', 'end_to_end']:
        if args["has_completeness"]:
            if args["completeness"]:
                args["min_prev"] = (1 - float(args["missing_ratio"])) * float(args["completeness"])
            elif args["completeness_list"]:
                completeness_map = {}
                # Bug fix: the completeness list is an *input* file; it was
                # opened with mode 'w', which truncates it and cannot be read.
                with open(args["completeness_list"], 'r') as fh:
                    for line in fh:
                        # Bug fix: rstrip('') strips nothing; strip the newline.
                        items = line.rstrip('\n').split('\t')
                        completeness_map[items[0]] = float(items[1])
                ref_fpath = args['rep_fna']  # NOTE(review): unused below — confirm before removing
                fpaths = args['fna_paths']
                completenesses = []
                for fpath in fpaths:
                    fname = fpath.rstrip('/').split('/')[-1]
                    if fname in completeness_map:
                        completenesses.append(completeness_map[fname])
                    else:
                        sys.exit("missing completeness: {}".format(fpath))
                avg_completeness = sum(completenesses)/len(completenesses)
                args["min_prev"] = (1 - float(args["missing_ratio"])) * avg_completeness
            else:
                print("useless option --has-completeness")
        # NOTE(review): '<= 5' also rejects exactly 5 genomes although the
        # message states 5 is the minimum — confirm the intended bound.
        if len(args['fna_paths']) <= 5:
            sys.exit("Input genomes {} are fewer than the min. requirement (5)".format(len(args['fna_paths'])))
        if len(args['fna_paths']) <= math.ceil(1 / args['snp_freq']):
            print("[Warning] Total number of genomes ({}) < min. number of genomes required for effective SNP calling with MAF {} ({})".format(len(args['fna_paths']), args['snp_freq'], math.ceil(1 / args['snp_freq'])))
            print("[Warning] Skip tag genome selection, all genomes will be used")
            args['keep_redundancy'] = True
    if args['data_type'] in ['genomes', 'end_to_end']:
        if not args['keep_redundancy']:
            id_clusters(args)
        if args['skip_centroid']:
            assert args['rep_fna'] is not None
            assert os.path.exists(args['rep_fna'])
        else:
            id_tag_ref(args)
    # >>> 1. Generate multiple-genome-alignment or pileups
    # data type is genomes: use mummer4 to perform multiple genome alignment
    start = time.time()
    if args['data_type'] in ['genomes', 'end_to_end']:
        print("Running mummer4; start")
        run_mummer4(args)
        print("Running mummer4; done!")
        print("Elapsed time: {}".format(time.time()-start))
    # >>> 2. Parse multiple-genome-alignment or pileup and call SNPs
    # fetch generator to parse msa columns or mpileup sites
    start = time.time()
    print("Fetching file-type-specific parser; start")
    if args['data_type'] in ['genomes', 'end_to_end', 'msa']:
        from align_io import msa
        if args['mem']:
            site_assembly = msa.iter_parse(args['msa_path'], args['msa_type'], args['max_samples'])
        else:
            site_assembly = msa.monolithic_parse(args['msa_path'], args['msa_type'], args['max_samples'])
    print("Fetching file-type-specific parser; done")
    print("Elapsed time: {}".format(time.time()-start))
    # id core-genome coords and snps
    start = time.time()
    print("Identifying core-snps; start")
    print("max sites: {}".format(args['max_sites']))
    print("min prevalence: {}".format(args['min_prev']))
    print("min MAF: {}".format(args['snp_freq']))
    if args['mem']:
        align_assembs = align_assembly.call_snps_iter(site_assembly, args['max_sites'], args['min_prev'], args['snp_freq'])
    else:
        align_assembs = align_assembly.call_snps(site_assembly, args['max_sites'], args['min_prev'], args['snp_freq'])
    print("Identifying core-snps; done")
    print("Elapsed time: {}".format(time.time()-start))
    single_chrom_rep = False
    if args['mem'] is True and args['rep_fna'] is not None:
        single_chrom_rep = detect_single_chrom(args['rep_fna'])
    # write output files
    start = time.time()
    print("Writing snps to VCF; start")
    if args['mem']:
        # streaming mode: headers are written once, on the first chunk that
        # actually contains SNPs; coords are merged and written at the end
        header_ready = False
        coords_buffer = []
        for align_assemb in align_assembs:
            if len(align_assemb.snps) > 0:
                if not header_ready:
                    vcf_io.write_coords_header(coords_buffer, args['out_dir'])
                    vcf_io.write_vcf_header(align_assemb.snps, args['out_dir'], cmdl_str)
                    header_ready = True
                coords_buffer = coords_buffer + align_assemb.coords
                vcf_io.write_vcf(align_assemb.snps, args['out_dir'], single_chrom_rep)
        vcf_io.write_coords(vcf_io.merge_coords(coords_buffer), args['out_dir'])
    else:
        vcf_io.write_coords_header(align_assembs.coords, args['out_dir'])
        vcf_io.write_vcf_header(align_assembs.snps, args['out_dir'], cmdl_str)
        vcf_io.write_coords(align_assembs.coords, args['out_dir'])
        vcf_io.write_vcf(align_assembs.snps, args['out_dir'])
    print("Writing snps to VCF; done!")
    print("Elapsed time: {}".format(time.time()-start))
def build_db_main(args):
    """Build the SNP-covering k-mer database from a reference genome + VCF.

    Reads the reference genome, core-SNP VCF, coords table and tag-genome
    MSA named in ``args``, extracts SNP-covering k-mers, validates and
    filters them, then compiles the final binary database via
    ``run_build_db``.  Mutates ``args`` (sets 'kmer_size' and 'kmer_set').
    """
    print("Database building; start")
    args['kmer_size'] = 31
    genome_path, vcf_path, coords_path, tag_list_path = args['ref_genome'], args['vcf'], args['coords'], args['tag_list']
    k_size, k_type = args['kmer_size'], args['kmer_type']
    # NOTE(review): tag_list_path and k_type are only referenced by the
    # commented-out code below — confirm before removing them.
    if args['fna_dir'] is not None:
        locate_fpaths(args, args['fna_dir'])
    genome_seq = build_db.open_genome_seq(genome_path)
    #snps = build_db.open_vcf_file(vcf_path)
    # coords restrict k-mer extraction to the core genome when provided
    coords = None
    if coords_path is not None:
        coords = build_db.read_coords(coords_path)
    snp_gb_pos, snp_alleles = build_db.open_vcf_file_local(vcf_path)
    #snp_gb_pos = [int(snp.ID) for snp in snps]
    #snp_alleles = [[str(snp.REF), str(snp.ALT[0])] for snp in snps]
    #snp_kmers = fetch_snp_kmers(genome_seq, snp_gb_pos, snp_alleles, k_size, k_type, coords)
    genome_seqs = build_db.load_msa(args['msa'])
    snp_kmers = build_db.fetch_all_from_msa(genome_seqs, genome_seq, snp_gb_pos, snp_alleles, k_size, coords)
    args['kmer_set'] = args['out_dir'] + '/nr_kmer_set.tsv'
    build_db.dump_tsv(snp_kmers, args['kmer_set'])
    # validate candidate k-mers against genomes, filter, then compile
    run_kmerset_validate(args)
    filter_kmers(args)
    run_build_db(args)
    print("Database building; finished")
def genotype_main(args):
    """Genotype every input found in ``args['in_dir']`` (genomes and/or reads)."""
    print("Genotyping; start")
    read_input_dir(args, args['in_dir'], args['subset_list'])
    # best-effort mkdir: ignore "already exists" (narrowed from a bare except)
    try: os.makedirs(args['out_dir'])
    except OSError: pass
    if len(args["fna_paths"]) > 0:
        print("Genomes found; start")
        genotype_single_genomes(args)
        print("Genomes found; done")
    if len(args["fq_paths"]) > 0:
        print("Reads found; start")
        genotype_reads(args)
        # Bug fix: the closing message said "start" again (copy-paste),
        # mirroring the "Genomes found; done" message above.
        print("Reads found; done")
    print("Genotyping; finished")
def tree_main(args):
    """Build a SNP tree from concatenated genotyped alleles."""
    print("SNP tree building; start")
    concat_alleles.concat_allele_tree(args)
    print("SNP tree building; finished")
def end2end_main(args):
    """Run the whole pipeline: SNP calling, database build, then genotyping.

    Derives the database-building paths from ``args['out_dir']`` (VCF,
    coords, tag list, tag MSA produced by ``call_snps_main``) and finally
    prints a summary of the generated output files.
    """
    # best-effort mkdir: ignore "already exists" (narrowed from a bare except)
    try: os.makedirs(args['out_dir'])
    except OSError: pass
    args['fna_dir'] = args['in_dir']
    locate_fpaths(args, args['in_dir'], args['rep_fna'], args['subset_list'])
    call_snps_main(args)
    # wire call_snps_main outputs into build_db_main inputs
    args['kmer_size'] = 31
    args['ref_genome'] = args['rep_fna']
    args['vcf'] = args['out_dir'].rstrip('/') + '/core_snps.vcf'
    args['coords'] = args['out_dir'].rstrip('/') + '/coords.tsv'
    args['tag_list'] = args['out_dir'].rstrip('/') + '/tag_paths.list'
    args['msa'] = args['out_dir'].rstrip('/') + '/tag_msa.fna'
    build_db_main(args)
    print("Genotyping; start")
    read_input_dir(args, args['in_dir'], args['subset_list'])
    if len(args["fna_paths"]) > 0:
        print("Genomes found; start")
        genotype_single_genomes(args)
        print("Genomes found; done")
    if len(args["fq_paths"]) > 0:
        print("Reads found; start")
        genotype_reads(args)
        # Bug fix: the closing message said "start" again (copy-paste).
        print("Reads found; done")
    print("Genotyping; finished")
    print("All output files are in {}".format(args['out_dir']))
    print("The output files include the following")
    print("    reference.fna (selected reference genome)")
    print("    tag_paths.list (list of selected tag genomes)")
    print("    tag_msa.fna (multiple sequence alignment of tag genomes)")
    print("    coords.tsv (coordinates of consensus genome)")
    print("    core_snps.vcf (called SNPs in VCF format)")
    print("    nr_kmer_set.tsv (raw SNP-covering k-mers)")
    print("    check_fna_paths.list (a list of genomes used for validating SNP-covering k-mers)")
    print("    kmer_prof.tsv (hit profile of SNP-covering k-mers)")
    print("    selected_kmers.tsv (validated SNP-covering k-mers)")
    print("    kmer_db.bin (optimized database of SNP-covering k-mers)")
    print("The directories include")
    print("    gt_results (SNP genotyping results)")
    print("    temp (tempory directory for hosting)")
def main():
    """CLI entry point: parse arguments and dispatch on the subcommand."""
    args = parse_args()
    # best-effort mkdir: ignore "already exists" (narrowed from a bare
    # except, which also swallowed KeyboardInterrupt/SystemExit)
    try: os.makedirs(args['out_dir'])
    except OSError: pass
    args['logfile'] = "{}/logfile".format(args['out_dir'].rstrip('/'))
    if args['data_type'] == 'genomes':
        call_snps_main(args)
    elif args['data_type'] == 'db':
        build_db_main(args)
    elif args['data_type'] == 'genotype':
        genotype_main(args)
    elif args['data_type'] == 'tree':
        tree_main(args)
    elif args['data_type'] == 'end_to_end':
        end2end_main(args)
    else:
        sys.exit("\nError: invalid subcommand\nSupported subcommand: genomes, db, genotype, tree, end_to_end\n")
# CLI entry point when executed as a script.
if __name__ == "__main__":
    main()
| 34.424927 | 327 | 0.678114 |
da0a670f00184ba762023ea80571798c62e4997b | 5,541 | py | Python | numdifftools/test_functions.py | jlec/numdifftools | 43071da54627f896213cabcea61158d29f4e86b0 | [
"BSD-3-Clause"
] | null | null | null | numdifftools/test_functions.py | jlec/numdifftools | 43071da54627f896213cabcea61158d29f4e86b0 | [
"BSD-3-Clause"
] | null | null | null | numdifftools/test_functions.py | jlec/numdifftools | 43071da54627f896213cabcea61158d29f4e86b0 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on 17. mai 2015
@author: pab
'''
from __future__ import division
import numpy as np
# Names of the elementary functions whose analytic derivatives are
# tabulated by get_function() below.
function_names = ['cos', 'sin', 'tan',
                  'cosh', 'sinh', 'tanh',
                  'arcsinh',
                  'exp', 'expm1', 'exp2', 'square',
                  'sqrt',
                  'log', 'log1p', 'log10', 'log2',
                  'arccos', 'arcsin', 'arctan', ]
def dcos(x):
    """First derivative of cos: d/dx cos(x) = -sin(x)."""
    return -np.sin(x)
def ddcos(x):
    """Second derivative of cos: d2/dx2 cos(x) = -cos(x)."""
    return -np.cos(x)
def get_function(fun_name, n=1):
    """Return ``(f, df)`` where ``f`` is the named function and ``df`` its n:th derivative.

    Parameters
    ----------
    fun_name : str
        Key of the derivative table below, or 'all' to get the table's keys.
    n : int, optional
        Derivative order (default 1).

    Returns ``(f, None)`` when the n:th derivative is not tabulated.
    NOTE: for an unknown ``fun_name`` the ``f_dic.get`` call below returns
    None, so the subsequent ``funs[0]`` lookup raises TypeError.
    """
    sinh, cosh, tanh = np.sinh, np.cosh, np.tanh
    sin, cos, tan = np.sin, np.cos, np.tan
    # Each entry maps a name to (f, f', f'', ...); trig/hyperbolic entries
    # exploit the periodicity of their derivative cycles via tuple repetition.
    f_dic = dict(sinh=(sinh, cosh, sinh, cosh, sinh),
                 cosh=(cosh, sinh, cosh, sinh, cosh),
                 arccosh=(np.arccosh,
                          lambda x: 1./np.sqrt(x**2-1),
                          lambda x: -x/(x**2-1)**(1.5),
                          lambda x: -1./(x**2-1)**(1.5) +
                          3*x**2/(x**2-1)**(2.5),
                          ),
                 arcsinh=(np.arcsinh,
                          lambda x: 1./np.sqrt(1+x**2),
                          lambda x: -x/(1+x**2)**(3./2),
                          lambda x: -1./(1+x**2)**(3./2) +
                          3*x**2/(1+x**2)**(5./2),
                          ),
                 arctanh=(np.arctanh,
                          lambda x: 1./(1-x**2),
                          lambda x: 2*x/(1-x**2)**2,
                          lambda x: 2./(1-x**2)**2 +
                          8*x**2/(1-x**2)**3,
                          ),
                 arccos=(np.arccos,
                         lambda x: -1./np.sqrt(1-x**2),
                         lambda x: -x/(1-x**2)**(3./2),
                         lambda x: -1./(1-x**2)**(3./2) -
                         3*x**2/(1-x**2)**(5./2),
                         ),
                 arcsin=(np.arcsin,
                         lambda x: 1./np.sqrt(1-x**2),
                         lambda x: x/(1-x**2)**(3./2),
                         lambda x: 1./(1-x**2)**(3./2) +
                         3*x**2./(1-x**2)**(5./2),
                         ),
                 square=(lambda x: x * x, # np.square,
                         lambda x: 2 * x,
                         lambda x: 2 * np.ones_like(x)) + (
                         lambda x: np.zeros_like(x),)*15,
                 exp=(np.exp,)*20,
                 expm1=(np.expm1,) + (np.exp,)*20,
                 exp2=(np.exp2,
                       lambda x: np.exp2(x)*np.log(2),
                       lambda x: np.exp2(x)*np.log(2)**2,
                       lambda x: np.exp2(x)*np.log(2)**3,
                       lambda x: np.exp2(x)*np.log(2)**4
                       ),
                 arctan=(np.arctan,
                         lambda x: 1./(1+x**2),
                         lambda x: -2*x/(1+x**2)**2,
                         lambda x: 8.0*x**2/(1+x**2)**3 - 2./(1+x**2)**2,
                         lambda x: 24*x/(1+x**2)**3 - 48*x**3./(1+x**2)**4,
                         ),
                 cos=(cos, dcos, ddcos, sin) * 6,
                 sin=(sin, np.cos, dcos, ddcos) * 6,
                 tan=(tan,
                      lambda x: 1./np.cos(x)**2,
                      lambda x: 2*np.tan(x)/np.cos(x)**2,
                      lambda x: (4*(tan(x)**2 + 1)*tan(x)**2 +
                                 2*(tan(x)**2 + 1)**2),
                      lambda x: (8*(tan(x)**2 + 1)*tan(x)**3 +
                                 16*(tan(x)**2 + 1)**2*tan(x))
                      ),
                 tanh=(tanh,
                       lambda x: 1. / cosh(x) ** 2,
                       lambda x: -2 * sinh(x) / cosh(x) ** 3,
                       lambda x: 4*(tanh(x)/cosh(x))**2 - 2./cosh(x)**4,
                       lambda x: (8*(tanh(x)**2 - 1)*tanh(x)**3 +
                                  16*(tanh(x)**2 - 1)**2*tanh(x))),
                 log1p=(np.log1p,
                        lambda x: 1. / (1+x),
                        lambda x: -1. / (1+x) ** 2,
                        lambda x: 2. / (1+x) ** 3,
                        lambda x: -6. / (1+x) ** 4),
                 log2=(np.log2,
                       lambda x: 1. / (x*np.log(2)),
                       lambda x: -1. / (x ** 2 * np.log(2)),
                       lambda x: 2. / (x ** 3 * np.log(2)),
                       lambda x: -6. / (x ** 4 * np.log(2))),
                 log10=(np.log10,
                        lambda x: 1. / (x * np.log(10)),
                        lambda x: -1. / (x ** 2 * np.log(10)),
                        lambda x: 2. / (x ** 3 * np.log(10)),
                        lambda x: -6. / (x ** 4 * np.log(10))),
                 log=(np.log,
                      lambda x: 1. / x,
                      lambda x: -1. / x ** 2,
                      lambda x: 2. / x ** 3,
                      lambda x: -6. / x ** 4),
                 sqrt=(np.sqrt,
                       lambda x: 0.5/np.sqrt(x),
                       lambda x: -0.25/x**(1.5),
                       lambda x: 1.5*0.25/x**(2.5),
                       lambda x: -2.5*1.5*0.25/x**(3.5)),
                 inv=(lambda x: 1. / x,
                      lambda x: -1. / x ** 2,
                      lambda x: 2. / x ** 3,
                      lambda x: -6. / x ** 4,
                      lambda x: 24. / x ** 5))
    if fun_name == 'all':
        return f_dic.keys()
    funs = f_dic.get(fun_name)
    fun0 = funs[0]
    if n < len(funs):
        return fun0, funs[n]
    # requested derivative order not tabulated
    return fun0, None
# No script behaviour; this module is import-only.
if __name__ == '__main__':
    pass
| 40.152174 | 75 | 0.304097 |
5e34dd564c7f63d99b7c3a05e142fdd76d6786ec | 1,199 | py | Python | cf/arena_util.py | sr-study/melon-playlist-continuation-2020 | 04386434133de7adbcc63fbb88ee71f69a604ecb | [
"Apache-2.0"
] | 1 | 2020-07-27T15:03:10.000Z | 2020-07-27T15:03:10.000Z | cf/arena_util.py | sr-study/melon-playlist-continuation-2020 | 04386434133de7adbcc63fbb88ee71f69a604ecb | [
"Apache-2.0"
] | 2 | 2020-07-27T14:51:14.000Z | 2020-07-28T11:12:28.000Z | cf/arena_util.py | sr-study/melon-playlist-continuation-2020 | 04386434133de7adbcc63fbb88ee71f69a604ecb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import io
import os
import json
import distutils.dir_util
from collections import Counter
import numpy as np
import numpy as np
import pandas as pd
def write_json(data, fname):
    """Serialize *data* as JSON under ./arena_data/<fname>.

    Creates intermediate directories as needed.  Non-JSON-serializable
    values (notably numpy integer scalars) are coerced with ``int()``;
    anything ``int()`` cannot convert will raise.
    """
    def _conv(o):
        # Both branches of the original isinstance(np.int64) check returned
        # int(o), so a single coercion is behaviorally equivalent.
        return int(o)
    parent = os.path.dirname(fname)
    # os.makedirs replaces the deprecated distutils.dir_util.mkpath
    # (distutils was removed in Python 3.12); exist_ok matches mkpath's
    # tolerance of pre-existing directories.
    os.makedirs("./arena_data/" + parent, exist_ok=True)
    with io.open("./arena_data/" + fname, "w", encoding="utf8") as f:
        json_str = json.dumps(data, ensure_ascii=True, default=_conv)
        f.write(json_str)
def load_json(fname):
    """Read a UTF-8 encoded JSON file and return the parsed object."""
    with open(fname, encoding="UTF-8") as handle:
        return json.load(handle)
def load_json_to_df(fname):
    """Load a UTF-8 encoded JSON file directly into a pandas DataFrame."""
    return pd.read_json(fname, encoding="UTF-8")
def debug_json(r):
    """Pretty-print *r* as indented JSON, keeping non-ASCII characters as-is."""
    rendered = json.dumps(r, ensure_ascii=False, indent=4)
    print(rendered)
def remove_seen(seen, l):
    """Return the items of *l* that are not in *seen*, preserving order."""
    blocked = set(seen)
    return [item for item in l if item not in blocked]
def most_popular(playlists, col, topk_count):
    """Count the values stored under *col* across *playlists*.

    Returns a (Counter, top_keys) tuple where top_keys are the
    *topk_count* most frequent values in descending order.
    """
    counter = Counter()
    for playlist in playlists:
        counter.update(playlist[col])
    ranked = [key for key, _ in counter.most_common(topk_count)]
    return counter, ranked
| 21.410714 | 69 | 0.63553 |
32514d2177f813b2e883495be3e0c5213bb1ddc3 | 625 | py | Python | auto_changelog/__init__.py | WqyJh/auto-changelog | 884fa133bb13013b694646472b2b113d6be2abc4 | [
"MIT"
] | 1 | 2019-08-21T10:41:17.000Z | 2019-08-21T10:41:17.000Z | auto_changelog/__init__.py | WqyJh/auto-changelog | 884fa133bb13013b694646472b2b113d6be2abc4 | [
"MIT"
] | null | null | null | auto_changelog/__init__.py | WqyJh/auto-changelog | 884fa133bb13013b694646472b2b113d6be2abc4 | [
"MIT"
] | null | null | null | from typing import Any
from auto_changelog.domain_model import RepositoryInterface, PresenterInterface
# Package version string.
# NOTE(review): '1.0.0dev1' is not PEP 440-normalized ('1.0.0.dev1' would
# be) — confirm before relying on version-comparison tooling.
__version__ = '1.0.0dev1'
def generate_changelog(
    repository: RepositoryInterface,
    presenter: PresenterInterface,
    title: str = 'Changelog',
    description: str = '',
    starting_commit: str = '',
    stopping_commit: str = 'HEAD',
) -> Any:
    """Coordinate the repository and presenter to produce a changelog.

    The repository builds the changelog for the commit range
    [starting_commit, stopping_commit]; the presenter renders it.
    """
    raw_changelog = repository.generate_changelog(
        title, description,
        starting_commit=starting_commit,
        stopping_commit=stopping_commit,
    )
    return presenter.present(raw_changelog)
| 32.894737 | 131 | 0.7248 |
7b19ce3f1a50141479ddea652f8a96c42cfc1a46 | 3,324 | py | Python | app/requests.py | vugutsa/News-API | 16becc037aa05d54a5ec6abede5baf9f94b1eb3a | [
"Unlicense"
] | null | null | null | app/requests.py | vugutsa/News-API | 16becc037aa05d54a5ec6abede5baf9f94b1eb3a | [
"Unlicense"
] | null | null | null | app/requests.py | vugutsa/News-API | 16becc037aa05d54a5ec6abede5baf9f94b1eb3a | [
"Unlicense"
] | null | null | null | import urllib.request,json
from .models import News
# News API key, populated by configure_request()
api_key = None
# News API base URL template, populated by configure_request()
base_url = None
def configure_request(app):
    """Cache the News API key and base URL from the app config in module globals."""
    global api_key, base_url
    config = app.config
    api_key = config['NEWS_API_KEY']
    base_url = config['NEWS_API_BASE_URL']
def get_news(category):
    """Fetch articles for *category* and return a list of News objects.

    Returns None when the response contains no articles.
    """
    request_url = base_url.format(category, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    articles = payload['articles']
    return process_results(articles) if articles else None
def process_results(news_list):
    """Transform raw article dictionaries into News objects.

    Articles without a title are skipped.

    Args:
        news_list: list of article dictionaries from the News API.

    Returns:
        list of News objects.
    """
    results = []
    for article in news_list:
        title = article.get('title')
        if not title:
            continue
        results.append(News(title,
                            article.get('id'),
                            article.get('urlToImage'),
                            article.get('description'),
                            article.get('publishedAt')))
    return results
def get_articles(articles):
    """Fetch details for a single article and build a News object.

    NOTE(review): ``articlesbase_url`` is not defined anywhere in this
    module (only ``base_url`` is), so this call raises NameError as-is —
    TODO confirm the intended URL template.
    """
    get_news_details_url = articlesbase_url.format(articles, api_key)
    with urllib.request.urlopen(get_news_details_url) as url:
        news_details_data = url.read()
        news_details_response = json.loads(news_details_data)
        news_object = None
        if news_details_response:
            # Bug fix: the fields were read from the undefined name
            # ``news_item`` (copy-paste from process_results) instead of
            # the decoded response.
            title = news_details_response.get('title')
            image = news_details_response.get('urlToImage')
            description = news_details_response.get('description')
            date = news_details_response.get('publishedAt')
            article_url = news_details_response.get('url')
            article_id = news_details_response.get('id')
            news_object = News(title, article_id, image, description, date, article_url)
    return news_object
def get_category(category_name):
    """Fetch articles for *category_name* and return a list of News objects.

    Returns None when the response contains no articles.
    """
    category_url = base_url.format(category_name, api_key)
    with urllib.request.urlopen(category_url) as response:
        payload = json.loads(response.read())
    articles = payload['articles']
    return process_results(articles) if articles else None
def search_articles(articles_name):
    """Search all articles matching *articles_name*; return News objects or None.

    NOTE(review): the URL template interpolates ``api_key`` into ``q=`` and
    the search term into ``query=`` while ``apiKey=`` stays empty — this
    looks inverted but is preserved as-is; confirm against the News API docs.
    """
    search_articles_url = 'http://newsapi.org/v2/everything/search?q={}&apiKey=&query={}'.format(api_key, articles_name)
    with urllib.request.urlopen(search_articles_url) as url:
        search_articles_data = url.read()
        # Bug fixes: the original referenced the misspelled names
        # ``search_aricles_data`` / ``search_aricles_response`` (NameError)
        # and the undefined ``movie_name`` instead of ``articles_name``.
        search_articles_response = json.loads(search_articles_data)
        search_articles_results = None
        if search_articles_response['results']:
            search_articles_list = search_articles_response['results']
            search_articles_results = process_results(search_articles_list)
    return search_articles_results
| 32.910891 | 116 | 0.690132 |
28aaaa735ba7072af61e862f22d4d1d6a9686809 | 3,544 | py | Python | bindings/python/ensmallen/datasets/string/paenibacilluspeoriae.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/paenibacilluspeoriae.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/paenibacilluspeoriae.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Paenibacillus peoriae.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PaenibacillusPeoriae(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return a new instance of the Paenibacillus peoriae graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected (default False).
    preprocess: bool = True
        Whether to preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes simply as
        a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to download and preprocess the files only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.  The available versions are:
            - homology.v11.0
            - homology.v11.5
            - physical.links.v11.0
            - physical.links.v11.5
            - links.v11.0
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Paenibacillus peoriae graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Build the retriever first; invoking it performs the actual
    # download/preprocessing and returns the Graph instance.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="PaenibacillusPeoriae",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 32.814815 | 223 | 0.677201 |
da4213c8aaa7bd7c127eb6661b4ae2da317a4d12 | 196 | py | Python | master.py | thor-shuang/git_learning | b3b86ea6636705f2bbe28014fc35303a0d7d75b5 | [
"MIT"
] | null | null | null | master.py | thor-shuang/git_learning | b3b86ea6636705f2bbe28014fc35303a0d7d75b5 | [
"MIT"
] | null | null | null | master.py | thor-shuang/git_learning | b3b86ea6636705f2bbe28014fc35303a0d7d75b5 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
@File : master.py
@Vision : 1.0.0
@Time : 2020/6/4 14:08
@Author : Qing Shuang
@Email : 2075693226@qq.com
@Software: PyCharm
"""
# master
# master
# master
| 13.066667 | 28 | 0.581633 |
fd07f4d0364b0edce94496c98f55c9cce5b58cb8 | 8,145 | py | Python | functions.py | m-pana/AutoSpeech | 46f6b400ef22e400c051718196e5c78091215d25 | [
"MIT"
] | null | null | null | functions.py | m-pana/AutoSpeech | 46f6b400ef22e400c051718196e5c78091215d25 | [
"MIT"
] | null | null | null | functions.py | m-pana/AutoSpeech | 46f6b400ef22e400c051718196e5c78091215d25 | [
"MIT"
] | null | null | null | import time
import torch
import torch.nn.functional as F
import logging
import numpy as np
import matplotlib.pyplot as plt
from utils import compute_eer
from utils import AverageMeter, ProgressMeter, accuracy
# Use the non-interactive Agg backend so figures can be rendered headless.
plt.switch_backend('agg')
# Module-level logger; handlers are expected to be configured by the caller.
logger = logging.getLogger(__name__)
def train(cfg, model, optimizer, train_loader, val_loader, criterion, architect, epoch, writer_dict, lr_scheduler=None):
    """Run one architecture-search training epoch.

    For every training batch: performs an architecture step on a validation
    batch (``architect.step``), then a weight step on the training batch,
    logging loss/accuracy/architecture-entropy to TensorBoard.

    Mutates ``model``/``optimizer``/``architect`` state and increments
    ``writer_dict['train_global_steps']``.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    alpha_entropies = AverageMeter('Entropy', ':.4e')
    progress = ProgressMeter(
        len(train_loader), batch_time, data_time, losses, top1, top5, alpha_entropies,
        prefix="Epoch: [{}]".format(epoch), logger=logger)
    writer = writer_dict['writer']
    # (removed leftover debug prints of the loader lengths)
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        global_steps = writer_dict['train_global_steps']
        if lr_scheduler:
            current_lr = lr_scheduler.set_lr(optimizer, global_steps, epoch)
        else:
            current_lr = cfg.TRAIN.LR
        # measure data loading time
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # NOTE(review): iter(val_loader) is re-created every step; with a
        # shuffling loader this draws a fresh random batch, but with a
        # non-shuffling loader it always returns the first batch — confirm
        # the loader is shuffled.
        input_search, target_search = next(iter(val_loader))
        input_search = input_search.cuda(non_blocking=True)
        target_search = target_search.cuda(non_blocking=True)
        # step architecture
        architect.step(input_search, target_search)
        alpha_entropy = architect.model.compute_arch_entropy()
        alpha_entropies.update(alpha_entropy.mean(), input.size(0))
        # compute output
        output = model(input)
        # measure accuracy and record loss
        acc1 = accuracy(output, target, topk=(1,))
        top1.update(acc1[0], input.size(0))
        # top5 is never updated here but is still logged below (stays 0)
        loss = criterion(output, target)
        losses.update(loss.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # write to logger
        writer.add_scalar('lr', current_lr, global_steps)
        writer.add_scalar('train_loss', losses.val, global_steps)
        writer.add_scalar('arch_entropy', alpha_entropies.val, global_steps)
        writer_dict['train_global_steps'] = global_steps + 1
        # log acc for cross entropy loss
        writer.add_scalar('train_acc1', top1.val, global_steps)
        writer.add_scalar('train_acc5', top5.val, global_steps)
        if i % cfg.PRINT_FREQ == 0:
            progress.print(i)
def train_from_scratch(cfg, model, optimizer, train_loader, criterion, epoch, writer_dict, lr_scheduler=None):
    """Run one plain (no architecture search) training epoch.

    Mutates ``model``/``optimizer`` state, logs loss/top-1/top-5 accuracy
    to TensorBoard and increments ``writer_dict['train_global_steps']``.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader), batch_time, data_time, losses, top1, top5, prefix="Epoch: [{}]".format(epoch), logger=logger)
    writer = writer_dict['writer']
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        global_steps = writer_dict['train_global_steps']
        # note: unlike train(), the scheduler is only *read* here
        if lr_scheduler:
            current_lr = lr_scheduler.get_lr()
        else:
            current_lr = cfg.TRAIN.LR
        # measure data loading time
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output
        output = model(input) # <-- simple forward
        # measure accuracy and record loss
        loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        losses.update(loss.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # write to logger
        writer.add_scalar('lr', current_lr, global_steps)
        writer.add_scalar('train_loss', losses.val, global_steps)
        writer_dict['train_global_steps'] = global_steps + 1
        # log acc for cross entropy loss
        writer.add_scalar('train_acc1', top1.val, global_steps)
        writer.add_scalar('train_acc5', top5.val, global_steps)
        if i % cfg.PRINT_FREQ == 0:
            progress.print(i)
def validate_verification(cfg, model, test_loader):
    """Evaluate speaker verification and return the equal error rate (EER).

    For each trial pair, utterance-level embeddings are averaged over their
    segments and compared with cosine similarity; the EER is computed over
    all similarity scores and labels via ``compute_eer``.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    progress = ProgressMeter(
        len(test_loader), batch_time, prefix='Test: ', logger=logger)
    # switch to evaluate mode
    model.eval()
    labels, distances = [], []
    with torch.no_grad():
        end = time.time()
        for i, (input1, input2, label) in enumerate(test_loader):
            input1 = input1.cuda(non_blocking=True).squeeze(0)
            input2 = input2.cuda(non_blocking=True).squeeze(0)
            label = label.cuda(non_blocking=True)
            # compute output: mean-pool segment embeddings per utterance
            outputs1 = model(input1).mean(dim=0).unsqueeze(0)
            outputs2 = model(input2).mean(dim=0).unsqueeze(0)
            dists = F.cosine_similarity(outputs1, outputs2)
            dists = dists.data.cpu().numpy()
            distances.append(dists)
            labels.append(label.data.cpu().numpy())
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % 2000 == 0:
                progress.print(i)
        # flatten per-batch arrays before scoring
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array([subdist for dist in distances for subdist in dist])
        eer = compute_eer(distances, labels)
        logger.info('Test EER: {:.8f}'.format(np.mean(eer)))
        return eer
def validate_identification(cfg, model, test_loader, criterion):
    """Evaluate speaker-identification accuracy on *test_loader*.

    Runs the model in eval mode without gradients and returns the average
    top-1 accuracy over the test set.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(test_loader), batch_time, losses, top1, top5, prefix='Test: ', logger=logger)
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(test_loader):
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output (removed leftover "DEBUG 1" prints of the raw
            # target/output tensors and dead commented-out averaging code)
            output = model(input)
            acc1 = accuracy(output, target, topk=(1,))
            top1.update(acc1[0], input.size(0))
            loss = criterion(output, target)
            losses.update(loss.item(), 1)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % 2000 == 0:
                progress.print(i)
    # The original logger.info format broke on the meter objects, so the
    # top-1 meter is printed directly via its own __str__.
    print(top1)
    return top1.avg
| 33.79668 | 120 | 0.618293 |
cc799c986856fa1e6e1b691fb88b8e3e03fdb936 | 360 | py | Python | correctiv_justizgelder/cms_apps.py | correctiv/correctiv-justizgelder | 6e72dc8212cdfc38571e5410f4b2c0bab66a6ef3 | [
"MIT"
] | 2 | 2015-05-08T15:48:35.000Z | 2021-09-13T10:57:35.000Z | correctiv_justizgelder/cms_apps.py | correctiv/correctiv-justizgelder | 6e72dc8212cdfc38571e5410f4b2c0bab66a6ef3 | [
"MIT"
] | null | null | null | correctiv_justizgelder/cms_apps.py | correctiv/correctiv-justizgelder | 6e72dc8212cdfc38571e5410f4b2c0bab66a6ef3 | [
"MIT"
] | 1 | 2017-12-11T14:13:28.000Z | 2017-12-11T14:13:28.000Z | """Application hooks for blog"""
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class JustizgelderApphook(CMSApp):
    """django CMS apphook that mounts the court-donations database app.

    Registering with ``apphook_pool`` lets CMS editors attach the
    ``correctiv_justizgelder`` URL configuration to a CMS page.
    """
    name = _('Court Donations Database')  # label shown in the CMS admin
    app_name = 'justizgelder'  # URL namespace for reversing
    urls = ['correctiv_justizgelder.urls']
apphook_pool.register(JustizgelderApphook)
| 24 | 55 | 0.780556 |
19792024274c8bd4219e660bdc03a4e7ea5caa2e | 366 | py | Python | bvbcet/GBM/bisect.py | satish-annigeri/Notebooks | 92a7dc1d4cf4aebf73bba159d735a2e912fc88bb | [
"CC0-1.0"
] | null | null | null | bvbcet/GBM/bisect.py | satish-annigeri/Notebooks | 92a7dc1d4cf4aebf73bba159d735a2e912fc88bb | [
"CC0-1.0"
] | null | null | null | bvbcet/GBM/bisect.py | satish-annigeri/Notebooks | 92a7dc1d4cf4aebf73bba159d735a2e912fc88bb | [
"CC0-1.0"
] | null | null | null | #Bisection method to find a real root of an equation***********
def bisect(func, a, b, maxitr, verbose=False):
    """Approximate a root of *func* on [a, b] by the bisection method.

    Performs exactly *maxitr* halvings of the bracket and returns the last
    midpoint (or the initial midpoint when maxitr == 0).  The caller should
    choose a and b so that func(a) and func(b) have opposite signs; this is
    assumed rather than checked, matching the original script's behaviour.

    Parameters
    ----------
    func : callable
        Function whose root is sought.
    a, b : float
        Bracket endpoints.
    maxitr : int
        Number of bisection iterations to run.
    verbose : bool
        When True, print [a, b, x, f(x)] after each iteration.
    """
    x = (a + b) / 2.0
    for itr in range(maxitr):
        x = (a + b) / 2.0
        fa = func(a)
        fx = func(x)
        if fa * fx < 0.0:
            # Sign change between a and x: the root lies in [a, x].
            b = x
        else:
            a = x
        if verbose:
            print([a, b, x, fx])
    return x


def main():
    """Interactive driver for the bisection method.

    Bug fix: the original used Python 2's ``input`` (which evaluated the typed
    expression, e.g. a tuple for ``a, b``); under Python 3 ``input`` returns a
    string, so the unpacking and the ``itr < maxitr`` comparison both failed.
    Values are now read one at a time and converted explicitly.
    """
    a = float(input('enter the value of a: '))
    b = float(input('enter the value of b: '))
    maxitr = int(input('enter the no. of iterations: '))
    print("itr, a, b, x, fx")
    bisect(lambda x: x ** 3 + x - 1, a, b, maxitr, verbose=True)


if __name__ == '__main__':
    main()
b32bb67c3fbb8bd832815c5e6ac9683edec2eb6d | 25,026 | py | Python | mhctools/parsing.py | denklewer/mhctools | 1aed7e8b975253349a0c504f7d42e7051139e459 | [
"Apache-2.0"
] | null | null | null | mhctools/parsing.py | denklewer/mhctools | 1aed7e8b975253349a0c504f7d42e7051139e459 | [
"Apache-2.0"
] | null | null | null | mhctools/parsing.py | denklewer/mhctools | 1aed7e8b975253349a0c504f7d42e7051139e459 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014-2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import numpy as np
from mhcnames import normalize_allele_name
from .binding_prediction import BindingPrediction
# Words that begin header/summary lines in NetMHC* tool output; any result
# line starting with one of these tokens is skipped by split_stdout_lines.
NETMHC_TOKENS = {
    "pos",
    "Pos",
    "Seq",
    "Number",
    "Protein",
    "Allele",
    "NetMHC",
    "Strong",
}
def check_stdout_error(stdout, program_name):
    """Raise ValueError if *stdout* contains an error report.

    NetMHC* tools report failures on standard output rather than via exit
    codes, so we scan for the word "ERROR" (case-insensitively) and, when
    found, raise with the first line of the error message.
    """
    upper = stdout.upper()
    position = upper.find("ERROR")
    if position < 0:
        return
    first_error_line = stdout[position:].split("\n")[0]
    raise ValueError("%s failed - %s" % (program_name, first_error_line))
def split_stdout_lines(stdout):
    """Yield whitespace-split result rows from NetMHC* standard output.

    Everything before the first line of dashes is discarded (all NetMHC
    formats print a dashed rule before actual binding results), as are
    blank lines, ``#`` comments, and header lines that start with one of
    the NETMHC_TOKENS words.
    """
    in_results = False
    for raw_line in stdout.split("\n"):
        line = raw_line.strip()
        # A run of dashes marks the start of the results section.  NetMHC 4.0
        # can emit negative positions in "peptide" input mode, so we match
        # several dashes rather than a single one.
        if line.startswith("---"):
            in_results = True
        elif not in_results:
            continue
        elif not line or line.startswith("#"):
            continue
        elif any(line.startswith(token) for token in NETMHC_TOKENS):
            continue
        else:
            yield line.split()
def clean_fields(fields, ignored_value_indices, transforms):
    """Normalize one parsed row of NetMHC* output.

    Some NetMHC* columns (e.g. the "SB"/"WB" bind-level markers) only appear
    on some rows, shifting the positions of later columns.  Any value that
    appears at the exact index listed for it in *ignored_value_indices* is
    dropped.  Values whose index appears in *transforms* are passed through
    the corresponding function (e.g. converting 1-based positions to
    0-based).

    Warning: this can misfire — a legitimate value equal to an ignored key
    at the ignored index will also be removed.
    """
    cleaned = []
    for index, value in enumerate(fields):
        # Drop the value only when it sits at the position where it is
        # expected to be an optional marker column.
        if value in ignored_value_indices and ignored_value_indices[value] == index:
            continue
        if index in transforms:
            cleaned.append(transforms[index](value))
        else:
            cleaned.append(value)
    return cleaned
def valid_affinity(x):
    """Return True when *x* is a usable IC50 affinity value.

    A valid affinity is non-None, finite (not NaN or +/-inf) and
    non-negative.

    Parameters
    ----------
    x : float

    Returns
    -------
    bool
    """
    if x is None:
        return False
    # np.isfinite rejects NaN and both infinities in one check.
    return bool(np.isfinite(x)) and x >= 0
def parse_stdout(
        stdout,
        prediction_method_name,
        sequence_key_mapping,
        key_index,
        offset_index,
        peptide_index,
        allele_index,
        score_index,
        rank_index=None,
        ic50_index=None,
        ignored_value_indices=None,
        transforms=None):
    """
    Generic function for parsing any NetMHC* output, given expected indices
    of values of interest.

    Parameters
    ----------
    stdout : str
        Raw standard output of the NetMHC* tool.

    prediction_method_name : str
        Name recorded on every resulting BindingPrediction.

    sequence_key_mapping : dict
        Dictionary mapping sequence names (which might be hashes or truncated)
        to the sequence names which should be used in the parsed
        BindingPrediction objects.  If falsy, keys are used unchanged.

    key_index, offset_index, peptide_index, allele_index : int
        Column positions of the sequence key, peptide offset, peptide
        sequence and allele name in each result row.

    score_index : int or None
        Column position of the prediction score (None to skip).

    rank_index : int or None
        Column position of the percentile rank (None to skip).

    ic50_index : int or None
        Column position of the IC50 affinity in nM (None to skip).

    ignored_value_indices : dict or None
        Map from values to the positions we'll ignore them at.
        See clean_fields.

    transforms : dict or None
        Map from field index to a transform function to be applied to values in
        that field. See clean_fields.

    Returns list of BindingPrediction.
    """
    # Bug fix: the defaults were shared mutable dicts ({}); normalize
    # None to fresh empty dicts instead.
    if ignored_value_indices is None:
        ignored_value_indices = {}
    if transforms is None:
        transforms = {}
    binding_predictions = []
    for fields in split_stdout_lines(stdout):
        fields = clean_fields(fields, ignored_value_indices, transforms)

        offset = int(fields[offset_index])
        peptide = str(fields[peptide_index])
        allele = str(fields[allele_index])
        if score_index is None:
            score = None
        else:
            score = float(fields[score_index])
        if rank_index is None:
            rank = None
        else:
            rank = float(fields[rank_index])
        if ic50_index is None:
            ic50 = None
        else:
            ic50 = float(fields[ic50_index])

        key = str(fields[key_index])
        if sequence_key_mapping:
            original_key = sequence_key_mapping[key]
        else:
            # if sequence_key_mapping isn't provided then let's assume it's the
            # identity function
            original_key = key

        # if we have a bad IC50 score we might still get a salvageable
        # log of the score. Strangely, this is necessary sometimes!
        if ic50_index is not None and (not valid_affinity(ic50)) and np.isfinite(score):
            # pylint: disable=invalid-unary-operand-type
            ic50 = 50000 ** (1 - score)

        binding_predictions.append(BindingPrediction(
            source_sequence_name=original_key,
            offset=offset,
            peptide=peptide,
            allele=normalize_allele_name(allele),
            score=score,
            affinity=ic50,
            percentile_rank=rank,
            prediction_method_name=prediction_method_name))
    return binding_predictions
def parse_netmhc3_stdout(
        stdout,
        prediction_method_name="netmhc3",
        sequence_key_mapping=None):
    """Parse NetMHC 3.x standard output into BindingPrediction objects.

    NetMHC 3.x rows look like::

        0  SIINKFELL  0.437  441  WB  A1  HLA-A02:01

    where the "WB"/"SB" bind-level column only appears on binder rows, so
    it is stripped (via ignored_value_indices) before column positions are
    interpreted.
    """
    column_layout = dict(
        key_index=4,
        offset_index=0,
        peptide_index=1,
        allele_index=5,
        score_index=2,
        ic50_index=3,
        rank_index=None,
    )
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        ignored_value_indices={"WB": 4, "SB": 4},
        **column_layout)
def parse_netmhc4_stdout(
        stdout,
        prediction_method_name="netmhc4",
        sequence_key_mapping=None):
    """Parse NetMHC 4.0 standard output into BindingPrediction objects.

    NetMHC 4.0 rows carry: pos, HLA, peptide, core/gap columns, identity,
    1-log50k(aff), affinity in nM, %rank and an optional bind-level, e.g.::

        0  HLA-A0201  TMDKSELVQ ... 143B_BOVIN_P293  0.051  28676.59  43.00
    """
    column_layout = dict(
        key_index=10,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=11,
        ic50_index=12,
        rank_index=13,
    )
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        **column_layout)
def parse_netmhcpan28_stdout(
        stdout,
        prediction_method_name="netmhcpan",
        sequence_key_mapping=None):
    """Parse NetMHCpan 2.8 standard output into BindingPrediction objects.

    Columns are: pos, HLA, peptide, identity, 1-log50k(aff), affinity (nM),
    %rank and an optional bind-level marker, e.g.::

        11  HLA-A*02:03  HIIIASSSL  id0  0.515  189.74  4.00  <= WB
    """
    # NetMHCpan 2.8 reports failures on stdout, so surface them first.
    check_stdout_error(stdout, "NetMHCpan-2.8")
    column_layout = dict(
        key_index=3,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=4,
        ic50_index=5,
        rank_index=6,
    )
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        **column_layout)
def parse_netmhcpan3_stdout(
        stdout,
        prediction_method_name="netmhcpan",
        sequence_key_mapping=None):
    """Parse NetMHCpan 3.x standard output into BindingPrediction objects.

    Columns are: Pos, HLA, Peptide, Core, Of, Gp, Gl, Ip, Il, Icore,
    Identity, Score, Aff(nM), %Rank, BindLevel.
    """
    # NetMHCpan 3's "Pos" column is 1-based; every other predictor in this
    # library uses 0-based offsets, so convert on the way in.
    one_based_to_zero_based = {
        0: lambda pos: int(pos) - 1,
    }
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        key_index=10,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=11,
        ic50_index=12,
        rank_index=13,
        transforms=one_based_to_zero_based)
def parse_netmhcpan4_stdout(
        stdout,
        prediction_method_name="netmhcpan",
        sequence_key_mapping=None,
        mode="binding_affinity"):
    """Parse NetMHCpan 4.0 standard output into BindingPrediction objects.

    In "binding_affinity" mode the Aff(nM) and %Rank columns follow the
    score; in "elution_score" mode there is no affinity column and the
    rank sits directly after the score.
    """
    # NetMHCpan 4's "Pos" column is 1-based; convert to the 0-based
    # offsets used throughout this library.
    one_based_to_zero_based = {
        0: lambda pos: int(pos) - 1,
    }
    elution = (mode == "elution_score")
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        key_index=10,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=11,
        ic50_index=None if elution else 12,
        rank_index=12 if elution else 13,
        transforms=one_based_to_zero_based)
def parse_netmhcpan41_stdout(
        stdout,
        prediction_method_name="netmhcpan",
        sequence_key_mapping=None,
        mode="binding_affinity"):
    """Parse NetMHCpan 4.1 standard output into BindingPrediction objects.

    NetMHCpan 4.1 emits both elution (Score_EL, %Rank_EL) and binding
    affinity (Score_BA, %Rank_BA, Aff(nM)) columns in the same row; *mode*
    selects which set is reported.
    """
    # The "Pos" column is 1-based; convert to 0-based offsets as used by
    # every other predictor in this library.
    one_based_to_zero_based = {
        0: lambda pos: int(pos) - 1,
    }
    elution = (mode == "elution_score")
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        key_index=10,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=11 if elution else 13,
        ic50_index=None if elution else 15,
        rank_index=12 if elution else 14,
        transforms=one_based_to_zero_based)
def parse_netmhccons_stdout(
        stdout,
        prediction_method_name="netmhccons",
        sequence_key_mapping=None):
    """Parse NetMHCcons standard output into BindingPrediction objects.

    Uses the same column layout as NetMHCpan 2.8: pos, HLA, peptide,
    identity, 1-log50k(aff), affinity (nM), %rank, optional bind-level.
    """
    column_layout = dict(
        key_index=3,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=4,
        ic50_index=5,
        rank_index=6,
    )
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        **column_layout)
def parse_netmhciipan_stdout(
        stdout,
        prediction_method_name="netmhciipan",
        sequence_key_mapping=None):
    """Parse NetMHCIIpan (pre-4.0) standard output.

    Columns are: Seq, Allele, Peptide, Identity, Pos, Core, Core_Rel,
    1-log50k(aff), Affinity(nM), %Rank, Exp_Bind, BindingLevel.
    """
    # NetMHCIIpan reports failures on stdout, so surface them first.
    check_stdout_error(stdout, "NetMHCIIpan")
    column_layout = dict(
        key_index=3,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        score_index=7,
        ic50_index=8,
        rank_index=9,
    )
    return parse_stdout(
        stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        **column_layout)
def parse_netmhciipan4_stdout(
        stdout,
        prediction_method_name="netmhciipan",
        sequence_key_mapping=None,
        mode="elution_score"):
    """Parse NetMHCIIpan 4.0 standard output into BindingPrediction objects.

    NetMHCIIpan 4.0 is always run with ``-BA`` so both elution (Score_EL,
    %Rank_EL) and binding-affinity (Score_BA, Affinity(nM), %Rank_BA)
    columns are present; *mode* selects which set is returned.

    Parameters
    ----------
    stdout : str
        Raw standard output from NetMHCIIpan 4.0.
    prediction_method_name : str
    sequence_key_mapping : dict or None
        Optional map from (possibly hashed or truncated) sequence names in
        the output back to the original sequence names.
    mode : str
        Either "elution_score" or "binding_affinity".

    Raises ValueError if the tool reported an error or *mode* is invalid.
    Returns list of BindingPrediction.
    """
    check_stdout_error(stdout, "NetMHCIIpan")
    if mode not in ("elution_score", "binding_affinity"):
        # Bug fix: the message previously said "binding affinity" (with a
        # space), which is not an accepted value for `mode`.
        raise ValueError(
            "Mode is %s but must be one of: elution_score, binding_affinity" % mode)
    # the offset specified in "Pos" is 1-based instead of 0-based; adjust it
    # to be 0-based, as in all the other netmhc predictors in this library.
    transforms = {
        0: lambda x: int(x) - 1,
    }
    elution = (mode == "elution_score")
    return parse_stdout(
        stdout=stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        key_index=6,
        offset_index=0,
        peptide_index=2,
        allele_index=1,
        ic50_index=None if elution else 11,
        rank_index=8 if elution else 12,
        score_index=7 if elution else 10,
        transforms=transforms)
| 44.137566 | 166 | 0.536242 |
55f48af27a245cff4d79abe1afacb2ca13c703d0 | 11,720 | py | Python | celery/tests/app/test_app.py | stratoukos/celery | da9c0bad1f52515a70ae28d48abddbf42571a39f | [
"BSD-3-Clause"
] | 1 | 2015-12-02T17:12:09.000Z | 2015-12-02T17:12:09.000Z | celery/tests/app/test_app.py | stratoukos/celery | da9c0bad1f52515a70ae28d48abddbf42571a39f | [
"BSD-3-Clause"
] | null | null | null | celery/tests/app/test_app.py | stratoukos/celery | da9c0bad1f52515a70ae28d48abddbf42571a39f | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import with_statement
import os
from mock import Mock, patch
from pickle import loads, dumps
from kombu import Exchange
from celery import Celery
from celery import app as _app
from celery.app import defaults
from celery.app import state
from celery.loaders.base import BaseLoader
from celery.platforms import pyimplementation
from celery.utils.serialization import pickle
from celery.tests import config
from celery.tests.utils import (Case, mask_modules, platform_pyimp,
sys_platform, pypy_version)
from celery.utils import uuid
from celery.utils.mail import ErrorMail
THIS_IS_A_KEY = "this is a value"
class Object(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
def _get_test_config():
    """Return a dict of the UPPERCASE public settings from the test config module."""
    return dict(
        (name, getattr(config, name))
        for name in dir(config)
        if name.isupper() and not name.startswith("_")
    )


# Snapshot used by test fixtures to seed each fresh Celery app.
test_config = _get_test_config()
class test_module(Case):
    """Tests for module-level helpers in ``celery.app``."""

    def test_default_app(self):
        self.assertEqual(_app.default_app, state.default_app)

    def test_bugreport(self):
        # bugreport() should always produce a non-empty diagnostic string
        self.assertTrue(_app.bugreport())
class test_App(Case):
    """Tests for the :class:`celery.Celery` application object.

    Each test gets a fresh, non-current app seeded with the shared
    ``test_config`` snapshot (see setUp).
    """

    def setUp(self):
        self.app = Celery(set_as_current=False)
        self.app.conf.update(test_config)

    def test_task(self):
        app = Celery("foozibari", set_as_current=False)

        def fun():
            pass

        fun.__module__ = "__main__"
        task = app.task(fun)
        # tasks defined in __main__ are named after the app's main name
        self.assertEqual(task.name, app.main + ".fun")

    def test_with_broker(self):
        app = Celery(set_as_current=False, broker="foo://baribaz")
        self.assertEqual(app.conf.BROKER_HOST, "foo://baribaz")

    def test_repr(self):
        self.assertTrue(repr(self.app))

    def test_TaskSet(self):
        ts = self.app.TaskSet()
        self.assertListEqual(ts.tasks, [])
        self.assertIs(ts.app, self.app)

    def test_pickle_app(self):
        changes = dict(THE_FOO_BAR="bars",
                       THE_MII_MAR="jars")
        self.app.conf.update(changes)
        saved = pickle.dumps(self.app)
        # pickled apps must stay small: only config, not state, is serialized
        self.assertLess(len(saved), 2048)
        restored = pickle.loads(saved)
        self.assertDictContainsSubset(changes, restored.conf)

    def test_worker_main(self):
        from celery.bin import celeryd

        class WorkerCommand(celeryd.WorkerCommand):

            def execute_from_commandline(self, argv):
                return argv

        # patch the command class so no worker is actually started
        prev, celeryd.WorkerCommand = celeryd.WorkerCommand, WorkerCommand
        try:
            ret = self.app.worker_main(argv=["--version"])
            self.assertListEqual(ret, ["--version"])
        finally:
            celeryd.WorkerCommand = prev

    def test_config_from_envvar(self):
        # this module itself acts as the config object (see THIS_IS_A_KEY)
        os.environ["CELERYTEST_CONFIG_OBJECT"] = "celery.tests.app.test_app"
        self.app.config_from_envvar("CELERYTEST_CONFIG_OBJECT")
        self.assertEqual(self.app.conf.THIS_IS_A_KEY, "this is a value")

    def test_config_from_object(self):

        class Object(object):
            LEAVE_FOR_WORK = True
            MOMENT_TO_STOP = True
            CALL_ME_BACK = 123456789
            WANT_ME_TO = False
            UNDERSTAND_ME = True

        self.app.config_from_object(Object())

        self.assertTrue(self.app.conf.LEAVE_FOR_WORK)
        self.assertTrue(self.app.conf.MOMENT_TO_STOP)
        self.assertEqual(self.app.conf.CALL_ME_BACK, 123456789)
        self.assertFalse(self.app.conf.WANT_ME_TO)
        self.assertTrue(self.app.conf.UNDERSTAND_ME)

    def test_config_from_cmdline(self):
        # keys are typed via (list)/(string)/(int)/(dict) casts
        cmdline = [".always_eager=no",
                   ".result_backend=/dev/null",
                   '.task_error_whitelist=(list)["a", "b", "c"]',
                   "celeryd.prefetch_multiplier=368",
                   ".foobarstring=(string)300",
                   ".foobarint=(int)300",
                   '.result_engine_options=(dict){"foo": "bar"}']
        self.app.config_from_cmdline(cmdline, namespace="celery")
        self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER)
        self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, "/dev/null")
        self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368)
        self.assertListEqual(self.app.conf.CELERY_TASK_ERROR_WHITELIST,
                             ["a", "b", "c"])
        self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, "300")
        self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300)
        self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS,
                             {"foo": "bar"})

    def test_compat_setting_CELERY_BACKEND(self):
        self.app.config_from_object(Object(CELERY_BACKEND="set_by_us"))
        self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, "set_by_us")

    def test_setting_BROKER_TRANSPORT_OPTIONS(self):
        _args = {'foo': 'bar', 'spam': 'baz'}

        self.app.config_from_object(Object())
        self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, {})

        self.app.config_from_object(Object(BROKER_TRANSPORT_OPTIONS=_args))
        self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, _args)

    def test_Windows_log_color_disabled(self):
        self.app.IS_WINDOWS = True
        self.assertFalse(self.app.log.supports_color())

    def test_compat_setting_CARROT_BACKEND(self):
        self.app.config_from_object(Object(CARROT_BACKEND="set_by_us"))
        self.assertEqual(self.app.conf.BROKER_TRANSPORT, "set_by_us")

    def test_WorkController(self):
        x = self.app.WorkController
        self.assertIs(x.app, self.app)

    def test_Worker(self):
        x = self.app.Worker
        self.assertIs(x.app, self.app)

    def test_AsyncResult(self):
        x = self.app.AsyncResult("1")
        self.assertIs(x.app, self.app)
        r = loads(dumps(x))
        # not set as current, so ends up as default app after reduce
        self.assertIs(r.app, state.default_app)

    @patch("celery.bin.celery.CeleryCommand.execute_from_commandline")
    def test_start(self, execute):
        self.app.start()
        self.assertTrue(execute.called)

    def test_mail_admins(self):

        class Loader(BaseLoader):

            def mail_admins(*args, **kwargs):
                return args, kwargs

        self.app.loader = Loader()
        # no ADMINS configured -> nothing is sent
        self.app.conf.ADMINS = None
        self.assertFalse(self.app.mail_admins("Subject", "Body"))

        self.app.conf.ADMINS = [("George Costanza", "george@vandelay.com")]
        self.assertTrue(self.app.mail_admins("Subject", "Body"))

    def test_amqp_get_broker_info(self):
        self.assertDictContainsSubset({"hostname": "localhost",
                                       "userid": "guest",
                                       "password": "guest",
                                       "virtual_host": "/"},
                                      self.app.broker_connection(
                                          transport="amqplib").info())
        self.app.conf.BROKER_PORT = 1978
        self.app.conf.BROKER_VHOST = "foo"
        self.assertDictContainsSubset({"port": 1978,
                                       "virtual_host": "foo"},
                                      self.app.broker_connection(
                                          transport="amqplib").info())
        conn = self.app.broker_connection(virtual_host="/value")
        self.assertDictContainsSubset({"virtual_host": "/value"},
                                      conn.info())

    def test_BROKER_BACKEND_alias(self):
        self.assertEqual(self.app.conf.BROKER_BACKEND,
                         self.app.conf.BROKER_TRANSPORT)

    def test_with_default_connection(self):

        @self.app.with_default_connection
        def handler(connection=None, foo=None):
            return connection, foo

        connection, foo = handler(foo=42)
        self.assertEqual(foo, 42)
        self.assertTrue(connection)

    def test_after_fork(self):
        p = self.app._pool = Mock()
        self.app._after_fork(self.app)
        p.force_close_all.assert_called_with()
        self.assertIsNone(self.app._pool)
        # second call with no pool must be a no-op, not an error
        self.app._after_fork(self.app)

    def test_pool_no_multiprocessing(self):
        with mask_modules("multiprocessing.util"):
            pool = self.app.pool
            self.assertIs(pool, self.app._pool)

    def test_bugreport(self):
        self.assertTrue(self.app.bugreport())

    def test_send_task_sent_event(self):

        class Dispatcher(object):
            sent = []

            def send(self, type, **fields):
                self.sent.append((type, fields))

        conn = self.app.broker_connection()
        chan = conn.channel()
        try:
            # declare and bind the exchanges used below on the memory broker
            for e in ("foo_exchange", "moo_exchange", "bar_exchange"):
                chan.exchange_declare(e, "direct", durable=True)
                chan.queue_declare(e, durable=True)
                chan.queue_bind(e, e, e)
        finally:
            chan.close()
        assert conn.transport_cls == "memory"

        pub = self.app.amqp.TaskPublisher(conn,
                                          exchange=Exchange("foo_exchange"))

        dispatcher = Dispatcher()
        self.assertTrue(pub.delay_task("footask", (), {},
                                       exchange="moo_exchange",
                                       routing_key="moo_exchange",
                                       event_dispatcher=dispatcher))
        self.assertTrue(dispatcher.sent)
        self.assertEqual(dispatcher.sent[0][0], "task-sent")
        self.assertTrue(pub.delay_task("footask", (), {},
                                       event_dispatcher=dispatcher,
                                       exchange="bar_exchange",
                                       routing_key="bar_exchange"))

    def test_error_mail_sender(self):
        x = ErrorMail.subject % {"name": "task_name",
                                 "id": uuid(),
                                 "exc": "FOOBARBAZ",
                                 "hostname": "lana"}
        self.assertTrue(x)
class test_defaults(Case):
    """Tests for helpers in ``celery.app.defaults``."""

    def test_str_to_bool(self):
        for s in ("false", "no", "0"):
            self.assertFalse(defaults.str_to_bool(s))
        for s in ("true", "yes", "1"):
            self.assertTrue(defaults.str_to_bool(s))
        # unrecognized strings must raise rather than guess
        with self.assertRaises(TypeError):
            defaults.str_to_bool("unsure")
class test_debugging_utils(Case):
    """Tests for the app-trace debugging switches in ``celery.app``."""

    def test_enable_disable_trace(self):
        try:
            _app.enable_trace()
            self.assertEqual(_app.app_or_default, _app._app_or_default_trace)
            _app.disable_trace()
            self.assertEqual(_app.app_or_default, _app._app_or_default)
        finally:
            # always restore the non-tracing default for other tests
            _app.disable_trace()
class test_pyimplementation(Case):
    """Tests for ``celery.platforms.pyimplementation`` detection logic."""

    def test_platform_python_implementation(self):
        # platform.python_implementation(), when available, wins outright
        with platform_pyimp(lambda: "Xython"):
            self.assertEqual(pyimplementation(), "Xython")

    def test_platform_jython(self):
        with platform_pyimp():
            with sys_platform("java 1.6.51"):
                self.assertIn("Jython", pyimplementation())

    def test_platform_pypy(self):
        with platform_pyimp():
            with sys_platform("darwin"):
                with pypy_version((1, 4, 3)):
                    self.assertIn("PyPy", pyimplementation())
                with pypy_version((1, 4, 3, "a4")):
                    self.assertIn("PyPy", pyimplementation())

    def test_platform_fallback(self):
        # with no other hints, CPython is assumed
        with platform_pyimp():
            with sys_platform("darwin"):
                with pypy_version():
                    self.assertEqual("CPython", pyimplementation())
| 34.880952 | 77 | 0.60256 |
283ac1824f81858e123d452398762a41d3869986 | 1,360 | py | Python | qtaf_settings.py | Vancheung/QTAF | 31133f221f3abaf68078218d9bbf95f097837363 | [
"BSD-3-Clause"
] | null | null | null | qtaf_settings.py | Vancheung/QTAF | 31133f221f3abaf68078218d9bbf95f097837363 | [
"BSD-3-Clause"
] | null | null | null | qtaf_settings.py | Vancheung/QTAF | 31133f221f3abaf68078218d9bbf95f097837363 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''
QTAF configuration file.
'''
# -----------------------------------
# Debug-mode switch
# -----------------------------------
DEBUG = False
# -----------------------------------
# Global data-drive configuration
# -----------------------------------
DATA_DRIVE = False
DATA_SOURCE = 'test/data/server.py'
# -----------------------------------
# Project configuration
# -----------------------------------
PROJECT_NAME = 'qtaf'
PROJECT_MODE = 'standalone' #choices: standard/standalone
PROJECT_ROOT = None#os.path.dirname(__file__)
INSTALLED_APPS = []
QTAF_FAILED_SKIP_RUNTEST = True
# -----------------------------------
# Assert
# -----------------------------------
QTAF_REWRITE_ASSERT = True
b0f2d3f068285a402855ec3031ff5af42b3ec3b9 | 3,694 | py | Python | server_scripts/dev_board_eth.py | slaclab/pysmurf | 0fef2dda87e6da292485266b3bf630c9b7ca97dc | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-10-17T02:37:59.000Z | 2022-03-09T16:42:34.000Z | server_scripts/dev_board_eth.py | slaclab/pysmurf | 0fef2dda87e6da292485266b3bf630c9b7ca97dc | [
"BSD-3-Clause-LBNL"
] | 446 | 2019-04-10T04:46:20.000Z | 2022-03-15T20:27:57.000Z | server_scripts/dev_board_eth.py | slaclab/pysmurf | 0fef2dda87e6da292485266b3bf630c9b7ca97dc | [
"BSD-3-Clause-LBNL"
] | 13 | 2019-02-05T18:02:05.000Z | 2021-03-02T18:41:49.000Z | #!/usr/bin/env python3
#-----------------------------------------------------------------------------
# Title : PyRogue Server
#-----------------------------------------------------------------------------
# File : python/pyrogue_server.py
# Created : 2017-06-20
#-----------------------------------------------------------------------------
# Description:
# Python script to start a PyRogue Control Server
#-----------------------------------------------------------------------------
# This file is part of the pyrogue-control-server software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import sys
import pyrogue
import pysmurf.core.devices
import pysmurf.core.transmitters
import pysmurf.core.server_scripts.Common as common
# Main body: parse arguments, verify the FPGA is reachable, then bring up
# the Rogue root for the SMuRF dev board over ethernet (optionally with a GUI).
if __name__ == "__main__":
    # Read Arguments
    args = common.get_args()
    # Import the root device after the python path is updated
    from pysmurf.core.roots.DevBoardEth import DevBoardEth as DevBoardEth
    # Ethernet-based communication requires an explicit target IP.
    if not args['ip_addr']:
        sys.exit("ERROR: Must specify an IP address for ethernet base communication devices.")
    common.verify_ip(args)
    common.ping_fpga(args)
    # Define variable groups (we use the provided example definition)
    # We can disable it by defining "VariableGroups = None" instead.
    from pysmurf.core.server_scripts._VariableGroupExample import VariableGroups
    # The PCIeCard object will take care of setting up the PCIe card (if present)
    with pysmurf.core.devices.PcieCard( lane      = args['pcie_rssi_lane'],
                                        comm_type = "eth-rssi-interleaved",
                                        ip_addr   = args['ip_addr'],
                                        dev_rssi  = args['pcie_dev_rssi'],
                                        dev_data  = args['pcie_dev_data']):
        # The root is a context manager: stopped/cleaned up on exit.
        with DevBoardEth( ip_addr        = args['ip_addr'],
                          config_file    = args['config_file'],
                          epics_prefix   = args['epics_prefix'],
                          polling_en     = args['polling_en'],
                          pv_dump_file   = args['pv_dump_file'],
                          disable_bay0   = args['disable_bay0'],
                          disable_bay1   = args['disable_bay1'],
                          configure      = args['configure'],
                          server_port    = args['server_port'],
                          VariableGroups = VariableGroups,
                          txDevice       = pysmurf.core.transmitters.BaseTransmitter(name='Transmitter')) as root:
            if args['use_gui']:
                # Start the GUI
                print("Starting GUI...\n")
                if args['use_qt']:
                    # Start the QT GUI, if selected by the user
                    import pyrogue.gui
                    pyrogue.gui.runGui(root=root,title=args['windows_title'])
                else:
                    # Otherwise, start the PyDM GUI
                    import pyrogue.pydm
                    pyrogue.pydm.runPyDM(root=root, title=args['windows_title'])
            else:
                # Headless mode: block until Ctrl+C is pressed.
                print("Running without GUI...")
                pyrogue.waitCntrlC()
| 43.97619 | 115 | 0.518138 |
1ac8c87737a6a65e5708196d55194e945b48822d | 389 | py | Python | app.py | alexpulver/company-guardrails | 6a72b34de61bfde0b2360739ab3f0e2dcf6ee1be | [
"MIT-0"
] | null | null | null | app.py | alexpulver/company-guardrails | 6a72b34de61bfde0b2360739ab3f0e2dcf6ee1be | [
"MIT-0"
] | null | null | null | app.py | alexpulver/company-guardrails | 6a72b34de61bfde0b2360739ab3f0e2dcf6ee1be | [
"MIT-0"
] | null | null | null | import os
from aws_cdk import core as cdk
from cdk_nag import NIST80053Checks
from deployment import LandingPageFrontend
# Single CDK app containing the landing-page frontend stack.
app = cdk.App()
LandingPageFrontend(
    app,
    "LandingPageFrontend",
    # Deploy into the account/region of the current CLI credentials.
    env=cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Run NIST 800-53 compliance checks over every construct in the app tree.
cdk.Aspects.of(app).add(NIST80053Checks())
app.synth()
| 17.681818 | 50 | 0.722365 |
c585d4c4a235d180a4f968d805c8b5ad5a6d782e | 720 | py | Python | examples/basic/tube.py | Singlesnail/vedo | c61ad3aca5c926d4b41b8a468aefe8fc02f242ab | [
"CC0-1.0"
] | 1 | 2021-04-25T06:28:01.000Z | 2021-04-25T06:28:01.000Z | examples/basic/tube.py | leftwillow/vedo | b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b | [
"CC0-1.0"
] | null | null | null | examples/basic/tube.py | leftwillow/vedo | b2e2cfc3453bbd118b6c81a2227b8ce6f1d22b7b | [
"CC0-1.0"
] | null | null | null | """Use array to vary radius and color
of a line represented as a tube.
"""
from vedo import *
import numpy as np
settings.defaultFont = 'Quikhand'
# Helix-like polyline: points (sin x, cos x, x/2) sampled every 0.1 in [0, 9).
ln = [[sin(x), cos(x), x / 2] for x in np.arange(0,9, 0.1)]
N = len(ln)
############################### a simple tube( along ln
t1 = Tube(ln, c="blue", r=0.08)
############################### vary radius
# Per-point radii oscillating between 0.1 and 0.4 along the line.
rads = [0.3*(cos(6.0*ir/N))**2+0.1 for ir in range(N)]
t2 = Tube(ln, r=rads, c="tomato", res=24)
############################### vary color
cols = [i for i in range(N)]
cols = makeBands(cols, 5) # make color bins
t3 = Tube(ln, r=rads, c=cols, res=24)
# Three side-by-side renderers; the last call blocks for interaction.
show(t1, __doc__, at=0, N=3, axes=dict(textScale=4), viewup="z")
show(t2, at=1)
show(t3, at=2, interactive=1)
| 25.714286 | 64 | 0.552778 |
6205c915fe7a9942899db05c52d22f18124b1bf6 | 378 | py | Python | tests.py | rjnay1984/photo-album-python | 11a3a8a6e200e1c4406c1d640373a3a95d19cb05 | [
"MIT"
] | null | null | null | tests.py | rjnay1984/photo-album-python | 11a3a8a6e200e1c4406c1d640373a3a95d19cb05 | [
"MIT"
] | null | null | null | tests.py | rjnay1984/photo-album-python | 11a3a8a6e200e1c4406c1d640373a3a95d19cb05 | [
"MIT"
] | null | null | null | from photo_album import request_album
"""
Test the length of the album, since it's
consistent in the placeholder API.
"""
def test_photo_album_success():
    # Album 4 exists in the placeholder API; its album length is consistent.
    album_request = request_album(4)
    assert len(album_request) == 50
def test_photo_album_unsuccessful():
    # NOTE(review): assumes request_album returns a message object whose str()
    # contains this text for a nonexistent album id -- confirm in photo_album.
    album_request = request_album(101)
    assert 'There are no photos in this album.' in str(album_request)
| 22.235294 | 69 | 0.751323 |
7d702fe3d0d9ee87710be07e57817fd32ee00db8 | 572 | py | Python | casingSimulations/__init__.py | simpeg-research/casingSimulations | ba55d5847b01b44b8a8209d2b5728e073752e41a | [
"MIT"
] | 3 | 2019-11-13T21:23:19.000Z | 2021-12-07T05:53:08.000Z | casingSimulations/__init__.py | lheagy/casingResearch | bc03c07b216bf6f9015e65ed0d8deaae88d4b019 | [
"MIT"
] | 5 | 2017-03-04T23:36:32.000Z | 2017-04-28T21:11:47.000Z | casingSimulations/__init__.py | lheagy/casingResearch | bc03c07b216bf6f9015e65ed0d8deaae88d4b019 | [
"MIT"
] | 2 | 2018-12-28T01:32:10.000Z | 2020-03-06T08:39:07.000Z | from . import model
from .mesh import (
CylMeshGenerator, CasingMeshGenerator, TensorMeshGenerator
)
from .physics import (
casing_currents, casing_charges, plotCurrentDensity,
plot_currents_over_freq,
plot_currents_over_mu, plot_j_over_mu_z, plot_j_over_freq_z,
plot_j_over_mu_x
)
from .view import plotEdge2D, plotFace2D, FieldsViewer
from . import sources
from . import run
from .utils import (
load_properties, edge3DthetaSlice, face3DthetaSlice, ccv3DthetaSlice
)
from .info import (
__version__, __author__, __license__, __copyright__
)
| 27.238095 | 72 | 0.791958 |
47872f8e7c4856376a7387b557797e7d7eb9c882 | 193 | py | Python | syn/conf/__init__.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 1 | 2021-07-15T08:55:12.000Z | 2021-07-15T08:55:12.000Z | syn/conf/__init__.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 7 | 2021-01-07T23:51:57.000Z | 2021-12-13T19:50:57.000Z | syn/conf/__init__.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 2 | 2016-07-11T08:46:31.000Z | 2017-12-13T13:30:51.000Z | from .conf import *
from .conf2 import *
from .vars import *
from syn.base_utils import harvest_metadata, delete
# Populate this module's namespace from the bundled metadata file, then drop
# the helper names so they do not leak from the package API.
# NOTE(review): assumes `with delete(...)` removes the listed names on exit --
# confirm against syn.base_utils.
with delete(harvest_metadata, delete):
    harvest_metadata('../metadata.yml')
| 24.125 | 51 | 0.761658 |
843254f3c642463ad250158774b369d82b88581a | 4,794 | py | Python | ros/src/twist_controller/dbw_node.py | Lucap87ct/CarND-Capstone | cb8680b57f0fc1fb7ad46bef7d81c3cf7cda3231 | [
"MIT"
] | null | null | null | ros/src/twist_controller/dbw_node.py | Lucap87ct/CarND-Capstone | cb8680b57f0fc1fb7ad46bef7d81c3cf7cda3231 | [
"MIT"
] | 5 | 2020-03-14T17:32:12.000Z | 2022-03-12T00:20:17.000Z | ros/src/twist_controller/dbw_node.py | andrea-ortalda/CarND-Capstone | 53045261e18a651d06d46455f04b1eb0f2e4f5f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
class DBWNode(object):
    """ROS drive-by-wire node.

    Subscribes to the current/target velocities and the DBW-enabled flag,
    runs the twist controller at 50 Hz, and publishes throttle, brake and
    steering commands while DBW is enabled.
    """
    def __init__(self):
        rospy.init_node('dbw_node')
        # Vehicle properties params
        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        #fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        #brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        # Subscribers
        self.velocity_sub = rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
        self.dbw_enabled_sub = rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        self.twist_cmd_sub = rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
        # Publishers
        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)
        # DBW Node variables (None until the first callback arrives).
        self.current_velocity = None
        self.dbw_enabled = None
        self.target_linear_velocity = None
        self.target_angular_velocity = None
        self.throttle_cmd = None
        self.brake_cmd = None
        self.steer_cmd = None
        self.controller = Controller(vehicle_mass=vehicle_mass,
                                     decel_limit=decel_limit,
                                     accel_limit=accel_limit,
                                     wheel_radius=wheel_radius,
                                     wheel_base=wheel_base,
                                     steer_ratio=steer_ratio,
                                     max_lat_accel=max_lat_accel,
                                     max_steer_angle=max_steer_angle)
        # NOTE: step() loops until rospy shutdown, so __init__ never returns.
        self.step()
    def step(self):
        """Main 50 Hz control loop: compute and publish actuator commands."""
        rate = rospy.Rate(50) # 50Hz
        while not rospy.is_shutdown():
            # Only run the controller once every input has been received.
            if not None in (self.current_velocity, self.dbw_enabled, self.target_linear_velocity, self.target_angular_velocity):
                self.throttle_cmd, self.brake_cmd, self.steer_cmd = self.controller.control(self.dbw_enabled,
                                                                                           self.current_velocity,
                                                                                           self.target_linear_velocity,
                                                                                           self.target_angular_velocity)
                #rospy.loginfo('Current throttle cmd = %s', self.throttle_cmd)
                #rospy.loginfo('Current brake cmd = %s', self.brake_cmd)
                #rospy.loginfo('Current steer cmd = %s', self.steer_cmd)
                # Publish only while drive-by-wire is engaged (manual override
                # must not fight the driver).
                if self.dbw_enabled:
                    self.publish(self.throttle_cmd, self.brake_cmd, self.steer_cmd)
            rate.sleep()
    '''
    This method updates the current ego vehicle velocity
    '''
    def velocity_cb(self, data):
        self.current_velocity = data.twist.linear.x
    '''
    This method updates the dbw enabled status
    '''
    def dbw_enabled_cb(self, data):
        self.dbw_enabled = data
    '''
    This method updates the target velocity
    '''
    def twist_cb(self, data):
        self.target_linear_velocity = data.twist.linear.x
        self.target_angular_velocity = data.twist.angular.z
        #rospy.loginfo('Target linear vel %s', self.target_linear_velocity)
        #rospy.loginfo('Target angular vel %s', self.target_angular_velocity)
    def publish(self, throttle, brake, steer):
        """Publish one set of throttle/steer/brake commands."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)
        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)
        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
if __name__ == '__main__':
    DBWNode()
| 41.327586 | 128 | 0.590738 |
c509b3b0fece0841f9e9bb065dc843d504b031da | 2,758 | py | Python | hyperstream/utils/statistics/percentile.py | vishalbelsare/HyperStream | 35d63962f78cdfaac0383e38d79b16af373f1492 | [
"MIT"
] | 12 | 2017-01-14T15:26:51.000Z | 2020-10-04T14:46:44.000Z | hyperstream/utils/statistics/percentile.py | vishalbelsare/HyperStream | 35d63962f78cdfaac0383e38d79b16af373f1492 | [
"MIT"
] | 27 | 2017-04-04T22:49:02.000Z | 2018-02-22T13:46:52.000Z | hyperstream/utils/statistics/percentile.py | vishalbelsare/HyperStream | 35d63962f78cdfaac0383e38d79b16af373f1492 | [
"MIT"
] | 6 | 2017-04-04T15:09:52.000Z | 2018-11-19T08:01:23.000Z | # The MIT License (MIT)
# Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import math
def flatten(a):
    """Return a flat list with all nested lists in ``a`` expanded, in order.

    Non-list elements are kept as-is; the empty input is returned unchanged
    (preserving the historical behavior of returning the same object).

    Rewritten iteratively: the previous recursive version built the result
    with repeated list concatenation (quadratic) and raised RecursionError
    on deeply nested input.

    :param a: a (possibly nested) list.
    :return: a flat list of the leaf elements.
    """
    if not a:
        return a
    flat = []
    # Explicit stack of iterators replaces recursion.
    stack = [iter(a)]
    while stack:
        for item in stack[-1]:
            if isinstance(item, list):
                stack.append(iter(item))
                break
            flat.append(item)
        else:
            stack.pop()
    return flat
def percentile(a, q):
    """
    Compute the qth percentile of the data along the specified axis.
    Simpler version than the numpy version that always flattens input arrays.

    Examples
    --------
    >>> a = [[10, 7, 4], [3, 2, 1]]
    >>> percentile(a, 20)
    2.0
    >>> percentile(a, 50)
    3.5
    >>> percentile(a, [20, 80])
    [2.0, 7.0]
    >>> a = list(range(40))
    >>> percentile(a, 25)
    9.75

    :param a: Input array or object that can be converted to an array.
    :param q: Percentile to compute, which must be between 0 and 100 inclusive.
              May be a single number or a list/tuple of numbers.
    :return: the qth percentile(s) of the array elements (None for empty input).
    """
    if not a:
        return None
    if isinstance(q, (float, int)):
        qq = [q]
    elif isinstance(q, (tuple, list)):
        # Copy the sequence: the previous code aliased the caller's list and
        # then divided each entry by 100 *in place* (and crashed with a
        # TypeError on tuples, which are immutable despite being accepted).
        qq = list(q)
    else:
        raise ValueError("Quantile type {} not understood".format(type(q)))
    if isinstance(a, (float, int)):
        a = [a]
    for i in range(len(qq)):
        if qq[i] < 0. or qq[i] > 100.:
            raise ValueError("Percentiles must be in the range [0,100]")
        qq[i] /= 100.
    a = sorted(flatten(a))
    r = []
    for q in qq:
        # Linear interpolation between the two nearest ranks.
        k = (len(a) - 1) * q
        f = math.floor(k)
        c = math.ceil(k)
        if f == c:
            r.append(float(a[int(k)]))
            continue
        d0 = a[int(f)] * (c - k)
        d1 = a[int(c)] * (k - f)
        r.append(float(d0 + d1))
    # Scalar in, scalar out.
    if len(r) == 1:
        return r[0]
    return r
| 30.307692 | 80 | 0.620015 |
0d86a24931005ada8510470ea88c669885fc7e29 | 5,543 | py | Python | components/alibi-detect-server/adserver/cm_model.py | M46F/seldon-core | db251e0177c056bac7b4518033833d27a85529ee | [
"Apache-2.0"
] | 4 | 2019-08-29T19:36:55.000Z | 2021-12-20T00:37:08.000Z | components/alibi-detect-server/adserver/cm_model.py | M46F/seldon-core | db251e0177c056bac7b4518033833d27a85529ee | [
"Apache-2.0"
] | 97 | 2021-01-22T11:50:05.000Z | 2021-08-02T21:22:21.000Z | components/alibi-detect-server/adserver/cm_model.py | M46F/seldon-core | db251e0177c056bac7b4518033833d27a85529ee | [
"Apache-2.0"
] | 7 | 2020-09-07T09:10:57.000Z | 2021-11-25T02:59:02.000Z | import json
from typing import List, Dict, Optional, Union
import logging
import numpy as np
from enum import Enum
import kfserving
import importlib
import pickle
import os
from adserver.constants import (
HEADER_RETURN_INSTANCE_SCORE,
REQUEST_ID_HEADER_NAME,
NAMESPACE_HEADER_NAME,
)
from .numpy_encoder import NumpyEncoder
from adserver.base import CEModel
from seldon_core.user_model import SeldonResponse
from seldon_core.flask_utils import SeldonMicroserviceException
from seldon_core.metrics import DEFAULT_LABELS, NONIMPLEMENTED_MSG
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
# Identity labels for the running deployment, taken from seldon_core's
# DEFAULT_LABELS; outside a cluster these may hold the NONIMPLEMENTED_MSG
# placeholder (checked before enabling the elasticsearch client below).
SELDON_DEPLOYMENT_ID = DEFAULT_LABELS["seldon_deployment_name"]
SELDON_MODEL_ID = DEFAULT_LABELS["model_name"]
SELDON_PREDICTOR_ID = DEFAULT_LABELS["predictor_name"]
def _load_class_module(module_path: str) -> str:
components = module_path.split(".")
mod = __import__(".".join(components[:-1]))
for comp in components[1:]:
print(mod, comp)
mod = getattr(mod, comp)
return mod
class CustomMetricsModel(CEModel):  # pylint:disable=c-extension-no-member
    def __init__(
        self, name: str, storage_uri: str, elasticsearch_uri: str = None, model=None
    ):
        """
        Custom Metrics Model

        Parameters
        ----------
        name
            The name of the model
        storage_uri
            The URI location of the model (a path-like URI, or a dotted
            python path to a locally importable metrics class)
        elasticsearch_uri
            Optional elasticsearch endpoint used to look up responses by
            request id when the feedback event carries no response
        model
            Optional pre-built metrics model instance (skips load())
        """
        super().__init__(name)
        self.name = name
        self.storage_uri = storage_uri
        self.model = model
        self.ready = False
        self.elasticsearch_client = None
        if elasticsearch_uri:
            # Elasticsearch lookup needs the deployment identity labels;
            # refuse to configure the client if they are placeholders.
            if NONIMPLEMENTED_MSG in [
                SELDON_DEPLOYMENT_ID,
                SELDON_MODEL_ID,
                SELDON_PREDICTOR_ID,
            ]:
                logging.error(
                    f"Elasticsearch URI provided but DEFAULT_LABELS not provided: {DEFAULT_LABELS}"
                )
            else:
                self.elasticsearch_client = Elasticsearch(elasticsearch_uri)
    def load(self):
        """
        Load the model from storage.

        A storage_uri containing '/' is treated as a remote/file location
        holding a pickled model ("meta.pickle"); otherwise it is a dotted
        python path to a class that is imported and instantiated.
        """
        if "/" in self.storage_uri:
            model_folder = kfserving.Storage.download(self.storage_uri)
            self.model = pickle.load(
                open(os.path.join(model_folder, "meta.pickle"), "rb")
            )
        else:
            # Load from locally available models
            MetricsClass = _load_class_module(self.storage_uri)
            self.model = MetricsClass()
        self.ready = True
    def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
        """
        Process a feedback event and return the computed custom metrics.

        Parameters
        ----------
        inputs
            Input data; must contain "truth", may contain "response" and
            extra "metrics"
        headers
            Header options; may carry the request id / namespace used for
            the elasticsearch response lookup

        Returns
        -------
        SeldonResponse response

        Raises
        ------
        SeldonMicroserviceException
            If no truth is given, or no response can be obtained.
        """
        logging.info("PROCESSING Feedback Event.")
        logging.info(str(headers))
        logging.info("----")
        metrics = []
        output = {}
        truth = None
        response = None
        error = None
        if "truth" not in inputs:
            raise SeldonMicroserviceException(
                f"No truth value provided in: {json.dumps(inputs)}",
                status_code=400,
                reason="NO_TRUTH_VALUE",
            )
        else:
            truth = inputs["truth"]
        # We automatically add any metrics provided in the incoming request
        if "metrics" in inputs:
            metrics.extend(inputs["metrics"])
        # If response is provided then we can perform a comparison
        if "response" in inputs:
            response = inputs["response"]
        elif REQUEST_ID_HEADER_NAME in headers:
            # Otherwise if UUID is provided we can fetch from elasticsearch
            if not self.elasticsearch_client:
                error = "Seldon-Puid provided but elasticsearch client not configured"
            else:
                try:
                    seldon_puid = headers.get(REQUEST_ID_HEADER_NAME, "")
                    seldon_namespace = headers.get(NAMESPACE_HEADER_NAME, "")
                    # Currently only supports SELDON inference type (not kfserving)
                    elasticsearch_index = f"inference-log-{seldon_namespace}-seldon-{SELDON_DEPLOYMENT_ID}-{SELDON_PREDICTOR_ID}"
                    doc = self.elasticsearch_client.get(
                        index=elasticsearch_index, id=seldon_puid
                    )
                    response = (
                        doc.get("_source", {})
                        .get("response", None)
                        .get("instance", None)
                    )
                    if not response:
                        error = f"Elasticsearch index {elasticsearch_index} with id {seldon_puid} did not contain response value"
                except NotFoundError:
                    error = f"Elasticsearch index {elasticsearch_index} with id {seldon_puid} not found"
        else:
            error = "Neither response nor request Puid provided in headers"
        if error:
            raise SeldonMicroserviceException(
                error, status_code=400, reason="METRICS_SERVER_ERROR"
            )
        # NOTE(review): logged at ERROR level although it is not an error --
        # looks like leftover debugging; confirm before changing.
        logging.error(f"{truth}, {response}")
        output = self.model.transform(truth, response)
        seldon_response = SeldonResponse.create(output or None)
        seldon_response.metrics.extend(metrics)
        return seldon_response
| 32.798817 | 129 | 0.596067 |
debf6b21d6f1ae10046d75b4730d03b562ac5aa2 | 2,525 | py | Python | ceilometerclient/v2/options.py | dreamhost/python-ceilometerclient | a550dcfa4971df5ef517aa73d2ebc7a6675c72c6 | [
"Apache-2.0"
] | null | null | null | ceilometerclient/v2/options.py | dreamhost/python-ceilometerclient | a550dcfa4971df5ef517aa73d2ebc7a6675c72c6 | [
"Apache-2.0"
] | null | null | null | ceilometerclient/v2/options.py | dreamhost/python-ceilometerclient | a550dcfa4971df5ef517aa73d2ebc7a6675c72c6 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import urllib
def build_url(path, q):
    '''
    This converts from a list of dict's to what the rest api needs
    so from:
    "[{field=this,op=le,value=34},{field=that,op=eq,value=foo}]"
    to:
    "?q.field=this&q.op=le&q.value=34&
      q.field=that&q.op=eq&q.value=foo"

    :param path: base URL path.
    :param q: list of {'field', 'op', 'value'} dicts, or a false value to
              return *path* unchanged.
    :return: *path* with the encoded query string appended when *q* is given.
    '''
    if q:
        # Fix: ``urllib.urlencode`` only exists on Python 2; it moved to
        # ``urllib.parse`` in Python 3. Import the right one locally so the
        # module-level ``import urllib`` keeps working on both.
        try:
            from urllib import urlencode
        except ImportError:
            from urllib.parse import urlencode
        query_params = {'q.field': [],
                        'q.value': [],
                        'q.op': []}
        # Missing keys are encoded as empty strings so the three parallel
        # lists stay the same length.
        for query in q:
            for name in ['field', 'op', 'value']:
                query_params['q.%s' % name].append(query.get(name, ''))
        path += "?" + urlencode(query_params, doseq=True)
    return path
def cli_to_array(cli_query):
    '''
    This converts from the cli list of queries to what is required
    by the python api.
    so from:
    "this<=34;that=foo"
    to
    "[{field=this,op=le,value=34},{field=that,op=eq,value=foo}]"
    '''
    if cli_query is None:
        return None
    operator_names = {'!=': 'ne',
                      '>=': 'ge',
                      '<=': 'le',
                      '>': 'gt',
                      '<': 'lt',
                      '=': 'eq'}

    def tokenize(expression):
        # Try the two-character operators (<=, >=, !=) first, then fall
        # back to the single-character ones (<, >, =).
        matches = re.findall(r'(\w+)([><!]=)([^ -,\t\n\r\f\v]+)', expression)
        if not matches:
            matches = re.findall(r'(\w+)([><=])([^ -,\t\n\r\f\v]+)', expression)
        return matches

    result = []
    for expression in cli_query.split(';'):
        matches = tokenize(expression)
        # More than one comparison in a single fragment means the caller
        # used the wrong separator between queries.
        if len(matches) > 1:
            raise ValueError('incorrect seperator %s in query "%s"' %
                             ('(should be ";")', expression))
        if not matches:
            raise ValueError('invalid query %s' % expression)
        field, op_symbol, value = matches[0]
        result.append({'field': field,
                       'op': operator_names[op_symbol],
                       'value': value})
    return result
| 30.059524 | 78 | 0.525545 |
875fc7fd6799e210dc5c4b1c2a28f7918c5d9215 | 1,941 | py | Python | .history/postImages/index_20201006210944.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | 2 | 2020-10-21T22:14:15.000Z | 2020-10-21T22:14:16.000Z | .history/postImages/index_20201006210947.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | .history/postImages/index_20201006210947.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
# url = https://b2ptc.herokuapp.com/bridges
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
finalObj = {}
for i in finCsv:
x = i.split(',')
finalObj[x[6]]= {}
if x[6] in obj:
finalObj[x[6]]['before_img'] = obj[x[6]]['before_img']
finalObj[x[6]]['after_img'] = obj[x[6]]['after_img']
finalObj[x[6]]['district'] = x[1]
finalObj[x[6]]['sector'] = x[2]
finalObj[x[6]]['cell'] = x[3]
finalObj[x[6]]['bridge_site'] = x[4]
finalObj[x[6]]['stage'] = x[5]
finalObj[x[6]]['id'] = int(x[6])
finalObj[x[6]]['type'] = x[7]
finalObj[x[6]]['latt'] = float(x[8])
finalObj[x[6]]['long'] = float(x[9])
try:
serv = float(x[10])
except:
serv = x[10]
sv = x[13].split(' ')[2]
finalObj[x[6]]['served'] = serv
finalObj[x[6]]['community_served'] = x[14]
try:
pId = int(x[15])
except :
pId = x[15]
finalObj[x[6]]['provId'] = pId
finalObj[x[6]]['districtId'] = int(x[16])
finalObj[x[6]]['sectorId'] = int(x[17])
finalObj[x[6]]['cellId'] = int(x[18][0:-1])
print(finalObj[x[6]]['before_img'])
break
else:
print(fin[0])
for i in range(fin[0]):
for key in finalObj:
print(key)
# for i in finalCsv:
# x = i.split(',')
# requests.put(url+x[0],data={before:x[2],after:x[3]})
# pull each id,before image and after from df
# for each data item do a put request with the id as the param id
# and then put the before and after image in an dict and place it as the data for the put request
| 30.328125 | 97 | 0.512107 |
021a368b8a2fb30f58f8d888cfc2d1ac12776cc5 | 2,205 | py | Python | conans/test/functional/old/user_info_test.py | johnmcfarlane/conan | 725bd0cee4e53f35521aef7eeb61d4772c460d5e | [
"MIT"
] | null | null | null | conans/test/functional/old/user_info_test.py | johnmcfarlane/conan | 725bd0cee4e53f35521aef7eeb61d4772c460d5e | [
"MIT"
] | 4 | 2018-09-05T13:08:31.000Z | 2020-03-05T09:17:20.000Z | conans/test/functional/old/user_info_test.py | johnmcfarlane/conan | 725bd0cee4e53f35521aef7eeb61d4772c460d5e | [
"MIT"
] | 2 | 2018-09-05T11:58:44.000Z | 2018-09-05T12:14:11.000Z | import os
import unittest
from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient
class UserInfoTest(unittest.TestCase):
    """Checks that user_info values set in package_info() propagate through a
    chain of requirements into deps_user_info and the txt generator output."""
    def test_user_info_propagation(self):
        client = TestClient()

        # Helper: export a one-package recipe whose package_info() body is
        # given by *infolines*.
        def export_lib(name, requires, infolines):
            base = '''
import os
from conans import ConanFile
class MyConanfile(ConanFile):
    name = "%s"
    version = "0.1"
    requires = "%s"
    def build(self):
        pass
    def package_info(self):
        %s
'''
            client.save({CONANFILE: base % (name, requires, infolines)}, clean_first=True)
            client.run("export . lasote/stable")

        # Chain A <- B <- C <- D, each contributing user_info variables.
        export_lib("LIB_A", "", "self.user_info.VAR1=2")
        export_lib("LIB_B", "LIB_A/0.1@lasote/stable", "self.user_info.VAR1=2\n        "
                                                       "self.user_info.VAR2=3")
        export_lib("LIB_C", "LIB_B/0.1@lasote/stable", "self.user_info.VAR1=2")
        export_lib("LIB_D", "LIB_C/0.1@lasote/stable", "self.user_info.var1=2")
        # Consumer recipe asserting the propagated values at build() time.
        reuse = '''
import os
from conans import ConanFile
class MyConanfile(ConanFile):
    name = "reuse"
    version = "0.1"
    requires = "LIB_D/0.1@lasote/stable"
    def build(self):
        assert(self.deps_user_info["LIB_A"].VAR1=="2")
        assert(self.deps_user_info["LIB_B"].VAR1=="2")
        assert(self.deps_user_info["LIB_B"].VAR2=="3")
        assert(self.deps_user_info["LIB_C"].VAR1=="2")
        assert(self.deps_user_info["LIB_D"].var1=="2")
'''
        client.save({CONANFILE: reuse}, clean_first=True)
        client.run("export . lasote/stable")
        client.run('install reuse/0.1@lasote/stable --build -g txt')
        # Assert generator TXT
        txt_contents = client.load("conanbuildinfo.txt")
        self.assertIn("[USER_LIB_A]%sVAR1=2" % os.linesep, txt_contents)
        self.assertIn("[USER_LIB_B]%sVAR1=2%sVAR2=3" % (os.linesep, os.linesep), txt_contents)
        self.assertIn("[USER_LIB_C]%sVAR1=2" % os.linesep, txt_contents)
        self.assertIn("[USER_LIB_D]%svar1=2" % os.linesep, txt_contents)
        # Now try local command with a consumer
        client.run('install . --build')
        client.run("build .")
f3af6b92499d165d199379344a35b391ca9af338 | 21,994 | py | Python | pyecore/resources/resource.py | jamoralp/pyecore | df1f230e6e99dfe0a6ccfa34776b626e2bb13d63 | [
"BSD-3-Clause"
] | 99 | 2017-06-02T14:03:51.000Z | 2022-03-11T06:34:11.000Z | pyecore/resources/resource.py | jamoralp/pyecore | df1f230e6e99dfe0a6ccfa34776b626e2bb13d63 | [
"BSD-3-Clause"
] | 108 | 2017-05-19T05:53:45.000Z | 2022-03-30T04:49:47.000Z | pyecore/resources/resource.py | jamoralp/pyecore | df1f230e6e99dfe0a6ccfa34776b626e2bb13d63 | [
"BSD-3-Clause"
] | 41 | 2017-06-02T14:07:35.000Z | 2021-12-02T06:21:01.000Z | # -*- coding: future_fstrings -*-
""" The resource module proposes all the concepts that are related to Resource
handling. A Resource represents a special model container that can be
serialized. Many ``Resource`` can be contained in a ``ResourceSet``, and
"cross-reference" each others.
"""
from uuid import uuid4
import urllib.request
import re
from os import path
from itertools import chain
from collections import ChainMap
from .. import ecore as Ecore
from ..innerutils import ignored
from abc import abstractmethod
from urllib.parse import urljoin
from functools import lru_cache
# Process-wide fallbacks shared by every ResourceSet: a set's own ChainMap
# layers its local entries over these globals.
global_registry = {}
global_uri_mapper = {}
global_uri_converter = []
class ResourceSet(object):
    """Defines a Resource container.

    A ResourceSet can contain many Resources and has the ability to create new
    ones. It also gives a way of isolating resources from each other and of
    "locally" registering metamodels.

    Resource can be created empty (using ``create_resource(...)``) or with data
    fetched from the actual resource content (using ``get_resource(...)``).

    A :py:class:`ResourceSet` contains 3 handy properties:

    * ``resources`` which is a dictionary of the ResourceSet loaded resources
      (key is the plain string URI, value: the resource).
    * ``metamodel_registry`` which is a dictionary of the ResourceSet known
      metamodels (key is the plain string metamodel URI, value: the metamodel
      ``EPackage`` root)
    * ``resource_factory`` which is a factory used by the ResourceSet to build
      the right Resource kind regarding the URI.

    .. seealso:: Resource
    """
    def __init__(self):
        self.resources = {}
        # Local registries layered over the process-wide globals.
        self.metamodel_registry = ChainMap({}, global_registry)
        self.uri_mapper = ChainMap({}, global_uri_mapper)
        self.uri_converter = []
        self.resource_factory = dict(ResourceSet.resource_factory)
    def create_resource(self, uri):
        """Creates a new Resource.

        The created ressource type depends on the used URI.

        :param uri: the resource URI
        :type uri: URI
        :return: a new Resource
        :rtype: Resource

        .. seealso:: URI, Resource, XMIResource
        """
        if isinstance(uri, str):
            uri = URIConverter.convert(URI(uri))
        # Pick the factory registered for the file extension, falling back
        # to the catch-all '*' factory.
        try:
            resource = self.resource_factory[uri.extension](uri)
        except KeyError:
            resource = self.resource_factory['*'](uri)
        self.resources[uri.normalize()] = resource
        resource.resource_set = self
        resource.decoders.insert(0, self)
        return resource
    def remove_resource(self, resource):
        """Removes a resource (all its registered URIs) from this set."""
        if not resource:
            return
        # Iterate over a copy since we delete entries while walking.
        for key, value in dict(self.resources).items():
            if value is resource:
                del self.resources[key]
    def get_resource(self, uri, options=None):
        """Returns the resource for *uri*, loading it on first access.

        On a load failure the half-created resource is removed from the set
        before the exception is re-raised.
        """
        if isinstance(uri, str):
            uri = URIConverter.convert(URI(uri))
        # We check first if the resource already exists in the ResourceSet
        if uri.normalize() in self.resources:
            return self.resources[uri.normalize()]
        # If not, we create a new resource
        resource = self.create_resource(uri)
        try:
            resource.load(options=options)
        except Exception:
            self.remove_resource(resource)
            raise
        return resource
    def can_resolve(self, uri_path, from_resource=None):
        """Tells whether *uri_path* points into a resource of this set,
        either directly or relatively to *from_resource*."""
        uri_path = Resource.normalize(uri_path)
        fragment = uri_path.rsplit('#', maxsplit=1)
        nb_fragments = len(fragment)
        uri_str = ''
        if nb_fragments == 2:
            uri_str, fragment = fragment
            if uri_str in self.resources:
                return True
        # Fall back to a lookup relative to the referencing resource.
        start = from_resource.uri.normalize() if from_resource else '.'
        apath = path.dirname(start)
        uri = URI(path.join(apath, uri_str))
        return uri.normalize() in self.resources
    def resolve(self, uri, from_resource=None):
        """Resolves a '#'-fragment URI to the model element it designates."""
        upath = URIMapper.translate(Resource.normalize(uri), from_resource)
        uri_str, fragment = upath.rsplit('#', maxsplit=1)
        if uri_str in self.resources:
            root = self.resources[uri_str]
        else:
            # Same relative-lookup strategy as can_resolve().
            start = from_resource.uri.normalize() if from_resource else '.'
            apath = path.dirname(start)
            uri = URI(path.join(apath, uri_str))
            root = self.resources[uri.normalize()]
        if isinstance(root, Resource):
            # The fragment may select one of several roots ('/N/...').
            root_number, fragment = Resource.extract_rootnum_and_frag(fragment)
            root = root.contents[root_number]
        return Resource._navigate_from(fragment, root)
class URI(object):
    """Identifies a resource location and provides its in/out byte streams.

    Subclasses specialize stream creation for other protocols (e.g. HTTP).
    """
    # Per-protocol normalization of the plain URI string.
    _uri_norm = {'http': lambda x: x,
                 'https': lambda x: x,
                 'file': lambda x: path.abspath(x.replace('file://', ''))}
    # Per-protocol segment separator (default: the OS path separator).
    _uri_split = {'http': '/',
                  'https': '/',
                  'file': path.sep}
    def __init__(self, uri):
        if uri is None:
            raise TypeError('URI cannot be None')
        self.plain = uri
        self._split()
        self.__stream = None
    def _split(self):
        """Splits the plain URI into protocol / segments / extension."""
        if '://' in self.plain:
            self._protocol, rest = self.plain.split('://', maxsplit=1)
        elif ':/' in self.plain:
            self._protocol, rest = self.plain.split(':/', maxsplit=1)
        else:
            self._protocol, rest = None, self.plain
        uri_sep = self._uri_split.get(self._protocol, path.sep)
        self._segments = rest.split(uri_sep)
        self._last_segment = self._segments[-1:][0]
        # File extension drives the resource_factory choice in ResourceSet.
        if '.' in self._last_segment:
            self._extension = self._last_segment.split('.')[-1:][0]
        else:
            self._extension = None
    @property
    def protocol(self):
        return self._protocol
    @property
    def extension(self):
        return self._extension
    @property
    def segments(self):
        return self._segments
    @property
    def last_segment(self):
        return self._last_segment
    def create_instream(self):
        """Opens and returns the input (read) byte stream."""
        self.__stream = open(self.plain, 'rb')
        return self.__stream
    def close_stream(self):
        # NOTE(review): the stream is closed but not reset to None, so a
        # second call will close it again -- harmless for files, confirm.
        if self.__stream:
            self.__stream.close()
    def create_outstream(self):
        """Opens and returns the output (write) byte stream."""
        self.__stream = open(self.plain, 'wb')
        return self.__stream
    def normalize(self):
        """Returns the canonical string form used as resources dict key."""
        if self.protocol is not None:
            return self._uri_norm.get(self.protocol, lambda x: x)(self.plain)
        return path.abspath(self.plain)
    def relative_from_me(self, other_uri):
        """Expresses *other_uri* relatively to this URI's directory.

        NOTE(review): if *other_uri* is not a URI instance,
        ``other_normalized`` is unbound and the last line raises NameError --
        callers apparently always pass a URI; confirm.
        """
        normalized = path.dirname(self.normalize())
        if isinstance(other_uri, URI):
            other_normalized = other_uri.normalize()
            # Absolute (protocol-qualified) URIs are kept as-is.
            if other_uri.protocol:
                return other_normalized
        return path.relpath(other_normalized, normalized)
    def apply_relative_from_me(self, relative_path):
        """Resolves *relative_path* against this URI's directory."""
        if ':/' in relative_path:
            return relative_path
        parent_path = path.dirname(self.normalize())
        return path.join(parent_path, relative_path)
class HttpURI(URI):
    """URI specialisation for remote http/https resources.

    Reading goes through urllib; writing is not supported.
    """
    def __init__(self, uri):
        super().__init__(uri)
    def create_instream(self):
        # NOTE(review): name mangling makes this _HttpURI__stream, distinct
        # from the _URI__stream that URI.close_stream() closes — the stream
        # opened here is therefore never closed by close_stream(); confirm.
        self.__stream = urllib.request.urlopen(self.plain)
        return self.__stream
    def create_outstream(self):
        # Remote resources are read-only from this API.
        raise NotImplementedError('Cannot create an outstream for HttpURI')
    def apply_relative_from_me(self, relative_path):
        # urljoin handles absolute URLs and ../ navigation per RFC 3986.
        return urljoin(self.normalize(), relative_path)
# class StdioURI(URI):
# def __init__(self):
# super().__init__('stdio')
#
# def create_instream(self):
# self.__stream = sys.stdin.buffer
# return self.__stream
#
# def create_outstream(self):
# self.__stream = sys.stdout.buffer
# return self.__stream
#
# def close_stream(self):
# pass
class MetamodelDecoder(object):
    """Resolves 'uri#fragment' references against a metamodel registry."""
    @staticmethod
    def split_path(path):
        """Split a reference into ``(uri, fragment)``.

        :param path: reference string, optionally of the form 'uri#fragment'
        :return: ``(uri, fragment)``; ``uri`` is None when there is no '#'.

        Bug fix: the original left ``fragment`` as a one-element *list*
        (the result of ``rsplit``) when the reference had no '#'; it now
        always returns the fragment string.
        """
        path = Resource.normalize(path)
        parts = path.rsplit('#', maxsplit=1)
        if len(parts) == 2:
            uri, fragment = parts
        else:
            uri, fragment = None, parts[0]
        return uri, fragment
    @staticmethod
    def can_resolve(path, registry):
        """Return True when the uri part of *path* is a key of *registry*."""
        uri, _ = MetamodelDecoder.split_path(path)
        return uri in registry
    @staticmethod
    def resolve(path, registry):
        """Navigate to the object referenced by 'uri#fragment'.

        :raises ValueError: when *path* has no '#' separator
        :raises KeyError: when the uri is unknown to *registry*
        """
        path = Resource.normalize(path)
        uri, fragment = path.rsplit('#', maxsplit=1)
        epackage = registry[uri]
        return Resource._navigate_from(fragment, epackage)
class Global_URI_decoder(object):
    """Href decoder backed by the process-wide ``global_registry``."""
    @staticmethod
    def can_resolve(path, from_resource=None):
        """Return True when the path's uri part is registered globally."""
        return MetamodelDecoder.can_resolve(path, global_registry)

    @staticmethod
    def resolve(path, from_resource=None):
        """Translate the path through the resource set mapper, then resolve
        it against the global registry."""
        translated = URIMapper.translate(path, from_resource)
        return MetamodelDecoder.resolve(translated, global_registry)
class URIMapper(object):
    """Applies the resource set's prefix -> replacement URI mapping."""
    @staticmethod
    def translate(path, from_resource=None):
        """Rewrite *path* using the first matching prefix of the owning
        resource set's ``uri_mapper``; return it unchanged otherwise.

        NOTE(review): ``str.replace`` substitutes every occurrence of the
        prefix, not only the leading one — kept from the original.
        """
        if from_resource is None or from_resource.resource_set is None:
            return path
        mapping = from_resource.resource_set.uri_mapper
        for prefix, replacement in mapping.items():
            if path.startswith(prefix):
                return path.replace(prefix, replacement)
        return path
class URIConverter(object):
    """Dispatches a URI to the first converter able to handle it."""
    @classmethod
    def convert(cls, uri, resource_set=None):
        """Return the converted URI, or *uri* itself when no converter
        (resource-set local first, then global) can handle it."""
        if resource_set:
            candidates = chain(resource_set.uri_converter, global_uri_converter)
        else:
            candidates = global_uri_converter
        for candidate in candidates:
            if candidate.can_handle(uri):
                return candidate.convert(uri)
        return uri
class AbstractURIConverter(object):
    """Interface for URI converters.

    Subclasses implement both static methods: ``can_handle`` decides
    applicability, ``convert`` produces the converted URI instance.
    """
    @staticmethod
    @abstractmethod
    def can_handle(uri):
        # Return True when this converter knows how to convert *uri*.
        raise NotImplementedError("can_handle(uri) should be implemented in "
                                  "its subclass")
    @staticmethod
    @abstractmethod
    def convert(uri):
        # Return a converted URI instance for *uri*.
        raise NotImplementedError("convert(uri) should be implemented in its "
                                  "subclass")
class HttpURIConverter(AbstractURIConverter):
    """Converts http/https URIs into HttpURI instances."""
    @staticmethod
    def can_handle(uri):
        """True for the http and https protocols only."""
        return uri.protocol in ('http', 'https')

    @staticmethod
    def convert(uri):
        """Wrap the plain string of *uri* in an HttpURI."""
        return HttpURI(uri.plain)
class LocalMetamodelDecoder(object):
    """Href decoder backed by the owning resource set's metamodel registry."""
    @staticmethod
    def can_resolve(path, from_resource=None):
        """False when there is no resource set; otherwise delegate to the
        set's metamodel registry."""
        if from_resource is None or from_resource.resource_set is None:
            return False
        registry = from_resource.resource_set.metamodel_registry
        return MetamodelDecoder.can_resolve(path, registry)

    @staticmethod
    def resolve(path, from_resource=None):
        """Translate the path through the set's URI mapper, then resolve it
        against the set's metamodel registry."""
        registry = from_resource.resource_set.metamodel_registry
        translated = URIMapper.translate(path, from_resource)
        return MetamodelDecoder.resolve(translated, registry)
class Resource(object):
    """Container for a tree of model objects identified by a URI.

    Handles fragment resolution (XPath-like '/0/@feature.index' paths or
    UUIDs), cross-resource href decoding, and serialisable path building.
    """
    # Default decoder chain; instances copy it so it can be customised.
    decoders = [LocalMetamodelDecoder, Global_URI_decoder]
    def __init__(self, uri=None, use_uuid=False):
        """Create an empty resource.

        :param uri: URI instance identifying the resource (may be None)
        :param use_uuid: when True, objects are referenced by UUID instead
                         of structural fragments
        """
        self.uuid_dict = {}           # uuid -> object, used when use_uuid
        self.use_uuid = use_uuid
        self.prefixes = {}            # xml prefix -> nsURI
        self._uri = uri
        self.resource_set = None      # set by the owning ResourceSet
        self.decoders = list(Resource.decoders)
        self.contents = []            # root objects of the resource
        self.listeners = []
        self._eternal_listener = []
        self._resolve_mem = {}        # fragment -> object memoisation
        # self._feature_cache = {}
        self.cache_enabled = False
    @property
    def uri(self):
        """URI instance identifying this resource."""
        return self._uri
    @uri.setter
    def uri(self, value):
        # Accepts either a URI or a plain string; re-keys the owning
        # resource set so the resource stays reachable under the new URI.
        uri = value
        if isinstance(value, str):
            uri = URIConverter.convert(URI(value))
        if self.resource_set:
            old_uri = self._uri.normalize()
            resources = self.resource_set.resources
            old_resource = resources[old_uri]
            del resources[old_uri]
            resources[uri.normalize()] = old_resource
        self._uri = uri
    def resolve(self, fragment, resource=None):
        """Return the object addressed by *fragment* inside this resource.

        Tries the memoisation cache, then the UUID dict (when use_uuid),
        then structural navigation from the addressed root.
        """
        fragment = self.normalize(fragment)
        if fragment in self._resolve_mem:
            return self._resolve_mem[fragment]
        if self.use_uuid:
            with ignored(KeyError):
                # Strip the '#' / '//' decorations before the uuid lookup.
                frag = fragment[1:] if fragment.startswith('#') \
                    else fragment
                frag = frag[2:] if frag.startswith('//') else frag
                return self.uuid_dict[frag]
        result = None
        root_number, fragment = self.extract_rootnum_and_frag(fragment)
        root = self.contents[root_number]
        result = self._navigate_from(fragment, root)
        if self.cache_enabled and result:
            self._resolve_mem[fragment] = result
        return result
    def resolve_object(self, path):
        """Resolve *path* (possibly an external href) to an object.

        First asks the registered decoders, then falls back to URI-mapper
        translation plus href decoding (which may autoload a resource).
        """
        decoder = next((x for x in self.decoders
                        if x.can_resolve(path, self)), None)
        if decoder:
            return decoder.resolve(path, self)
        newpath = URIMapper.translate(path, self)
        decoder = self._get_href_decoder(newpath, path)
        return decoder.resolve(newpath, self)
    @staticmethod
    def extract_rootnum_and_frag(fragment):
        """Split '/N/rest' into (N, '/rest'); default to root 0 otherwise."""
        if re.match(r'^/\d+.*', fragment):
            fragment = fragment[1:]
            if '/' in fragment:
                index = fragment.index('/')
            else:
                index = len(fragment)
            root_number = fragment[:index]
            fragment = fragment[index:]
            return (int(root_number), fragment)
        else:
            return (0, fragment)
    def prefix2epackage(self, prefix):
        """Map an xml namespace prefix to its EPackage, or None."""
        nsURI = None
        try:
            nsURI = self.prefixes[prefix]
        except KeyError:
            return None
        try:
            return self.resource_set.metamodel_registry[nsURI]
        except Exception:
            # No resource set (AttributeError) or unknown nsURI (KeyError):
            # fall back to the global registry.
            return global_registry.get(nsURI)
    def get_metamodel(self, nsuri):
        """Return the metamodel registered for *nsuri*; raise KeyError."""
        try:
            if self.resource_set:
                return self.resource_set.metamodel_registry[nsuri]
            else:
                return global_registry[nsuri]
        except KeyError:
            raise KeyError(f'Unknown metamodel with uri: {nsuri}')
    @staticmethod
    def normalize(fragment):
        # 'type uri#frag' hrefs carry the type before a space; keep the
        # last whitespace-separated token.
        return fragment.split()[-1:][0] if ' ' in fragment else fragment
    def _is_external(self, path):
        """Split *path* into (uri, fragment); uri is None for local paths."""
        path = self.normalize(path)
        uri, fragment = (path.rsplit('#', maxsplit=1)
                         if '#' in path else (None, path))
        return uri, fragment
    def _get_href_decoder(self, path, original_path):
        """Find a decoder for *path*, autoloading the target resource when
        the reference is external; fall back to self for local fragments."""
        decoder = next((x for x in self.decoders
                        if x.can_resolve(path, self)), None)
        uri, _ = self._is_external(path)
        original_uri, _ = self._is_external(original_path)
        if not decoder and uri:
            decoder = self._try_resource_autoload(uri, original_uri)
        return decoder if decoder else self
    def _try_resource_autoload(self, uri, original_uri):
        """Load the referenced resource into the resource set.

        Returns the resource set (usable as a decoder); wraps any failure
        in a TypeError naming the unresolvable uri.
        """
        try:
            rset = self.resource_set
            tmp_uri = URI(self.uri.apply_relative_from_me(uri))
            external_uri = URIConverter.convert(tmp_uri, self.resource_set)
            norm_plain = self.uri.apply_relative_from_me(external_uri.plain)
            external_uri.plain = norm_plain
            external_uri._split()
            resource = rset.get_resource(external_uri)
            if external_uri.plain != original_uri:
                # Also register under the original spelling so later hrefs
                # using it hit the cache.
                rset.resources[original_uri] = resource
            return rset
        except Exception as e:
            raise TypeError(f'Resource "{uri}" cannot be resolved '
                            f'problem with "{e}"')
    @staticmethod
    def is_fragment_uuid(fragment):
        # Structural fragments always start with '/'; anything else is
        # treated as a uuid (or id-attribute value).
        return fragment and fragment[0] != '/'
    @classmethod
    def _navigate_from(cls, path, start_obj):
        """Walk a structural fragment from *start_obj* to the target object.

        Segment kinds: '@feature.index' (reference/index), '%source%'
        (eAnnotation by source), plain names (subpackage, classifier or
        named content).
        """
        if '#' in path[:1]:
            path = path[1:]
        if cls.is_fragment_uuid(path) and start_obj.eResource:
            return start_obj.eResource.uuid_dict[path]
        features = [x for x in path.split('/') if x]
        feat_info = [x.split('.') for x in features]
        obj = start_obj
        annot_content = False  # True right after a '%source%' segment
        for feat in feat_info:
            key, index = feat if len(feat) > 1 else (feat[0], None)
            if key.startswith('@'):
                tmp_obj = obj.__getattribute__(key[1:])
                try:
                    obj = tmp_obj[int(index)] if index else tmp_obj
                except IndexError:
                    raise ValueError('Index in path is not the collection,'
                                     ' broken proxy?')
                except ValueError:
                    # If index is not numeric it may be given as a name.
                    if index:
                        obj = tmp_obj.select(lambda x: x.name == index)[0]
            elif key.startswith('%'):
                key = key[1:-1]
                obj = obj.eAnnotations.select(lambda x: x.source == key)[0]
                annot_content = True
            elif annot_content:
                annot_content = False
                obj = obj.contents.select(lambda x: x.name == key)[0]
            else:
                with ignored(Exception):
                    subpack = next((p for p in obj.eSubpackages
                                    if p.name == key),
                                   None)
                    if subpack:
                        obj = subpack
                        continue
                try:
                    obj = obj.getEClassifier(key)
                except AttributeError:
                    obj = next((c for c in obj.eContents
                                if hasattr(c, 'name') and c.name == key),
                               None)
        return obj
    @staticmethod
    def get_id_attribute(eclass):
        """Return the first attribute of *eclass* flagged as an id, if any."""
        for attribute in eclass.eAllAttributes():
            id_attr = attribute.__dict__.get('iD', False)
            try:
                # 'iD' may be a value holder; unwrap it when it is.
                res = id_attr._get()
            except Exception:
                res = id_attr
            if res:
                return attribute
    # Refactor me
    def _build_path_from(self, obj):
        """Build the (path, needs_crossref) pair used to serialise a
        reference to *obj* from this resource.

        NOTE(review): returns '' / ('', False) sentinels for registered
        metamodel roots — callers appear to special-case these; confirm.
        """
        if isinstance(obj, type):
            obj = obj.eClass
        # if isinstance(obj, Ecore.EProxy) and not obj.resolved:
        if not getattr(obj, 'resolved', True):
            # Unresolved proxies keep their original serialised path.
            return (obj._proxy_path, True)
        if obj.eResource != self:
            # Cross-resource (or metamodel-registry) reference.
            eclass = obj.eClass
            prefix = eclass.ePackage.nsPrefix
            _type = f'{prefix}:{eclass.name}'
            uri_fragment = obj.eURIFragment()
            crossref = False
            if obj.eResource:
                uri = self.uri.relative_from_me(obj.eResource.uri)
                crossref = True
                if obj.eResource.use_uuid:
                    self._assign_uuid(obj)
                    uri_fragment = obj._internal_id
                else:
                    id_attribute = self.get_id_attribute(eclass)
                    if id_attribute:
                        id_value = obj.eGet(id_attribute)
                        # id attributes shall not be used if the value is unset
                        if id_value:
                            uri_fragment = id_value
            else:
                # Object lives in a registered metamodel, not a resource:
                # look its root up in the applicable registry.
                uri = ''
                root = obj.eRoot()
                mm_registry = None
                if self.resource_set:
                    mm_registry = self.resource_set.metamodel_registry
                else:
                    mm_registry = global_registry
                for reguri, value in mm_registry.items():
                    if value is root:
                        uri = reguri
                        break
                else:
                    return '', False
            if not uri_fragment.startswith('#'):
                uri_fragment = '#' + uri_fragment
            if crossref:
                return (f'{uri}{uri_fragment}', True)
            else:
                return (f'{_type} {uri}{uri_fragment}', False)
        # Local reference: uuid, id attribute, or structural fragment.
        if self.use_uuid:
            self._assign_uuid(obj)
            return (obj._internal_id, False)
        id_attribute = self.get_id_attribute(obj.eClass)
        if id_attribute:
            etype = id_attribute._eType
            id_att_value = obj.eGet(id_attribute)
            # the check for ' ' prevents malformed ids to used as references
            if (id_att_value is not None) and (' ' not in id_att_value):
                return (etype.to_string(id_att_value), False)
        return (obj.eURIFragment(), False)
    @staticmethod
    def _assign_uuid(obj):
        # sets an uuid if the resource should deal with
        # and obj has none yet (addition to the resource for example)
        if not obj._internal_id:
            uuid = str(uuid4())
            obj._internal_id = uuid
    def append(self, root):
        """Add *root* as a new root object, detaching it from any container.

        :raises ValueError: when *root* is not an EObject
        """
        if not isinstance(root, Ecore.EObject):
            raise ValueError('The resource requires an EObject type, '
                             f'but received {type(root)} instead.')
        self.contents.append(root)
        root._eresource = self
        if root._container is not None:
            # A root cannot stay contained elsewhere: remove it from its
            # previous containment feature.
            container = root._container
            feature = root._containment_feature
            if feature.many:
                container.eGet(feature).remove(root)
            else:
                container.eSet(feature, None)
    def remove(self, root):
        """Remove a root object and clear its resource back-pointer."""
        self.contents.remove(root)
        root._eresource = None
    def open_out_stream(self, other=None):
        """Open a write stream on *other* (URI or string) or on self.uri."""
        if other and not isinstance(other, URI):
            other = URI(other)
        return (other.create_outstream() if other
                else self.uri.create_outstream())
    def extend(self, values):
        """Append every object of *values* as a root (see append)."""
        append = self.append
        for x in values:
            append(x)
    @lru_cache()
    def _find_feature(self, eclass, name):
        # Memoised structural-feature lookup; NOTE(review): lru_cache on a
        # method keeps the resource (and its eclasses) alive for the cache
        # lifetime — confirm acceptable here.
        return eclass.findEStructuralFeature(name)
    #     fname = f'{eclass.name}#{name}'
    #     try:
    #         return self._feature_cache[fname]
    #     except KeyError:
    #         feature = eclass.findEStructuralFeature(name)
    #         self._feature_cache[fname] = feature
    #         return feature
| 34.473354 | 79 | 0.587297 |
3e79148e432493f36676a1ad234fe82b1434eceb | 34,029 | py | Python | MonocularDepthEstimation/src/train/train_model.py | csharpshooter/DeepLearning | c1d20660c32076468970f7376931e1fcd0d2644e | [
"MIT"
] | null | null | null | MonocularDepthEstimation/src/train/train_model.py | csharpshooter/DeepLearning | c1d20660c32076468970f7376931e1fcd0d2644e | [
"MIT"
] | null | null | null | MonocularDepthEstimation/src/train/train_model.py | csharpshooter/DeepLearning | c1d20660c32076468970f7376931e1fcd0d2644e | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import torch
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
from torch.optim.lr_scheduler import LambdaLR
from torchsummary import summary
from tqdm import tqdm
from src.utils import Utils
'''
Training utilities: the TrainModel class below groups the different
training and evaluation loops used to train and checkpoint models.
'''
class TrainModel:
'''
init method of class used for initlizing local varirables
'''
def __init__(self):
self.train_losses = []
self.test_losses = []
self.train_acc = []
self.test_acc = []
self.reg_loss_l1 = []
self.factor = 0 # 0.000005
self.loss_type = self.get_loss_function_monocular()
self.t_acc_max = 0 # track change in validation loss
self.optimizer = None
self.optimizer_mask = None
self.optimizer_depthmask = None
self.train_losses_mask = []
self.test_losses_mask = []
self.train_acc_mask = []
self.test_acc_mask = []
self.train_losses_depthmask = []
self.test_losses_depthmask = []
self.train_acc_depthmask = []
self.test_acc_depthmask = []
    def showmodelsummary(self, model, input_size=(3, 32, 32)):
        ''' Uses torchsummary to display model layer details and parameter counts per layer
        :param model: CNN Model
        :param input_size: size of input to the model (channels, height, width)
        :return: None
        '''
        # NOTE(review): device is hard-coded to "cuda" -- fails on CPU-only
        # hosts; confirm this is intended.
        summary(model, input_size=input_size, device="cuda")
def train(self, model, device, train_loader, optimizer, epoch):
''' Basic train method to train a model with single input image
:param model: CNN Model
:param device: device object w.r.t cuda or non-cuda
:param train_loader: data loader to load data from dataset while training
:param optimizer: optimizer to be used while training
:param epoch: epoch fo which training is done on
:return: None
'''
model.train()
pbar = tqdm(train_loader)
correct = 0
processed = 0
self.optimizer = optimizer
for batch_idx, (data, target) in enumerate(pbar):
# get samples
data, target = data.to(device), target.to(device)
# Init
optimizer.zero_grad()
# In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch
# accumulates the gradients on subsequent backward passes. Because of this, when you start your training
# loop, ideally you should zero out the gradients so that you do the parameter update correctly.
# Predict
y_pred = model(data)
# # Calculate L1 loss
# l1_crit = torch.nn.L1Loss(size_average=False)
# reg_loss = 0
# for param in model.parameters():
# spare_matrix = torch.randn_like(param) * 0
# reg_loss += l1_crit(param, spare_matrix)
#
# self.reg_loss_l1.append(reg_loss)
# Calculate loss
loss = self.loss_type(y_pred, target)
# loss += self.factor * reg_loss
# self.train_losses.append(loss)
# Backpropagation
loss.backward()
optimizer.step()
# Update pbar-tqdm
pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
processed += len(data)
pbar.set_description(
desc=f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100 * correct / processed:0.2f}')
self.train_acc.append(100 * correct / processed)
self.train_losses.append(loss)
def test(self, model, device, test_loader, class_correct, class_total, epoch, lr_data):
''' Basic test method to train a model with single input image
:param model: CNN Model
:param device: device object w.r.t cuda or non-cuda
:param test_loader: data loader to load data from dataset while training
:param class_correct: list to store correct predictions for the epoch
:param class_total: list to store total correct predictions for the epoch
:param epoch: epoch fo which training is done on
:param lr_data: learning rate list to be saved while saving model
:return: test accuracy
'''
model.eval()
test_loss = 0
correct = 0
t_acc = 0
# pbar = tqdm(test_loader)
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += self.loss_type(output, target).item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct_tensor = pred.eq(target.data.view_as(pred))
correct += pred.eq(target.view_as(pred)).sum().item()
correct_new = np.squeeze(correct_tensor.cpu().numpy())
# calculate test accuracy for each object class
# for i in range(10):
# label = target.data[i]
# class_correct[label] += correct_new[i].item()
# class_total[label] += 1
test_loss /= len(test_loader.dataset)
self.test_losses.append(test_loss)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
self.test_acc.append(100. * correct / len(test_loader.dataset))
t_acc = 100. * correct / len(test_loader.dataset)
# save model if validation loss has decreased
if self.t_acc_max <= t_acc:
print('Validation accuracy increased ({:.6f} --> {:.6f}). Saving model ...'.format(
self.t_acc_max,
t_acc))
from src.utils import Utils
Utils.savemodel(model=model, epoch=epoch, path="savedmodels/checkpoint.pt",
optimizer_state_dict=self.optimizer.state_dict
, train_losses=self.train_losses, train_acc=self.train_acc, test_acc=self.test_acc,
test_losses=self.test_losses, lr_data=lr_data, class_correct=class_correct,
class_total=class_total)
self.t_acc_max = t_acc
return t_acc
def getlossfunction(self):
'''
returns loss function for model training
:return: cross entropy loss function
'''
return CrossEntropyLoss()
def get_loss_function_monocular(self):
'''
returns loss function for monocular depth estimation model training
:return: BCEWithLogitsLoss loss
'''
return BCEWithLogitsLoss()
# return MSELoss()
def gettraindata(self):
'''
:return: train accuracy and loss values
'''
return self.train_losses, self.train_acc
def gettestdata(self):
'''
:return: test accuracy and loss values
'''
return self.test_losses, self.test_acc
def getinferredimagesfromdataset(dataiterator, model, classes, batch_size, number=25):
'''
return classified and misclassified inferred images from dataset
:param model: CNN Model
:param classes: No of classes in dataset
:param batch_size: batchsize used while inferencing the model
:param number: number of images to display
:return: classified and missclassified images as per 'number' specified
'''
try:
misclassifiedcount = 0
classifiedcount = 0
misclassified = {}
classified = {}
loop = 0
while misclassifiedcount < number or classifiedcount < number:
loop += 1
# print("loop = {}".format(loop))
img, labels = dataiterator.next()
# images = img.numpy()
# move model inputs to cuda
images = img.cuda()
# print(len(img))
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.cpu().numpy())
for idx in np.arange(batch_size):
# print("for")
key = "Pred={} (Act={}) ".format(classes[preds[idx]], classes[labels[idx]])
# print("m-" + str(misclassifiedcount))
# print("c-" + str(classifiedcount))
# print("mlen-" + str(len(misclassified)))
# print("clen-" + str(len(classified)))
# print(preds[idx])
# print(labels[idx].item())
# print(key)
if preds[idx] != labels[idx].item():
if misclassifiedcount < number:
key = key + str(misclassifiedcount)
misclassified[key] = images[idx].unsqueeze(0)
misclassifiedcount += 1
else:
if classifiedcount < number:
key = key + str(classifiedcount)
classified[key] = images[idx].unsqueeze(0)
# images[idx].cpu()
classifiedcount += 1
if misclassifiedcount >= number and classifiedcount >= number:
break
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to an integer.")
except:
print(sys.exc_info()[0])
return classified, misclassified
def start_training_cyclic_lr(self, epochs, model, device, test_loader, train_loader, max_lr_epoch, weight_decay
, min_lr=None,
max_lr=None,
cycles=1, annealing=False):
'''
start training using pytorch inbuilt cyclic LR method
:param epochs: epochs to train
:param model: CNN model
:param device: device cuda or not cuda
:param test_loader: test image loader
:param train_loader: train image loader
:param max_lr_epoch: epoch in which which max lr is achieved
:param weight_decay: weight decay or l2 regularization value
:param min_lr: minimum lr value to reach
:param max_lr: maximum lr value to reach
:param cycles: no of cycles for cyclic lr
:param annealing: if true does annealing for the max lr after every cycle
:return:
'''
lr_data = []
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
optimizer = self.get_optimizer(model=model, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer=optimizer, base_lr=min_lr, max_lr=max_lr,
mode='triangular2',
cycle_momentum=True, step_size_up=max_lr_epoch,
step_size_down=epochs - max_lr_epoch, )
self.start_training(epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
class_correct, class_total, path="savedmodels/finalmodelwithdata.pt")
return lr_data, class_correct, class_total
    def start_training(self, epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
                       class_correct, class_total, path):
        '''
        Core epoch loop: train, evaluate (checkpointing on improvement),
        then step the scheduler once per epoch.
        :param epochs: epochs to train
        :param model: CNN model
        :param device: device cuda or not cuda
        :param test_loader: test image loader
        :param train_loader: train image loader
        :param optimizer: optimizer to be used for training
        :param scheduler: scheduler to be used for training
        :param lr_data: learning rate list to be saved while saving model (mutated in place)
        :param class_correct: list to store correct predictions for the epoch
        :param class_total: list to store total correct predictions for the epoch
        :param path: path for final model checkpoint to be saved
        :return: lr_data, class_correct, class_total
        '''
        for epoch in range(0, epochs):
            print("EPOCH:", epoch)
            # Record the lr of every param group before the epoch runs.
            for param_groups in optimizer.param_groups:
                print("Learning rate =", param_groups['lr'], " for epoch: ", epoch)  # print LR for different epochs
                lr_data.append(param_groups['lr'])
            self.train(model, device, train_loader, optimizer, epoch)
            # NOTE(review): t_acc_epoch is unused -- self.test() already
            # checkpoints internally on accuracy improvement.
            t_acc_epoch = self.test(model=model, device=device, test_loader=test_loader,
                                    class_correct=class_correct,
                                    class_total=class_total, epoch=epoch, lr_data=lr_data)
            # Epoch-granularity scheduler: stepped after evaluation.
            scheduler.step()
        print('Saving final model after training cycle completion')
        # NOTE(review): passes optimizer.state_dict (bound method) rather
        # than its result; save_model forwards it verbatim -- confirm.
        self.save_model(model, epochs, optimizer.state_dict, lr_data, class_correct, class_total,
                        path=path)
        return lr_data, class_correct, class_total
    def get_optimizer(self, model, lr=1, momentum=0.9, weight_decay=0):
        '''
        Build the SGD-style optimizer via the project's Utils factory.
        :param model: CNN model
        :param lr: learning rate; NOTE(review): the default of 1 looks
                   intended for LambdaLR-multiplier schedulers -- confirm
                   before using this default with a fixed-lr run
        :param momentum: momentum, commonly 0.9
        :param weight_decay: weight decay (L2 regularisation)
        :return: optimizer object (Nesterov momentum enabled)
        '''
        optimizer = Utils.createoptimizer(model, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True)
        return optimizer
def get_cyclic_scheduler(self, optimizer, epochs=25, max_lr_epoch=5, min_lr=0.01, max_lr=0.1):
'''
Custom cyclic lr logic written by me
:param optimizer: optimizer to be used
:param epochs: epochs to train
:param max_lr_epoch: epoch in which which max lr is achieved
:param min_lr: minimum lr value to reach
:param max_lr: maximum lr value to reach
:return: scheduler with lambda function for desired cyclic lr parmeters
'''
from src.train import TrainHelper
lambda1 = TrainHelper.cyclical_lr(max_lr_epoch=max_lr_epoch, epochs=epochs, min_lr=min_lr, max_lr=max_lr)
scheduler = LambdaLR(optimizer, lr_lambda=[lambda1])
return scheduler
def save_model(self, model, epochs, optimizer_state_dict, lr_data, class_correct, class_total,
path="savedmodels/finalmodelwithdata.pt"):
'''
:param model: model whose data wi;; be saved
:param epochs: no of epochs model was trained for
:param optimizer_state_dict: optimizer state dict to be saved
:param lr_data: lr data to be saved
:param class_correct: class_correct to be saved
:param class_total: class_total to be saved
:param path: path where model is to be saved
:return: None
'''
train_losses, train_acc = self.gettraindata()
test_losses, test_acc = self.gettestdata()
Utils.savemodel(model=model, epoch=epochs, path=path,
optimizer_state_dict=optimizer_state_dict
, train_losses=train_losses, train_acc=train_acc, test_acc=test_acc,
test_losses=test_losses, lr_data=lr_data, class_correct=class_correct,
class_total=class_total)
def start_training_lr_finder(self, epochs, model, device, test_loader, train_loader, lr, weight_decay, lambda_fn):
'''
:param epochs: epochs to train
:param model: CNN model
:param device: device cuda or not cuda
:param test_loader: test image loader
:param train_loader: train image loader
:param lr: start learning rate value
:param weight_decay: weight decay or l2 regularization value
:param lambda_fn: lambda function be used for scheduler
:return: lr_data, class_correct, class_total
'''
lr_data = []
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
optimizer = self.get_optimizer(model=model, lr=lr, weight_decay=weight_decay)
scheduler = Utils.create_scheduler_lambda_lr(lambda_fn, optimizer)
return self.start_training(epochs, model, device, test_loader, train_loader, optimizer, scheduler, lr_data,
class_correct, class_total, path="savedmodels/lrfinder.pt")
    def train_Monocular(self, model, device, train_loader, optimizer, epoch, loss_fn, show_output=False, infer_index=2):
        '''
        Train one epoch of the monocular model; batches carry multiple
        images (data[0..3]) and the ground truth lives at data[infer_index].
        :param model: CNN model
        :param device: device cuda or not cuda
        :param train_loader: train image loader
        :param optimizer: optimizer to be used for training
        :param epoch: current epoch
        :param loss_fn: loss fn to be used while training
        :param show_output: if true displays output tensors of actual and predicted value
        :param infer_index: index of ground truth in the data (2=mask, 3=depth mask -- confirm)
        :return: output tensor of last batch of epoch
        '''
        model.train()
        pbar = tqdm(train_loader)
        self.optimizer = optimizer
        iou = 0
        y_pred = None
        total_iou = 0
        train_loss = 0
        for batch_idx, (data, target) in enumerate(pbar):
            # get samples
            # data, target = data.to(device), target.to(device)
            # Move every tensor of the composite sample to the device.
            data[0] = data[0].to(device)
            data[1] = data[1].to(device)
            data[2] = data[2].to(device)
            data[3] = data[3].to(device)
            # Init
            optimizer.zero_grad()
            # In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch
            # accumulates the gradients on subsequent backward passes. Because of this, when you start your training
            # loop, ideally you should zero out the gradients so that you do the parameter update correctly.
            # Predict
            y_pred = model(data)
            # Calculate loss against the selected ground-truth channel.
            loss = loss_fn(y_pred, data[infer_index])
            iou = self.calculate_iou(data[infer_index].detach().cpu().numpy(), y_pred.detach().cpu().numpy())
            total_iou += iou
            train_loss += loss.item()
            # Backpropagation
            loss.backward()
            optimizer.step()
            # Periodic progress report (optionally with image grids).
            if batch_idx % 500 == 0:
                if show_output == True:
                    Utils.show(y_pred.detach().cpu(), nrow=8)
                    Utils.show(data[infer_index].cpu(), nrow=8)
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset), (100. * batch_idx / len(train_loader)),
                    loss.item()))
                print('IOU : {}'.format(iou))
        # NOTE(review): loss and IoU are averaged over dataset length, not
        # over the number of batches -- confirm intended scaling.
        train_loss /= len(train_loader.dataset)
        total_iou /= len(train_loader.dataset)
        print('Batch IOU = {}'.format(total_iou))
        self.train_losses.append(train_loss)
        self.train_acc.append(total_iou)
        return y_pred
    def test_Monocular(self, model, device, test_loader, class_correct, class_total, epoch, lr_data, loss_fn,
                       show_output=False, infer_index=2):
        '''
        Evaluate the monocular model and save a per-epoch checkpoint.
        :param model: CNN model
        :param device: device cuda or not cuda
        :param test_loader: test image loader
        :param class_correct: per-class correct counts (only forwarded into the checkpoint)
        :param class_total: per-class total counts (only forwarded into the checkpoint)
        :param epoch: current epoch
        :param lr_data: lr history to be saved
        :param loss_fn: loss function to be used for inferencing
        :param show_output: if true displays output tensors of actual and predicted value
        :param infer_index: index of ground truth in the data
        :return: (output of last batch, mean IoU over the dataset)
        '''
        model.eval()
        test_loss = 0
        correct = 0
        pbar = tqdm(test_loader)
        output = None
        # dice_coeff_var = 0
        total_iou = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(pbar):
                # Composite sample: move all four tensors to the device.
                data[0] = data[0].to(device)
                data[1] = data[1].to(device)
                data[2] = data[2].to(device)
                data[3] = data[3].to(device)
                output = model(data)
                loss = loss_fn(output, data[infer_index]).item()
                test_loss += loss
                # pred = output.argmax(dim=1, keepdim=True)
                # correct += pred.eq(data[2].view_as(pred)).sum().item()
                iou = self.calculate_iou(data[infer_index].detach().cpu().numpy(), output.detach().cpu().numpy())
                total_iou += iou
                # dice_coeff_var += dice_coeff(data[1], data[infer_index]).item()
                if batch_idx % 500 == 0:
                    if show_output == True:
                        Utils.show(output.cpu(), nrow=8)
                        Utils.show(data[infer_index].cpu(), nrow=8)
                    print('Test Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data), len(test_loader.dataset), (100. * batch_idx / len(test_loader)),
                        loss))
                    print('IOU : {}'.format(iou))
        # NOTE(review): averaged over dataset length, not batch count.
        test_loss /= len(test_loader.dataset)
        total_iou /= len(test_loader.dataset)
        print('Batch IOU = {}'.format(total_iou))
        self.test_losses.append(test_loss)
        self.test_acc.append(total_iou)
        # Unconditional per-epoch checkpoint (unlike self.test(), which only
        # saves on accuracy improvement).
        model_save_path = "savedmodels" + os.path.sep + "checkpoint-{}.pt".format(epoch)
        Utils.savemodel(model=model, epoch=epoch, path=model_save_path,
                        optimizer_state_dict=self.optimizer.state_dict()
                        , train_losses=self.train_losses, test_acc=self.test_acc,
                        test_losses=self.test_losses, lr_data=lr_data, class_correct=class_correct,
                        class_total=class_total)
        return output, total_iou
def calculate_iou(self, target, prediction, thresh=0.5):
'''
Calculate intersection over union value
:param target: ground truth
:param prediction: output predicted by model
:param thresh: threshold
:return: iou value
'''
intersection = np.logical_and(np.greater(target, thresh), np.greater(prediction, thresh))
union = np.logical_or(np.greater(target, thresh), np.greater(prediction, thresh))
iou_score = np.sum(intersection) / np.sum(union)
return iou_score
    def train_DualLoss(self, model_mask, model_depthmask, device, train_loader, optimizer_mask, optimer_depthmask,
                       epoch, loss_fn_mask,
                       loss_fn_depthmask,
                       show_output=False):
        '''
        Train the mask model and the depth-mask model in one pass over the
        data (independent losses/optimizers, shared batches).
        :param model_mask: mask model
        :param model_depthmask: depth mask model
        :param device: device cuda or not cuda
        :param train_loader: data loader for train images
        :param optimizer_mask: optimizer for mask model
        :param optimer_depthmask: optimizer for depth mask model
        :param epoch: current epoch
        :param loss_fn_mask: loss for mask model
        :param loss_fn_depthmask: loss for depth mask model
        :param show_output: if true displays output tensors of actual and predicted value
        :return: predictions of last batch of the epoch for both models
        '''
        model_mask.train()
        model_depthmask.train()
        pbar = tqdm(train_loader)
        self.optimizer_mask = optimizer_mask
        # NOTE(review): attribute name 'optimer_depthmask' (sic) differs
        # from 'optimizer_depthmask' initialised in __init__ -- likely a
        # typo; left unchanged here since other code may read it.
        self.optimer_depthmask = optimer_depthmask
        iou_mask = 0
        iou_depthmask = 0
        y_pred_mask = None
        y_pred_depthmask = None
        total_iou_mask = 0
        total_iou_depthmask = 0
        train_loss_mask = 0
        train_loss_depthmask = 0
        for batch_idx, (data, target) in enumerate(pbar):
            # get samples
            # data, target = data.to(device), target.to(device)
            # Composite sample: data[2] is the mask target, data[3] the
            # depth-mask target.
            data[0] = data[0].to(device)
            data[1] = data[1].to(device)
            data[2] = data[2].to(device)
            data[3] = data[3].to(device)
            # Init
            optimizer_mask.zero_grad()
            optimer_depthmask.zero_grad()
            # In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch
            # accumulates the gradients on subsequent backward passes. Because of this, when you start your training
            # loop, ideally you should zero out the gradients so that you do the parameter update correctly.
            # Predict
            y_pred_mask = model_mask(data)
            y_pred_depthmask = model_depthmask(data)
            # Calculate loss -- one loss per model, backpropagated separately.
            loss_mask = loss_fn_mask(y_pred_mask, data[2])
            loss_depthmask = loss_fn_depthmask(y_pred_depthmask, data[3])
            iou_mask = self.calculate_iou(data[2].detach().cpu().numpy(), y_pred_mask.detach().cpu().numpy())
            iou_depthmask = self.calculate_iou(data[3].detach().cpu().numpy(), y_pred_depthmask.detach().cpu().numpy())
            total_iou_mask += iou_mask
            total_iou_depthmask += iou_depthmask
            train_loss_mask += loss_mask.item()
            train_loss_depthmask += loss_depthmask.item()
            # Backpropagation
            loss_mask.backward()
            loss_depthmask.backward()
            optimizer_mask.step()
            optimer_depthmask.step()
            # Periodic progress report (optionally with image grids).
            if batch_idx % 500 == 0:
                if show_output == True:
                    Utils.show(y_pred_mask.detach().cpu(), nrow=8)
                    Utils.show(data[2].cpu(), nrow=8)
                    Utils.show(y_pred_depthmask.detach().cpu(), nrow=8)
                    Utils.show(data[3].cpu(), nrow=8)
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tMask Loss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset), (100. * batch_idx / len(train_loader)),
                    loss_mask.item()))
                print('Mask IOU : {}'.format(iou_mask))
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tDepth Mask Loss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset), (100. * batch_idx / len(train_loader)),
                    loss_depthmask.item()))
                print('Depth Mask IOU : {}'.format(iou_depthmask))
        # NOTE(review): averaged over dataset length, not batch count.
        train_loss_mask /= len(train_loader.dataset)
        train_loss_depthmask /= len(train_loader.dataset)
        total_iou_mask /= len(train_loader.dataset)
        total_iou_depthmask /= len(train_loader.dataset)
        print('Batch Mask IOU = {}'.format(total_iou_mask))
        print('Batch DepthMask IOU = {}'.format(total_iou_depthmask))
        self.train_losses_mask.append(train_loss_mask)
        self.train_acc_mask.append(total_iou_mask)
        self.train_losses_depthmask.append(train_loss_depthmask)
        self.train_acc_depthmask.append(total_iou_depthmask)
        return y_pred_mask, y_pred_depthmask
def test_DualLoss(self, model_mask, model_depthmask, device, test_loader, class_correct, class_total, epoch,
lr_data, loss_fn_mask,
loss_fn_depthmask,
show_output=False, ):
'''
test method for monocular depth estimate train 2 models in one epoch
:param model_mask: mask model
:param model_depthmask: depth mask model
:param device: device cuda or not cuda
:param test_loader: data loader for test images
:param epoch: current epoch
:param loss_fn_mask: loss for mask model
:param loss_fn_depthmask: loss for depth mask model
:param show_output: if true displays output tensors of actual and predicted value
:return: preidiction of last batch of epoch for both models
'''
model_mask.eval()
model_depthmask.eval()
test_loss_mask = 0
test_loss_depthmask = 0
correct = 0
pbar = tqdm(test_loader)
output_mask = None
output_depthmask = None
total_iou_mask = 0
total_iou_depthmask = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(pbar):
data[0] = data[0].to(device)
data[1] = data[1].to(device)
data[2] = data[2].to(device)
data[3] = data[3].to(device)
output_mask = model_mask(data)
output_depthmask = model_depthmask(data)
loss_mask = loss_fn_mask(output_mask, data[2]).item()
loss_depthmask = loss_fn_depthmask(output_depthmask, data[3]).item()
test_loss_mask += loss_mask
test_loss_depthmask += loss_depthmask
# pred_mask = output_mask.argmax(dim=1, keepdim=True)
# pred_depthmask = output_depthmask.argmax(dim=1, keepdim=True)
# correct += pred.eq(data[2].view_as(pred)).sum().item()
iou_mask = self.calculate_iou(data[2].detach().cpu().numpy(), output.detach().cpu().numpy())
iou_depthmask = self.calculate_iou(data[3].detach().cpu().numpy(), output.detach().cpu().numpy())
total_iou_mask += iou_mask
total_iou_depthmask += iou_depthmask
# dice_coeff_var += dice_coeff(data[1], data[infer_index]).item()
if batch_idx % 500 == 0:
if show_output == True:
Utils.show(output_mask.cpu(), nrow=8)
Utils.show(data[2].cpu(), nrow=8)
Utils.show(output_depthmask.cpu(), nrow=8)
Utils.show(data[3].cpu(), nrow=8)
print('Test Epoch: {} [{}/{} ({:.0f}%)]\tMask Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(test_loader.dataset), (100. * batch_idx / len(test_loader)),
loss_mask))
print('Mask IOU : {}'.format(iou_mask))
print('Test Epoch: {} [{}/{} ({:.0f}%)]\tDepth Mask Loss: {:.6f}'.format(
epoch, batch_idx * len(data), len(test_loader.dataset), (100. * batch_idx / len(test_loader)),
loss_depthmask))
print('Depth Mask IOU : {}'.format(iou_depthmask))
test_loss_mask /= len(test_loader.dataset)
test_loss_depthmask /= len(test_loader.dataset)
total_iou_mask /= len(test_loader.dataset)
total_iou_depthmask /= len(test_loader.dataset)
print('Mask Batch IOU = {}'.format(total_iou_mask))
print('Depth Mask Batch IOU = {}'.format(total_iou_depthmask))
self.test_losses_mask.append(test_loss_mask)
self.test_acc_mask.append(total_iou_mask)
self.test_losses_depthmask.append(test_loss_depthmask)
self.test_acc_depthmask.append(total_iou_depthmask)
model_save_path_mask = "savedmodels" + os.path.sep + "checkpoint-mask-{}.pt".format(epoch)
model_save_path_depthmask = "savedmodels" + os.path.sep + "checkpoint-depthmask-{}.pt".format(epoch)
Utils.savemodel(model=model_mask, epoch=epoch, path=model_save_path_mask,
train_acc=self.train_acc_mask, optimizer_state_dict=self.optimizer_mask.state_dict()
, train_losses=self.train_losses_mask, test_acc=self.test_acc_mask,
test_losses=self.test_losses_mask, lr_data=lr_data, class_correct=class_correct,
class_total=class_total)
Utils.savemodel(model=model_depthmask, epoch=epoch, path=model_save_path_depthmask,
train_acc=self.train_acc_depthmask, optimizer_state_dict=self.optimizer_depthmask.state_dict()
, train_losses=self.train_losses_depthmask, test_acc=self.test_acc_depthmask,
test_losses=self.test_losses_depthmask, lr_data=lr_data, class_correct=class_correct,
class_total=class_total)
return output_mask, output_depthmask, total_iou_mask, total_iou_depthmask
def gettraintestdatafordualmodels(self):
'''
:return: accuracy and loss for train and test for monocular depth estimation models
'''
return self.train_losses_mask, self.train_acc_mask, self.test_losses_mask, self.test_acc_mask \
, self.train_losses_depthmask, self.train_acc_depthmask, self.test_losses_depthmask, self.test_acc_depthmask
| 43.739075 | 120 | 0.592024 |
2bb62de970c8da45cf6859db80d551ef52927879 | 2,927 | py | Python | chia/util/chia_logging.py | hashgreen/chia-blockchain | b1acb5597ba242649d1dc97de7fd605148e33816 | [
"Apache-2.0"
] | 2 | 2022-03-22T22:00:46.000Z | 2022-03-22T22:42:45.000Z | chia/util/chia_logging.py | zcomputerwiz/experiments-blockchain | 841754b44494451a9e3e537575eeec431fe533d1 | [
"Apache-2.0"
] | 3 | 2022-03-21T22:00:11.000Z | 2022-03-21T22:00:40.000Z | chia/util/chia_logging.py | zcomputerwiz/experiments-blockchain | 841754b44494451a9e3e537575eeec431fe533d1 | [
"Apache-2.0"
] | 1 | 2022-03-20T14:51:39.000Z | 2022-03-20T14:51:39.000Z | import logging
from pathlib import Path
from typing import Dict
import colorlog
from concurrent_log_handler import ConcurrentRotatingFileHandler
from logging.handlers import SysLogHandler
from chia.util.path import mkdir, path_from_root
def initialize_logging(service_name: str, logging_config: Dict, root_path: Path):
log_path = path_from_root(root_path, logging_config.get("log_filename", "log/debug.log"))
log_date_format = "%Y-%m-%dT%H:%M:%S"
mkdir(str(log_path.parent))
file_name_length = 33 - len(service_name)
if logging_config["log_stdout"]:
handler = colorlog.StreamHandler()
handler.setFormatter(
colorlog.ColoredFormatter(
f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
datefmt=log_date_format,
reset=True,
)
)
logger = colorlog.getLogger()
logger.addHandler(handler)
else:
logger = logging.getLogger()
maxrotation = logging_config.get("log_maxfilesrotation", 7)
maxbytesrotation = logging_config.get("log_maxbytesrotation", 50 * 1024 * 1024)
handler = ConcurrentRotatingFileHandler(log_path, "a", maxBytes=maxbytesrotation, backupCount=maxrotation)
handler.setFormatter(
logging.Formatter(
fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
datefmt=log_date_format,
)
)
logger.addHandler(handler)
if logging_config.get("log_syslog", False):
log_syslog_host = logging_config.get("log_syslog_host", "localhost")
log_syslog_port = logging_config.get("log_syslog_port", 514)
log_syslog_handler = SysLogHandler(address=(log_syslog_host, log_syslog_port))
log_syslog_handler.setFormatter(logging.Formatter(fmt=f"{service_name} %(message)s", datefmt=log_date_format))
logger = logging.getLogger()
logger.addHandler(log_syslog_handler)
if "log_level" in logging_config:
if logging_config["log_level"] == "CRITICAL":
logger.setLevel(logging.CRITICAL)
elif logging_config["log_level"] == "ERROR":
logger.setLevel(logging.ERROR)
elif logging_config["log_level"] == "WARNING":
logger.setLevel(logging.WARNING)
elif logging_config["log_level"] == "INFO":
logger.setLevel(logging.INFO)
elif logging_config["log_level"] == "DEBUG":
logger.setLevel(logging.DEBUG)
logging.getLogger("aiosqlite").setLevel(logging.INFO) # Too much logging on debug level
logging.getLogger("websockets").setLevel(logging.INFO) # Too much logging on debug level
else:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.INFO)
| 42.42029 | 119 | 0.663478 |
ebb158b9b4862af2b1ce189444f680bdde9820fe | 2,253 | py | Python | data/cirq_new/cirq_program/startCirq_noisy778.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_noisy778.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_noisy778.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.Z.on(input_qubit[1])) # number=13
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.Z.on(input_qubit[2])) # number=12
c.append(cirq.X.on(input_qubit[3])) # number=17
c.append(cirq.X.on(input_qubit[3])) # number=18
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy778.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 32.652174 | 77 | 0.686196 |
67f3385b9807806cec80c22d8ef52741fa7707fb | 19,730 | py | Python | models/qsar_fub.py | patlewig/httk | 02f5b370d701cb6c1f4b34e1448110b9c4a7174b | [
"MIT"
] | null | null | null | models/qsar_fub.py | patlewig/httk | 02f5b370d701cb6c1f4b34e1448110b9c4a7174b | [
"MIT"
] | null | null | null | models/qsar_fub.py | patlewig/httk | 02f5b370d701cb6c1f4b34e1448110b9c4a7174b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 24 16:22:39 2016
@author: ppradeep
"""
#%%
###########################################################################
## Import libraries
###########################################################################
import os
clear = lambda: os.system('cls')
clear()
## Import packages
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Classifiers
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import svm
from sklearn.neural_network import MLPRegressor
# Machine learning relevant
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.feature_selection import RFE
from sklearn.feature_selection import VarianceThreshold
from sklearn import preprocessing
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import GridSearchCV
import sklearn.metrics as sm
from sklearn.metrics import r2_score
#%%
## User-defined functions
def selectFeatures_VarThresh(X, threshold):
sel = VarianceThreshold(threshold=(threshold * (1 - threshold)))
X_sel = sel.fit_transform(X)
# Convert it into a dataframe
x_tr = pd.DataFrame(X_sel, index = X.index)
x_tr.columns = X.columns[sel.get_support(indices = True)]
return x_tr
## Remove culumns with >80% correlation
def correlation(dataset, threshold):
col_corr = set() # Set of all the names of deleted columns
corr_matrix = dataset.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] >= threshold:
colname = corr_matrix.columns[i] # getting the name of column
col_corr.add(colname)
if colname in dataset.columns:
del dataset[colname] # deleting the column from the dataset
return dataset
# Normalize descriptors: Transform variables to mean=0, variance=1
def normalizeDescriptors(X):
scaler = preprocessing.StandardScaler().fit(X)
transformed = scaler.transform(X)
x_norm = pd.DataFrame(transformed, index = X.index)
x_norm.columns = X.columns
return(x_norm)
def selectFeatures_perc(X, Y, percentile):
model = SelectPercentile(f_classif, percentile)
model = model.fit(X, Y) #convert datatype for use in the fit function
scores = -np.log10(model.pvalues_)
scores /= scores.max()
X_tr = model.transform(X)
## Convert it into a dataframe
X_tr = pd.DataFrame(X_tr, index = X.index)
X_tr.columns = X.columns[model.get_support(indices=True)]
return X_tr
def selectFeatures_RFE(X, Y, n_features_to_select):
model = LinearRegression()
rfe = RFE(model, n_features_to_select )
rfe = rfe.fit(X, Y) #convert datatype for use in the fit function
X_tr = rfe.transform(X)
## Convert it into a dataframe
X_tr = pd.DataFrame(X_tr, index = X.index)
X_tr.columns = X.columns[rfe.get_support(indices=True)]
return X_tr
def returnparams_knn(n_fold, X, Y):
parameters = {'weights':['uniform', 'distance'], 'n_neighbors':[3,4,5], 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute']}
clf = KNeighborsRegressor()
grid_search = GridSearchCV(clf, cv = n_fold, param_grid = parameters)
grid_search.fit(X, Y)
knn_params = grid_search.best_params_
return knn_params
def returnparams_lasso(n_fold, X, Y):
parameters = {'alpha':[0.001, 0.05, 0.1, 1], 'tol': [0.01, 0.001], 'random_state':[5]}
clf = Lasso()
grid_search = GridSearchCV(clf, cv = n_fold, param_grid = parameters)
grid_search.fit(X, Y)
lasso_params = grid_search.best_params_
return lasso_params
def returnparams_svm(n_fold, X, Y):
parameters = {'kernel':['linear', 'rbf'], 'C':[0.1, 1, 10], 'gamma':[0.01, 0.1, 1], 'epsilon': [0.1, 1]}
#parameters = {'kernel':['rbf'], 'C':[10], 'gamma':[0.01], 'epsilon': [0.1]}
clf = svm.SVR()
grid_search = GridSearchCV(clf, cv = 5, param_grid = parameters)
grid_search.fit(X, Y)
svm_params = grid_search.best_params_
return svm_params
def returnparams_rf(n_fold, X, Y):
parameters = {'n_estimators': [250, 500, 750, 1000], 'max_features': ['sqrt', 'auto'], 'random_state':[5]}
#parameters = {'n_estimators': [1000], 'max_features': ['auto'], 'random_state':[5]}
clf = RandomForestRegressor()
grid_search = GridSearchCV(clf, cv = n_fold, param_grid = parameters)
grid_search.fit(X, Y)
rf_params = grid_search.best_params_
return rf_params
def returnparams_gbr(n_fold, X, Y):
parameters = {'n_estimators': [250, 500, 750], 'max_depth': [2,3,4], \
'random_state':[5], 'learning_rate': [0.01, 1], 'loss': ['ls', 'lad']}
clf = GradientBoostingRegressor()
grid_search = GridSearchCV(clf, cv = n_fold, param_grid = parameters)
grid_search.fit(X, Y)
gbr_params = grid_search.best_params_
return gbr_params
def returnparams_mlp(n_fold, X, Y):
parameters = {"solver": ['lbfgs', 'sgd', 'adam'], "activation": ['identity', 'logistic', 'tanh', 'relu'],\
'random_state':[5]}
clf = MLPRegressor()
grid_search = GridSearchCV(clf, cv = n_fold, param_grid = parameters)
grid_search.fit(X, Y)
mlp_params = grid_search.best_params_
return mlp_params
def predict_y(clf, X, Y, n_fold):
y = cross_val_predict(clf, X = X, y = Y, cv = n_fold)
return y
def predict_test_y(clf, X, Y, X_test):
clf = clf.fit(X,Y)
y = clf.predict(X_test)
return y
#%%
###########################################################################
## Set working directory
###########################################################################
path = 'C:/Users/Administrator/OneDrive/Profile/Desktop/HTTK/'
#path = 'Z:/Projects/HTTK/'
#%%
###########################################################################
## Read and analyze input data
###########################################################################
data1 = pd.read_csv(path+'data/Prachi-112117.txt', index_col = 'CAS').loc[:,['All.Compound.Names', 'Human.Funbound.plasma', 'Human.Clint']]
data1.rename(columns={'All.Compound.Names' : 'Name'}, inplace = True)
data2 = pd.read_excel(path+'data/AFFINITY_Model_Results-2018-02-27.xlsx', index_col = 'CAS').loc[:,['Name','Fup.Med']]
data2.rename(columns={'Name': 'All.Compound.Names','Fup.Med':'Human.Funbound.plasma'}, inplace = True)
data3 = pd.read_excel(path+'data/CLint-2018-03-01-Results.xlsx', index_col = 'CAS').loc[:,['Name','CLint.1uM.Median']]
data3.rename(columns={'Name': 'All.Compound.Names','CLint.1uM.Median':'Human.Clint'}, inplace = True)
#%%
## HTTK package data
# Set y variable
y_var = 'Human.Funbound.plasma'
# Create a new dataframe with chemical names and y variable value based on raw data
casList = list(set(data1.index.tolist()+data2.index.tolist()+data3.index.tolist()))
data = pd.DataFrame(index = casList, columns = ['Name',y_var])
# Update the training data. If y value is available from later data (data 2 or 3) use that, if not use from old data (data1)
for cas in data.index:
try:
if cas in data1.index:
data.loc[cas,'Name'] = data1.loc[cas,'Name']
data.loc[cas,y_var] = data1.loc[cas,y_var]
if cas in data2.index:
data.loc[cas,'Name'] = data2.loc[cas,'Name']
data.loc[cas,y_var] = data2.loc[cas,y_var]
except:
pass
data.dropna(inplace = True) #Retain data with y variable values
#%%
## Final Fub Data to Model
data.to_csv(path+'data/2-fub_data.csv', index_label = 'CASRN')
#%%
###########################################################################
## Read AR-ER data to keep those chemicals as an external test set
###########################################################################
#AR data
AR_data = pd.read_excel(path+'data/erar/data/Supplemental File 2_ARpathway_Results_ConfScores_CI_2016-08-30.xlsx', index_col='CASRN')
AR_ACC_columns = [col for col in AR_data if col.endswith('ACC')]
AR_data_subset = AR_data[(AR_data['AUC.Agonist']>0.1) | (AR_data['AUC.Antagonist']>0.1)][AR_ACC_columns]
#ER data
ER_data = pd.read_excel(path+'data/erar/data/S2 ER SuperMatrix 2015-03-24.xlsx', index_col='CASRN')
ER_ACC_columns = [col for col in ER_data if col.endswith('ACC')]
ER_data_subset = ER_data[(ER_data['AUC.Agonist']>0.1) | (ER_data['AUC.Antagonist']>0.1)][ER_ACC_columns]
## Combine ER-AR data
ERARdata = pd.concat([AR_data_subset, ER_data_subset], axis = 1)
ERARdata.replace(1000000, np.nan, inplace = True)
## Separate training data and external test data
trainingData = data.loc[data.index.difference(ERARdata.index)]
externaltestData = data.loc[ERARdata.index]
#%%
## Extract y data
Y = trainingData[y_var]
## Transform Y
#Y = Y[Y!= 0]
Y[Y==1.0] = 0.99
Y[Y==0] = 0.005
Y_model = (1-Y)/Y
Y_model = Y_model.apply(lambda x: np.log10(x))
Y_index = Y_model.index
## Histogram of transformed Y
#plt.gcf().subplots_adjust(bottom=0.5)
#plt.figure(figsize=[12,8], dpi = 300)
#Y_model.hist(bins=20, alpha = 0.8, grid=False)
#plt.annotate('N = %d' %len(Y_model), [-4.5,160], size = 28)
#plt.xticks(fontsize = 24)
#plt.yticks(fontsize = 24)
#plt.xlabel('Transformed Fraction Unbound', size = 36, labelpad = 20)
#plt.ylabel('Frequency', size = 36, labelpad = 20)
#plt.savefig(path+'output/%sTrans_Hist.png'%y_var, bbox_inches='tight')
#%%
###########################################################################
## Read fingerprints and perform feature selection
###########################################################################
## Chemotyper FPs: 779 Toxprints
df_chemotypes = pd.read_csv(path+'data/toxprint.txt', sep = ';', index_col='M_NAME') #Rename 'M_NAME' to 'CAS' in data file
## PubChem FPs: 881 bits
df_pubchem = pd.read_csv(path+'data/pubchem.txt', index_col='row ID')
# combine fingerprints
fingerprints = pd.concat([df_pubchem, df_chemotypes], axis=1)
# Remove culumns with >80% correlation
fingerprints = fingerprints.loc[Y_index,:].dropna()
fingerprints = selectFeatures_VarThresh(fingerprints, 0.80)
fingerprints = correlation(fingerprints, 0.80)
## Continuous descriptors
# OPERA
df_opera = pd.read_csv(path+'data/OPERA2.4_Pred_QSARreadyStructures.csv', index_col='MoleculeID')[['LogP_pred','pKa_a_pred', 'pKa_b_pred']] #In MOE: Right click on mol -> Name -> Extract -> new field 'CAS'
df_opera['pKa_pred']=df_opera[['pKa_a_pred','pKa_b_pred']].min(axis=1)
opera = normalizeDescriptors(df_opera)
# PADEL descriptors
df_padel = pd.read_csv(path+'data/padel.txt', index_col='Name')
df_padel = df_padel.loc[Y_index,:].dropna(axis=0, how='any') #drop columns that do not have PadDEL descriptors calculated
padel = normalizeDescriptors(df_padel)
# CDK descriptors
df_cdk = pd.read_csv(path+'data/cdk.txt', index_col='row ID') #Add CAS column to file
df_cdk = df_cdk.loc[Y_index,:].dropna(axis=0, how='any') #drop columns that do not have Y data or could not be calculated
cdk = normalizeDescriptors(df_cdk)
# Combine descriptors
descriptors = pd.concat([padel, cdk], axis=1).dropna()
# Drop correlated descriptors
descriptors = correlation(descriptors, 0.80)
# Select 10 descriptors
descriptors = selectFeatures_RFE(descriptors, Y.loc[descriptors.index], 10)
#descriptors = selectFeatures_perc(descriptors, Y_model.loc[descriptors.index], 1)
## Output file to capture the descriptors in the model for external predictions
features = pd.DataFrame({'Fingerprints': [fingerprints.columns.values.tolist()], 'opera': [opera.columns.values.tolist()], 'Padel+CDK': [descriptors.columns.values.tolist()]})
features.to_csv(path+'output/%s_Features.csv' %y_var)
#%%
###########################################################################
## Combine all the descriptors
###########################################################################
#1
#X_model = pd.concat([fingerprints], axis=1).dropna() #moe, descriptors, dft
#2
#X_model = pd.concat([fingerprints, opera[['LogP_pred', 'pKa_pred']]], axis=1).dropna() #moe, descriptors, dft
#3
X_model = pd.concat([fingerprints, opera[['LogP_pred', 'pKa_pred']], descriptors], axis=1).dropna() #moe, descriptors, dft
###########################################################################
## Select the training and validation set
###########################################################################
index_random = X_model.index.values.tolist()
np.random.RandomState(40).shuffle(index_random) #set the seed to 40 to replicate results
n_idx = int(80*len(X_model)/100)
Y_train, Y_test = Y_model.ix[index_random[:n_idx]], Y_model.ix[index_random[n_idx:]]
X_train, X_test = X_model.ix[index_random[:n_idx]], X_model.ix[index_random[n_idx:]]
## Histogram of FINAL training and test data superimposed on each other
sigma_train = np.std(Y_train)
sigma_test = np.std(Y_test)
plt.figure(figsize=(8, 6), dpi = 200)
Y_train.hist(label = 'Training (n = %d | $\sigma$ = %0.2f)' %(len(Y_train), sigma_train), alpha = 0.75, color = 'r')
Y_test.hist(label = 'Test (n = %d| $\sigma$ = %0.2f)' %(len(Y_test), sigma_test), alpha = 0.75, color = 'g')
plt.xlabel('POD$_{tr}$', size = 24, labelpad = 10)
plt.ylabel('Frequency', size = 24, labelpad = 10)
plt.xticks(fontsize = 24)#, rotation = 90)
plt.yticks(fontsize = 24)
plt.legend(fontsize = 14, loc='upper left')
plt.savefig(path+'/output/%s_TrainTestDist3.png' %y_var, bbox_inches='tight')
plt.show()
#%%
## Evaluate the hyper-parameters of each model
n_fold = 5
lasso_params = returnparams_lasso(n_fold, X_train, Y_train)
svm_params = returnparams_svm(n_fold, X_train, Y_train)
rf_params = returnparams_rf(n_fold, X_train, Y_train)
mlp_params = returnparams_mlp(n_fold, X_train, Y_train)
classifiers = [Lasso(**lasso_params),\
svm.SVR(**svm_params),\
RandomForestRegressor(**rf_params),\
MLPRegressor(**mlp_params)
]
## Make predictions
Y_predicted = pd.DataFrame(index = Y_train.index, columns = [str(clf).split('(')[0] for clf in classifiers])
Y_test_predicted = pd.DataFrame(index = Y_test.index, columns = [str(clf).split('(')[0] for clf in classifiers])
for clf in classifiers:
# 5-fold internal cross-validation
predicted = predict_y(clf, X_train, Y_train, n_fold)
Y_predicted.loc[:,str(clf).split('(')[0]] = predicted
# Fit model on entire training data and make predictions for test set
predicted = predict_test_y(clf, X_train, Y_train, X_test)
Y_test_predicted.loc[:,str(clf).split('(')[0]] = predicted
Y_predicted['Consensus (All)'] = Y_predicted.mean(axis = 1)
Y_test_predicted['Consensus (All)'] = Y_test_predicted.mean(axis = 1)
Y_predicted['Consensus (SVM,RF)'] = Y_predicted[['SVR', 'RandomForestRegressor']].mean(axis = 1)
Y_test_predicted['Consensus (SVM,RF)'] = Y_test_predicted[['SVR', 'RandomForestRegressor']].mean(axis = 1)
Y_predicted['Consensus (Lasso,RF)'] = Y_predicted[['Lasso', 'RandomForestRegressor']].mean(axis = 1)
Y_test_predicted['Consensus (Lasso,RF)'] = Y_test_predicted[['Lasso', 'RandomForestRegressor']].mean(axis = 1)
Y_predicted['Consensus (MLP,RF)'] = Y_predicted[['MLPRegressor', 'RandomForestRegressor']].mean(axis = 1)
Y_test_predicted['Consensus (MLP,RF)'] = Y_test_predicted[['MLPRegressor', 'RandomForestRegressor']].mean(axis = 1)
columns = ['MAE_int','RMSE_int', 'RMSE/sigma_int','R2_int', 'MAE_ext','RMSE_ext', 'RMSE/sigma_ext','R2_ext', 'params', 'coverage']
metrics = pd.DataFrame(index = Y_predicted.columns, columns = columns)
for key in Y_predicted:
# save params
if 'Lasso' in key:
metrics.loc[key, 'params'] = [lasso_params]
if 'SVR' in key:
metrics.loc[key, 'params'] = [svm_params]
if 'Random' in key:
metrics.loc[key, 'params'] = [rf_params]
if 'MLP' in key:
metrics.loc[key, 'params'] = [mlp_params]
# coverage
metrics.loc[key, 'coverage'] = [len(Y_predicted), len(Y_test_predicted)] #training, test
# internal
metrics.loc[key, 'MAE_int'] = round(sm.mean_absolute_error(Y_train, Y_predicted[key]),2)
metrics.loc[key, 'RMSE_int'] = round(np.sqrt(sm.mean_squared_error(Y_train, Y_predicted[key])),2)
metrics.loc[key, 'RMSE/sigma_int'] = round(np.sqrt(sm.mean_squared_error(Y_train, Y_predicted[key]))/np.std(Y_train),2)
metrics.loc[key, 'R2_int'] = round(r2_score(Y_train, Y_predicted[key]),2)
# external
metrics.loc[key, 'MAE_ext'] = round(sm.mean_absolute_error(Y_test, Y_test_predicted[key]),2)
metrics.loc[key, 'RMSE_ext'] = round(np.sqrt(sm.mean_squared_error(Y_test, Y_test_predicted[key])),2)
metrics.loc[key, 'RMSE/sigma_ext'] = round(np.sqrt(sm.mean_squared_error(Y_test, Y_test_predicted[key]))/np.std(Y_test),2)
metrics.loc[key, 'R2_ext'] = round(r2_score(Y_test, Y_test_predicted[key]),2)
metrics.to_csv(path+'output/%s_Metrics3.csv' %y_var)
#%%
## Plot true versus predicted for the winning consensus models X selection #2
# Internal
plt.figure(figsize=[10,8], dpi = 300) #figsize=[12,8], dpi = 300
plt.plot(Y_train, Y_train, 'k', label = '')
# training set
plt.scatter(Y_train, Y_predicted['Consensus (SVM,RF)'], alpha = 0.3, color = 'r', s = 25, label = None)
plt.plot([Y_train.min(), Y_train.max()-sigma_train],[Y_train.min()+sigma_train, Y_train.max()],'r', label = '$\pm1 \sigma$(training) error interval', linestyle = '--')
plt.plot([Y_train.min(),Y_train.max()],[Y_train.min()-sigma_train, Y_train.max()-sigma_train],'r', linestyle = '--', label = None)
# PUT ERROR bar = 0.4 unit on Y_train['32385-11-8']
plt.errorbar(x = Y_train.ix['32385-11-8'], xerr = 0.4, y = Y_predicted['Consensus (SVM,RF)'].ix['32385-11-8']\
,fmt = 'o', ecolor = 'r', color = 'r', markersize='8', alpha=1, label = None)#, label = 'Observed Error')
# test set
plt.scatter(Y_test, Y_test_predicted['Consensus (SVM,RF)'], marker = 's', alpha = 0.3, color = 'b', s = 25, label = None)
plt.plot([Y_train.min(), Y_train.max()-sigma_test],[Y_train.min()+sigma_test, Y_train.max()],'b', label = '$\pm1 \sigma$(test) error interval', linestyle = ':')
plt.plot([Y_train.min(),Y_train.max()],[Y_train.min()-sigma_test, Y_train.max()-sigma_test],'b', linestyle = ':', label = None)
plt.xlim([Y_train.min(), Y_train.max()])
plt.ylim([Y_train.min(), Y_train.max()])
#training
plt.annotate('$RMSE (Training):$ %.2f' %metrics.loc['Consensus (SVM,RF)', 'RMSE_int'], [Y_train.min()+0.1, Y_train.max()-0.5], fontsize = 22)
plt.annotate('$R^{2} (Training):$ %.2f' %metrics.loc['Consensus (SVM,RF)', 'R2_int'], [Y_train.min()+0.1, Y_train.max()-1], fontsize = 22)
#test
plt.annotate('$RMSE (Test):$ %.2f' %metrics.loc['Consensus (SVM,RF)', 'RMSE_ext'], [Y_train.min()+0.1, Y_train.max()-1.75], fontsize = 22)
plt.annotate('$R^{2} (Test):$ %.2f' %metrics.loc['Consensus (SVM,RF)', 'R2_ext'], [Y_train.min()+0.1, Y_train.max()-2.25], fontsize = 22)
plt.legend(loc='lower right', numpoints = 2, scatterpoints = 1, fontsize = 15)
plt.xlabel('Observed', size = 36, labelpad = 20)
plt.ylabel('Predicted', size = 36, labelpad = 20)
plt.xticks(fontsize = 24)
plt.yticks(fontsize = 24)
plt.savefig(path+'/output/RF-SVM_TvsP_%s2.jpg' %(y_var), bbox_inches='tight')
#%%
| 45.356322 | 206 | 0.640193 |
ee393bb19e84042dde473932678b17f7b78e0c11 | 1,881 | py | Python | src/adafruit_blinka/microcontroller/pico_u2if/i2c.py | caternuson/Adafruit_Blinka | 120c7a7f4c7559ede6a7d098e4800663381fc93d | [
"MIT"
] | 1 | 2020-11-28T18:22:32.000Z | 2020-11-28T18:22:32.000Z | src/adafruit_blinka/microcontroller/pico_u2if/i2c.py | caternuson/Adafruit_Blinka | 120c7a7f4c7559ede6a7d098e4800663381fc93d | [
"MIT"
] | null | null | null | src/adafruit_blinka/microcontroller/pico_u2if/i2c.py | caternuson/Adafruit_Blinka | 120c7a7f4c7559ede6a7d098e4800663381fc93d | [
"MIT"
] | null | null | null | """I2C Class for Pico u2if"""
from .pico_u2if import pico_u2if
class I2C:
"""Custom I2C Class for Pico u2if"""
def __init__(self, scl, sda, *, frequency=100000):
index = None
if scl.id == 5 and sda.id == 4:
index = 0
if scl.id == 15 and sda.id == 14:
index = 1
if index is None:
raise ValueError("I2C not found on specified pins.")
self._index = index
pico_u2if.i2c_set_port(self._index)
pico_u2if.i2c_configure(frequency)
def scan(self):
"""Perform an I2C Device Scan"""
pico_u2if.i2c_set_port(self._index)
return pico_u2if.i2c_scan()
# pylint: disable=unused-argument
def writeto(self, address, buffer, *, start=0, end=None, stop=True):
"""Write data from the buffer to an address"""
pico_u2if.i2c_set_port(self._index)
pico_u2if.i2c_writeto(address, buffer, start=start, end=end)
def readfrom_into(self, address, buffer, *, start=0, end=None, stop=True):
"""Read data from an address and into the buffer"""
pico_u2if.i2c_set_port(self._index)
pico_u2if.i2c_readfrom_into(address, buffer, start=start, end=end)
def writeto_then_readfrom(
self,
address,
buffer_out,
buffer_in,
*,
out_start=0,
out_end=None,
in_start=0,
in_end=None,
stop=False
):
"""Write data from buffer_out to an address and then
read data from an address and into buffer_in
"""
pico_u2if.i2c_set_port(self._index)
pico_u2if.i2c_writeto_then_readfrom(
address,
buffer_out,
buffer_in,
out_start=out_start,
out_end=out_end,
in_start=in_start,
in_end=in_end,
)
# pylint: enable=unused-argument
| 29.857143 | 78 | 0.592238 |
f471689396ece0bca8a5485020f0f1accadff522 | 1,059 | py | Python | python3/learn-python/take_screenshot.py | Nahid-Hassan/code-snippets | 24bd4b81564887822a0801a696001fcbeb6a7a75 | [
"MIT"
] | 2 | 2020-09-29T04:09:41.000Z | 2020-10-18T13:33:36.000Z | python3/learn-python/take_screenshot.py | Nahid-Hassan/code-snippets | 24bd4b81564887822a0801a696001fcbeb6a7a75 | [
"MIT"
] | null | null | null | python3/learn-python/take_screenshot.py | Nahid-Hassan/code-snippets | 24bd4b81564887822a0801a696001fcbeb6a7a75 | [
"MIT"
] | 1 | 2021-12-26T04:55:55.000Z | 2021-12-26T04:55:55.000Z | from selenium import webdriver
from time import sleep

# Launch a visible Chrome window driven by chromedriver (must be on PATH).
driver = webdriver.Chrome()
driver.get('https://www.coursera.org/learn/understanding-visualization-data/lecture/KeSCz/what-is-statistics')
# Give the page a moment to render before capturing.
sleep(1)
# Save a screenshot of the current viewport to the working directory.
driver.get_screenshot_as_file("screenshot.png")

# The string below is an alternative, headless full-page variant kept for
# reference; it is never executed.
'''
#coding=utf-8
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
options = webdriver.ChromeOptions()
options.headless = True
driver = webdriver.Chrome(options=options)
URL = 'https://pythonbasics.org'
driver.get(URL)
S = lambda X: driver.execute_script('return document.body.parentNode.scroll'+X)
driver.set_window_size(S('Width'),S('Height')) # May need manual adjustment
driver.find_element_by_tag_name('body').screenshot('web_screenshot.png')
driver.quit()
'''
| 34.16129 | 187 | 0.574127 |
8a7a2e093a97f1d623d6e259d86853a2df5fab9e | 548 | py | Python | output/models/nist_data/list_pkg/date/schema_instance/nistschema_sv_iv_list_date_pattern_2_xsd/nistschema_sv_iv_list_date_pattern_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/date/schema_instance/nistschema_sv_iv_list_date_pattern_2_xsd/nistschema_sv_iv_list_date_pattern_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/date/schema_instance/nistschema_sv_iv_list_date_pattern_2_xsd/nistschema_sv_iv_list_date_pattern_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-date-pattern-2-NS"
@dataclass
class NistschemaSvIvListDatePattern2:
    """Generated-style data binding for the NISTSchema
    ``SV-IV-list-date-pattern-2`` element (an ``xs:list`` of date tokens
    restricted by a pattern facet).
    """

    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "NISTSchema-SV-IV-list-date-pattern-2"
        namespace = "NISTSchema-SV-IV-list-date-pattern-2-NS"

    # Whitespace-separated date tokens; the whole list must match the
    # pattern facet below ("tokens": True marks this as an xs:list).
    value: List[str] = field(
        default_factory=list,
        metadata={
            "pattern": r"\d\d56-\d1-0\d \d\d57-0\d-2\d \d\d49-1\d-\d4 18\d\d-\d3-2\d 19\d\d-\d7-\d7 19\d\d-1\d-0\d",
            "tokens": True,
        }
    )
| 27.4 | 116 | 0.616788 |
8d473b19dd9dc30625aaeecec9aa08300173f99b | 10,577 | py | Python | beyond/frames/frames.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | [
"MIT"
] | null | null | null | beyond/frames/frames.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | [
"MIT"
] | null | null | null | beyond/frames/frames.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module define the Frames available for computation and their relations
to each other.
The relations may be circular, thanks to the use of the Node class.
.. code-block:: text
,---. ,-------. ,----.
|G50|---bias---|EME2000|..bias..|GCRF|
`---' `-------' `----'
| |
Precession |
| |
,---. Precession
|MOD| +
`---' Nutation
| + model corrections
Nutation |
+ model corrections |
| |
,----. ,---. ,----.
|TEME|--Equinox--|TOD| |CIRF|
`----' `---' `----'
| |
Sideral time Sideral time
| |
,---. ,----.
|PEF| |TIRF|
`---' `----'
\\ /
IAU 1980 IAU 2010
Earth Orientation Earth Orientation
Parameters Parameters
\\ /
,-----. ,----.
|WGS84|--identity--|ITRF|
`-----' `----'
"""
import sys
import logging
import numpy as np
from ..errors import UnknownFrameError
from ..constants import Earth
from ..utils.matrix import rot3
from ..utils.node import Node
from . import iau1980, iau2010
from .local import to_qsw, to_tnw
CIO = ["ITRF", "TIRF", "CIRF", "GCRF"]
IAU1980 = ["TOD", "MOD"]
OTHER = ["EME2000", "TEME", "WGS84", "PEF", "G50"]
__all__ = CIO + IAU1980 + OTHER + ["get_frame"]
log = logging.getLogger(__name__)
class FrameCache(dict):
    """Dict subclass that also exposes its entries as attributes.

    It emulates module behavior for dynamically created frames, which is
    useful when pickle is involved (e.g. multiprocessing).
    """

    def __getattr__(self, name):
        # Attribute access falls back to item access; unknown names must
        # raise AttributeError (not KeyError) to honor the protocol.
        if name in self:
            return self[name]
        raise AttributeError(name)
# Global registry of every frame class: the ones defined in this module
# plus any created on the fly (see orbit2frame and _MetaFrame).
dynamic = FrameCache()
"""This dictionary contains all the frames. Those defined here, and those created on the fly
by the developer.
"""

# Register the cache as a pseudo-module so that frame classes whose
# __module__ points to it can be found again (useful with pickle,
# per the FrameCache docstring).
sys.modules[__name__ + ".dynamic"] = dynamic
def get_frame(frame):
    """Frame factory

    Args:
        frame (str): name of the desired frame
    Return:
        ~beyond.frames.frames.Frame
    Raise:
        UnknownFrameError: when no frame is registered under that name
    """
    if frame in dynamic:
        return dynamic[frame]
    raise UnknownFrameError(frame)
class _MetaFrame(type, Node):
    """This MetaClass is here to join the behaviors of ``type`` and ``Node``
    """

    def __init__(cls, name, bases, dct):
        # 'bypass' silences the re-registration warning below; it is
        # popped so it does not leak into the class namespace handling.
        bypass = dct.pop("bypass", False)
        # Initialize the ``type`` half of the metaclass first...
        super(_MetaFrame, cls).__init__(name, bases, dct)
        # ...then the ``Node`` half: super(type, cls) skips ``type`` in
        # the MRO, so this resolves to Node.__init__(name).
        super(type, cls).__init__(name)

        if not bypass and cls.__name__ in dynamic:
            log.warning(
                "A frame with the name '%s' is already registered. Overriding"
                % cls.__name__
            )

        # Pretend the class lives in the 'dynamic' pseudo-module
        # registered in sys.modules above.
        cls.__module__ = __name__ + ".dynamic"

        # Making the frame available to the get_frame function
        dynamic[cls.__name__] = cls

    def __repr__(cls):  # pragma: no cover
        return "<Frame '{}'>".format(cls.name)
class Frame(metaclass=_MetaFrame):
    """Frame base class
    """

    # Default center body of the frame
    center = Earth

    def __init__(self, date, orbit):
        """
        Args:
            date (~beyond.utils.Date)
            orbit (numpy.ndarray)
        """
        self.date = date
        self.orbit = orbit

    def __str__(self):  # pragma: no cover
        return self.name

    def __repr__(self):  # pragma: no cover
        return "<Frame obj '{}'>".format(self.__class__.__name__)

    @classmethod
    def _convert(cls, x=None, y=None):
        # Build a 6x6 block-diagonal matrix from two 3x3 matrices:
        # ``x`` acts on the position part, ``y`` on the velocity part.
        m = np.identity(6)
        if x is not None:
            m[:3, :3] = x
        if y is not None:
            m[3:, 3:] = y
        return m

    def transform(self, new_frame):
        """Change the frame of the orbit

        Args:
            new_frame (str)
        Return:
            numpy.ndarray
        """
        # Chain of intermediate frames from this frame to the target,
        # as computed by the Node machinery of the metaclass.
        steps = self.__class__.steps(new_frame)

        orbit = self.orbit
        for _from, _to in steps:

            from_obj = _from(self.date, orbit)
            direct = "_to_%s" % _to

            if hasattr(from_obj, direct):
                # The source frame knows how to convert directly.
                rotation, offset = getattr(from_obj, direct)()
            else:
                # Otherwise use the inverse conversion defined on the
                # target frame: transpose the rotation, negate the offset.
                to_obj = _to(self.date, orbit)
                inverse = "_to_%s" % _from
                if hasattr(to_obj, inverse):
                    rotation, offset = getattr(to_obj, inverse)()
                    rotation = rotation.T
                    offset = -offset
                else:
                    raise NotImplementedError(
                        "Unknown transformation {} to {}".format(_from, _to)
                    )

            if getattr(_from, "_rotation_before_translation", False):
                # In case of topocentric frame, the rotation is done before the translation
                orbit = offset + (rotation @ orbit)
            else:
                orbit = rotation @ (offset + orbit)

        return orbit
class TEME(Frame):
    """True Equator Mean Equinox"""

    orientation = "TEME"

    def _to_TOD(self):
        # Rotation about z by the (negated) equation of the equinoxes,
        # from the IAU 1980 model: no EOP correction, 4 terms, no
        # kinematic term.
        equin = iau1980.equinox(
            self.date, eop_correction=False, terms=4, kinematic=False
        )
        m = rot3(-np.deg2rad(equin))
        # Same rotation for position and velocity; no translation.
        return self._convert(m, m), np.zeros(6)
class GTOD(Frame):
    """Greenwich True Of Date

    No conversion method is defined here, so this frame is declared but
    not connected to the frame tree built at the bottom of the module.
    """

    orientation = "GTOD"
class WGS84(Frame):
    """World Geodetic System 1984"""

    orientation = "WGS84"

    def _to_ITRF(self):
        # WGS84 and ITRF are treated as identical (identity transform).
        return np.identity(6), np.zeros(6)
class PEF(Frame):
    """Pseudo Earth Fixed"""

    orientation = "PEF"

    def _to_TOD(self):
        # Apparent sidereal rotation (IAU 1980, no EOP correction).
        m = iau1980.sideral(self.date, model="apparent", eop_correction=False)
        offset = np.zeros(6)
        # Velocity correction for the rotating frame: w x r.
        offset[3:] = np.cross(iau1980.rate(self.date), self.orbit[:3])
        return self._convert(m, m), offset
class TOD(Frame):
    """True (Equator) Of Date"""

    orientation = "TOD"

    def _to_MOD(self):
        # IAU 1980 nutation, without EOP correction.
        m = iau1980.nutation(self.date, eop_correction=False)
        return self._convert(m, m), np.zeros(6)
class MOD(Frame):
    """Mean (Equator) Of Date"""

    orientation = "MOD"

    def _to_EME2000(self):
        # IAU 1980 precession.
        m = iau1980.precesion(self.date)
        return self._convert(m, m), np.zeros(6)
class EME2000(Frame):
    """EME2000 inertial frame (also known as J2000)"""

    # Root inertial frame of the IAU 1980 branch; conversions to it are
    # defined on the neighboring frames (MOD, G50).
    orientation = "EME2000"
class ITRF(Frame):
    """International Terrestrial Reference Frame"""

    orientation = "ITRF"

    def _to_PEF(self):
        # Earth orientation correction, IAU 1980 convention.
        m = iau1980.earth_orientation(self.date)
        return self._convert(m, m), np.zeros(6)

    def _to_TIRF(self):
        # Earth orientation correction, IAU 2010 convention.
        m = iau2010.earth_orientation(self.date)
        return self._convert(m, m), np.zeros(6)
class TIRF(Frame):
    """Terrestrial Intermediate Reference Frame"""

    orientation = "TIRF"

    def _to_CIRF(self):
        # IAU 2010 sidereal rotation.
        m = iau2010.sideral(self.date)
        offset = np.zeros(6)
        # Velocity correction for the rotating frame: w x r.
        offset[3:] = np.cross(iau2010.rate(self.date), self.orbit[:3])
        return self._convert(m, m), offset
class CIRF(Frame):
    """Celestial Intermediate Reference Frame"""

    orientation = "CIRF"

    def _to_GCRF(self):
        # Combined precession/nutation, IAU 2010 convention.
        m = iau2010.precesion_nutation(self.date)
        return self._convert(m, m), np.zeros(6)
class GCRF(Frame):
    """Geocentric Celestial Reference Frame"""

    # Root inertial frame of the IAU 2010 (CIO) branch.
    orientation = "GCRF"
class G50(Frame):
    """Gamma50 Reference Frame
    """

    orientation = "G50"

    def _to_EME2000(self):
        # Constant rotation (bias) matrix between Gamma50 and EME2000.
        m = [
            [0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
            [0.0111814832391717, 0.9999374848933135, -0.0000271625947142],
            [0.0048590037723143, -0.0000271702937440, 0.9999881946023742],
        ]

        return self._convert(m, m), np.zeros(6)
def orbit2frame(name, ref_orbit, orientation=None, center=None, bypass=False):
    """Create a frame based on a Orbit or Ephem object.

    Args:
        name (str): Name to give the created frame
        ref_orbit (Orbit or Ephem):
        orientation (str): Orientation of the created frame
        center: Center body of the created frame (defaults to Earth)
        bypass (bool): By-pass the warning when creating a frame with an already
            taken name
    Return:
        Frame:

    If orientation is ``None``, the new frame will keep the orientation of the
    reference frame of the Orbit and move along with the orbit.
    Other acceptable values are ``"QSW"`` (and its aliases "LVLH" and "RSW") or ``"TNW"``.
    See :py:func:`~beyond.frames.local.to_qsw` and :py:func:`~beyond.frames.local.to_tnw`
    for informations regarding these orientations.
    """

    if orientation is None:
        orientation = ref_orbit.frame.orientation
    elif orientation.upper() in ("RSW", "LVLH"):
        # Aliases of QSW
        orientation = "QSW"
    elif orientation.upper() not in ("QSW", "TNW"):
        raise ValueError("Unknown orientation '%s'" % orientation)

    if center is None:
        center = Earth

    def _to_parent_frame(self):
        """Conversion from orbit frame to parent frame
        """
        # Offset between the two frames: the reference orbit propagated
        # to the date of the orbit being converted.
        offset = ref_orbit.propagate(self.date).base.copy()

        if orientation.upper() in ("QSW", "TNW"):
            # propagation of the reference orbit to the date of the
            # converted orbit
            orb = ref_orbit.propagate(self.date)
            m = to_qsw(orb) if orientation.upper() == "QSW" else to_tnw(orb)
            # we transpose the matrix because it represents the conversion
            # from inertial to local frame, and we'd like the other way around
            rotation = Frame._convert(m, m).T
        else:
            # The orientation is the same as the parent reference frame
            rotation = np.identity(6)

        return rotation, offset

    # define the name of the method of conversion
    mtd = "_to_%s" % ref_orbit.frame.__name__

    # dictionary which defines attributes of the created class
    dct = {
        mtd: _to_parent_frame,
        "orientation": orientation,
        "center": center,
        "bypass": bypass,
    }

    # Creation of the class
    cls = _MetaFrame(name, (Frame,), dct)
    # Link to the parent
    cls + ref_orbit.frame

    return cls
# Build the frame relation tree (see the diagram in the module docstring),
# using Node's ``+`` operator to link neighboring frames together.
WGS84 + ITRF + PEF + TOD + MOD + EME2000
TOD + TEME
# EME2000 + GCRF
ITRF + TIRF + CIRF + GCRF
EME2000 + G50
| 26.575377 | 92 | 0.549967 |
4505ce2c2814e4238ec64be05abad48ce9b3561c | 1,563 | py | Python | tests/links_tests/model_tests/fpn_tests/test_fpn.py | souravsingh/chainercv | 8f76510472bc95018c183e72f37bc6c34a89969c | [
"MIT"
] | null | null | null | tests/links_tests/model_tests/fpn_tests/test_fpn.py | souravsingh/chainercv | 8f76510472bc95018c183e72f37bc6c34a89969c | [
"MIT"
] | null | null | null | tests/links_tests/model_tests/fpn_tests/test_fpn.py | souravsingh/chainercv | 8f76510472bc95018c183e72f37bc6c34a89969c | [
"MIT"
] | 1 | 2019-03-21T04:29:59.000Z | 2019-03-21T04:29:59.000Z | from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import FPN
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class DummyExtractor(chainer.Link):
    # Per-channel normalization mean, exposed like a real extractor would.
    mean = _random_array(np, (3, 1, 1))

    def __call__(self, x):
        # Emulate a 3-level backbone: feature maps at strides 2, 4 and 8
        # with 16, 32 and 64 channels respectively, filled with random
        # values (content is irrelevant for the FPN shape tests).
        n, _, h, w = x.shape
        return [
            chainer.Variable(_random_array(self.xp, (n, 16, h // 2, w // 2))),
            chainer.Variable(_random_array(self.xp, (n, 32, h // 4, w // 4))),
            chainer.Variable(_random_array(self.xp, (n, 64, h // 8, w // 8))),
        ]
class TestFPN(unittest.TestCase):

    def setUp(self):
        # FPN on top of the dummy backbone, one scale per base output.
        self.link = FPN(
            base=DummyExtractor(),
            n_base_output=3,
            scales=(1 / 2, 1 / 4, 1 / 8))

    def test_mean(self):
        # The FPN must expose its backbone's normalization mean unchanged.
        np.testing.assert_equal(self.link.mean, self.link.base.mean)

    def _check_call(self):
        # 32x32 input -> pyramid levels of 16, 8 and 4 pixels, each with
        # 256 channels.
        x = _random_array(self.link.xp, (2, 3, 32, 32))
        hs = self.link(x)

        self.assertEqual(len(hs), 3)
        for l in range(len(hs)):
            self.assertIsInstance(hs[l], chainer.Variable)
            self.assertIsInstance(hs[l].array, self.link.xp.ndarray)
            self.assertEqual(hs[l].shape, (2, 256, 16 >> l, 16 >> l))

    def test_call_cpu(self):
        self._check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()
testing.run_module(__name__, __file__)
| 25.622951 | 78 | 0.598848 |
b5f27de61dcaa2e20d5708c51eef860e3f0df564 | 12,466 | py | Python | virtual/lib/python2.7/site-packages/ww/tools/iterables.py | DK-denno/awwards | af20f2da2bccf066a4da3fb4aa67ad839bae2af9 | [
"MIT"
] | 15 | 2016-10-15T10:15:08.000Z | 2021-04-06T08:31:02.000Z | virtual/lib/python2.7/site-packages/ww/tools/iterables.py | DK-denno/awwards | af20f2da2bccf066a4da3fb4aa67ad839bae2af9 | [
"MIT"
] | 7 | 2016-10-14T08:53:29.000Z | 2016-11-09T23:43:31.000Z | virtual/lib/python2.7/site-packages/ww/tools/iterables.py | DK-denno/awwards | af20f2da2bccf066a4da3fb4aa67ad839bae2af9 | [
"MIT"
] | 3 | 2016-10-13T11:44:46.000Z | 2016-10-14T08:58:03.000Z | # coding: utf-8
"""
:doc:`g() </iterable_wrapper>` is very convenient, but it's only a
thin wrapper on top of the tools from this module.
So if you want to apply some of the goodies from it without having to
turn your iterables into IterableWrapper objects, you can use the functions
from this module directly.
Example:
>>> from ww.tools.iterables import chunks # same as g().chunks()
>>> list(chunks(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]
You'll find bellow the detailed documentation for each functions. Remember
they all take an iterable as input, and most often ouput a generator.
Go have a look, there is some great stuff here!
"""
from __future__ import division, absolute_import, print_function
import itertools
from future.utils import raise_from
import ww
from ww.types import Union, Callable, Iterable, Any, T # noqa
from ww.utils import renamed_argument
from collections import deque
# TODO: implement all https://docs.python.org/3/library/itertools.html
# which means backports and receipes
# TODO: cycle, but accept a max repeat
# TODO: filter(), but:
#   if the first element is an iterable: lambda x: x in first_element
#   if it is a non-callable scalar: lambda x: x == first_element
#   plus a 3rd param taking an Exception or a list of exceptions to ignore,
#   so you can filter out items raising exceptions
# TODO: map, but with a 3rd param taking an Exception or a list of exceptions
# to ignore, so you can filter out items raising exceptions
def starts_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Start yielding items when a condition arise.

    Args:
        iterable: the iterable to filter.
        condition: if the callable returns True once, start yielding
                   items. If it's not a callable, it will be converted
                   to one as `lambda condition: condition == item`.

    Example:

        >>> list(starts_when(range(10), lambda x: x > 5))
        [6, 7, 8, 9]
        >>> list(starts_when(range(10), 7))
        [7, 8, 9]
    """
    if callable(condition):
        predicate = condition
    else:
        expected = condition

        def predicate(item):
            return item == expected

    return itertools.dropwhile(lambda item: not predicate(item), iterable)
def stops_when(iterable, condition):
    # type: (Iterable, Union[Callable, Any]) -> Iterable
    """Stop yielding items when a condition arise.

    Args:
        iterable: the iterable to filter.
        condition: if the callable returns True once, stop yielding
                   items. If it's not a callable, it will be converted
                   to one as `lambda condition: condition == item`.

    Example:

        >>> list(stops_when(range(10), lambda x: x > 5))
        [0, 1, 2, 3, 4, 5]
        >>> list(stops_when(range(10), 7))
        [0, 1, 2, 3, 4, 5, 6]
    """
    if callable(condition):
        predicate = condition
    else:
        expected = condition

        def predicate(item):
            return item == expected

    return itertools.takewhile(lambda item: not predicate(item), iterable)
def skip_duplicates(iterable, key=None, fingerprints=()):
    # type: (Iterable, Callable, Any) -> Iterable
    """
    Returns a generator that will yield all objects from iterable, skipping
    duplicates.

    Duplicates are identified using the `key` function to calculate a
    unique fingerprint. This does not use natural equality, but the
    result use a set() to remove duplicates, so defining __eq__
    on your objects would have no effect.

    By default the fingerprint is the object itself,
    which ensure the functions works as-is with an iterable of primitives
    such as int, str or tuple.

    :Example:

        >>> list(skip_duplicates([1, 2, 3, 4, 4, 2, 1, 3 , 4]))
        [1, 2, 3, 4]

    The return value of `key` MUST be hashable, which means for
    non hashable objects such as dict, set or list, you need to specify
    a function that returns a hashable fingerprint.

    :Example:

        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ...                      lambda x: tuple(x)))
        [[], [1, 2]]
        >>> list(skip_duplicates(([], [], (), [1, 2], (1, 2)),
        ...                      lambda x: (type(x), tuple(x))))
        [[], (), [1, 2], (1, 2)]

    For more complex types, such as custom classes, the default behavior
    is to remove nothing. You MUST provide a `key` function if you wish
    to filter those.

    :Example:

        >>> class Test(object):
        ...     def __init__(self, foo='bar'):
        ...         self.foo = foo
        ...     def __repr__(self):
        ...         return "Test('%s')" % self.foo
        ...
        >>> list(skip_duplicates([Test(), Test(), Test('other')]))
        [Test('bar'), Test('bar'), Test('other')]
        >>> list(skip_duplicates([Test(), Test(), Test('other')], lambda x: x.foo))
        [Test('bar'), Test('other')]
    """
    fingerprints = fingerprints or set()

    fingerprint = None  # needed on type errors unrelated to hashing

    try:
        # duplicate some code to gain perf in the most common case
        if key is None:
            for x in iterable:
                if x not in fingerprints:
                    yield x
                    fingerprints.add(x)
        else:
            for x in iterable:
                fingerprint = key(x)
                if fingerprint not in fingerprints:
                    yield x
                    fingerprints.add(fingerprint)
    except TypeError:
        # If the fingerprint itself is unhashable, the TypeError came from
        # the set membership test: raise a friendlier message pointing at
        # the `key` function. Otherwise re-raise the original error.
        try:
            hash(fingerprint)
        except TypeError:
            raise TypeError(
                "The 'key' function returned a non hashable object of type "
                "'%s' when receiving '%s'. Make sure this function always "
                "returns a hashable object. Hint: immutable primitives like"
                "int, str or tuple, are hashable while dict, set and list are "
                "not." % (type(fingerprint), x))
        else:
            raise
# TODO: test that on big iterators to check for recursion limit
def chunks(iterable, chunksize, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields items from an iterator in iterable chunks.

    Each chunk holds up to ``chunksize`` items and is materialized with
    ``cast`` (a tuple by default); the last chunk may be shorter.
    """
    it = iter(iterable)
    while True:
        try:
            head = next(it)
        except StopIteration:
            # PEP 479: a StopIteration escaping a generator body raises
            # RuntimeError on Python 3.7+, so end iteration explicitly.
            return
        yield cast(itertools.chain([head], itertools.islice(it, chunksize - 1)))
def window(iterable, size=2, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields iterms by bunch of a given size, but rolling only one item
    in and out at a time when iterating.

        >>> list(window([1, 2, 3]))
        [(1, 2), (2, 3)]

    By default, this will cast the window to a tuple before yielding it;
    however, any function that will accept an iterable as its argument
    is a valid target.

    If you pass None as a cast value, the deque will be returned as-is,
    which is more performant. However, since only one deque is used
    for the entire iteration, you'll get the same reference everytime,
    only the deque will contains different items. The result might not
    be what you want :

        >>> list(window([1, 2, 3], cast=None))
        [deque([2, 3], maxlen=2), deque([2, 3], maxlen=2)]
    """
    it = iter(iterable)
    buf = deque(itertools.islice(it, size), size)
    # Falsy cast means "yield the (shared) deque itself".
    convert = cast if cast else (lambda view: view)
    yield convert(buf)
    for item in it:
        buf.append(item)
        yield convert(buf)
def at_index(iterable, index):
    # type: (Iterable[T], int) -> T
    """" Return the item at the index of this iterable or raises IndexError.

    WARNING: this will consume generators.

    Negative indices are allowed but be aware they will cause n items to
    be held in memory, where n = abs(index)
    """
    try:
        if index < 0:
            # Keep only the last abs(index) items; the wanted item is the
            # oldest of them.
            tail = deque(iterable, maxlen=abs(index))
            if len(tail) < abs(index):
                # The iterable had fewer items than abs(index): without
                # this check, popleft() would silently return the wrong
                # item instead of signalling "out of range".
                raise IndexError
            return tail.popleft()
        return next(itertools.islice(iterable, index, index + 1))
    except (StopIteration, IndexError) as e:
        raise_from(IndexError('Index "%d" out of range' % index), e)
# TODO: accept a default value if not value is found
def first_true(iterable, func):
    # type: (Iterable[T], Callable) -> T
    """" Return the first item of the iterable for which func(item) == True.

    Or raises IndexError.

    WARNING: this will consume generators.
    """
    matches = (item for item in iterable if func(item))
    try:
        return next(matches)
    except StopIteration as err:
        # TODO: Find a better error message
        raise_from(IndexError('No match for %s' % func), err)
def iterslice(iterable, start=0, stop=None, step=1):
    # type: (Iterable[T], int, int, int) -> Iterable[T]
    """ Like itertools.islice, but accept int and callables.

    If `start` is a callable, start the slice after the first time
    start(item) == True.

    If `stop` is a callable, stop the slice after the first time
    stop(item) == True.
    """
    if step < 0:
        raise ValueError("The step can not be negative: '%s' given" % step)

    # A "callable stop" is anything truthy that is not an int.
    callable_stop = not isinstance(stop, int) and stop

    if not isinstance(start, int):
        # [Callable:Callable]
        if callable_stop:
            return stops_when(starts_when(iterable, start), stop)
        # [Callable:int]
        return starts_when(itertools.islice(iterable, None, stop, step), start)

    # [int:Callable]
    if callable_stop:
        return stops_when(itertools.islice(iterable, start, None, step), stop)

    # [int:int]
    return itertools.islice(iterable, start, stop, step)
# TODO: allow to disable auto sorting. Document how to make it behave
# like the original groupby
# TODO: allow cast to be None, which set cast to lambda x: x
@renamed_argument('key', 'keyfunc')
def groupby(iterable, keyfunc=None, reverse=False, cast=tuple):
    # type: (Iterable, Callable, bool, Callable) -> Iterable
    """Group the items of `iterable` by `keyfunc`, yielding
    ``(key, group)`` pairs.

    Unlike :func:`itertools.groupby`, the iterable is sorted first (with
    the same key and `reverse` flag), so each key appears only once.
    Each group is materialized through `cast` (tuple by default).
    The decorator still accepts the legacy ``key`` argument name --
    see ``renamed_argument`` for the exact behavior.
    """
    sorted_iterable = sorted(iterable, key=keyfunc, reverse=reverse)
    for key, group in itertools.groupby(sorted_iterable, keyfunc):
        yield key, cast(group)
# TODO: do the same thing as in matrix, where the default value
# can be a callable, a non-string iterable, or a plain value
def firsts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """ Lazily return the first x items from this iterable or default.

    Exactly ``items`` values are always yielded: if the iterable is
    exhausted first, the remaining slots are filled with ``default``.
    """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))

    # TODO: replace this so that it returns lasts()
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                              "be greater than 0. If you wish to get the last "
                              "items, use the lasts() function."))

    # Count what was actually yielded so the padding below is exact even
    # when the iterable is empty (the old zip/range version yielded one
    # default too few in that case).
    yielded = 0
    for item in itertools.islice(iterable, items):
        yielded += 1
        yield item

    for _ in range(items - yielded):
        yield default
def lasts(iterable, items=1, default=None):
    # type: (Iterable[T], int, T) -> Iterable[T]
    """ Lazily return the last x items from this iterable or default.

    Exactly ``items`` values are yielded: any missing leading slots are
    filled with ``default`` before the tail of the iterable.
    """
    # The deque keeps only the last `items` elements.
    tail = deque(iterable, maxlen=items)
    padding = items - len(tail)
    for _ in range(padding):
        yield default
    for item in tail:
        yield item
# reduce is technically the last value of accumulate
# use ww.utils.EMPTY instead of EMPTY
# Put in the doc that scan=fold=accumulate and reduce=accumulate
# replace https://docs.python.org/3/library/itertools.html#itertools.accumulate
# that works only on Python 3.3 and doesn't have echo_start
# def accumulate(func, iterable, start=ww.utils.EMPTY, *, echo_start=True):
# """
# Scan higher-order function.
# The first 3 positional arguments are alike to the ``functools.reduce``
# signature. This function accepts an extra optional ``echo_start``
# parameter that controls whether the first value should be in the output.
# """
# it = iter(iterable)
# if start is ww.utils._EMPTY:
# start = next(it)
# if echo_start:
# yield start
# for item in it:
# start = func(start, item)
# yield start
| 34.153425 | 79 | 0.604925 |
f22bf3b2b9924fefdf21b633e986ed77efffa8ed | 5,201 | py | Python | MSSN_code/src/trainer.py | weiwenlan/Mobile-Lightweight-Super-Resolution-Construction-System | fe1552bf119795f25692d999e5cd375b105705ae | [
"MIT"
] | 14 | 2020-11-07T05:38:32.000Z | 2022-01-19T13:05:58.000Z | MSSN_code/src/trainer.py | weiwenlan/Mobile-Lightweight-Super-Resolution-Construction-System | fe1552bf119795f25692d999e5cd375b105705ae | [
"MIT"
] | 3 | 2021-09-01T13:29:10.000Z | 2021-12-02T08:57:08.000Z | MSSN_code/src/trainer.py | weiwenlan/Mobile-Lightweight-Super-Resolution-Construction-System | fe1552bf119795f25692d999e5cd375b105705ae | [
"MIT"
] | 5 | 2020-11-07T02:59:48.000Z | 2021-12-07T09:09:07.000Z | import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
import skimage
from skimage.measure import compare_ssim
class Trainer():
    """Drives training and evaluation of a super-resolution model.

    Wires together the data loaders, model, loss and optimizer, and
    logs everything through the checkpoint object ``ckp``.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale

        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)

        # When resuming, restore the optimizer state; the epoch count is
        # inferred from the length of the checkpoint log.
        if self.args.load != '':
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))

        self.error_last = 1e8

    def train(self):
        """Run one training epoch over ``loader_train``."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # NOTE: the loop variable ``lr`` (low-resolution batch) shadows
        # the learning rate read above.
        for batch, (lr, hr, _, idx_scale) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            sr = self.model(lr, idx_scale)
            loss = self.loss(sr, hr)
            loss.backward()
            # Optional gradient clipping by value.
            if self.args.gclip > 0:
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )
            self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set and scale; log PSNR/SSIM and save
        the best checkpoint (unless running in test-only mode)."""
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        # One PSNR accumulator per (dataset, scale) pair for this epoch.
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                eval_ssim = 0
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename, _ in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    # Clamp/round the output to valid pixel values.
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    eval_ssim += utility.calc_ssim(sr, hr, window_size = 11, size_average = True)
                    #eval_ssim += skimage.measure.compare_ssim(sr, hr, win_size=11, data_range=255, multichannel=True, gaussian_weights=True)
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                # Average the per-image metrics over the dataset.
                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                mean_ssim = eval_ssim / len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.4f} SSIM:{:.4f} (Best: {:.4f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        mean_ssim,
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            # Mark this checkpoint as best if the first (dataset, scale)
            # pair peaked at the current epoch.
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device, optionally in half precision."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop.

        In test-only mode, evaluate once and stop; otherwise stop after
        ``args.epochs`` epochs.
        """
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch >= self.args.epochs
| 33.993464 | 141 | 0.52836 |
a1632e042d9b1815a77150ebd1eb53a77cc9ee13 | 1,226 | py | Python | armi/bookkeeping/visualization/__init__.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 162 | 2019-11-01T17:35:58.000Z | 2022-03-18T04:22:39.000Z | armi/bookkeeping/visualization/__init__.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 315 | 2019-11-01T17:32:05.000Z | 2022-03-30T03:51:42.000Z | armi/bookkeeping/visualization/__init__.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 55 | 2019-11-01T16:59:59.000Z | 2022-03-25T18:19:06.000Z | # Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Visualization package contains functionality and entry points for producing files
amenable to visualization of ARMI run results.
This could theoretically support all sorts of visualization file formats, but for now,
only VTK files are supported. VTK was selected because it has wide support from vis
tools, while being a simple-enough format that quality pure-Python libraries exist to
produce them. Other formats (e.g., SILO) tend to require more system-dependent binary
dependencies, so optional support for them may be added later.
"""
from armi import plugins
from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint
| 43.785714 | 86 | 0.792007 |
b5ac8e496fd9466d0c63c18a84f20f2f3677c391 | 74,649 | py | Python | torch/testing/_internal/common_quantization.py | rkansal47/pytorch | 08f8d31fcf658563507a79334abaa135aeb9bddd | [
"Intel"
] | 1 | 2021-08-02T08:24:19.000Z | 2021-08-02T08:24:19.000Z | torch/testing/_internal/common_quantization.py | xiezhq-hermann/pytorch | fd8004b42e2a2348ec8837e3fb524b960c1b4cdb | [
"Intel"
] | null | null | null | torch/testing/_internal/common_quantization.py | xiezhq-hermann/pytorch | fd8004b42e2a2348ec8837e3fb524b960c1b4cdb | [
"Intel"
] | null | null | null | r"""Importing this file includes common utility methods and base clases for
checking quantization api and properties of resulting modules.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from torch.nn.intrinsic import _FusedModule
import torch.distributed as dist
from torch.testing._internal.common_utils import TestCase
from torch.quantization import QuantWrapper, QuantStub, DeQuantStub, \
default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \
propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \
get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, QConfigDynamic, QuantType, quantize
from torch.quantization.quantization_mappings import (
get_default_dynamic_quant_module_mappings,
get_default_qconfig_propagation_list,
get_default_qat_module_mappings,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
)
from torch.jit.mobile import _load_for_lite_interpreter
try:
# graph mode quantization based on fx
from torch.quantization.quantize_fx import (
prepare_fx,
prepare_qat_fx,
convert_fx,
)
from torch.quantization.ns.ns_types import NSSingleResultValuesType, NSSubgraph
from torch.fx.graph import Node
from torch.fx import GraphModule
HAS_FX = True
except ImportError:
HAS_FX = False
import copy
import io
import functools
import time
import os
import unittest
import numpy as np
from torch.testing import FileCheck
from typing import Callable, Tuple, Dict, Any, Union, Type
class NodeSpec:
    '''Used for checking GraphModule Node
    '''
    def __init__(self, op, target):
        '''
        op: call_function | call_module
        target:
          for call_function, target would be a function
          for call_module, target would be the type of PyTorch module
        '''
        self.op = op
        self.target = target

    @classmethod
    def call_function(cls, target):
        return NodeSpec('call_function', target)

    @classmethod
    def call_method(cls, target):
        return NodeSpec('call_method', target)

    @classmethod
    def call_module(cls, target):
        return NodeSpec('call_module', target)

    def __hash__(self):
        # Hash on the same (op, target) pair used for equality.
        return hash((self.op, self.target))

    def __eq__(self, other):
        if not isinstance(other, NodeSpec):
            return NotImplemented
        return (self.op, self.target) == (other.op, other.target)

    def __repr__(self):
        return "{!r} {!r}".format(self.op, self.target)
def test_only_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
input Tensors and run the model on the dataset
"""
for inp in calib_data:
output = model(*inp)
# Default criterion shared by test_only_train_fn; constructed once at import
# time (CrossEntropyLoss holds no per-call state, so sharing is safe).
_default_loss_fn = torch.nn.CrossEntropyLoss()
def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):
    r"""
    Default train function takes a torch.utils.data.Dataset and train the model
    on the dataset

    Trains with Adam(lr=0.001) for 10 passes over ``train_data`` (an iterable
    of ``(data, target)`` pairs) and returns ``(train_loss, correct, total)``
    accumulated across all passes.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    train_loss, correct, total = 0, 0, 0
    for i in range(10):
        model.train()
        for data, target in train_data:
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss.backward()
            optimizer.step()
            # Accumulate loss and top-1 accuracy statistics over all passes.
            train_loss += loss.item()
            _, predicted = torch.max(output, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return train_loss, correct, total
class AverageMeter(object):
    """Computes and stores the average and current value.

    ``update(val, n)`` folds in ``n`` observations of value ``val``.  The
    running statistics are exposed as plain attributes (``val``, ``sum``,
    ``count``, ``avg``) and ``str()`` renders name/val/avg with ``fmt``.
    """
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``n`` observations of value ``val``."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: ``(batch, num_classes)`` score/logit Tensor.
        target: ``(batch,)`` Tensor of ground-truth class indices.
        topk: tuple of k values; one accuracy is returned per k, in order.

    Returns:
        list of 1-element Tensors, each holding the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Indices of the maxk highest scores: (batch, maxk) -> (maxk, batch),
        # then mark which predictions match the target.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # reshape(-1) rather than view(-1): view raises on non-contiguous
            # slices, reshape copies only when needed (same values either way).
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):
    """Train ``model`` on at most ``ntrain_batches`` batches from ``data_loader``.

    Prints one '.' per batch as progress; always returns None.
    """
    model.train()
    cnt = 0
    for image, target in data_loader:
        start_time = time.time()  # NOTE(review): captured but never used
        print('.', end='')
        cnt += 1
        image, target = image.to(device), target.to(device)
        output = model(image)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Computed for potential debugging; the values are not used or returned.
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        if cnt >= ntrain_batches:
            return
    return
def ddp_setup(rank, world_size):
    """Initialize a gloo process group on localhost:12355 for DDP tests."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
def ddp_cleanup():
    """Tear down the process group created by ddp_setup."""
    dist.destroy_process_group()
def run_ddp(rank, world_size, prepared):
    """Wrap ``prepared`` in DistributedDataParallel and run one training pass.

    NOTE(review): ``criterion`` and ``dataset`` below are not defined anywhere
    in this module, so calling this function raises NameError — confirm the
    intended globals before use.
    """
    ddp_setup(rank, world_size)
    prepared.cuda()
    prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank])
    prepared.to(rank)
    model_with_ddp = prepared
    optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)
    train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)
    ddp_cleanup()
def convert_dynamic(module):
    """In-place convert ``module`` using the default dynamic-quant module mappings."""
    convert(module, get_default_dynamic_quant_module_mappings(), inplace=True)
def prepare_dynamic(model, qconfig_dict=None):
    """Propagate qconfigs through ``model`` in preparation for dynamic quantization."""
    propagate_qconfig_(model, qconfig_dict)
def _make_conv_test_input(
    batch_size, in_channels_per_group, input_feature_map_size,
    out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale,
    W_zero_point, use_bias, use_channelwise,
):
    """Build float and quantized (input, weight, bias) tensors for qconv tests.

    ``W_scale`` / ``W_zero_point`` are lists; they are tiled and then truncated
    to ``out_channels`` entries.  Returns ``(X, X_q, W, W_q, b_or_None)``.
    """
    in_channels = in_channels_per_group * groups
    out_channels = out_channels_per_group * groups
    # Small activation range keeps intermediate products representable.
    (X_value_min, X_value_max) = (0, 4)
    X_init = torch.randint(
        X_value_min, X_value_max,
        (batch_size, in_channels,) + input_feature_map_size)
    X = X_scale * (X_init - X_zero_point).float()
    X_q = torch.quantize_per_tensor(
        X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
    W_scale = W_scale * out_channels
    W_zero_point = W_zero_point * out_channels
    # Resize W_scale and W_zero_points arrays equal to out_channels
    W_scale = W_scale[:out_channels]
    W_zero_point = W_zero_point[:out_channels]
    # For testing, we use small values for weights and for activations so that
    # no overflow occurs in vpmaddubsw instruction. If the overflow occurs in
    # qconv implementation and if there is no overflow.
    # In reference we can't exactly match the results with reference.
    # Please see the comment in qconv implementation file
    # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.
    (W_value_min, W_value_max) = (-5, 5)
    # The operator expects them in the format
    # (out_channels, in_channels/groups,) + kernel_size
    W_init = torch.randint(
        W_value_min, W_value_max,
        (out_channels, in_channels_per_group,) + kernel_size)
    b_init = torch.randint(0, 10, (out_channels,))
    if use_channelwise:
        # Per-channel: each output channel gets its own scale/zero-point.
        W_shape = (-1, 1) + (1,) * len(kernel_size)
        W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
        W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
        W = W_scales_tensor.reshape(*W_shape) * (
            W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
        b = X_scale * W_scales_tensor * b_init.float()
        W_q = torch.quantize_per_channel(
            W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0,
            dtype=torch.qint8)
    else:
        # Per-tensor: only the first scale/zero-point entry is used.
        W = W_scale[0] * (W_init - W_zero_point[0]).float()
        b = X_scale * W_scale[0] * b_init.float()
        W_q = torch.quantize_per_tensor(
            W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8)
    return (X, X_q, W, W_q, b if use_bias else None)
def skipIfNoFBGEMM(fn):
    """Skip a test function (or a whole TestCase class) when fbgemm is unavailable."""
    reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.'
    if isinstance(fn, type):
        # Class decorator: mark the TestCase itself via unittest's skip attributes.
        if 'fbgemm' not in torch.backends.quantized.supported_engines:
            fn.__unittest_skip__ = True
            fn.__unittest_skip_why__ = reason
        return fn

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Function decorator: decide lazily, at call time.
        if 'fbgemm' in torch.backends.quantized.supported_engines:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest(reason)
    return wrapper
def skipIfNoQNNPACK(fn):
    """Skip a test function (or a whole TestCase class) when qnnpack is unavailable."""
    reason = 'Quantized operations require QNNPACK.'
    if isinstance(fn, type):
        # Class decorator: mark the TestCase itself via unittest's skip attributes.
        if 'qnnpack' not in torch.backends.quantized.supported_engines:
            fn.__unittest_skip__ = True
            fn.__unittest_skip_why__ = reason
        return fn

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # Function decorator: decide lazily, at call time.
        if 'qnnpack' in torch.backends.quantized.supported_engines:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest(reason)
    return wrapper
try:
import torchvision # noqa: F401
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
def get_script_module(model, tracing, data):
    """Return a TorchScript version of ``model``.

    Traced with ``data`` when ``tracing`` is True, otherwise compiled with
    ``torch.jit.script`` (``data`` is ignored in that case).
    """
    if tracing:
        return torch.jit.trace(model, data)
    return torch.jit.script(model)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
    """
    Convert bag lengths to offsets for embedding_bag.

    ``t`` is an array of per-bag lengths.  Returns a torch Tensor of bag
    start offsets when ``use_begin_offset`` is True, otherwise end offsets.
    """
    padded = np.zeros((t.shape[0] + 1,), dtype=offset_type)
    padded[1:] = t
    offsets = torch.from_numpy(np.cumsum(padded, dtype=offset_type))
    return offsets[:-1] if use_begin_offset else offsets[1:]
# QuantizationTestCase used as a base class for testing quantization on modules
class QuantizationTestCase(TestCase):
    def setUp(self):
        """Build small random calibration/training fixtures shared by tests.

        ``img_data_Nd`` entries are lists of single-input samples; the
        ``*_train`` variants pair each input with integer labels (always 0,
        since ``randint(0, 1)`` excludes the upper bound).
        """
        super().setUp()
        self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)]
        self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)]
        self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)]
                            for _ in range(2)]
        self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)]
                            for _ in range(2)]
        self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)]
                            for _ in range(2)]
        self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float),
                                   torch.randint(0, 1, (1,), dtype=torch.long)]
                                  for _ in range(2)]
        self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float),
                                   torch.randint(0, 1, (1,), dtype=torch.long)]
                                  for _ in range(2)]
        self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float),
                                   torch.randint(0, 1, (1,), dtype=torch.long)]
                                  for _ in range(2)]
        # Lookup of image fixtures by spatial dimensionality.
        self.img_data_dict = {1 : self.img_data_1d,
                              2 : self.img_data_2d,
                              3 : self.img_data_3d}
        # Quant types that produce statically quantized ops
        self.static_quant_types = [QuantType.STATIC, QuantType.QAT]
        # All quant types for (fx based) graph mode quantization
        self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT]
def checkNoPrepModules(self, module):
r"""Checks the module does not contain child
modules for quantization prepration, e.g.
quant, dequant and observer
"""
self.assertFalse(hasattr(module, 'quant'))
self.assertFalse(hasattr(module, 'dequant'))
def checkNoQconfig(self, module):
r"""Checks the module does not contain qconfig
"""
self.assertFalse(hasattr(module, 'qconfig'))
for child in module.children():
self.checkNoQconfig(child)
def checkHasPrepModules(self, module):
r"""Checks the module contains child
modules for quantization prepration, e.g.
quant, dequant and observer
"""
self.assertTrue(hasattr(module, 'module'))
self.assertTrue(hasattr(module, 'quant'))
self.assertTrue(hasattr(module, 'dequant'))
    def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None):
        r"""Checks the module or module's leaf descendants
        have observers in preparation for quantization
        """
        if propagate_qconfig_list is None:
            propagate_qconfig_list = get_default_qconfig_propagation_list()
        if prepare_custom_config_dict is None:
            prepare_custom_config_dict = {}
        float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {})
        # check if a module is a leaf module, ignoring activation_post_process attribute
        def is_leaf_module(module):
            submodule_name_count = 0
            for name, _ in module.named_children():
                if name != 'activation_post_process':
                    submodule_name_count += 1
            return submodule_name_count == 0
        # A module needs an observer iff it has a non-None qconfig, is an
        # eligible leaf (or a registered custom observed class), and is not
        # a DeQuantStub.
        if hasattr(module, 'qconfig') and module.qconfig is not None and \
           ((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential)
            and type(module) in propagate_qconfig_list) or
               type(module) in float_to_observed_module_class_mapping.keys()) and \
           not isinstance(module, torch.quantization.DeQuantStub):
            self.assertTrue(hasattr(module, 'activation_post_process'),
                            'module: ' + str(type(module)) + ' do not have observer')
        # we don't need to check observers for child modules of the
        # qat modules
        if type(module) not in get_default_qat_module_mappings().values() and \
           type(module) not in float_to_observed_module_class_mapping.values() and \
           not isinstance(module, _FusedModule):
            for child in module.children():
                self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict)
    def checkQuantDequant(self, mod):
        r"""Checks that mod has nn.Quantize and
        nn.DeQuantize submodules inserted
        (as the ``quant`` / ``dequant`` attributes).
        """
        self.assertEqual(type(mod.quant), nnq.Quantize)
        self.assertEqual(type(mod.dequant), nnq.DeQuantize)
    def checkWrappedQuantizedLinear(self, mod):
        r"""Checks that mod has been swapped for an nnq.Linear
        module, the bias is qint32, and that the module
        has Quantize and DeQuantize submodules
        """
        # NOTE(review): the bias dtype mentioned above is not actually
        # asserted here — only the wrapped type and quant/dequant stubs are.
        self.assertEqual(type(mod.module), nnq.Linear)
        self.checkQuantDequant(mod)
    def checkQuantizedLinear(self, mod):
        """Assert ``mod`` has been swapped for ``nnq.Linear``."""
        self.assertEqual(type(mod), nnq.Linear)
    def checkDynamicQuantizedLinear(self, mod, dtype):
        r"""Checks that mod has been swapped for an nnqd.Linear
        module, the bias is float, and the packed params use ``dtype``.
        """
        self.assertEqual(type(mod), nnqd.Linear)
        self.assertEqual(mod._packed_params.dtype, dtype)
    def check_eager_serialization(self, ref_model, loaded_model, x):
        """Round-trip ``ref_model`` through torch.save (state_dict and whole
        model) and assert the reloaded outputs match on input tuple ``x``.
        """
        # Check state dict serialization and torch.save APIs
        model_dict = ref_model.state_dict()
        b = io.BytesIO()
        torch.save(model_dict, b)
        b.seek(0)
        loaded_dict = torch.load(b)
        loaded_model.load_state_dict(loaded_dict)
        ref_out = ref_model(*x)
        load_out = loaded_model(*x)
        # Outputs may be (tensor, tensor-or-tuple) pairs, e.g. LSTM states.
        def check_outputs(ref_out, load_out):
            self.assertEqual(ref_out[0], load_out[0])
            if isinstance(ref_out[1], tuple):
                self.assertEqual(ref_out[1][0], load_out[1][0])
                self.assertEqual(ref_out[1][1], load_out[1][1])
            else:
                self.assertEqual(ref_out[1], load_out[1])
        check_outputs(ref_out, load_out)
        # Also round-trip the entire model object (pickle path).
        b = io.BytesIO()
        torch.save(ref_model, b)
        b.seek(0)
        loaded = torch.load(b)
        load_out = loaded(*x)
        check_outputs(ref_out, load_out)
def check_weight_bias_api(self, ref_model, weight_keys, bias_keys):
weight = ref_model.get_weight()
bias = ref_model.get_bias()
self.assertEqual(weight_keys ^ weight.keys(), set())
self.assertEqual(bias_keys ^ bias.keys(), set())
    def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype):
        r"""Checks that mod has been swapped for an nnqd.LSTM type
        module, the bias is float.
        """
        # Map weight dtype to the tag stored in the packed param state.
        wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}
        self.assertEqual(type(mod), reference_module_type)
        for packed_params in mod._all_weight_values:
            self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])
    def checkLinear(self, mod):
        """Assert ``mod`` is a plain (float) ``torch.nn.Linear``."""
        self.assertEqual(type(mod), torch.nn.Linear)
    def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype):
        r"""Checks that mod has been swapped for the expected dynamically
        quantized module type, and its packed weights use ``dtype``.
        """
        wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}
        self.assertEqual(type(mod), reference_module_type)
        # Not all dynamic modules expose _all_weight_values (RNN family does).
        if hasattr(mod, '_all_weight_values'):
            for packed_params in mod._all_weight_values:
                self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])
    def checkScriptable(self, orig_mod, calib_data, check_save_load=False):
        """Script AND trace ``orig_mod``, checking each against the original."""
        scripted = torch.jit.script(orig_mod)
        self._checkScriptable(orig_mod, scripted, calib_data, check_save_load)
        # Use first calib_data entry as trace input
        traced = torch.jit.trace(orig_mod, calib_data[0])
        self._checkScriptable(orig_mod, traced, calib_data, check_save_load)
    # Call this twice: once for a scripted module and once for a traced module
    def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load):
        """Compare ``script_mod`` to ``orig_mod`` and optionally round-trip it
        through torch.jit.save/load before re-checking.
        """
        self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data)
        # Test save/load
        buffer = io.BytesIO()
        torch.jit.save(script_mod, buffer)
        buffer.seek(0)
        loaded_mod = torch.jit.load(buffer)
        # Pending __get_state_ and __set_state__ support
        # See tracking task https://github.com/pytorch/pytorch/issues/23984
        if check_save_load:
            self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data)
def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data):
for inp in calib_data:
ref_output = orig_mod(*inp)
scripted_output = test_mod(*inp)
self.assertEqual(scripted_output, ref_output)
    def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False,
                         check=True, eval_mode=True, dynamic=False, qconfig=None):
        """Quantize ``module`` with JIT graph-mode quantization (static or
        dynamic) and verify the non-debug graph contains ``quantized_op``.
        Returns the non-debug quantized model.
        """
        if debug:
            print('Testing:', str(module))
        qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)}

        if eval_mode:
            module = module.eval()
        if dynamic:
            qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig}
        model = get_script_module(module, tracing, inputs[0]).eval()
        if debug:
            print('input graph:', model.graph)
        models = {}
        outputs = {}
        # NOTE(review): this loop variable shadows the ``debug`` parameter;
        # after the loop ``debug`` is False, so the prints below never run.
        for debug in [True, False]:
            if dynamic:
                models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug)
                # make sure it runs
                outputs[debug] = models[debug](inputs)
            else:
                # module under test can contain in-place ops, and we depend on
                # input data staying constant for comparisons
                inputs_copy = copy.deepcopy(inputs)
                models[debug] = quantize_jit(
                    model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False,
                    debug=debug)
                # make sure it runs
                outputs[debug] = models[debug](*inputs[0])

        if debug:
            print('debug graph:', models[True].graph)
            print('non debug graph:', models[False].graph)

        if check:
            # debug and non-debug option should have the same numerics
            self.assertEqual(outputs[True], outputs[False])

            # non debug graph should produce quantized op
            FileCheck().check(quantized_op) \
                       .run(models[False].graph)

        return models[False]
    def checkGraphModuleNodes(
            self, graph_module,
            expected_node=None,
            expected_node_occurrence=None,
            expected_node_list=None):
        """ Check if GraphModule contains the target node
        Args:
            graph_module: the GraphModule instance we want to check
            expected_node, expected_node_occurrence, expected_node_list:
               see docs for checkGraphModeFxOp.  ``expected_node_list`` is
               matched as an ordered subsequence of the graph's nodes.
        """
        nodes_in_graph = dict()
        node_list = []
        modules = dict(graph_module.named_modules(remove_duplicate=False))
        # Normalize each graph node to a NodeSpec and count occurrences.
        for node in graph_module.graph.nodes:
            n = None
            if node.op == 'call_function' or node.op == 'call_method':
                n = NodeSpec(node.op, node.target)
            elif node.op == 'call_module':
                n = NodeSpec(node.op, type(modules[node.target]))

            if n is not None:
                node_list.append(n)
                if n in nodes_in_graph:
                    nodes_in_graph[n] += 1
                else:
                    nodes_in_graph[n] = 1

        if expected_node is not None:
            self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) +
                            ' not found in the graph module')

        if expected_node_occurrence is not None:
            for expected_node, occurrence in expected_node_occurrence.items():
                if occurrence != 0:
                    self.assertTrue(
                        expected_node in nodes_in_graph,
                        'Check failed for node:' + str(expected_node) +
                        ' not found')
                    self.assertTrue(
                        nodes_in_graph[expected_node] == occurrence,
                        'Check failed for node:' + str(expected_node) +
                        ' Expected occurrence:' + str(occurrence) +
                        ' Found occurrence:' + str(nodes_in_graph[expected_node]))
                else:
                    # occurrence == 0 means the node must be absent.
                    self.assertTrue(
                        expected_node not in nodes_in_graph,
                        'Check failed for node:' + str(expected_node) +
                        ' expected no occurrence but found')

        if expected_node_list is not None:
            # Subsequence match: advance through expected_node_list as nodes
            # are encountered in graph order.
            cur_index = 0
            for n in node_list:
                if cur_index == len(expected_node_list):
                    return
                if n == expected_node_list[cur_index]:
                    cur_index += 1
            self.assertTrue(
                cur_index == len(expected_node_list),
                "Check failed for graph:" +
                self.printGraphModule(graph_module, print_str=False) +
                "Expected ordered list:" +
                str(expected_node_list))
    def printGraphModule(self, graph_module, print_str=True):
        """Render one line per graph node (op, name, target, args, kwargs);
        print it when ``print_str`` is True.  Returns the rendered string.
        """
        modules = dict(graph_module.named_modules())
        node_infos = []
        for n in graph_module.graph.nodes:
            node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs]))
            if n.op == 'call_module':
                node_info += ' module type: ' + repr(type(modules[n.target]))
            node_infos.append(node_info)
        str_to_print = '\n'.join(node_infos)
        if print_str:
            print(str_to_print)
        return str_to_print
    if HAS_FX:
        def assert_types_for_matched_subgraph_pairs(
            self,
            matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
            expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]],
            gm_a: GraphModule,
            gm_b: GraphModule,
        ) -> None:
            """
            Verifies that the types specified in expected_types match
            the underlying objects pointed to by the nodes in matched_subgraph_pairs.
            An example successful test case:
              matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)}
              expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)}
            The function tests for key equivalence, and verifies types with
            identity checks.
            """
            # call_module nodes resolve to the module's type; call_function /
            # call_method nodes are identified by their target directly.
            def _get_underlying_op_type(
                node: Node, gm: GraphModule
            ) -> Union[Callable, str]:
                if node.op == 'call_module':
                    mod = getattr(gm, node.target)
                    return type(mod)
                else:
                    assert node.op in ('call_function', 'call_method')
                    return node.target

            self.assertTrue(
                len(matched_subgraph_pairs) == len(expected_types),
                'Expected length of results to match, but got %d and %d' %
                (len(matched_subgraph_pairs), len(expected_types))
            )
            for k, v in expected_types.items():
                expected_types_a, expected_types_b = v
                exp_type_start_a, exp_type_end_a = expected_types_a
                exp_type_start_b, exp_type_end_b = expected_types_b
                subgraph_a, subgraph_b = matched_subgraph_pairs[k]

                act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a)
                act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b)
                act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a)
                act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b)
                types_match = (exp_type_start_a is act_type_start_a) and \
                    (exp_type_end_a is act_type_end_a) and \
                    (exp_type_start_b is act_type_start_b) and \
                    (exp_type_end_b is act_type_end_b)
                self.assertTrue(
                    types_match,
                    'Type mismatch at %s: expected %s, got %s' %
                    (k, (exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b),
                        (act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b))
                )
def assert_ns_compare_dict_valid(
self,
act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]],
) -> None:
"""
Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid:
1. for each layer, results are recorded for two models
2. number of seen tensors match
3. shapes of each pair of seen tensors match
"""
for layer_name, result_type_to_data in act_compare_dict.items():
for result_type, layer_data in result_type_to_data.items():
self.assertTrue(
len(layer_data) == 2,
f"Layer {layer_name} does not have exactly two model results.")
model_name_0, model_name_1 = layer_data.keys()
for res_idx in range(len(layer_data[model_name_0])):
layer_data_0 = layer_data[model_name_0][res_idx]
layer_data_1 = layer_data[model_name_1][res_idx]
self.assertTrue(
layer_data_0['type'] == layer_data_0['type'],
f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.")
self.assertTrue(
len(layer_data_0['values']) ==
len(layer_data_1['values']),
f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.")
# F.conv1d weight has rank 3, and toq.conv1d unpacked weight
# has rank 4. For now, skip the length check for conv1d only.
is_weight_functional_conv1d = (
result_type == NSSingleResultValuesType.WEIGHT.value and
(
'conv1d' in layer_data_0['prev_node_target_type'] or
'conv1d' in layer_data_1['prev_node_target_type']
)
)
if not is_weight_functional_conv1d:
for idx in range(len(layer_data_0['values'])):
values_0 = layer_data_0['values'][idx]
values_1 = layer_data_1['values'][idx]
if isinstance(values_0, torch.Tensor):
self.assertTrue(
values_0.shape == values_1.shape,
f"Layer {layer_name}, {model_name_0} and {model_name_1} " +
f"have a shape mismatch at idx {idx}.")
elif isinstance(values_0, list):
values_0 = values_0[0]
values_1 = values_1[0]
self.assertTrue(
values_0.shape == values_1.shape,
f"Layer {layer_name}, {model_name_0} and {model_name_1} " +
f"have a shape mismatch at idx {idx}.")
else:
assert isinstance(values_0, tuple), \
f"unhandled type {type(values_0)}"
assert len(values_0) == 2
assert len(values_0[1]) == 2
assert values_0[0].shape == values_1[0].shape
assert values_0[1][0].shape == values_1[1][0].shape
assert values_0[1][1].shape == values_1[1][1].shape
# verify that ref_node_name is valid
ref_node_name_0 = layer_data_0['ref_node_name']
ref_node_name_1 = layer_data_1['ref_node_name']
prev_node_name_0 = layer_data_0['prev_node_name']
prev_node_name_1 = layer_data_1['prev_node_name']
if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value:
self.assertTrue(ref_node_name_0 == prev_node_name_0)
self.assertTrue(ref_node_name_1 == prev_node_name_1)
elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value:
self.assertTrue(ref_node_name_0 != prev_node_name_0)
self.assertTrue(ref_node_name_1 != prev_node_name_1)
    def checkGraphModeFxOp(self, model, inputs, quant_type,
                           expected_node=None,
                           expected_node_occurrence=None,
                           expected_node_list=None,
                           is_reference=False,
                           print_debug_info=False,
                           custom_qconfig_dict=None,
                           prepare_expected_node=None,
                           prepare_expected_node_occurrence=None,
                           prepare_expected_node_list=None,
                           prepare_custom_config_dict=None):
        """ Quantizes model with graph mode quantization on fx and check if the
            quantized model contains the quantized_node
            Args:
                model: floating point torch.nn.Module
                inputs: one positional sample input arguments for model
                expected_node: NodeSpec
                      e.g. NodeSpec.call_function(torch.quantize_per_tensor)
                expected_node_occurrence: a dict from NodeSpec to
                      expected number of occurences (int)
                      e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1,
                            NodeSpec.call_method('dequantize'): 1}
                expected_node_list: a list of NodeSpec, used to check the order
                      of the occurrence of Node
                      e.g. [NodeSpec.call_function(torch.quantize_per_tensor),
                            NodeSpec.call_module(nnq.Conv2d),
                            NodeSpec.call_function(F.hardtanh_),
                            NodeSpec.call_method('dequantize')]
                is_reference: if True, the node checks run against the
                      reference-converted graph instead of the normal one
                print_debug_info: if True, prints debug info
                custom_qconfig_dict: overrides default qconfig_dict
                prepare_expected_node: same as expected_node, but for prepare
                prepare_expected_node_occurrence: same as
                      expected_node_occurrence, but for prepare
                prepare_expected_node_list: same as expected_node_list, but
                      for prepare
            Returns: the output of the quantized (non-reference) model.
        """
        # TODO: make img_data a single example instead of a list
        if type(inputs) == list:
            inputs = inputs[0]

        # Pick qconfig and train/eval mode from the requested quant type.
        if quant_type == QuantType.QAT:
            qconfig = get_default_qat_qconfig(torch.backends.quantized.engine)
            model.train()
        elif quant_type == QuantType.STATIC:
            qconfig = get_default_qconfig(torch.backends.quantized.engine)
            model.eval()
        else:
            qconfig = default_dynamic_qconfig
            model.eval()

        if quant_type == QuantType.QAT:
            prepare = prepare_qat_fx
        else:
            prepare = prepare_fx

        qconfig_dict = {"": qconfig}
        # overwrite qconfig_dict with custom_qconfig_dict
        if custom_qconfig_dict is not None:
            qconfig_dict = custom_qconfig_dict
        prepared = prepare(
            model, qconfig_dict,
            prepare_custom_config_dict=prepare_custom_config_dict)
        if not quant_type == QuantType.DYNAMIC:
            # Run a forward pass so observers (or fake-quant) see data.
            prepared(*inputs)

        if print_debug_info:
            print()
            print('quant type:\n', quant_type)
            print('original model:\n', model)
            print()
            print('prepared model:\n', prepared)

        self.checkGraphModuleNodes(
            prepared, prepare_expected_node,
            prepare_expected_node_occurrence, prepare_expected_node_list)

        # Convert both normally and in reference mode (from a deep copy, since
        # convert_fx mutates the prepared model).
        prepared_copy = copy.deepcopy(prepared)
        qgraph = convert_fx(prepared)
        qgraph_reference = convert_fx(prepared_copy, is_reference=True)
        result = qgraph(*inputs)
        result_reference = qgraph_reference(*inputs)

        qgraph_to_check = qgraph_reference if is_reference else qgraph
        if print_debug_info:
            print()
            print('quantized model:\n', qgraph_to_check)
            self.printGraphModule(qgraph_to_check)
            print()
        self.checkGraphModuleNodes(
            qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list)
        # TODO: change this to return prepared model, qgraph and result
        return result
    def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets,
                                    set_qconfig, is_emb_bag, dtype=torch.quint8):
        """End-to-end serialization checks for quantized Embedding/EmbeddingBag:
        state_dict round-trip, eager save/load, JIT scriptability and from_float.
        """
        # Test serialization of dynamic EmbeddingBag module using state_dict
        if is_emb_bag:
            inputs = [indices, offsets]
        else:
            inputs = [indices]

        emb_dict = qemb.state_dict()
        b = io.BytesIO()
        torch.save(emb_dict, b)
        b.seek(0)
        loaded_dict = torch.load(b)
        embedding_unpack = torch.ops.quantized.embedding_bag_unpack
        # Check unpacked weight values explicitly
        for key in emb_dict:
            if isinstance(emb_dict[key], torch._C.ScriptObject):
                assert isinstance(loaded_dict[key], torch._C.ScriptObject)
                emb_weight = embedding_unpack(emb_dict[key])
                loaded_weight = embedding_unpack(loaded_dict[key])
                self.assertEqual(emb_weight, loaded_weight)

        # Check state dict serialization and torch.save APIs
        if is_emb_bag:
            loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
                                           include_last_offset=True, mode='sum', dtype=dtype)
        else:
            loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)
        self.check_eager_serialization(qemb, loaded_qemb, inputs)

        loaded_qemb.load_state_dict(loaded_dict)
        self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight),
                         embedding_unpack(loaded_qemb._packed_params._packed_weight))

        # Test JIT serialization
        self.checkScriptable(qemb, [inputs], check_save_load=True)

        # Test from_float call
        if is_emb_bag:
            float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
                                                    include_last_offset=True, scale_grad_by_freq=False, mode='sum')
        else:
            float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)

        if set_qconfig:
            # Per-channel float-qparams observer for the embedding weight.
            float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
                                                                        qscheme=torch.per_channel_affine_float_qparams,
                                                                        ch_axis=0)
            float_embedding.qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
                                                     weight=float_qparams_observer)

        prepare_dynamic(float_embedding)

        float_embedding(*inputs)
        if is_emb_bag:
            q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding)
            expected_name = "QuantizedEmbeddingBag"
        else:
            q_embeddingbag = nnq.Embedding.from_float(float_embedding)
            expected_name = "QuantizedEmbedding"

        q_embeddingbag(*inputs)

        self.assertTrue(expected_name in str(q_embeddingbag))
class QuantizationLiteTestCase(QuantizationTestCase):
    """Base class for tests that compare quantized models on the lite interpreter."""

    def setUp(self):
        super().setUp()

    def _create_quantized_model(self, model_class: Type[torch.nn.Module], **kwargs):
        # Creates quantized model for testing mobile script modules
        qengine = "qnnpack"
        with override_quantized_engine(qengine):
            qconfig = torch.quantization.get_default_qconfig(qengine)
            model = model_class(**kwargs)
            model = quantize(model, test_only_eval_fn, [self.calib_data])

        return model

    def _compare_script_and_mobile(self,
                                   model: torch.nn.Module,
                                   input: torch.Tensor):
        # Compares the numerical outputs for script and lite modules
        qengine = "qnnpack"
        with override_quantized_engine(qengine):
            script_module = torch.jit.script(model)
            script_module_result = script_module(input)

            # Lite-interpreter numerics can be flaky; retry a bounded number
            # of times and only propagate the failure on the final attempt.
            max_retry = 5
            for retry in range(1, max_retry + 1):
                # retries `max_retry` times; breaks iff succeeds else throws exception
                try:
                    buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
                    buffer.seek(0)
                    mobile_module = _load_for_lite_interpreter(buffer)

                    mobile_module_result = mobile_module(input)

                    torch.testing.assert_allclose(script_module_result, mobile_module_result)
                    mobile_module_forward_result = mobile_module.forward(input)
                    torch.testing.assert_allclose(script_module_result, mobile_module_forward_result)

                    mobile_module_run_method_result = mobile_module.run_method("forward", input)
                    torch.testing.assert_allclose(script_module_result, mobile_module_run_method_result)
                except AssertionError as e:
                    if retry == max_retry:
                        raise e
                    else:
                        continue
                break
# Below are a series of toy models to use in testing quantization
class SingleLayerLinearModel(torch.nn.Module):
    """Minimal float toy model: a single 5x5 Linear layer."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        return self.fc1(x)
class AnnotatedSingleLayerLinearModel(torch.nn.Module):
    """Single Linear wrapped in QuantWrapper, with an eager-mode qconfig set."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))

    def forward(self, x):
        x = self.fc1(x)
        return x
class SingleLayerLinearDynamicModel(torch.nn.Module):
    """Single Linear with a (static-default) qconfig, used for dynamic-quant tests."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)

    def forward(self, x):
        x = self.fc1(x)
        return x
class LinearAddModel(nn.Module):
    """Float toy model: Linear(5->8), add scalar 5, Linear(8->5)."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        hidden = self.fc1(x)
        hidden = torch.add(hidden, 5)
        return self.fc2(hidden)
class RNNDynamicModel(torch.nn.Module):
    """Wraps a GRU or LSTM (chosen by ``mod_type``) with a dynamic qconfig.

    NOTE(review): an unrecognized ``mod_type`` leaves ``self.mod`` unset, so
    ``forward`` would raise AttributeError — confirm callers only pass
    'GRU'/'LSTM'.
    """
    def __init__(self, mod_type):
        super().__init__()
        self.qconfig = default_dynamic_qconfig
        if mod_type == 'GRU':
            self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)
        if mod_type == 'LSTM':
            self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)

    def forward(self, x):
        x = self.mod(x)
        return x
class RNNCellDynamicModel(torch.nn.Module):
    """Wraps an RNN cell (GRUCell/LSTMCell/RNNCell relu|tanh, chosen by
    ``mod_type``) with a dynamic qconfig.

    NOTE(review): an unrecognized ``mod_type`` leaves ``self.mod`` unset, so
    ``forward`` would raise AttributeError.
    """
    def __init__(self, mod_type):
        super().__init__()
        self.qconfig = default_dynamic_qconfig
        if mod_type == 'GRUCell':
            self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float)
        if mod_type == 'LSTMCell':
            self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float)
        if mod_type == 'RNNReLU':
            self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float)
        if mod_type == 'RNNTanh':
            self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float)

    def forward(self, x):
        x = self.mod(x)
        return x
class LSTMwithHiddenDynamicModel(torch.nn.Module):
    """LSTM whose forward takes and returns an explicit hidden state."""
    def __init__(self, qengine='fbgemm'):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float)
    def forward(self, x, hid):
        x, hid = self.lstm(x, hid)
        return x, hid
class ConvModel(torch.nn.Module):
    """Single bias-free 3->5 Conv2d (kernel 3), float."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)

    def forward(self, x):
        return self.conv(x)
class ConvTransposeModel(torch.nn.Module):
    """Single bias-free 3->5 ConvTranspose2d (kernel 3), float."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)

    def forward(self, x):
        return self.conv(x)
class AnnotatedConvModel(torch.nn.Module):
    """Conv2d bracketed by manual Quant/DeQuant stubs, with an eager-mode qconfig."""
    def __init__(self, qengine):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.dequant(x)
        return x
class AnnotatedConvTransposeModel(torch.nn.Module):
    """ConvTranspose2d bracketed by manual Quant/DeQuant stubs, with an eager-mode qconfig."""
    def __init__(self, qengine):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.dequant(x)
        return x
class ConvBnModel(torch.nn.Module):
    """Conv2d followed by BatchNorm2d, float; target for conv+bn fusion tests."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)

    def forward(self, x):
        return self.bn(self.conv(x))
class AnnotatedConvBnModel(torch.nn.Module):
    """Conv2d + BatchNorm2d bracketed by Quant/DeQuant stubs, with default qconfig."""
    def __init__(self):
        super().__init__()
        self.qconfig = default_qconfig
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.bn(x)
        x = self.dequant(x)
        return x
class ConvBnReLUModel(torch.nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU, float; target for conv+bn+relu fusion tests."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class AnnotatedConvBnReLUModel(torch.nn.Module):
    """Conv + BN + ReLU wrapped in Quant/DeQuant stubs; fuse_model() fuses the triple."""
    def __init__(self, qengine='fbgemm'):
        super(AnnotatedConvBnReLUModel, self).__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
        self.relu = nn.ReLU(inplace=True)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.dequant(x)
        return x
    def fuse_model(self):
        # fuse conv/bn/relu into one module, in place, prior to quantization
        torch.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True)
class TwoLayerConvModel(torch.nn.Module):
    """Two stacked bias-free Conv2d layers (3->5 k3, then 5->5 k1), float."""
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
        self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class TwoLayerLinearModel(torch.nn.Module):
    """Two stacked Linear layers (5->8 then 8->5), float."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        return self.fc2(self.fc1(x))
class LinearModelWithSubmodule(nn.Module):
    """TwoLayerLinearModel submodule followed by one more Linear; exercises nested modules."""
    def __init__(self):
        super(LinearModelWithSubmodule, self).__init__()
        self.subm = TwoLayerLinearModel()
        self.fc = nn.Linear(5, 5)
    def forward(self, x):
        x = self.subm(x)
        x = self.fc(x)
        return x
class AnnotatedTwoLayerLinearModel(torch.nn.Module):
    """Two Linear layers where only fc2 is wrapped/annotated for quantization."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))
        self.fc2.qconfig = torch.quantization.get_default_qconfig("fbgemm")
    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        return x
class ActivationsTestModel(torch.nn.Module):
    """Hardswish and ELU activations between Quant/DeQuant stubs, for activation quantization tests."""
    def __init__(self):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        self.quant = torch.quantization.QuantStub()
        self.hardswish = torch.nn.Hardswish().to(dtype=torch.float)
        self.elu = torch.nn.ELU().to(dtype=torch.float)
        self.dequant = torch.quantization.DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.hardswish(x)
        x = self.elu(x)
        x = self.dequant(x)
        return x
class LinearReluModel(torch.nn.Module):
    """Linear followed by ReLU, float; target for linear+relu fusion tests."""
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
    def forward(self, x):
        x = self.relu(self.fc(x))
        return x
class LinearReluLinearModel(torch.nn.Module):
    """Linear(5->8) -> ReLU -> Linear(8->5), float."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class LinearReluAddModel(torch.nn.Module):
    """Linear -> ReLU -> scalar add -> Linear, float."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float)
    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = torch.add(x, 5)
        x = self.fc2(x)
        # NOTE(review): reassigns self.relu inside forward — presumably deliberate, to
        # exercise module reassignment during tracing/quantization; confirm before removing.
        self.relu = torch.nn.ReLU()
        return x
class ConvReluModel(torch.nn.Module):
    """Conv2d followed by ReLU, float (the conv is named `fc` in this test model)."""
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
    def forward(self, x):
        x = self.relu(self.fc(x))
        return x
class ConvReluConvModel(torch.nn.Module):
    """Conv2d -> ReLU -> Conv2d, float (convs are named `fc1`/`fc2` in this test model)."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class ConvReluAddModel(torch.nn.Module):
    """Conv2d -> ReLU -> scalar add -> Conv2d, float."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)
    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = torch.add(x, 5)
        x = self.fc2(x)
        # NOTE(review): reassigns self.relu inside forward — presumably deliberate, to
        # exercise module reassignment during tracing/quantization; confirm before removing.
        self.relu = torch.nn.ReLU()
        return x
class NormalizationTestModel(torch.nn.Module):
    """Runs one tensor through LayerNorm, GroupNorm and InstanceNorm1d/2d/3d,
    reshaping between them so each norm sees a validly-ranked input."""
    def __init__(self):
        super().__init__()
        self.quant = torch.quantization.QuantStub()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.layer_norm = torch.nn.LayerNorm((8))
        self.group_norm = torch.nn.GroupNorm(2, 8)
        self.instance_norm1d = torch.nn.InstanceNorm1d(8)
        self.instance_norm2d = torch.nn.InstanceNorm2d(8)
        self.instance_norm3d = torch.nn.InstanceNorm3d(8)
    def forward(self, x):
        x = self.quant(x)
        x = self.fc1(x)
        x = self.layer_norm(x)
        # add/extend a trailing spatial dim so GroupNorm/InstanceNorm get (N, C, ...) inputs
        x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3))
        x = self.instance_norm1d(x)
        x = self.instance_norm2d(x.unsqueeze(-1))
        x = self.instance_norm3d(x.unsqueeze(-1))
        return x
class NestedModel(torch.nn.Module):
    """Two submodules (LinearReluModel, TwoLayerLinearModel) plus a final Linear."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x
class AnnotatedNestedModel(torch.nn.Module):
    """Nested model where fc3 and sub2.fc1 are wrapped; sub2.fc1 gets a
    per-channel qconfig only when the engine is fbgemm."""
    def __init__(self, qengine):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        if qengine == 'fbgemm':
            self.sub2.fc1.qconfig = default_per_channel_qconfig
        else:
            self.sub2.fc1.qconfig = default_qconfig
    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x
class AnnotatedSubNestedModel(torch.nn.Module):
    """Nested model where the whole sub2 submodule is quant-wrapped and annotated."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = QuantWrapper(TwoLayerLinearModel())
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.qconfig = default_qconfig
    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x
class AnnotatedCustomConfigNestedModel(torch.nn.Module):
    """Nested model where sub2.fc1 carries a custom QConfig (quint8, per-tensor affine)
    while the rest uses the default qconfig."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = TwoLayerLinearModel()
        self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
        self.fc3.qconfig = default_qconfig
        self.sub2.qconfig = default_qconfig
        custom_options = {
            'dtype': torch.quint8,
            'qscheme': torch.per_tensor_affine
        }
        custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),
                                 weight=default_weight_observer)
        # attach the custom qconfig before wrapping, so the wrapper inherits it
        self.sub2.fc1.qconfig = custom_qconfig
        self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
        self.sub2.fc2 = QuantWrapper(self.sub2.fc2)
    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x
class QuantSubModel(torch.nn.Module):
    """Model where sub2 is quant-wrapped and fc3 is annotated but not wrapped."""
    def __init__(self):
        super().__init__()
        self.sub1 = LinearReluModel()
        self.sub2 = QuantWrapper(TwoLayerLinearModel())
        self.sub2.qconfig = default_qconfig
        self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
        self.fc3.qconfig = default_qconfig
    def forward(self, x):
        x = self.sub1(x)
        x = self.sub2(x)
        x = self.fc3(x)
        return x
class InnerModule(torch.nn.Module):
    """Linear->ReLU->Linear->ReLU; fuse_modules() pairs each Linear with the
    ReLU that immediately follows it in registration order."""
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
        self.relu1 = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
        self.relu2 = torch.nn.ReLU()
    def forward(self, x):
        return self.relu2(self.fc2(self.relu1(self.fc1(x))))
    def fuse_modules(self):
        # collect [linear_name, relu_name] pairs of adjacent children, then fuse in place
        fusable_layers = []
        named_children = list(self.named_children())
        for idx, (current_name, layer) in enumerate(named_children):
            if isinstance(layer, torch.nn.Linear):
                if idx >= len(named_children) - 1:
                    # a trailing Linear has no follower to fuse with
                    break
                if isinstance(named_children[idx + 1][1], torch.nn.ReLU):
                    fusable_layers.append([current_name,
                                           named_children[idx + 1][0]])
        torch.quantization.fuse_modules(self, fusable_layers, inplace=True)
class FunctionalLinear(torch.nn.Module):
    """Linear via F.linear with plain tensors (not nn.Parameter) as weight/bias."""
    def __init__(self):
        super().__init__()
        self.weight = torch.rand((5, 5))
        self.bias = torch.zeros(5)
    def forward(self, x):
        return F.linear(x, self.weight, self.bias)
class SingleLayerFunctionalLinearModel(torch.nn.Module):
    """One FunctionalLinear layer, for functional-op quantization tests."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
    def forward(self, x):
        x = self.linear1(x)
        return x
class TwoLayerFunctionalLinearModel(torch.nn.Module):
    """Two stacked FunctionalLinear layers."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
        self.linear2 = FunctionalLinear()
    def forward(self, x):
        x = self.linear1(x)
        x = self.linear2(x)
        return x
class FunctionalLinearAddModel(torch.nn.Module):
    """FunctionalLinear -> scalar add -> FunctionalLinear."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
        self.linear2 = FunctionalLinear()
    def forward(self, x):
        x = self.linear1(x)
        x = torch.add(x, 5)
        x = self.linear2(x)
        return x
class FunctionalLinearReluModel(nn.Module):
    """FunctionalLinear followed by functional F.relu."""
    def __init__(self):
        super().__init__()
        self.linear = FunctionalLinear()
    def forward(self, x):
        x = self.linear(x)
        x = F.relu(x)
        return x
class FunctionalLinearReluLinearModel(nn.Module):
    """FunctionalLinear -> nn.ReLU -> FunctionalLinear."""
    def __init__(self):
        super().__init__()
        self.linear1 = FunctionalLinear()
        self.relu = nn.ReLU()
        self.linear2 = FunctionalLinear()
    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        return x
class FunctionalConv2d(torch.nn.Module):
    """Conv2d via F.conv2d with plain tensors as weight/bias (3->3 channels, kernel 3)."""
    def __init__(self):
        super().__init__()
        self.weight = torch.rand(3, 3, 3, 3)
        self.bias = torch.rand(3)
        self.stride = (1, 1)
        self.padding = (0, 0)
        self.dilation = (1, 1)
        self.groups = 1
    def forward(self, x):
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class SingleLayerFunctionalConvModel(torch.nn.Module):
    """One FunctionalConv2d layer."""
    def __init__(self):
        super().__init__()
        self.conv1 = FunctionalConv2d()
    def forward(self, x):
        x = self.conv1(x)
        return x
class TwoLayerFunctionalConvModel(torch.nn.Module):
    """Two stacked FunctionalConv2d layers."""
    def __init__(self):
        super().__init__()
        self.conv1 = FunctionalConv2d()
        self.conv2 = FunctionalConv2d()
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class FunctionalConvReluModel(nn.Module):
    """FunctionalConv2d followed by functional F.relu."""
    def __init__(self):
        super().__init__()
        self.conv = FunctionalConv2d()
    def forward(self, x):
        x = self.conv(x)
        x = F.relu(x)
        return x
class FunctionalConvReluConvModel(nn.Module):
    """FunctionalConv2d -> nn.ReLU -> FunctionalConv2d."""
    def __init__(self):
        super().__init__()
        self.conv1 = FunctionalConv2d()
        self.relu = nn.ReLU()
        self.conv2 = FunctionalConv2d()
    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        return x
class SkipQuantModel(torch.nn.Module):
    r"""We can skip quantization by explicitly
    setting qconfig of a submodule to None
    """
    def __init__(self):
        super().__init__()
        self.sub = InnerModule()
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
    def forward(self, x):
        return self.fc(self.sub(x))
    def fuse_modules(self):
        # delegate fusion to the inner submodule
        self.sub.fuse_modules()
class AnnotatedSkipQuantModel(torch.nn.Module):
    r"""We can skip quantization by explicitly
    setting qconfig of a submodule to None
    """
    def __init__(self, qengine):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig(qengine)
        self.sub = QuantWrapper(InnerModule())
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
        # don't quantize this fc
        self.fc.qconfig = None
    def forward(self, x):
        return self.fc(self.sub(x))
    def fuse_modules(self):
        # self.sub is a QuantWrapper; the real InnerModule lives at .module
        self.sub.module.fuse_modules()
class QuantStubModel(torch.nn.Module):
    r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
    """
    def __init__(self):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig("qnnpack")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
    def forward(self, x):
        x = self.quant(x)
        x = self.fc(x)
        return self.dequant(x)
class ManualLinearQATModel(torch.nn.Module):
    r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
    """
    def __init__(self, qengine):
        super().__init__()
        # QAT (fake-quant) config, not the post-training one
        self.qconfig = torch.quantization.get_default_qat_qconfig(qengine)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float)
    def forward(self, x):
        x = self.quant(x)
        x = self.fc1(x)
        x = self.fc2(x)
        return self.dequant(x)
class ManualConvLinearQATModel(torch.nn.Module):
    r"""A module with manually inserted `QuantStub` and `DeQuantStub`
    and contains both linear and conv modules
    """
    def __init__(self):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)
        self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)
    def forward(self, x):
        x = self.quant(x)
        x = self.conv(x)
        # flatten conv output to (batch, 64) for the linear layers
        x = x.view(-1, 64).contiguous()
        x = self.fc1(x)
        x = self.fc2(x)
        return self.dequant(x)
class SubModelForFusion(nn.Module):
    """Conv2d followed by BatchNorm2d; submodule used in fusion tests."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
        self.bn = nn.BatchNorm2d(2).to(dtype=torch.float)

    def forward(self, x):
        return self.bn(self.conv(x))
class SubModelWithoutFusion(nn.Module):
    """Conv2d followed by ReLU; submodule deliberately left unfused in fusion tests."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
        self.relu = nn.ReLU(inplace=False).to(dtype=torch.float)
    def forward(self, x):
        return self.relu(self.conv(x))
class ModelForFusion(nn.Module):
    """Mix of Conv1d/2d/3d + BN + ReLU branches and two submodules, exercising
    many fusion patterns at once; sub2 and fc are explicitly excluded from
    quantization via qconfig=None."""
    def __init__(self, qconfig):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float)
        self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
        self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
        self.sub1 = SubModelForFusion()
        self.sub2 = SubModelWithoutFusion()
        self.fc = nn.Linear(36, 10).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.qconfig = qconfig
        self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float)
        self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
        self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float)
        self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float)
        self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float)
        self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float)
        self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float)
        # don't quantize sub2
        self.sub2.qconfig = None
        self.fc.qconfig = None
    def forward(self, x):
        # 1d branch: squeeze to (N, C, L) for conv1d/bn1d, then restore the dim
        x = x.squeeze(2)
        x = self.quant(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu4(x)
        x = x.unsqueeze(2)
        # y takes the 3d branch (conv3d/bn3d); its result is computed but not returned
        y = x.unsqueeze(2)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.sub1(x)
        x = self.dequant(x)
        x = self.sub2(x)
        x = x.view(-1, 36).contiguous()
        x = self.fc(x)
        y = self.conv2(y)
        y = self.relu2(y)
        y = self.bn2(y)
        y = self.relu3(y)
        y = self.dequant(y)
        return x
class ConvBNReLU(nn.Sequential):
    """Sequential Conv2d(3->3, k1) + BatchNorm2d + ReLU block, fusable as a unit."""
    def __init__(self):
        super().__init__(
            nn.Conv2d(3, 3, 1, 1, bias=False),
            nn.BatchNorm2d(3),
            nn.ReLU(inplace=False)
        )
class ModelWithSequentialFusion(nn.Module):
    """Conv+ReLU, three ConvBNReLU blocks in a Sequential, a Linear+ReLU head,
    and an empty Sequential — for testing fusion inside nn.Sequential."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 3, 1)
        self.relu1 = nn.ReLU(inplace=False)
        layers = []
        for i in range(3):
            layers.append(ConvBNReLU())
        self.features = nn.Sequential(*layers)
        head = [nn.Linear(300, 10), nn.ReLU(inplace=False)]
        self.classifier = nn.Sequential(*head)
        # empty Sequential: edge case for the fusion pass
        self.seq = nn.Sequential()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.features(x)
        # flatten (N, 3, 10, 10) feature maps for the classifier
        x = torch.reshape(x, (-1, 3 * 10 * 10))
        x = self.classifier(x)
        x = self.seq(x)
        x = self.dequant(x)
        return x
class ModelForFusionWithBias(nn.Module):
    """Two conv+bn pairs (first also with ReLU) where the convs have bias=True,
    exercising bias handling in conv/bn fusion."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float)
        self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
        self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
        self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float)
        self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
    def forward(self, x):
        x = self.quant(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.dequant(x)
        return x
class ModelForLinearBNFusion(nn.Module):
    """Linear followed by BatchNorm1d; BN affine params are randomized so
    fusion correctness is not masked by default weight=1/bias=0."""
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(20, 10)
        self.bn = nn.BatchNorm1d(10)
        nn.init.uniform_(self.bn.weight)
        nn.init.uniform_(self.bn.bias)
    def forward(self, x):
        return self.bn(self.fc(x))
class DummyObserver(torch.nn.Module):
    """Observer stub: identity forward, fixed qparams (scale 1.0, zero point 0)."""
    def calculate_qparams(self):
        return 1.0, 0
    def forward(self, x):
        return x
class ModelWithFunctionals(torch.nn.Module):
    """Uses nnq.FloatFunctional for cat/add/add_relu so those ops can be quantized."""
    def __init__(self):
        super().__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
        # Tracing doesnt work yet for c10 ops with scalar inputs
        # https://github.com/pytorch/pytorch/issues/27097
        # self.my_scalar_add = nnq.FloatFunctional()
        # self.my_scalar_mul = nnq.FloatFunctional()
    def forward(self, x):
        y = self.mycat.cat([x, x, x])
        z = self.myadd.add(y, y)
        w = self.myadd_relu.add_relu(z, z)
        # Tracing doesnt work yet for c10 ops with scalar inputs
        # https://github.com/pytorch/pytorch/issues/27097
        # w = self.my_scalar_add.add_scalar(w, -0.5)
        # w = self.my_scalar_mul.mul_scalar(w, 0.5)
        return w
class ResNetBase(torch.nn.Module):
    """Minimal ResNet-style block: conv+bn+relu, identity skip added via
    FloatFunctional, then avgpool + fc; fuse_model() fuses conv1/bn1/relu1."""
    def __init__(self):
        super().__init__()
        norm_layer = nn.BatchNorm2d
        inplanes = 3
        self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
        self.bn1 = norm_layer(inplanes)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.downsample = torch.nn.Identity()
        self.myop = nn.quantized.FloatFunctional()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = torch.nn.Linear(inplanes, 1)
    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        identity = self.downsample(x)
        # quantizable residual add
        out = self.myop.add(out, identity)
        out = self.relu2(out)
        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        out = self.fc(out)
        return out
    def fuse_model(self):
        torch.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True)
class ModelMultipleOps(torch.nn.Module):
    """Covers many quantizable ops in one graph: conv, bn, relu, residual add,
    avgpool, max_pool2d, cat and a final fc."""
    def __init__(self):
        super().__init__()
        norm_layer = nn.BatchNorm2d
        inplanes = 3
        self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
        self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
        self.bn1 = norm_layer(inplanes)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.downsample = torch.nn.Identity()
        self.skip_add = nn.quantized.FloatFunctional()
        self.cat = nn.quantized.FloatFunctional()
        self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
        self.fc = nn.Linear(12, 6)
    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        identity = self.downsample(x)
        out = self.skip_add.add(out, identity)
        out = self.relu2(out)
        out = self.avgpool(out)
        out = self.conv2(out)
        out = torch.nn.functional.max_pool2d(out, 2, 2)
        out = self.cat.cat([out, out])
        # after pooling the map is (N, 3, 2, 2); cat doubles the batch dim rows
        out = out.reshape(-1, 3 * 2 * 2)
        out = self.fc(out)
        return out
# Model to ensure consistency of fake quant with true quant
# Average pooling and mean operations are not modelled
# accurately with fake-quant so this model does not
# contain those operations
class ModelMultipleOpsNoAvgPool(torch.nn.Module):
    """Variant of ModelMultipleOps without avgpool/mean, whose fake-quant
    behaviour differs from true quant (see comment above the class)."""
    def __init__(self):
        super().__init__()
        norm_layer = nn.BatchNorm2d
        inplanes = 3
        self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
        self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
        self.bn1 = norm_layer(inplanes)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.skip_add = nn.quantized.FloatFunctional()
        self.cat = nn.quantized.FloatFunctional()
        self.maxpool = nn.MaxPool2d((4, 4))
        self.fc = nn.Linear(12, 6)
    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        skip = self.conv2(x)
        out = self.skip_add.add(out, skip)
        out = self.relu2(out)
        out = self.maxpool(out)
        # NOTE(review): conv2 is applied a second time here (weight sharing with the
        # skip branch) — presumably intentional for this test; confirm before changing.
        out = self.conv2(out)
        out = torch.nn.functional.max_pool2d(out, 2, 2)
        out = self.cat.cat([out, out])
        out = out.reshape(-1, 3 * 2 * 2)
        out = self.fc(out)
        return out
class EmbeddingBagModule(torch.nn.Module):
    """Single EmbeddingBag (sum mode, include_last_offset=True) forwarding
    indices/offsets/per_sample_weights straight through."""
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
                                         include_last_offset=True, scale_grad_by_freq=False, mode='sum')
    def forward(self, indices, offsets, per_sample_weights):
        return self.emb(indices, offsets, per_sample_weights)
class EmbeddingModule(torch.nn.Module):
    """Single 10x12 Embedding lookup."""
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
    def forward(self, indices):
        return self.emb(indices)
class EmbeddingWithLinear(torch.nn.Module):
    """Embedding (float-qparams weight-only qconfig) alongside a Linear
    (default qconfig); forward returns both outputs as a tuple."""
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
        self.fc = torch.nn.Linear(5, 5)
        self.emb.qconfig = float_qparams_weight_only_qconfig
        self.qconfig = default_qconfig
    def forward(self, indices, linear_in):
        return self.emb(indices), self.fc(linear_in)
class DenseTopMLP(nn.Module):
    """Dense-feature MLP whose output is concatenated with sparse features and
    fed through a two-layer top MLP."""

    def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None:
        super().__init__()
        self.dense_mlp = nn.Sequential(
            nn.Linear(dense_dim, dense_out),
        )
        self.top_mlp = nn.Sequential(
            nn.Linear(dense_out + embedding_dim, top_out_in),
            nn.Linear(top_out_in, top_out_out),
        )

    def forward(
        self,
        sparse_feature: torch.Tensor,
        dense: torch.Tensor,
    ) -> torch.Tensor:
        # dense features first, then sparse, along the feature axis
        combined = torch.cat([self.dense_mlp(dense), sparse_feature], dim=1)
        return self.top_mlp(combined)
# thin wrapper around embedding bag, because tracing inside nn.Embedding
# bag is not supported at the moment and this is top level
class EmbBagWrapper(nn.Module):
    """Thin top-level wrapper around nn.EmbeddingBag (sum mode); see comment above."""
    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum')
    def forward(self, indices, offsets):
        return self.emb_bag(indices, offsets)
class SparseNNModel(nn.Module):
    """Tiny sparse+dense recommendation-style model: an embedding-bag branch
    whose output feeds DenseTopMLP together with dense features."""
    # fixed toy dimensions for the sparse and dense towers
    _NUM_EMBEDDINGS = 10
    _EMBEDDING_DIM = 5
    _DENSE_DIM = 4
    _DENSE_OUTPUT = 2
    _TOP_OUT_IN = 2
    _TOP_OUT_OUT = 2
    _TOP_MLP_DIM = 1
    def __init__(self) -> None:
        super(SparseNNModel, self).__init__()
        self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM)
        self.dense_top = DenseTopMLP(
            self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN,
            self._TOP_OUT_OUT)
    def forward(
        self,
        sparse_indices: torch.Tensor,
        sparse_offsets: torch.Tensor,
        dense: torch.Tensor,
    ) -> torch.Tensor:
        sparse_feature = self.model_sparse(sparse_indices, sparse_offsets)
        out = self.dense_top(sparse_feature, dense)
        return out
| 37.606549 | 130 | 0.599767 |
a677836218d9caa0db62c03723151ef0a4cfcec5 | 2,779 | py | Python | bentoml/frameworks/sklearn.py | alvarogg777/BentoML | 2bb297dca0330228c27b14aeeba0742820c6f0ef | [
"Apache-2.0"
] | null | null | null | bentoml/frameworks/sklearn.py | alvarogg777/BentoML | 2bb297dca0330228c27b14aeeba0742820c6f0ef | [
"Apache-2.0"
] | null | null | null | bentoml/frameworks/sklearn.py | alvarogg777/BentoML | 2bb297dca0330228c27b14aeeba0742820c6f0ef | [
"Apache-2.0"
] | null | null | null | import os
from bentoml.service.env import BentoServiceEnv
from bentoml.exceptions import MissingDependencyException
from bentoml.service.artifacts import BentoServiceArtifact
def _import_joblib_module():
    """Return the joblib module, trying the standalone package first and the
    legacy sklearn.externals.joblib fallback second.

    Raises:
        MissingDependencyException: if neither import succeeds.
    """
    try:
        import joblib
    except ImportError:
        joblib = None
    if joblib is None:
        try:
            # legacy location, removed in scikit-learn >= 0.23
            from sklearn.externals import joblib
        except ImportError:
            pass
    if joblib is None:
        raise MissingDependencyException(
            "sklearn module is required to use SklearnModelArtifact"
        )
    return joblib
class SklearnModelArtifact(BentoServiceArtifact):
    """
    Abstraction for saving/loading scikit learn models using sklearn.externals.joblib
    Args:
        name (str): Name for the artifact
        pickle_extension (str): The extension format for pickled file
    Raises:
        MissingDependencyException: sklearn package is required for SklearnModelArtifact
    Example usage:
    >>> from sklearn import svm
    >>>
    >>> model_to_save = svm.SVC(gamma='scale')
    >>> # ... training model, etc.
    >>>
    >>> import bentoml
    >>> from bentoml.frameworks.sklearn import SklearnModelArtifact
    >>> from bentoml.adapters import DataframeInput
    >>>
    >>> @bentoml.env(infer_pip_packages=True)
    >>> @bentoml.artifacts([SklearnModelArtifact('model')])
    >>> class SklearnModelService(bentoml.BentoService):
    >>>
    >>>     @bentoml.api(input=DataframeInput(), batch=True)
    >>>     def predict(self, df):
    >>>         result = self.artifacts.model.predict(df)
    >>>         return result
    >>>
    >>> svc = SklearnModelService()
    >>>
    >>> # Pack directly with sklearn model object
    >>> svc.pack('model', model_to_save)
    >>> svc.save()
    """
    def __init__(self, name, pickle_extension=".pkl"):
        super(SklearnModelArtifact, self).__init__(name)
        self._pickle_extension = pickle_extension
        self._model = None
    def _model_file_path(self, base_path):
        # e.g. <base_path>/<name>.pkl
        return os.path.join(base_path, self.name + self._pickle_extension)
    def pack(self, sklearn_model, metadata=None):  # pylint:disable=arguments-differ
        """Store the in-memory sklearn model on this artifact; returns self."""
        self._model = sklearn_model
        return self
    def load(self, path):
        """Load the pickled model from `path` (memory-mapped, read-only) and pack it."""
        joblib = _import_joblib_module()
        model_file_path = self._model_file_path(path)
        sklearn_model = joblib.load(model_file_path, mmap_mode='r')
        return self.pack(sklearn_model)
    def get(self):
        """Return the packed sklearn model (None if nothing packed/loaded yet)."""
        return self._model
    def save(self, dst):
        """Pickle the packed model into directory `dst` via joblib."""
        joblib = _import_joblib_module()
        joblib.dump(self._model, self._model_file_path(dst))
    def set_dependencies(self, env: BentoServiceEnv):
        # declare scikit-learn as a pip dependency when auto-inference is enabled
        if env._infer_pip_packages:
            env.add_pip_packages(['scikit-learn'])
| 28.357143 | 88 | 0.65923 |
5fc30670d5a556a4faec02e0528c8cb8d0113c53 | 9,037 | py | Python | src/utils/check-pr/check-pr.py | stishkin/onefuzz | eca88cb35f60c30fe7a6dbfbc436be0f7ddc36c9 | [
"MIT"
] | null | null | null | src/utils/check-pr/check-pr.py | stishkin/onefuzz | eca88cb35f60c30fe7a6dbfbc436be0f7ddc36c9 | [
"MIT"
] | null | null | null | src/utils/check-pr/check-pr.py | stishkin/onefuzz | eca88cb35f60c30fe7a6dbfbc436be0f7ddc36c9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import os
import subprocess
import tempfile
import time
import uuid
from typing import List, Optional
from cleanup_ad import delete_current_user_app_registrations
from .github_client import GithubClient
def venv_path(base: str, name: str) -> str:
    """Locate executable `name` inside virtualenv `base`.

    Checks POSIX (`bin/`) and Windows (`Scripts/`) layouts, with and without
    an `.exe` suffix. Raises if no candidate exists.
    """
    for subdir in ("bin", "Scripts"):
        for ext in ("", ".exe"):
            candidate = os.path.join(base, subdir, name) + ext
            if os.path.exists(candidate):
                return candidate
    raise Exception("missing venv")
class Deployer:
    """Deploys a OneFuzz build (from a PR or branch) to a fresh instance,
    optionally registers a CLI service principal, runs integration tests,
    and cleans up the resource group afterwards."""

    def __init__(
        self,
        *,
        pr: int,
        branch: str,
        instance: str,
        region: str,
        subscription_id: Optional[str],
        authority: Optional[str],
        skip_tests: bool,
        test_args: List[str],
        repo: str,
        unattended: bool,
    ):
        self.downloader = GithubClient()
        self.pr = pr
        self.branch = branch
        self.instance = instance
        self.region = region
        self.subscription_id = subscription_id
        self.skip_tests = skip_tests
        self.test_args = test_args or []
        self.repo = repo
        self.unattended = unattended
        # populated by register() when running unattended
        self.client_id: Optional[str] = None
        self.client_secret: Optional[str] = None
        self.authority = authority

    def merge(self) -> None:
        """Merge the PR under test (no-op when deploying from a branch)."""
        if self.pr:
            self.downloader.merge_pr(self.branch, self.pr)

    def deploy(self, filename: str) -> None:
        """Unpack the release artifact `filename` and deploy it to the instance.

        Creates a throwaway venv, installs the deployment prerequisites, and
        runs deploy.py against the target region/instance.
        """
        print(f"deploying {filename} to {self.instance}")
        venv = "deploy-venv"
        subprocess.check_call(f"python -mvenv {venv}", shell=True)
        pip = venv_path(venv, "pip")
        py = venv_path(venv, "python")
        config = os.path.join(os.getcwd(), "config.json")

        commands = [
            # BUGFIX: previously the literal string "(unknown)" was embedded
            # here instead of the artifact filename, so extraction could not work.
            ("extracting release-artifacts", f"unzip -qq {filename}"),
            ("extracting deployment", "unzip -qq onefuzz-deployment*.zip"),
            ("installing wheel", f"{pip} install -q wheel"),
            ("installing prereqs", f"{pip} install -q -r requirements.txt"),
            (
                "running deploment",
                (
                    f"{py} deploy.py {self.region} "
                    f"{self.instance} {self.instance} cicd {config}"
                    f" {' --subscription_id ' + self.subscription_id if self.subscription_id else ''}"
                ),
            ),
        ]
        for (msg, cmd) in commands:
            print(msg)
            subprocess.check_call(cmd, shell=True)

        if self.unattended:
            self.register()

    def register(self) -> None:
        """Create a CLI service-principal registration for the instance and
        capture its client_id/client_secret from the registration output."""
        sp_name = "sp_" + self.instance
        print(f"registering {sp_name} to {self.instance}")
        venv = "deploy-venv"
        pip = venv_path(venv, "pip")
        py = venv_path(venv, "python")

        az_cmd = ["az", "account", "show", "--query", "id", "-o", "tsv"]
        subscription_id = subprocess.check_output(az_cmd, encoding="UTF-8")
        subscription_id = subscription_id.strip()

        commands = [
            ("installing prereqs", f"{pip} install -q -r requirements.txt"),
            (
                "running cli registration",
                (
                    f"{py} ./deploylib/registration.py create_cli_registration "
                    f"{self.instance} {subscription_id}"
                    f" --registration_name {sp_name}"
                ),
            ),
        ]
        for (msg, cmd) in commands:
            print(msg)
            output = subprocess.check_output(cmd, shell=True, encoding="UTF-8")
            if "client_id" in output:
                # parse "client_id: <id>" / "client_secret: <secret>" lines
                output_list = output.split("\n")
                for line in output_list:
                    if "client_id" in line:
                        line_list = line.split(":")
                        client_id = line_list[1].strip()
                        self.client_id = client_id
                        print(("client_id: " + client_id))
                    if "client_secret" in line:
                        line_list = line.split(":")
                        client_secret = line_list[1].strip()
                        self.client_secret = client_secret
        # wait for AAD propagation of the new registration before first use
        time.sleep(30)
        return

    def test(self, filename: str) -> None:
        """Unpack the integration-test artifact `filename` and run the
        integration test suite against the deployed instance."""
        venv = "test-venv"
        subprocess.check_call(f"python -mvenv {venv}", shell=True)
        py = venv_path(venv, "python")
        test_dir = "integration-test-artifacts"
        script = "integration-test.py"
        endpoint = f"https://{self.instance}.azurewebsites.net"
        test_args = " ".join(self.test_args)
        unattended_args = (
            f"--client_id {self.client_id} --client_secret {self.client_secret}"
            if self.unattended
            else ""
        )
        authority_args = f"--authority {self.authority}" if self.authority else ""

        commands = [
            (
                "extracting integration-test-artifacts",
                # BUGFIX: previously the literal string "(unknown)" was embedded
                # here instead of the artifact filename.
                f"unzip -qq {filename} -d {test_dir}",
            ),
            ("test venv", f"python -mvenv {venv}"),
            ("installing wheel", f"./{venv}/bin/pip install -q wheel"),
            ("installing sdk", f"./{venv}/bin/pip install -q sdk/*.whl"),
            (
                "running integration",
                (
                    f"{py} {test_dir}/{script} test {test_dir} "
                    f"--region {self.region} --endpoint {endpoint} "
                    f"{authority_args} "
                    f"{unattended_args} {test_args}"
                ),
            ),
        ]
        for (msg, cmd) in commands:
            print(msg)
            print(cmd)
            subprocess.check_call(cmd, shell=True)

    def cleanup(self, skip: bool) -> None:
        """Delete the instance resource group and its AD app registrations.

        Always chdirs out of the working directory first; deletion is skipped
        when `skip` is True.
        """
        os.chdir(tempfile.gettempdir())
        if skip:
            return

        cmd = ["az", "group", "delete", "-n", self.instance, "--yes", "--no-wait"]
        print(cmd)
        subprocess.call(cmd)

        delete_current_user_app_registrations(self.instance)
        print("done")

    def run(self, *, merge_on_success: bool = False) -> None:
        """Download artifacts, deploy, test (unless skipped), and optionally
        merge the PR when everything succeeded."""
        release_filename = "release-artifacts.zip"
        self.downloader.get_artifact(
            self.repo,
            "ci.yml",
            self.branch,
            self.pr,
            "release-artifacts",
            release_filename,
        )

        test_filename = "integration-test-artifacts.zip"
        self.downloader.get_artifact(
            self.repo,
            "ci.yml",
            self.branch,
            self.pr,
            "integration-test-artifacts",
            test_filename,
        )

        self.deploy(release_filename)

        if not self.skip_tests:
            self.test(test_filename)

        if merge_on_success:
            self.merge()
def main() -> None:
    """CLI entry point: deploy a branch/PR build to a fresh instance, run
    the integration tests, and tear everything down afterwards."""
    # Get a name that can be added to the resource group name
    # to make it easy to identify the owner.
    nickname_cmd = [
        "az", "ad", "signed-in-user", "show",
        "--query", "mailNickname", "-o", "tsv",
    ]
    # az terminates its output with a newline, which we strip out.
    nickname = subprocess.check_output(nickname_cmd, encoding="UTF-8").strip()
    default_instance = f"pr-check-{nickname}-{uuid.uuid4().hex}"

    parser = argparse.ArgumentParser()
    parser.add_argument("--instance", default=default_instance)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--branch")
    group.add_argument("--pr", type=int)
    parser.add_argument("--repo", default="microsoft/onefuzz")
    parser.add_argument("--region", default="eastus2")
    parser.add_argument("--skip-tests", action="store_true")
    parser.add_argument("--skip-cleanup", action="store_true")
    parser.add_argument("--skip-cleanup-on-failure", action="store_true")
    parser.add_argument("--merge-on-success", action="store_true")
    parser.add_argument("--subscription_id")
    parser.add_argument("--authority", default=None)
    parser.add_argument("--test_args", nargs=argparse.REMAINDER)
    parser.add_argument("--unattended", action="store_true")
    args = parser.parse_args()

    if not (args.branch or args.pr):
        raise Exception("--branch or --pr is required")

    deployer = Deployer(
        branch=args.branch,
        pr=args.pr,
        instance=args.instance,
        region=args.region,
        subscription_id=args.subscription_id,
        skip_tests=args.skip_tests,
        test_args=args.test_args,
        repo=args.repo,
        unattended=args.unattended,
        authority=args.authority,
    )
    with tempfile.TemporaryDirectory() as directory:
        os.chdir(directory)
        print(f"running from within {directory}")
        try:
            deployer.run(merge_on_success=args.merge_on_success)
            deployer.cleanup(args.skip_cleanup)
            return
        finally:
            # On failure (or after the early return above), clean up unless
            # the user asked to keep failed deployments around.
            if not args.skip_cleanup_on_failure:
                deployer.cleanup(args.skip_cleanup)
            os.chdir(tempfile.gettempdir())
# Script entry point.
if __name__ == "__main__":
    main()
| 33.224265 | 102 | 0.556269 |
7b840fb3a7d0f710ca637bddc658df2ae4e68ce0 | 10,410 | py | Python | src/tngsdk/package/tests/test_packager_onap.py | nandoooo/tng-sdk-package | ef1b6b4f6f7df95a7014f264437c4b2d14c1c1e9 | [
"Apache-2.0"
] | 7 | 2018-02-13T11:45:56.000Z | 2021-03-01T04:38:22.000Z | src/tngsdk/package/tests/test_packager_onap.py | nandoooo/tng-sdk-package | ef1b6b4f6f7df95a7014f264437c4b2d14c1c1e9 | [
"Apache-2.0"
] | 68 | 2018-02-13T12:26:04.000Z | 2019-08-21T08:00:19.000Z | src/tngsdk/package/tests/test_packager_onap.py | nandoooo/tng-sdk-package | ef1b6b4f6f7df95a7014f264437c4b2d14c1c1e9 | [
"Apache-2.0"
] | 8 | 2018-01-29T14:31:51.000Z | 2020-06-30T10:29:02.000Z | import unittest
import tempfile
import os
import zipfile
import yaml
from tngsdk.package.tests.fixtures import misc_file
from tngsdk.package.packager import PM
from tngsdk.package.packager.packager import NapdRecord
from tngsdk.package.packager.onap_packager import OnapPackager, OnapPackage,\
OnapPackageSet
from tngsdk.package.cli import parse_args
class TngSdkPackageOnapPackager(unittest.TestCase):
    """Tests for the ONAP packager: end-to-end CSAR packaging plus
    TOSCA-meta and ETSI manifest generation."""

    def setUp(self):
        # Keep NamedTemporaryFile handles alive here so the backing files
        # persist on disk for the duration of each test.
        self.tmp_files = []

    def reset_tmp_files(self):
        # Drop references to previously created temp files.
        self.tmp_files = []

    def substring_in_list(self, substr, L):
        # True if any element of L contains substr.
        for element in L:
            if substr in element:
                return True
        return False

    def test_do_package(self):
        # prepare test
        project = misc_file("mixed-ns-project")
        output = tempfile.mkdtemp()
        args = parse_args(["--format", "eu.lf.onap",
                           "-p", project,
                           "-o", output])
        p = PM.new_packager(args, pkg_format=args.pkg_format)
        # execute
        p._do_package()
        # Exactly one NSD and one VNFD CSAR are expected.
        packages = os.listdir(output)
        self.assertEqual(len(packages), 2)
        for package in packages:
            self.assertEqual(os.path.splitext(package)[1], ".csar")
        self.assertTrue(self.substring_in_list("onap_nsd", packages),
                        msg="onap_nsd not as substr in {}".format(packages))
        self.assertTrue(self.substring_in_list("onap_vnfd", packages),
                        msg="onap_vnfd not as substr in {}".format(packages))
        with open(os.path.join(project, "project.yml")) as f:
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated and unsafe on untrusted input; yaml.safe_load
            # would suffice for a plain project.yml — confirm and switch.
            pd = yaml.load(f)
        files = pd["files"]
        # Keep only the basenames of ONAP-tagged project files.
        files = [os.path.basename(file["path"]) for file in files
                 if "onap" in file["type"] or "lf.onap" in file["tags"]]
        nsd = None
        vnfd = None
        for file in files:
            if "nsd" in file:
                nsd = file
            if "vnfd" in file:
                vnfd = file
        files.remove(nsd)
        files.remove(vnfd)
        for package in packages:
            with zipfile.ZipFile(os.path.join(output, package)) as zf:
                names = zf.namelist()
                # Every remaining artifact must be present in each CSAR.
                for file in files:
                    self.assertTrue(self.substring_in_list(file, names),
                                    msg="{} not in {}".format(file, names))
                # The descriptor and its .mf manifest go into the matching
                # package only.
                if "nsd" in package:
                    self.assertIn(nsd, names)
                    self.assertIn(os.path.splitext(nsd)[0]+".mf", names)
                if "vnfd" in package:
                    self.assertIn(vnfd, names)
                    self.assertIn(os.path.splitext(vnfd)[0]+".mf", names)
                self.assertIn(os.path.join("TOSCA-Metadata", "TOSCA.meta"),
                              names)

    def test_pack_package_source_path(self):
        # Maps file tags to the in-CSAR target folder: no onap tag -> "",
        # plain lf.onap -> "Artifacts", onap-target:<path> -> <path>.
        inputs = [{"tags": []},
                  {"tags": ["lf.onap"]},
                  {"tags": ["lf.onap", "onap-target:new"]},
                  {"tags": ["lf.onap", "onap-target:new/bla/here"]}]
        outputs = ["", "Artifacts", "new", "new/bla/here"]
        args = parse_args([])
        p = OnapPackager(args)
        for inp, out in zip(inputs, outputs):
            self.assertEqual(p._pack_package_source_path(inp), out)

    def _create_tmp_file(self):
        # Create a temp file (kept alive via self.tmp_files) and return its path.
        self.tmp_files.append(tempfile.NamedTemporaryFile())
        return self.tmp_files[-1].name

    def create_test_OnapPackage(self, project_name, folders):
        # Build an OnapPackage fixture with 12 content entries spread
        # round-robin over the given target folders.
        package = OnapPackage({"filename": self._create_tmp_file()},
                              project_name=project_name, folders=folders)
        package.temp_dir = tempfile.mkdtemp()
        package.package_content = []
        for i in range(12):
            name = self._create_tmp_file()
            package.package_content.append(
                {"source": folders[i % len(folders)],
                 "_project_source": name,
                 "filename": name,
                 "hash": "hash_value_{}".format(str(i))}
            )
        return package

    def create_test_OnapPackageSet(self, project_name):
        # Build a package set fixture with one NSD and 12 VNFD packages.
        package_set = OnapPackageSet(NapdRecord())
        package_set.nsd = self.create_test_OnapPackage(
            project_name, OnapPackageSet.folders
        )
        package_set.vnfds = {
            "vnf{}".format(i):
            self.create_test_OnapPackage(
                project_name, OnapPackageSet.folders)
            for i in range(12)
        }
        return package_set

    def test_pack_packages(self):
        # prepare test
        args = parse_args(["--format", "eu.lf.onap"])
        args.package = ""
        p = PM.new_packager(args, pkg_format=args.pkg_format)
        project_name = "project"
        wd = tempfile.mkdtemp()
        self.reset_tmp_files()
        package_set = self.create_test_OnapPackageSet(project_name)
        p.attach_files(package_set)
        p.pack_packages(wd, package_set)
        # One .csar per package must exist in the working directory.
        for package in package_set.packages():
            package_path = os.path.join(
                wd, "{}.csar".format(package.package_name))
            self.assertTrue(os.path.exists(package_path),
                            msg=str((package_path, os.listdir(wd))))
        # Each VNFD CSAR must contain every target folder and every
        # attached content file.
        for vnf in package_set.vnfds.values():
            package_path = os.path.join(
                wd, "{}.csar".format(vnf.package_name))
            with zipfile.ZipFile(package_path) as f:
                member_names = f.namelist()
                for folder in OnapPackageSet.folders:
                    self.assertTrue(
                        self.substring_in_list(folder, member_names))
                file_members = list(
                    map(lambda member: os.path.basename(member), member_names)
                )
                for file in vnf.package_content:
                    filename = os.path.basename(file["filename"])
                    self.assertIn(filename, file_members)

    def test_generate_tosca_generate_etsi_mf(self):
        # Verify TOSCA.meta and ETSI manifest headers, first with an empty
        # package set (all None), then with populated metadata, for each
        # of the nsd/vnfd/pnfd content types.
        args = parse_args([])
        p = OnapPackager(args)
        package = OnapPackage({"filename": "test_file",
                               "content-type": "application/vnd.onap.nsd",
                               "source": "testdir",
                               "algorithm": "SHA-256",
                               "hash": "value1"})
        for i in range(12):
            package.package_content.append(
                {"filename": "test_file_pc" + str(i),
                 "source": "testdir_pc" + str(i),
                 "algorithm": "SHA-256",
                 "hash": "value" + str(i)}
            )
        package_set = OnapPackageSet(NapdRecord())
        tosca = p.generate_tosca(package, package_set)
        self.assertEqual(tosca[0], {"TOSCA-Meta-Version": "1.0",
                                    "CSAR-Version": "1.0",
                                    "Created-By": None,
                                    "Entry-Definitions": "test_file"})
        etsi_mf = p.generate_etsi_mf(package, package_set)
        self.assertEqual(etsi_mf[0], {"ns_product_name": None,
                                      "ns_provider_id": None,
                                      "ns_package_version": None,
                                      "ns_release_date_time": None})
        package.descriptor_file["content-type"] = "application/vnd.onap.vnfd"
        etsi_mf = p.generate_etsi_mf(package, package_set)
        self.assertEqual(etsi_mf[0], {"vnf_product_name": None,
                                      "vnf_provider_id": None,
                                      "vnf_package_version": None,
                                      "vnf_release_date_time": None})
        package.descriptor_file["content-type"] = "application/vnd.onap.pnfd"
        etsi_mf = p.generate_etsi_mf(package, package_set)
        self.assertEqual(etsi_mf[0], {"pnfd_name": None,
                                      "pnfd_provider": None,
                                      "pnfd_archive_version": None,
                                      "pnfd_release_date_time": None})
        package.descriptor_file["content-type"] = "application/vnd.onap.nsd"
        maintainer = "maintainer"
        name = "name"
        vendor = "vendor"
        version = "1.1"
        release_date_time = "2018_08_01"
        package_set.maintainer = maintainer
        package_set.name = name
        package_set.vendor = vendor
        package_set.version = version
        package_set.release_date_time = release_date_time
        tosca = p.generate_tosca(package, package_set)
        self.assertEqual(tosca[0], {"TOSCA-Meta-Version": "1.0",
                                    "CSAR-Version": "1.0",
                                    "Created-By": maintainer,
                                    "Entry-Definitions": "test_file"})
        etsi_mf = p.generate_etsi_mf(package, package_set)
        self.assertEqual(etsi_mf[0], {"ns_product_name": name,
                                      "ns_provider_id": vendor,
                                      "ns_package_version": version,
                                      "ns_release_date_time":
                                      release_date_time})
        package.descriptor_file["content-type"] = "application/vnd.onap.vnfd"
        etsi_mf = p.generate_etsi_mf(package, package_set)
        self.assertEqual(etsi_mf[0], {"vnf_product_name": name,
                                      "vnf_provider_id": vendor,
                                      "vnf_package_version": version,
                                      "vnf_release_date_time":
                                      release_date_time})
        package.descriptor_file["content-type"] = "application/vnd.onap.pnfd"
        etsi_mf = p.generate_etsi_mf(package, package_set)
        self.assertEqual(etsi_mf[0], {"pnfd_name": name,
                                      "pnfd_provider": vendor,
                                      "pnfd_archive_version": version,
                                      "pnfd_release_date_time":
                                      release_date_time})
        # Remaining manifest sections list each content file with its hash.
        for i, pc in enumerate(etsi_mf[2:]):
            self.assertEqual(pc, {"Source": "testdir_pc" + str(i) +
                                  "/test_file_pc" + str(i),
                                  "Algorithm": "SHA-256",
                                  "Hash": "value" + str(i)})
78374e1b3e98504f8756969f866dd2ccf71bfaa3 | 3,030 | py | Python | pyLipsum/main.py | MajorcaDevs/pyLipsum | 943ca3dfea2b26df970e485ad089acc62b2c0b9a | [
"MIT"
] | 1 | 2020-01-11T18:12:11.000Z | 2020-01-11T18:12:11.000Z | pyLipsum/main.py | RaulWhite/pyLipsum | 943ca3dfea2b26df970e485ad089acc62b2c0b9a | [
"MIT"
] | 3 | 2020-01-10T20:22:20.000Z | 2020-01-19T23:58:32.000Z | pyLipsum/main.py | MajorcaDevs/pyLipsum | 943ca3dfea2b26df970e485ad089acc62b2c0b9a | [
"MIT"
] | 1 | 2020-01-10T21:22:39.000Z | 2020-01-10T21:22:39.000Z | from random import choice, randint
import logging
# Import dictionaries
from dicts.chiquitoDict import chiquitoDict
from dicts.ipsumDict import ipsumDict
logging.basicConfig(format='\n%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def ipsum(dict: object, numParags: int, lorem: bool) -> None:
    """Print `numParags` paragraphs of pseudo-random filler text.

    Args:
        dict: dictionary object exposing `.start` (opening phrase, str) and
            `.array` (pool of words/phrases to sample from).
            NOTE(review): the original annotation was `classmethod`, which
            does not match how the argument is used; `object` is a hedge —
            a Protocol with `start`/`array` would be more precise.
        numParags: number of paragraphs to generate.
        lorem: if True, the text begins with `dict.start`.
    """
    ### Start
    output = ""  # Variable declaration
    if lorem:
        output = start = dict.start.strip()  # Trim spaces on start
    ### Paragraph loop
    for i in range(numParags):
        numPhrases = randint(5, 10)
        ### Phrase loop
        j = 0
        while j < numPhrases:
            numWords = randint(4, 12)
            ### "Word" or array item loop
            # k counts words emitted in the current phrase; multi-word
            # dictionary items advance it by more than one.
            k = 0
            while k < numWords:
                ### Check start
                if i == j == 0 and lorem:  # At first phrase
                    if start[-1:] == ".":
                        break  # If start is a phrase, generate 1 less
                    elif len(start.split()) > numWords:
                        if output[-1:] != ".":
                            output = output + "."
                        break  # If start has no period, and has more words than numWords, make it a phrase
                    elif k == 0:  # Check words on start, and generate number of words less on first random phrase
                        k += len(start.split())
                ### Random text
                word = choice(dict.array)  # Random array item
                if len(word.strip().split()) > 1:  # If dictionary item has more than 1 word, generate number less
                    k += (len(word.strip().split()) - 1)
                if word.strip()[-1:] == ".":
                    k = numWords  # If dictionary item is a phrase (has period), generate 1 phrase less
                    if output.strip()[-1:] != "." and output != "":
                        output = output + "."  # Punctuate aready generated paragraph output if necessary
                    j += 1  # Count phrase if there was more output generated
                if output.strip()[-1:] == "." or output == "":
                    word = word.capitalize()  # Capitalize every start of phrase
                if j < numPhrases:  # Check if phrase on item array is generating more phrases than expected
                    if output == "":  # Space every word and start of phrase, not start of paragraph
                        output = word
                    else:
                        output = output + " " + word
                k += 1
            ### Punctuation.
            if output.strip()[-1:] != ".":
                output = output + "."  # Dot every end of phrase
            ###Phrase loop
            j += 1
        ### Blank lines every paragraph
        if numParags > 1 and i < (numParags - 1):
            output = output + "\n"  # Separate paragraphs if more than one
        ### Print paragraph and clear output variable
        print(output)
        output = ""
# Demo entry point: five paragraphs seeded with the "chiquito" dictionary.
if __name__ == '__main__':
    ipsum(chiquitoDict, 5, True)
75adafebb74211f8c6f76867e4e852dff0ab5263 | 867 | py | Python | wikidata_panglaodb/similarity.py | jvfe/wikidata_panglaodb | a1dd854393c9c81229dcf639d62fb758cf145973 | [
"BSD-2-Clause"
] | 1 | 2020-11-12T21:28:34.000Z | 2020-11-12T21:28:34.000Z | wikidata_panglaodb/similarity.py | jvfe/wikidata_panglaodb | a1dd854393c9c81229dcf639d62fb758cf145973 | [
"BSD-2-Clause"
] | 2 | 2020-09-16T21:09:36.000Z | 2020-12-25T19:02:41.000Z | wikidata_panglaodb/similarity.py | jvfe/wikidata_panglaodb | a1dd854393c9c81229dcf639d62fb758cf145973 | [
"BSD-2-Clause"
] | null | null | null | """Similarity checking functions"""
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
def get_string_match(string1, string2):
    """Checks if the stemmed version of two strings is the same

    Sometimes matches from the reconciliation service return as false since
    the item has few statements or no statements at all. To take care of those
    cases we'll perform a simple string similarity check, using the stemmed
    version of both strings.

    Args:
        string1 (str): A string to compare.
        string2 (str): A string to compare.

    Returns:
        bool: If they match, return True, else return False.
    """
    ps = PorterStemmer()
    # NOTE: each input is stemmed as a single token — no word tokenization
    # is performed (word_tokenize is imported at module level but unused),
    # so multi-word strings are compared on their raw stemmed form.
    # Previously the stems were wrapped in singleton lists (via a
    # comprehension that shadowed its own name) before comparison; a
    # direct comparison is equivalent.
    return ps.stem(string1) == ps.stem(string2)
| 29.896552 | 83 | 0.696655 |
394e39bf85a75912913d3a8f0d2ecad08312f458 | 1,360 | py | Python | galaxy/main/migrations/0085_auto_20180328_1130.py | akaRem/galaxy | 567947171579fcdf7c0192316812ee0c59ccce6e | [
"Apache-2.0"
] | null | null | null | galaxy/main/migrations/0085_auto_20180328_1130.py | akaRem/galaxy | 567947171579fcdf7c0192316812ee0c59ccce6e | [
"Apache-2.0"
] | null | null | null | galaxy/main/migrations/0085_auto_20180328_1130.py | akaRem/galaxy | 567947171579fcdf7c0192316812ee0c59ccce6e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-28 15:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); keep field definitions
    # byte-identical to the generated output. Updates the choice sets of
    # Content.role_type and ContentType.name.

    dependencies = [
        ('main', '0084_content_block_update'),
    ]

    operations = [
        migrations.AlterField(
            model_name='content',
            name='role_type',
            field=models.CharField(choices=[('ANS', 'Ansible'), ('CON', 'Container Enabled'), ('APP', 'Container App'), ('DEM', 'Demo')], default=None, editable=False, max_length=3, null=True),
        ),
        migrations.AlterField(
            model_name='contenttype',
            name='name',
            field=models.CharField(choices=[('role', 'Role'), ('module', 'Module'), ('apb', 'Ansible Playbook Bundle'), ('action_plugin', 'Action Plugin'), ('cache_plugin', 'Cache Plugin'), ('callback_plugin', 'Callback Plugin'), ('cliconf_plugin', 'CLI Conf Plugin'), ('connection_plugin', 'Connection Plugin'), ('filter_plugin', 'Filter Plugin'), ('inventory_plugin', 'Inventory Plugin'), ('lookup_plugin', 'Lookup Plugin'), ('netconf_plugin', 'Netconf Plugin'), ('shell_plugin', 'Shell Plugin'), ('strategy_plugin', 'Strategy Plugin'), ('terminal_plugin', 'Terminal Plugin'), ('test_plugin', 'Test Plugin')], db_index=True, max_length=512, unique=True),
        ),
    ]
| 52.307692 | 656 | 0.641912 |
ed1cfa8d32e0712d1a1ff607fe8cbd7ae2f2066a | 149 | py | Python | main.py | nenkoru/okama | 1e202bc801aea8adaf4c2ad033cd51af0c957df5 | [
"MIT"
] | null | null | null | main.py | nenkoru/okama | 1e202bc801aea8adaf4c2ad033cd51af0c957df5 | [
"MIT"
] | null | null | null | main.py | nenkoru/okama | 1e202bc801aea8adaf4c2ad033cd51af0c957df5 | [
"MIT"
] | null | null | null | import okama as ok
# Build an efficient frontier for a stock/bond/gold asset mix priced in USD
# and print its most-diversified-portfolio points.
tickers = ['MCFTR.INDX', 'RGBITR.INDX', 'GC.COMM']
frontier = ok.EfficientFrontier(assets=tickers, ccy='USD', n_points=10)
print(frontier.mdp_points)
| 21.285714 | 60 | 0.697987 |
fd9981ed6b88bb7edffbbd376d4ac53bccc3bc83 | 19,917 | py | Python | mars/tensor/execution/tests/test_reduction_execute.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | 1 | 2018-12-26T08:37:04.000Z | 2018-12-26T08:37:04.000Z | mars/tensor/execution/tests/test_reduction_execute.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | null | null | null | mars/tensor/execution/tests/test_reduction_execute.py | lmatz/mars | 45f9166b54eb91b21e66cef8b590a41aa8ac9569 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars.tensor.expressions.datasource import ones, tensor
from mars.tensor.expressions.reduction import mean, nansum, nanmax, nanmin, nanmean, nanprod, nanargmax, \
nanargmin, nanvar, nanstd, count_nonzero, allclose, array_equal, var, std, nancumsum, nancumprod
class Test(unittest.TestCase):
    def setUp(self):
        # Every test runs against the in-process numpy executor.
        self.executor = Executor('numpy')
    def testSumProdExecution(self):
        # sum/prod (full and per-axis) on dense tensors and a sparse matrix.
        arr = ones((10, 8), chunks=3)
        self.assertEqual([80], self.executor.execute_tensor(arr.sum()))
        self.assertEqual((10,) * 8,
                         tuple(np.concatenate(self.executor.execute_tensor(arr.sum(axis=0)))))

        arr = ones((3, 3), chunks=2)
        self.assertEqual([512], self.executor.execute_tensor((arr * 2).prod()))
        self.assertEqual((8,) * 3,
                         tuple(np.concatenate(self.executor.execute_tensor((arr * 2).prod(axis=0)))))

        raw = sps.random(10, 20, density=.1)
        arr = tensor(raw, chunks=3)

        res = self.executor.execute_tensor(arr.sum())[0]

        self.assertAlmostEqual(res, raw.sum())
    def testMaxMinExecution(self):
        # max/min (full, per-axis, and multi-axis) on dense ints, then on a
        # sparse matrix.
        raw = np.random.randint(10000, size=(10, 10, 10))

        arr = tensor(raw, chunks=3)

        self.assertEqual([raw.max()], self.executor.execute_tensor(arr.max()))
        self.assertEqual([raw.min()], self.executor.execute_tensor(arr.min()))

        np.testing.assert_array_equal(
            raw.max(axis=0), self.executor.execute_tensor(arr.max(axis=0), concat=True)[0])
        np.testing.assert_array_equal(
            raw.min(axis=0), self.executor.execute_tensor(arr.min(axis=0), concat=True)[0])

        np.testing.assert_array_equal(
            raw.max(axis=(1, 2)), self.executor.execute_tensor(arr.max(axis=(1, 2)), concat=True)[0])
        np.testing.assert_array_equal(
            raw.min(axis=(1, 2)), self.executor.execute_tensor(arr.min(axis=(1, 2)), concat=True)[0])

        raw = sps.random(10, 10, density=.5)

        arr = tensor(raw, chunks=3)

        self.assertEqual([raw.max()], self.executor.execute_tensor(arr.max()))
        self.assertEqual([raw.min()], self.executor.execute_tensor(arr.min()))
def testAllAnyExecution(self):
raw1 = np.zeros((10, 15))
raw2 = np.ones((10, 15))
raw3 = np.array([[True, False, True, False], [True, True, True, True],
[False, False, False, False], [False, True, False, True]])
arr1 = tensor(raw1, chunks=3)
arr2 = tensor(raw2, chunks=3)
arr3 = tensor(raw3, chunks=4)
self.assertFalse(self.executor.execute_tensor(arr1.all())[0])
self.assertTrue(self.executor.execute_tensor(arr2.all())[0])
self.assertFalse(self.executor.execute_tensor(arr1.any())[0])
self.assertTrue(self.executor.execute_tensor(arr1.any()))
np.testing.assert_array_equal(raw3.all(axis=1),
self.executor.execute_tensor(arr3.all(axis=1))[0])
np.testing.assert_array_equal(raw3.any(axis=0),
self.executor.execute_tensor(arr3.any(axis=0))[0])
raw = sps.random(10, 10, density=.5) > .5
arr = tensor(raw, chunks=3)
self.assertEqual(raw.A.all(), self.executor.execute_tensor(arr.all())[0])
self.assertEqual(raw.A.any(), self.executor.execute_tensor(arr.any())[0])
    def testMeanExecution(self):
        # mean (full, per-axis, keepdims) on floats, ints, sparse input with
        # multiple chunk sizes, and a scalar.
        raw1 = np.random.random((20, 25))
        raw2 = np.random.randint(10, size=(20, 25))

        arr1 = tensor(raw1, chunks=3)

        res1 = self.executor.execute_tensor(arr1.mean())
        expected1 = raw1.mean()
        self.assertTrue(np.allclose(res1[0], expected1))

        res2 = self.executor.execute_tensor(arr1.mean(axis=0))
        expected2 = raw1.mean(axis=0)
        self.assertTrue(np.allclose(np.concatenate(res2), expected2))

        res3 = self.executor.execute_tensor(arr1.mean(axis=1, keepdims=True))
        expected3 = raw1.mean(axis=1, keepdims=True)
        self.assertTrue(np.allclose(np.concatenate(res3), expected3))

        arr2 = tensor(raw2, chunks=3)

        res1 = self.executor.execute_tensor(arr2.mean())
        expected1 = raw2.mean()
        self.assertEqual(res1[0], expected1)

        res2 = self.executor.execute_tensor(arr2.mean(axis=0))
        expected2 = raw2.mean(axis=0)
        self.assertTrue(np.allclose(np.concatenate(res2), expected2))

        res3 = self.executor.execute_tensor(arr2.mean(axis=1, keepdims=True))
        expected3 = raw2.mean(axis=1, keepdims=True)
        self.assertTrue(np.allclose(np.concatenate(res3), expected3))

        raw1 = sps.random(20, 25, density=.1)

        arr1 = tensor(raw1, chunks=3)

        res1 = self.executor.execute_tensor(arr1.mean())
        expected1 = raw1.mean()
        self.assertTrue(np.allclose(res1[0], expected1))

        arr2 = tensor(raw1, chunks=30)

        res1 = self.executor.execute_tensor(arr2.mean())
        expected1 = raw1.mean()
        self.assertTrue(np.allclose(res1[0], expected1))

        arr = mean(1)
        self.assertEqual(self.executor.execute_tensor(arr)[0], 1)
    def testVarExecution(self):
        # var (full, per-axis, keepdims, ddof) on floats, ints, sparse input,
        # and a scalar.
        raw1 = np.random.random((20, 25))
        raw2 = np.random.randint(10, size=(20, 25))

        arr1 = tensor(raw1, chunks=3)

        res1 = self.executor.execute_tensor(arr1.var())
        expected1 = raw1.var()
        self.assertTrue(np.allclose(res1[0], expected1))

        res2 = self.executor.execute_tensor(arr1.var(axis=0))
        expected2 = raw1.var(axis=0)
        self.assertTrue(np.allclose(np.concatenate(res2), expected2))

        res3 = self.executor.execute_tensor(arr1.var(axis=1, keepdims=True))
        expected3 = raw1.var(axis=1, keepdims=True)
        self.assertTrue(np.allclose(np.concatenate(res3), expected3))

        arr2 = tensor(raw2, chunks=3)

        res1 = self.executor.execute_tensor(arr2.var())
        expected1 = raw2.var()
        self.assertAlmostEqual(res1[0], expected1)

        res2 = self.executor.execute_tensor(arr2.var(axis=0))
        expected2 = raw2.var(axis=0)
        self.assertTrue(np.allclose(np.concatenate(res2), expected2))

        res3 = self.executor.execute_tensor(arr2.var(axis=1, keepdims=True))
        expected3 = raw2.var(axis=1, keepdims=True)
        self.assertTrue(np.allclose(np.concatenate(res3), expected3))

        res4 = self.executor.execute_tensor(arr2.var(ddof=1))
        expected4 = raw2.var(ddof=1)
        self.assertAlmostEqual(res4[0], expected4)

        raw1 = sps.random(20, 25, density=.1)

        arr1 = tensor(raw1, chunks=3)

        res1 = self.executor.execute_tensor(arr1.var())
        expected1 = raw1.toarray().var()
        self.assertTrue(np.allclose(res1[0], expected1))

        arr2 = tensor(raw1, chunks=30)

        res1 = self.executor.execute_tensor(arr2.var())
        expected1 = raw1.toarray().var()
        self.assertTrue(np.allclose(res1[0], expected1))

        arr = var(1)
        self.assertEqual(self.executor.execute_tensor(arr)[0], 0)
    def testStdExecution(self):
        # std (full, per-axis, keepdims, ddof) on floats, ints, sparse input,
        # and a scalar.
        raw1 = np.random.random((20, 25))
        raw2 = np.random.randint(10, size=(20, 25))

        arr1 = tensor(raw1, chunks=3)

        res1 = self.executor.execute_tensor(arr1.std())
        expected1 = raw1.std()
        self.assertTrue(np.allclose(res1[0], expected1))

        res2 = self.executor.execute_tensor(arr1.std(axis=0))
        expected2 = raw1.std(axis=0)
        self.assertTrue(np.allclose(np.concatenate(res2), expected2))

        res3 = self.executor.execute_tensor(arr1.std(axis=1, keepdims=True))
        expected3 = raw1.std(axis=1, keepdims=True)
        self.assertTrue(np.allclose(np.concatenate(res3), expected3))

        arr2 = tensor(raw2, chunks=3)

        res1 = self.executor.execute_tensor(arr2.std())
        expected1 = raw2.std()
        self.assertAlmostEqual(res1[0], expected1)

        res2 = self.executor.execute_tensor(arr2.std(axis=0))
        expected2 = raw2.std(axis=0)
        self.assertTrue(np.allclose(np.concatenate(res2), expected2))

        res3 = self.executor.execute_tensor(arr2.std(axis=1, keepdims=True))
        expected3 = raw2.std(axis=1, keepdims=True)
        self.assertTrue(np.allclose(np.concatenate(res3), expected3))

        res4 = self.executor.execute_tensor(arr2.std(ddof=1))
        expected4 = raw2.std(ddof=1)
        self.assertAlmostEqual(res4[0], expected4)

        raw1 = sps.random(20, 25, density=.1)

        arr1 = tensor(raw1, chunks=3)

        res1 = self.executor.execute_tensor(arr1.std())
        expected1 = raw1.toarray().std()
        self.assertTrue(np.allclose(res1[0], expected1))

        arr2 = tensor(raw1, chunks=30)

        res1 = self.executor.execute_tensor(arr2.std())
        expected1 = raw1.toarray().std()
        self.assertTrue(np.allclose(res1[0], expected1))

        arr = std(1)
        self.assertEqual(self.executor.execute_tensor(arr)[0], 0)
    def testArgReduction(self):
        # argmax/argmin (full and per-axis) on dense input, then on a sparse
        # matrix with one planted minimum (-1) and maximum (2).
        raw = np.random.random((20, 20, 20))

        arr = tensor(raw, chunks=3)

        self.assertEqual(raw.argmax(),
                         self.executor.execute_tensor(arr.argmax())[0])
        self.assertEqual(raw.argmin(),
                         self.executor.execute_tensor(arr.argmin())[0])

        np.testing.assert_array_equal(
            raw.argmax(axis=0), self.executor.execute_tensor(arr.argmax(axis=0), concat=True)[0])
        np.testing.assert_array_equal(
            raw.argmin(axis=0), self.executor.execute_tensor(arr.argmin(axis=0), concat=True)[0])

        raw_format = sps.random(20, 20, density=.1, format='lil')

        random_min = np.random.randint(0, 200)
        random_max = np.random.randint(200, 400)
        raw_format[np.unravel_index(random_min, raw_format.shape)] = -1
        raw_format[np.unravel_index(random_max, raw_format.shape)] = 2
        raw = raw_format.tocoo()

        arr = tensor(raw, chunks=3)

        self.assertEqual(raw.argmax(),
                         self.executor.execute_tensor(arr.argmax())[0])
        self.assertEqual(raw.argmin(),
                         self.executor.execute_tensor(arr.argmin())[0])
    def testNanReduction(self):
        # nan-aware reductions on NaN-laced dense input (two chunk sizes),
        # nanargmin/max, all-NaN edge cases, sparse input, and a scalar.
        raw = np.random.choice(a=[0, 1, np.nan], size=(10, 10), p=[0.3, 0.4, 0.3])

        arr = tensor(raw, chunks=3)

        self.assertEqual(np.nansum(raw), self.executor.execute_tensor(nansum(arr))[0])
        self.assertEqual(np.nanprod(raw), self.executor.execute_tensor(nanprod(arr))[0])
        self.assertEqual(np.nanmax(raw), self.executor.execute_tensor(nanmax(arr))[0])
        self.assertEqual(np.nanmin(raw), self.executor.execute_tensor(nanmin(arr))[0])
        self.assertEqual(np.nanmean(raw), self.executor.execute_tensor(nanmean(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw), self.executor.execute_tensor(nanvar(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw, ddof=1), self.executor.execute_tensor(nanvar(arr, ddof=1))[0])
        self.assertAlmostEqual(np.nanstd(raw), self.executor.execute_tensor(nanstd(arr))[0])
        self.assertAlmostEqual(np.nanstd(raw, ddof=1), self.executor.execute_tensor(nanstd(arr, ddof=1))[0])

        arr = tensor(raw, chunks=10)

        self.assertEqual(np.nansum(raw), self.executor.execute_tensor(nansum(arr))[0])
        self.assertEqual(np.nanprod(raw), self.executor.execute_tensor(nanprod(arr))[0])
        self.assertEqual(np.nanmax(raw), self.executor.execute_tensor(nanmax(arr))[0])
        self.assertEqual(np.nanmin(raw), self.executor.execute_tensor(nanmin(arr))[0])
        self.assertEqual(np.nanmean(raw), self.executor.execute_tensor(nanmean(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw), self.executor.execute_tensor(nanvar(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw, ddof=1), self.executor.execute_tensor(nanvar(arr, ddof=1))[0])
        self.assertAlmostEqual(np.nanstd(raw), self.executor.execute_tensor(nanstd(arr))[0])
        self.assertAlmostEqual(np.nanstd(raw, ddof=1), self.executor.execute_tensor(nanstd(arr, ddof=1))[0])

        raw = np.random.random((10, 10))
        raw[:3, :3] = np.nan
        arr = tensor(raw, chunks=3)
        self.assertEqual(np.nanargmin(raw), self.executor.execute_tensor(nanargmin(arr))[0])
        self.assertEqual(np.nanargmax(raw), self.executor.execute_tensor(nanargmax(arr))[0])

        # All-NaN input: sum -> 0, prod -> 1, max/min/mean -> NaN,
        # nanarg* raises ValueError.
        raw = np.full((10, 10), np.nan)
        arr = tensor(raw, chunks=3)

        self.assertEqual(0, self.executor.execute_tensor(nansum(arr))[0])
        self.assertEqual(1, self.executor.execute_tensor(nanprod(arr))[0])
        self.assertTrue(np.isnan(self.executor.execute_tensor(nanmax(arr))[0]))
        self.assertTrue(np.isnan(self.executor.execute_tensor(nanmin(arr))[0]))
        self.assertTrue(np.isnan(self.executor.execute_tensor(nanmean(arr))[0]))
        self.assertRaises(ValueError, lambda: self.executor.execute_tensor(nanargmin(arr))[0])
        self.assertRaises(ValueError, lambda: self.executor.execute_tensor(nanargmax(arr))[0])

        raw = sps.random(10, 10, density=.1, format='csr')
        raw[:3, :3] = np.nan
        arr = tensor(raw, chunks=3)

        self.assertAlmostEqual(np.nansum(raw.A), self.executor.execute_tensor(nansum(arr))[0])
        self.assertAlmostEqual(np.nanprod(raw.A), self.executor.execute_tensor(nanprod(arr))[0])
        self.assertAlmostEqual(np.nanmax(raw.A), self.executor.execute_tensor(nanmax(arr))[0])
        self.assertAlmostEqual(np.nanmin(raw.A), self.executor.execute_tensor(nanmin(arr))[0])
        self.assertAlmostEqual(np.nanmean(raw.A), self.executor.execute_tensor(nanmean(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw.A), self.executor.execute_tensor(nanvar(arr))[0])
        self.assertAlmostEqual(np.nanvar(raw.A, ddof=1), self.executor.execute_tensor(nanvar(arr, ddof=1))[0])
        self.assertAlmostEqual(np.nanstd(raw.A), self.executor.execute_tensor(nanstd(arr))[0])
        self.assertAlmostEqual(np.nanstd(raw.A, ddof=1), self.executor.execute_tensor(nanstd(arr, ddof=1))[0])

        arr = nansum(1)
        self.assertEqual(self.executor.execute_tensor(arr)[0], 1)
    def testCumReduction(self):
        # cumsum/cumprod along an axis on dense and sparse input.
        raw = np.random.randint(5, size=(8, 8, 8))

        arr = tensor(raw, chunks=3)

        res1 = self.executor.execute_tensor(arr.cumsum(axis=1), concat=True)
        res2 = self.executor.execute_tensor(arr.cumprod(axis=1), concat=True)
        expected1 = raw.cumsum(axis=1)
        expected2 = raw.cumprod(axis=1)
        np.testing.assert_array_equal(res1[0], expected1)
        np.testing.assert_array_equal(res2[0], expected2)

        raw = sps.random(8, 8, density=.1)

        arr = tensor(raw, chunks=3)

        res1 = self.executor.execute_tensor(arr.cumsum(axis=1), concat=True)
        res2 = self.executor.execute_tensor(arr.cumprod(axis=1), concat=True)
        expected1 = raw.A.cumsum(axis=1)
        expected2 = raw.A.cumprod(axis=1)
        self.assertTrue(np.allclose(res1[0], expected1))
        self.assertTrue(np.allclose(res2[0], expected2))
    def testNanCumReduction(self):
        # nancumsum/nancumprod on NaN-laced dense and sparse input.
        raw = np.random.randint(5, size=(8, 8, 8))
        raw[:2, 2:4, 4:6] = np.nan

        arr = tensor(raw, chunks=3)

        res1 = self.executor.execute_tensor(nancumsum(arr, axis=1), concat=True)
        res2 = self.executor.execute_tensor(nancumprod(arr, axis=1), concat=True)
        expected1 = np.nancumsum(raw, axis=1)
        expected2 = np.nancumprod(raw, axis=1)
        np.testing.assert_array_equal(res1[0], expected1)
        np.testing.assert_array_equal(res2[0], expected2)

        raw = sps.random(8, 8, density=.1, format='lil')
        raw[:2, 2:4] = np.nan

        arr = tensor(raw, chunks=3)

        res1 = self.executor.execute_tensor(nancumsum(arr, axis=1), concat=True)[0]
        res2 = self.executor.execute_tensor(nancumprod(arr, axis=1), concat=True)[0]
        expected1 = np.nancumsum(raw.A, axis=1)
        expected2 = np.nancumprod(raw.A, axis=1)
        self.assertTrue(np.allclose(res1, expected1))
        self.assertTrue(np.allclose(res2, expected2))
    def testOutReductionExecution(self):
        # Reduction with an explicit `out` tensor receives the result.
        raw = np.random.randint(5, size=(8, 8, 8))

        arr = tensor(raw, chunks=3)
        arr2 = ones((8, 8), dtype='i8', chunks=3)
        arr.sum(axis=1, out=arr2)

        res = self.executor.execute_tensor(arr2, concat=True)[0]
        expected = raw.sum(axis=1)

        np.testing.assert_array_equal(res, expected)
def testOutCumReductionExecution(self):
raw = np.random.randint(5, size=(8, 8, 8))
arr = tensor(raw, chunks=3)
arr.cumsum(axis=0, out=arr)
res = self.executor.execute_tensor(arr, concat=True)[0]
expected = raw.cumsum(axis=0)
np.testing.assert_array_equal(res, expected)
def testCountNonzeroExecution(self):
raw = [[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]
arr = tensor(raw, chunks=2)
t = count_nonzero(arr)
res = self.executor.execute_tensor(t)[0]
expected = np.count_nonzero(raw)
np.testing.assert_equal(res, expected)
t = count_nonzero(arr, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.count_nonzero(raw, axis=0)
np.testing.assert_equal(res, expected)
t = count_nonzero(arr, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.count_nonzero(raw, axis=1)
np.testing.assert_equal(res, expected)
raw = sps.csr_matrix(raw)
arr = tensor(raw, chunks=2)
t = count_nonzero(arr)
res = self.executor.execute_tensor(t)[0]
expected = np.count_nonzero(raw.A)
np.testing.assert_equal(res, expected)
t = count_nonzero(arr, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.count_nonzero(raw.A, axis=0)
np.testing.assert_equal(res, expected)
t = count_nonzero(arr, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.count_nonzero(raw.A, axis=1)
np.testing.assert_equal(res, expected)
def testAllcloseExecution(self):
a = tensor([1e10, 1e-7], chunks=1)
b = tensor([1.00001e10, 1e-8], chunks=1)
t = allclose(a, b)
res = self.executor.execute_tensor(t)[0]
self.assertFalse(res)
a = tensor([1e10, 1e-8], chunks=1)
b = tensor([1.00001e10, 1e-9], chunks=1)
t = allclose(a, b)
res = self.executor.execute_tensor(t)[0]
self.assertTrue(res)
a = tensor([1.0, np.nan], chunks=1)
b = tensor([1.0, np.nan], chunks=1)
t = allclose(a, b, equal_nan=True)
res = self.executor.execute_tensor(t)[0]
self.assertTrue(res)
a = tensor(sps.csr_matrix([[1e10, 1e-7], [0, 0]]), chunks=1)
b = tensor(sps.csr_matrix([[1.00001e10, 1e-8], [0, 0]]), chunks=1)
t = allclose(a, b)
res = self.executor.execute_tensor(t)[0]
self.assertFalse(res)
def testArrayEqual(self):
a = ones((10, 5), chunks=1)
b = ones((10, 5), chunks=2)
c = array_equal(a, b)
res = bool(self.executor.execute_tensor(c)[0])
self.assertTrue(res)
| 39.754491 | 110 | 0.638199 |
5c4219bacb246d4bb9aff78a308c2750f63459a1 | 3,357 | py | Python | tests/objects/guild/test_guild.py | Arthurdw/Pincer | eebb8e8f4e7173ba37b8d3049c1d7de793776ed5 | [
"MIT"
] | 118 | 2021-08-30T15:00:47.000Z | 2022-03-31T11:06:16.000Z | tests/objects/guild/test_guild.py | Arthurdw/Pincer | eebb8e8f4e7173ba37b8d3049c1d7de793776ed5 | [
"MIT"
] | 343 | 2021-08-30T12:25:57.000Z | 2022-03-31T07:02:11.000Z | tests/objects/guild/test_guild.py | Arthurdw/Pincer | eebb8e8f4e7173ba37b8d3049c1d7de793776ed5 | [
"MIT"
] | 62 | 2021-08-31T22:30:20.000Z | 2022-03-25T18:29:11.000Z | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from pincer.objects import Guild, Emoji, Channel, Role
# Minimal raw guild payload (JSON-style, as Discord sends it: snowflake ids
# are strings here) used as deserialisation input by the test below.
FAKE_GUILD = {
    'id': '0',
    'name': 'test-server',
    'features': [],
    'emojis': [
        {'name': 'test-emoji',
         'roles': [],
         'id': '0',
         'require_colons': True,
         'managed': False,
         'animated': False,
         'available': True
         }
    ],
    'stickers': [],
    'owner_id': '0',
    'region': 'us-east',
    'afk_timeout': 300,
    'system_channel_id': '0',
    'widget_enabled': False,
    'widget_channel_id': '0',
    'verification_level': 0,
    'roles': [
        {'id': '0',
         'name': '@everyone',
         'permissions': '0',
         'position': 0,
         'color': 0,
         'hoist': False,
         'managed': False,
         'mentionable': False,
         }
    ],
    'default_message_notifications': 0,
    'mfa_level': 0,
    'explicit_content_filter': 0,
    'max_members': 250000,
    'max_video_channel_users': 25,
    'premium_tier': 0,
    'premium_subscription_count': 0,
    'system_channel_flags': 8,
    'preferred_locale': 'en-US',
    'premium_progress_bar_enabled': False,
    'nsfw': False,
    'nsfw_level': 0,
    'channels':
        [
            {'id': '0',
             'type': 4,
             'name': 'Text Channels',
             'position': 0,
             'guild_id': '0',
             'permission_overwrites': [],
             'nsfw': False
             },
        ]
}
class TestChannel:
    """Deserialisation test: ``Guild.from_dict`` must reproduce FAKE_GUILD."""

    @staticmethod
    def test_get():
        # Parse the raw payload, then compare field-by-field against an
        # explicitly constructed Guild (string snowflakes become ints).
        guild = Guild.from_dict(FAKE_GUILD)
        assert guild == Guild(
            id=0,
            name="test-server",
            features=[],
            emojis=[
                Emoji(
                    name="test-emoji",
                    roles=[],
                    id=0,
                    require_colons=True,
                    managed=False,
                    animated=False,
                    available=True
                )
            ],
            stickers=[],
            owner_id=0,
            region="us-east",
            afk_timeout=300,
            system_channel_id=0,
            widget_enabled=False,
            widget_channel_id=0,
            verification_level=0,
            roles=[
                Role(
                    id=0,
                    name="@everyone",
                    permissions=0,
                    position=0,
                    color=0,
                    hoist=False,
                    managed=False,
                    mentionable=False,
                )
            ],
            default_message_notifications=0,
            mfa_level=0,
            explicit_content_filter=0,
            max_members=250000,
            max_video_channel_users=25,
            premium_tier=0,
            premium_subscription_count=0,
            system_channel_flags=8,
            preferred_locale="en-US",
            premium_progress_bar_enabled=False,
            nsfw=False,
            nsfw_level=0,
            channels=[
                Channel(
                    id=0,
                    type=4,
                    name="Text Channels",
                    position=0,
                    guild_id=0,
                    permission_overwrites=[],
                    nsfw=False
                )
            ]
        )
| 25.625954 | 65 | 0.443551 |
6ecf24c086f49ce87a33a1a1ebfeabed59f9ae50 | 1,241 | py | Python | variational/utils.py | pytorchbearer/variational | c9f700e308211c52322bb424a8414ea11eab79a4 | [
"MIT"
] | 3 | 2019-06-05T09:24:33.000Z | 2019-08-13T00:53:40.000Z | variational/utils.py | pytorchbearer/variational | c9f700e308211c52322bb424a8414ea11eab79a4 | [
"MIT"
] | null | null | null | variational/utils.py | pytorchbearer/variational | c9f700e308211c52322bb424a8414ea11eab79a4 | [
"MIT"
] | null | null | null | import sys
if sys.version_info[0] >= 3:
    def set_doc(inner, doc):
        """Attach *doc* to *inner* as its ``__doc__``."""
        inner.__doc__ = doc
else:
    def set_doc(inner, doc):
        """Python 2 fallback: docstring rewriting is skipped (docs are built with Python 3+)."""
        return None
def cite(bibtex):
    """A decorator which adds a reference to the **Google style** docstring of the given object. The ``Args:`` or
    ``Returns:`` line is then prepended with the given bibtex string at runtime. Otherwise, the last line is used.

    Args:
        bibtex (str): The bibtex string to insert

    Returns:
        The decorator
    """
    def decorator(inner):
        doc = inner.__doc__.split('\n')
        i = 0  # index of the matched section header (or len(doc) if none)
        s = 0  # space count of the matched header line
        for line in doc:
            sline = line.strip()
            if sline == 'Args:' or sline == 'Returns:':
                # 'Args:'/'Returns:' contain no internal spaces, so this counts
                # exactly the header's leading indentation.
                for char in line:
                    if char == ' ':
                        s += 1
                break
            i += 1
        # reST literal block ('::') body indented one level (4 spaces) deeper
        spaces = ' ' * (s + 4)
        to_insert = ' ' * s + '::\n\n' + spaces
        to_insert += bibtex.strip().replace('\n', '\n' + spaces).rstrip()
        # inserting '' first and then to_insert at the same index leaves the
        # citation immediately before the section header, followed by a blank line
        doc.insert(i, '')
        doc.insert(i, to_insert)
        set_doc(inner, '\n'.join(doc))
        return inner
    return decorator
8a8dbbbefddebb4ef42febb92ddfd25f39ed2f4f | 81 | py | Python | tests/test_pymautic.py | danimaribeiro/py-mautic | 0f821fb8c356bc40581a331897ee2a406e2a6070 | [
"BSD-2-Clause"
] | 4 | 2016-05-16T23:24:56.000Z | 2018-03-19T09:04:40.000Z | tests/test_pymautic.py | danimaribeiro/py-mautic | 0f821fb8c356bc40581a331897ee2a406e2a6070 | [
"BSD-2-Clause"
] | null | null | null | tests/test_pymautic.py | danimaribeiro/py-mautic | 0f821fb8c356bc40581a331897ee2a406e2a6070 | [
"BSD-2-Clause"
] | null | null | null |
import pymautic
def test_main():
    """Smoke test: the package imported at module level is a truthy module object."""
    assert pymautic  # use your library here
| 11.571429 | 44 | 0.716049 |
0bc16d4f9423477a4b8111c3f7dd262716cd7569 | 3,919 | py | Python | sectors/module/file2ws.py | vantagecrypto/OV_Data_Bridge | b09f58e9c4664fa9842eead95b3e54a8027870e2 | [
"MIT"
] | null | null | null | sectors/module/file2ws.py | vantagecrypto/OV_Data_Bridge | b09f58e9c4664fa9842eead95b3e54a8027870e2 | [
"MIT"
] | null | null | null | sectors/module/file2ws.py | vantagecrypto/OV_Data_Bridge | b09f58e9c4664fa9842eead95b3e54a8027870e2 | [
"MIT"
] | null | null | null | import _thread as thread
import time
from datetime import datetime
import json
from . import common, log
from sectors.common import admin_config
from db.models import (
TBLBridge
)
class Bridge:
    """
    File to WebSocket Data Bridge

    Periodically downloads a remote file (in a background thread started by
    ``open``) and pushes changed content to WebSocket clients, keeping a
    bounded in-memory log cache that is mirrored to a persistent log.
    """

    def __init__(self, bridge_info):
        # bridge_info: dict-like bridge configuration; keys used here:
        # 'id', 'frequency', 'src_address', 'name', 'user_id'.
        self.bridge_info = bridge_info
        # None = not yet opened, True = running, False = closed
        self.connection_status = None
        self.connection_text = 'Waiting for connect'
        self.log = log.BridgeLog(bridge_info)
        self.cache = self.log.get_last_log()
        self.ws_id = f'{admin_config.BRIDGE_CONSUMER_PREFIX}_{bridge_info["id"]}'
        self.ws_clients = []
        # download period in seconds — presumably; TODO confirm units
        self.FILE_FREQUENCY = self.bridge_info['frequency']
        # last payload forwarded; used to diff/deduplicate in send_message
        self.prev_file_data = None

    def notify_event(self, event):
        """Dispatch a client add/remove event to the matching handler."""
        data = event['data']
        if event['type'] == 'on_add_ws_client':
            self.add_ws_client(data['group_name'])
        elif event['type'] == 'on_remove_ws_client':
            self.remove_ws_client(data['group_name'])

    def run_download(self):
        """Background loop: fetch the source file every FILE_FREQUENCY seconds.

        Runs until ``connection_status`` becomes falsy (see ``close``).
        The first fetch happens immediately (count == 0).
        """
        count = 0
        while True:
            if not self.connection_status:
                break
            if count == 0 or count >= self.FILE_FREQUENCY:
                count = 0
                self.add_cache(f"FILE:Download - {self.bridge_info['src_address']}")
                resp_data, status_code = common.get_remote_file_data(None, self.bridge_info)
                self.add_cache(f'FILE:Recv - {resp_data}')
                # forward only on non-error HTTP status
                if status_code < 300:
                    self.send_message(resp_data)
            time.sleep(1)
            count += 1

    def open(self):
        """Mark the bridge running and start the download thread."""
        self.connection_status = True
        self.connection_text = 'FILE:Open - Ready'
        thread.start_new_thread(self.run_download, ())
        self.add_cache(self.connection_text)

    def close_log(self):
        self.log.close()

    def close(self):
        """Signal the download loop to stop (it checks connection_status)."""
        self.connection_status = False
        self.connection_text = f'FILE:Closed'
        self.add_cache(self.connection_text)

    def is_connected(self):
        # Busy-wait until open()/close() has set a definite status.
        while self.connection_status is None:
            time.sleep(0.1)
        return self.connection_status

    def add_ws_client(self, ws_id):
        self.ws_clients.append(ws_id)

    def remove_ws_client(self, ws_id):
        if ws_id in self.ws_clients:
            self.ws_clients.remove(ws_id)

    def send_message(self, message):
        """Forward *message* to WS clients, skipping empty/unchanged payloads.

        Also increments the bridge's api_calls counter; a bridge with
        ``is_status == 1`` (out of funds) is skipped. Any exception is
        swallowed into the cache log.
        """
        try:
            if not message:
                self.add_cache(f'WS:Send - Ignored! - Empty Data!')
                return
            bridge = TBLBridge.objects.get(id=self.bridge_info['id'])
            if bridge.is_status == 1:
                self.add_cache(f'WS:Send - Ignored! - Out of Funds!')
                return
            new_message = message
            if self.prev_file_data:
                # send only the delta against the previous payload
                new_message = common.get_diff_lists(None, self.prev_file_data, message)
                if not new_message:
                    self.add_cache(f'WS:Send - Ignored! - Same Data!')
                    return
            self.add_cache(f'WS:Send - {new_message}')
            common.send_ws_message(self.ws_id, {'data': new_message})
            bridge.api_calls += 1
            bridge.save()
            # remember the full payload (not the diff) for the next comparison
            self.prev_file_data = message
        except Exception as e:
            self.add_cache(f'WS:Send - Exception - {e}')

    def add_cache(self, data):
        """Append a timestamped entry to the bounded cache and persistent log."""
        self.trace(data)
        if len(self.cache) > admin_config.LOCAL_CACHE_LIMIT:
            self.cache.pop(0)
        cache_data = {
            'date': datetime.utcnow().strftime('%m/%d/%Y, %H:%M:%S'),
            'data': data
        }
        self.cache.append(cache_data)
        self.log.write_log(json.dumps(cache_data))

    def get_cache(self):
        return self.cache

    def trace(self, trace_log):
        # Console debug output, gated by the global trace flag.
        if admin_config.TRACE_MODE:
            print(f"{datetime.utcnow()}: {self.bridge_info['name']}_{self.bridge_info['user_id']}: {trace_log}")
| 29.466165 | 112 | 0.588415 |
ebed391c84fa09177eb7bd1a41a43decf14a1d19 | 1,117 | py | Python | tests/resources.py | lukasbindreiter/white-brush | 67e5dcda8043f2d7bba440bcb9a84c2fa85ec9ba | [
"MIT"
] | 3 | 2018-04-27T23:04:04.000Z | 2019-11-01T02:54:45.000Z | tests/resources.py | lukasbindreiter/white-brush | 67e5dcda8043f2d7bba440bcb9a84c2fa85ec9ba | [
"MIT"
] | 33 | 2018-04-29T15:12:29.000Z | 2022-01-19T21:44:19.000Z | tests/resources.py | lukasbindreiter/white-brush | 67e5dcda8043f2d7bba440bcb9a84c2fa85ec9ba | [
"MIT"
] | 2 | 2018-06-16T17:09:25.000Z | 2020-01-14T01:23:34.000Z | import os
from typing import Tuple, Iterator
import numpy as np
from white_brush.io import read_image
def get_test_image() -> Tuple[str, np.ndarray]:
    """
    Return the name and the data of the first image in the `test_images`
    directory

    Usage example:
    >>> img_name, img = get_test_image()
    >>> img_name
    "01.png"

    Returns:
        name and data of the image
    """
    images = get_test_images()
    return next(images)
def get_test_images() -> Iterator[Tuple[str, np.ndarray]]:
    """
    Iterate over all images in the `test_images` directory

    Usage example:
    >>> for img_name, img in get_test_images():
    >>>     print(img_name)

    Returns:
        Generator over all images in the `test_images` directory
    """
    # Tests may run from the repository root or from inside tests/.
    base = "test_images" if os.path.exists("test_images") else "../test_images"
    return __get_test__images_from_path__(base)
def __get_test__images_from_path__(path: str):
    """Yield ``(filename, image_data)`` for every non-hidden file in *path*."""
    for name in os.listdir(path):
        if not name.startswith("."):
            yield name, read_image(os.path.join(path, name))
| 22.34 | 72 | 0.656222 |
0ae380aaeb8ec5de4b8fe769802de9caa364ea52 | 1,463 | py | Python | Game_Life/env/lib/python3.7/site-packages/pygame/tests/sysfont_test.py | munoztd0/AI_games | de2a45b1a68b26b21b8efbc140c679b5ff90cb9a | [
"MIT"
] | 1 | 2022-03-03T05:13:14.000Z | 2022-03-03T05:13:14.000Z | Game_Life/env/lib/python3.7/site-packages/pygame/tests/sysfont_test.py | munoztd0/AI_games | de2a45b1a68b26b21b8efbc140c679b5ff90cb9a | [
"MIT"
] | null | null | null | Game_Life/env/lib/python3.7/site-packages/pygame/tests/sysfont_test.py | munoztd0/AI_games | de2a45b1a68b26b21b8efbc140c679b5ff90cb9a | [
"MIT"
] | null | null | null | import unittest
import platform
class SysfontModuleTest(unittest.TestCase):
    """Smoke tests for ``pygame.sysfont`` initialisation and font lookup."""

    def test_create_aliases(self):
        import pygame.sysfont

        pygame.sysfont.initsysfonts()
        pygame.sysfont.create_aliases()
        self.assertTrue(len(pygame.sysfont.Sysalias) > 0)

    def test_initsysfonts(self):
        import pygame.sysfont

        pygame.sysfont.initsysfonts()
        fonts = pygame.sysfont.get_fonts()
        self.assertTrue(len(fonts) > 0)

    @unittest.skipIf("Darwin" not in platform.platform(), "Not mac we skip.")
    def test_initsysfonts_darwin(self):
        import pygame.sysfont

        fonts = pygame.sysfont.get_fonts()
        self.assertTrue(len(fonts) > 10)

    def test_sysfont(self):
        import pygame.font

        pygame.font.init()
        font = pygame.font.SysFont("Arial", 40)
        self.assertTrue(isinstance(font, pygame.font.Font))

    @unittest.skipIf(
        ("Darwin" in platform.platform() or "Windows" in platform.platform()),
        "Not unix we skip.",
    )
    def test_initsysfonts_unix(self):
        import pygame.sysfont

        self.assertTrue(len(pygame.sysfont.get_fonts()) > 0)

    @unittest.skipIf("Windows" not in platform.platform(), "Not windows we skip.")
    def test_initsysfonts_win32(self):
        import pygame.sysfont

        self.assertTrue(len(pygame.sysfont.get_fonts()) > 10)
###############################################################################

if __name__ == "__main__":
    # Allow running this test module directly with the stdlib unittest runner.
    unittest.main()
| 28.134615 | 82 | 0.628161 |
0137cf6f12cff84a9b196c2559977b4bd185128c | 8,748 | py | Python | tools/dataset_converters/preprocess_with_clip_bbox.py | wusize/mmdetection | c167fb1af78d910d9a8304ad2a2e6ddd32c70281 | [
"Apache-2.0"
] | null | null | null | tools/dataset_converters/preprocess_with_clip_bbox.py | wusize/mmdetection | c167fb1af78d910d9a8304ad2a2e6ddd32c70281 | [
"Apache-2.0"
] | null | null | null | tools/dataset_converters/preprocess_with_clip_bbox.py | wusize/mmdetection | c167fb1af78d910d9a8304ad2a2e6ddd32c70281 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
from mmdet.datasets import build_dataloader, DATASETS
from mmdet.models import clip
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
from mmcv.ops import roi_align
import argparse
# The 80 COCO category names, in canonical COCO order.
CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
           'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
           'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
           'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
           'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
           'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
           'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
           'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
           'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
           'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
           'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
           'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
# Subset of CLASSES treated as "seen" categories — presumably the base split
# of an open-vocabulary detection setup; TODO confirm against the paper/config.
SEEN_CLASSES = ('truck', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
                'bear', 'zebra', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'sports ball',
                'kite', 'baseball bat', 'baseball glove', 'skateboard', 'cup', 'knife',
                'spoon', 'apple', 'sandwich', 'broccoli', 'hot dog', 'pizza', 'donut', 'bed', 'toilet',
                'laptop', 'mouse', 'keyboard', 'cell phone', 'microwave', 'toaster',
                'sink', 'book', 'vase', 'toothbrush')
# Command-line options; `args` is consumed by the __main__ driver below.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_type', type=str, default='CocoCaptionDataset')
parser.add_argument('--prefix', type=str, default='captions')  # annotation file prefix
parser.add_argument('--subset', type=str, default='val')       # train/val split
parser.add_argument('--with_bbox', type=int, default=0)
parser.add_argument('--img_affine', type=bool, default=False)
parser.add_argument('--seen_classes', type=str, default=None)
parser.add_argument('--suffix', type=str, default='coco_caption')  # output-file tag
parser.add_argument('--color', default='r')                        # histogram colour
parser.add_argument('--clip_model', type=str, default='ViT-B/32')
parser.add_argument('--embeddings', type=str, default=None)  # path to saved class embeddings
args = parser.parse_args()
def generate_label_embeddings(labels_training, labels_testing, model, save_file):
    """Encode 'A photo of a <label>' prompts with CLIP's text encoder and save both sets.

    Args:
        labels_training: iterable of seen/base class names.
        labels_testing: iterable of all/test class names.
        model: CLIP model providing ``encode_text`` (on GPU).
        save_file: destination path for ``torch.save``.
    """
    labels_training_prompts = [f'A photo of a {label}' for label in labels_training]
    labels_testing_prompts = [f'A photo of a {label}' for label in labels_testing]
    # Inference only: no_grad avoids building an autograd graph and saving
    # grad history alongside the embeddings.
    with torch.no_grad():
        embeddings_training = model.encode_text(clip.tokenize(labels_training_prompts).cuda())
        embeddings_testing = model.encode_text(clip.tokenize(labels_testing_prompts).cuda())
    print(embeddings_training.shape)
    torch.save(dict(embeddings_training=embeddings_training,
                    embeddings_testing=embeddings_testing), save_file)
def visualize_bbox_scores(model, data_loader, save_dir, suffix='all', color='r', embeddings=''):
    """Score ground-truth boxes with CLIP against class embeddings; plot a histogram.

    For every GT box: crop via roi_align, embed with CLIP's image encoder,
    dot with the (normalised) class text embeddings, and record the logit of
    the true class plus top-1/top-5 accuracy. Saves
    ``<save_dir>/bbox_scores_<suffix>.png``.
    """
    # load the precomputed text embeddings and L2-normalise them
    embeddings = torch.load(embeddings)['embeddings_testing'].cuda()
    embeddings = embeddings / embeddings.norm(dim=-1, keepdim=True)
    scale = model.visual.input_resolution  # CLIP's expected crop size
    with torch.no_grad():
        scores = []
        logits = []
        correct = []
        correct_5 = []
        for data in tqdm(data_loader):
            img = data['img'].data[0].cuda()
            gt_label = data['gt_labels'].data[0]
            gt_bboxes = data['gt_bboxes'].data[0]
            gt_label = torch.cat(gt_label, dim=0).long().cuda()
            # text_feature = embeddings[torch.cat(gt_label, dim=0).long()].cuda()
            # build rois as (batch_idx, x1, y1, x2, y2) rows for roi_align
            rois = []
            for batch_id, bbox in enumerate(gt_bboxes):
                roi = bbox.new_ones(bbox.shape[0], 5) * batch_id
                roi[:, 1:] = bbox
                rois.append(roi)
            rois = torch.cat(rois, dim=0).cuda()
            img_crops = roi_align(img, rois, (scale, scale), 1.0, 1)
            img_feature = model.encode_image(img_crops, True)
            # cosine-style similarity of each crop against every class
            score = (img_feature[:, None] * embeddings[None]).sum(-1)
            logits.append(score[range(score.shape[0]), gt_label])
            score = F.softmax(score, dim=-1)
            _, preds = torch.topk(score, k=1, dim=-1)
            _, preds_5 = torch.topk(score, k=5, dim=-1)
            correct.append((preds[:, 0] == gt_label).float())
            correct_5.append((preds_5 == gt_label[:, None]).sum(-1).float())
            # print(score)
            scores.append(score[range(score.shape[0]), gt_label])
        scores = torch.cat(scores)
        correct = torch.cat(correct)
        correct_5 = torch.cat(correct_5)
        logits = torch.cat(logits)
        print(scores.mean(), flush=True)
        print(correct.mean(), flush=True)
        print(logits.mean(), flush=True)
        scores = scores.view(-1).cpu().numpy()
        correct_5 = correct_5.view(-1).cpu().numpy()
        correct = correct.view(-1).cpu().numpy()
        logits = logits.view(-1).cpu().numpy()
        acc = correct.sum() / len(logits)
        acc_5 = correct_5.sum() / len(logits)
        print(acc, acc_5)
        # plt.subplot(121)
        plt.hist(logits, bins=1000, color=color)
        plt.title(f'scores_{suffix}')
        # plt.subplot(122)
        # plt.hist(logits, bins=1000, color=color)
        # plt.title(f'acc_{acc}_scores_{logits.mean().item()}_{suffix}')
        plt.savefig(os.path.join(save_dir, f'bbox_scores_{suffix}.png'))
        plt.close()
def get_dataloader_bbox(data_root, dataset_type, prefix='captions', subset='val',
                        with_bbox=True, seen_classes=None):
    """Build a mmdet dataloader over COCO-style annotations with GT boxes/labels.

    Args:
        data_root (str): root holding ``annotations/`` and ``<subset>2017/``.
        dataset_type (str): registered mmdet dataset class name.
        prefix (str): annotation file prefix (e.g. ``captions``/``instances``).
        subset (str): split name, ``train`` or ``val``.
        with_bbox (bool): kept for interface compatibility; the pipeline below
            always loads bboxes and labels, so this flag currently has no
            effect. (The original also built an unused ``LoadOpenAnnotations``
            config from it — dead code, removed here.)
        seen_classes: optional class restriction forwarded to the dataset.

    Returns:
        A dataloader yielding ``img``, ``gt_bboxes`` and ``gt_labels``.
    """
    dataset_class = DATASETS.get(dataset_type)
    # dataset settings (mean/std normalisation in RGB order)
    img_norm_cfg = dict(
        mean=[122.7709383, 116.7460125, 104.09373615],
        std=[68.5005327, 66.6321579, 70.32316305], to_rgb=True)
    pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'],
             meta_keys=['filename'])]
    dataset_cfg = dict(
        ann_file=data_root + f'annotations/{prefix}_{subset}2017.json',
        img_prefix=data_root + f'{subset}2017/',
        pipeline=pipeline,
        test_mode=False)
    if seen_classes is not None:
        dataset_cfg.update(dict(seen_classes=seen_classes))
    dataset = dataset_class(**dataset_cfg)
    return build_dataloader(dataset,
                            seed=None, dist=False, shuffle=False,
                            drop_last=False, workers_per_gpu=4,
                            samples_per_gpu=32)
if __name__ == '__main__':
    # Load the CLIP visual encoder (text encoder disabled) and run the
    # bbox-score visualisation over the requested COCO split.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, _ = clip.load(args.clip_model, device=device, download_root='models', use_text_encoder=False)
    model.init_weights()
    model.eval()
    input_scale = model.visual.input_resolution
    print(input_scale)
    data_root = r'data/coco/'
    save_dir = r'data/coco'
    os.makedirs(save_dir, exist_ok=True)
    # dataset_type = 'CocoOpenDataset'
    # prefix = 'instances'
    dataset_type = args.dataset_type
    prefix = args.prefix
    subset = args.subset
    with_bbox = args.with_bbox
    seen_classes = args.seen_classes
    img_affine = args.img_affine  # NOTE: parsed but not used below
    suffix = args.suffix
    color = args.color
    data_loader = get_dataloader_bbox(data_root, dataset_type, prefix=prefix,
                                      subset=subset, with_bbox=with_bbox,
                                      seen_classes=seen_classes)
    visualize_bbox_scores(model, data_loader, save_dir, suffix=suffix, color=color, embeddings=args.embeddings)
    # generate_label_embeddings(SEEN_CLASSES, CLASSES, model,
    #                           os.path.join(save_dir, 'class_embeddings_vitB16.pt'))
| 43.093596 | 112 | 0.616141 |
53bdf2b662c02b991e5c25a7af758dce7940dc6f | 10,249 | py | Python | aerich/cli.py | yusukefs/aerich | 919d56c936a45fab57ad32ee01b1631177bca235 | [
"Apache-2.0"
] | null | null | null | aerich/cli.py | yusukefs/aerich | 919d56c936a45fab57ad32ee01b1631177bca235 | [
"Apache-2.0"
] | null | null | null | aerich/cli.py | yusukefs/aerich | 919d56c936a45fab57ad32ee01b1631177bca235 | [
"Apache-2.0"
] | null | null | null | import asyncio
import os
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import List
import click
from click import Context, UsageError
from tortoise import Tortoise, generate_schema_for_client
from tortoise.exceptions import OperationalError
from tortoise.transactions import in_transaction
from tortoise.utils import get_schema_sql
from aerich.inspectdb import InspectDb
from aerich.migrate import Migrate
from aerich.utils import (
add_src_path,
get_app_connection,
get_app_connection_name,
get_models_describe,
get_tortoise_config,
get_version_content_from_file,
write_version_file,
)
from . import __version__
from .enums import Color
from .models import Aerich
# Shared .ini parser; populated by the `cli` group and the `init` command.
parser = ConfigParser()


def coro(f):
    """Run the wrapped async click command synchronously on the event loop.

    Database connections are closed afterwards for every command except the
    ``cli`` group itself, which must keep them open for its subcommand.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        # Close db connections at the end of all but the cli group function
        try:
            loop.run_until_complete(f(*args, **kwargs))
        finally:
            if f.__name__ != "cli":
                loop.run_until_complete(Tortoise.close_connections())

    return wrapper
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(__version__, "-V", "--version")
@click.option(
    "-c", "--config", default="aerich.ini", show_default=True, help="Config file.",
)
@click.option("--app", required=False, help="Tortoise-ORM app name.")
@click.option(
    "-n",
    "--name",
    default="aerich",
    show_default=True,
    help="Name of section in .ini file to use for aerich config.",
)
@click.pass_context
@coro
async def cli(ctx: Context, config, app, name):
    """Group callback: load config and initialise Migrate for subcommands.

    ``init`` needs no config file and ``init-db`` needs no migrate location,
    so each level of setup is skipped for the corresponding subcommand.
    """
    ctx.ensure_object(dict)
    ctx.obj["config_file"] = config
    ctx.obj["name"] = name
    invoked_subcommand = ctx.invoked_subcommand
    if invoked_subcommand != "init":
        if not Path(config).exists():
            raise UsageError("You must exec init first", ctx=ctx)
        parser.read(config)
        location = parser[name]["location"]
        tortoise_orm = parser[name]["tortoise_orm"]
        src_folder = parser[name]["src_folder"]
        # Add specified source folder to path
        add_src_path(src_folder)
        tortoise_config = get_tortoise_config(ctx, tortoise_orm)
        # default to the first app declared in the tortoise config
        app = app or list(tortoise_config.get("apps").keys())[0]
        ctx.obj["config"] = tortoise_config
        ctx.obj["location"] = location
        ctx.obj["app"] = app
        Migrate.app = app
        if invoked_subcommand != "init-db":
            if not Path(location, app).exists():
                raise UsageError("You must exec init-db first", ctx=ctx)
            await Migrate.init(tortoise_config, app, location)
@cli.command(help="Generate migrate changes file.")
@click.option("--name", default="update", show_default=True, help="Migrate name.")
@click.pass_context
@coro
async def migrate(ctx: Context, name):
    """Create a new migration file; report when nothing changed."""
    version = await Migrate.migrate(name)
    if not version:
        return click.secho("No changes detected", fg=Color.yellow)
    click.secho(f"Success migrate {version}", fg=Color.green)
@cli.command(help="Upgrade to specified version.")
@click.pass_context
@coro
async def upgrade(ctx: Context):
    """Apply every migration file not yet recorded in the Aerich table.

    Each version's upgrade SQL runs inside one transaction together with
    the bookkeeping row insert.
    """
    config = ctx.obj["config"]
    app = ctx.obj["app"]
    migrated = False
    for version_file in Migrate.get_all_version_files():
        try:
            exists = await Aerich.exists(version=version_file, app=app)
        except OperationalError:
            # Aerich's own table may not exist yet (fresh database)
            exists = False
        if not exists:
            async with in_transaction(get_app_connection_name(config, app)) as conn:
                file_path = Path(Migrate.migrate_location, version_file)
                content = get_version_content_from_file(file_path)
                upgrade_query_list = content.get("upgrade")
                for upgrade_query in upgrade_query_list:
                    await conn.execute_script(upgrade_query)
                await Aerich.create(
                    version=version_file, app=app, content=get_models_describe(app),
                )
                click.secho(f"Success upgrade {version_file}", fg=Color.green)
                migrated = True
    if not migrated:
        click.secho("No upgrade items found", fg=Color.yellow)
@cli.command(help="Downgrade to specified version.")
@click.option(
    "-v",
    "--version",
    default=-1,
    type=int,
    show_default=True,
    help="Specified version, default to last.",
)
@click.option(
    "-d",
    "--delete",
    is_flag=True,
    default=False,
    show_default=True,
    help="Delete version files at the same time.",
)
@click.pass_context
@click.confirmation_option(
    prompt="Downgrade is dangerous, which maybe lose your data, are you sure?",
)
@coro
async def downgrade(ctx: Context, version: int, delete: bool):
    """Revert migrations back to *version* (or just the last one when -1).

    Runs each version's downgrade SQL in a transaction, removes its Aerich
    row, and optionally unlinks the migration file from disk.
    """
    app = ctx.obj["app"]
    config = ctx.obj["config"]
    if version == -1:
        # no explicit version: target only the most recent one
        specified_version = await Migrate.get_last_version()
    else:
        specified_version = await Aerich.filter(app=app, version__startswith=f"{version}_").first()
    if not specified_version:
        return click.secho("No specified version found", fg=Color.yellow)
    if version == -1:
        versions = [specified_version]
    else:
        # revert every version from the requested one up to the newest
        versions = await Aerich.filter(app=app, pk__gte=specified_version.pk)
    for version in versions:
        file = version.version
        async with in_transaction(get_app_connection_name(config, app)) as conn:
            file_path = Path(Migrate.migrate_location, file)
            content = get_version_content_from_file(file_path)
            downgrade_query_list = content.get("downgrade")
            if not downgrade_query_list:
                click.secho("No downgrade items found", fg=Color.yellow)
                return
            for downgrade_query in downgrade_query_list:
                await conn.execute_query(downgrade_query)
            await version.delete()
            if delete:
                os.unlink(file_path)
            click.secho(f"Success downgrade {file}", fg=Color.green)
@cli.command(help="Show current available heads in migrate location.")
@click.pass_context
@coro
async def heads(ctx: Context):
    """Print every migration file that has not been applied yet."""
    app = ctx.obj["app"]
    found_unapplied = False
    for version_file in Migrate.get_all_version_files():
        applied = await Aerich.exists(version=version_file, app=app)
        if not applied:
            click.secho(version_file, fg=Color.green)
            found_unapplied = True
    if not found_unapplied:
        click.secho("No available heads,try migrate first", fg=Color.green)
@cli.command(help="List all migrate items.")
@click.pass_context
@coro
async def history(ctx: Context):
    """List every migration file found in the migrate location."""
    versions = Migrate.get_all_version_files()
    if not versions:
        return click.secho("No history,try migrate", fg=Color.green)
    for version_file in versions:
        click.secho(version_file, fg=Color.green)
@cli.command(help="Init config file and generate root migrate location.")
@click.option(
    "-t",
    "--tortoise-orm",
    required=True,
    help="Tortoise-ORM config module dict variable, like settings.TORTOISE_ORM.",
)
@click.option(
    "--location", default="./migrations", show_default=True, help="Migrate store location.",
)
@click.option(
    "-s",
    "--src_folder",
    default=".",
    show_default=False,
    help="Folder of the source, relative to the project root.",
)
@click.pass_context
@coro
async def init(ctx: Context, tortoise_orm, location, src_folder):
    """Write the aerich .ini section and create the migrate location."""
    config_file = ctx.obj["config_file"]
    name = ctx.obj["name"]
    if Path(config_file).exists():
        return click.secho("Configuration file already created", fg=Color.yellow)
    if os.path.isabs(src_folder):
        # NOTE(review): relpath(os.getcwd(), src_folder) computes the cwd
        # *relative to* src_folder — the argument order looks inverted for
        # "src folder relative to the project root"; confirm intended behavior.
        src_folder = os.path.relpath(os.getcwd(), src_folder)
    # Add ./ so it's clear that this is relative path
    if not src_folder.startswith("./"):
        src_folder = "./" + src_folder
    # check that we can find the configuration, if not we can fail before the config file gets created
    add_src_path(src_folder)
    get_tortoise_config(ctx, tortoise_orm)
    parser.add_section(name)
    parser.set(name, "tortoise_orm", tortoise_orm)
    parser.set(name, "location", location)
    parser.set(name, "src_folder", src_folder)
    with open(config_file, "w", encoding="utf-8") as f:
        parser.write(f)
    Path(location).mkdir(parents=True, exist_ok=True)
    click.secho(f"Success create migrate location {location}", fg=Color.green)
    click.secho(f"Success generate config file {config_file}", fg=Color.green)
@cli.command(help="Generate schema and generate app migrate location.")
@click.option(
    "--safe",
    type=bool,
    default=True,
    help="When set to true, creates the table only when it does not already exist.",
    show_default=True,
)
@click.pass_context
@coro
async def init_db(ctx: Context, safe):
    """Create the app migrate dir, generate the DB schema, record version 0."""
    config = ctx.obj["config"]
    location = ctx.obj["location"]
    app = ctx.obj["app"]
    dirname = Path(location, app)
    try:
        # non-exist_ok mkdir doubles as the "already initialised" check
        dirname.mkdir(parents=True)
        click.secho(f"Success create app migrate location {dirname}", fg=Color.green)
    except FileExistsError:
        return click.secho(
            f"Inited {app} already, or delete {dirname} and try again.", fg=Color.yellow
        )
    await Tortoise.init(config=config)
    connection = get_app_connection(config, app)
    await generate_schema_for_client(connection, safe)
    schema = get_schema_sql(connection, safe)
    version = await Migrate.generate_version()
    await Aerich.create(
        version=version, app=app, content=get_models_describe(app),
    )
    # the initial version file's upgrade step is the full schema DDL
    content = {
        "upgrade": [schema],
    }
    write_version_file(Path(dirname, version), content)
    click.secho(f'Success generate schema for app "{app}"', fg=Color.green)
@cli.command(help="Introspects the database tables to standard output as TortoiseORM model.")
@click.option(
    "-t", "--table", help="Which tables to inspect.", multiple=True, required=False,
)
@click.pass_context
@coro
async def inspectdb(ctx: Context, table: List[str]):
    """Print generated TortoiseORM models for the selected database tables."""
    conf = ctx.obj["config"]
    app_name = ctx.obj["app"]
    connection = get_app_connection(conf, app_name)
    await InspectDb(connection, table).inspect()
def main():
    """Console-script entry point: delegate to the click group."""
    cli()


if __name__ == "__main__":
    main()
| 31.829193 | 102 | 0.669431 |
790edeae3cebe9f07d21898f2b3d872ed481a19f | 5,793 | py | Python | train.py | stroblme/hqsp-main | add585604912f0dec6d02118d4643435525a8df1 | [
"MIT"
] | null | null | null | train.py | stroblme/hqsp-main | add585604912f0dec6d02118d4643435525a8df1 | [
"MIT"
] | null | null | null | train.py | stroblme/hqsp-main | add585604912f0dec6d02118d4643435525a8df1 | [
"MIT"
] | null | null | null | import sys
sys.path.append("./stqft")
sys.path.append("./qcnn")
import os
#Activate the cuda env
os.environ["LD_LIBRARY_PATH"] = "$LD_LIBRARY_PATH:/usr/local/cuda/lib64/:/usr/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda-11.2/lib64:/usr/local/cuda/targets/x86_64-linux/lib/"
import time
import multiprocessing
import glob
import numpy as np
# Storage locations for the speech-command training pipeline artifacts.
datasetPath = "/storage/mstrobl/dataset"          # raw .wav dataset
featurePath = "/storage/mstrobl/features"         # cached waveform features (.npy)
# NOTE: this constant was previously assigned twice with the same value;
# the duplicate assignment has been removed.
checkpointsPath = "/storage/mstrobl/checkpoints"  # Keras training checkpoints
modelsPath = "/storage/mstrobl/models"            # final saved models
quantumPath = "/storage/mstrobl/dataQuantum"      # quanvolution outputs (.npy)
waveformPath = "/storage/mstrobl/waveforms"       # intermediate waveforms
exportPath = "/storage/mstrobl/versioning"        # experiment versioning exports

TOPIC = "PrepGenTrain"  # export topic tag used for all versioned artifacts

# Training hyper-parameters.
batchSize = 28
kernelSize = 2   # quanvolution kernel size
epochs = 40
portion = 1      # fraction of the dataset to use

# Use ~60% of the available cores to leave headroom on shared machines.
PoolSize = int(multiprocessing.cpu_count()*0.6) #be gentle..
# PoolSize = 1 #be gentle..
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--waveform", default = 1, help = "Generate Waveforms")
    parser.add_argument("--quantum", default= 1, help = "Generate Quantum Data")
    # NOTE(review): default=1 combined with action='store_true' means args.train
    # is always truthy (1 when omitted, True when passed), so the "Training
    # disabled" branch below is unreachable from the CLI -- confirm intent.
    parser.add_argument("--train", default = 1, action='store_true', help = "Fit the model")
    parser.add_argument("--checkTree", default = 1, help = "Checks if the working tree is dirty")
    args = parser.parse_args()
    from stqft.frontend import export
    # Refuse to run an experiment from a dirty git tree unless explicitly disabled.
    if int(args.checkTree) == 1:
        export.checkWorkingTree(exportPath)
    print(f"\n\n\n-----------------------\n\n\n")
    print(f"Train Time @{time.time()}")
    print(f"\n\n\n-----------------------\n\n\n")
    # 'spawn' start method: workers get a fresh interpreter (required for CUDA workers).
    multiprocessing.set_start_method('spawn')
    print(f"Running {PoolSize} processes")
    datasetFiles = glob.glob(datasetPath + "/**/*.wav", recursive=True)
    print(f"Found {len(datasetFiles)} files in the dataset")
    # Version the exact list of dataset files used for this run.
    exp = export(topic=TOPIC, identifier="dataset", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Dataset {len(datasetFiles)} in {datasetPath}")
    exp.setData(export.GENERICDATA, datasetFiles)
    exp.doExport()
    print(f"\n\n\n-----------------------\n\n\n")
    print(f"Generating Waveforms @{time.time()}")
    print(f"\n\n\n-----------------------\n\n\n")
    from generateFeatures import gen_features, gen_quantum, reportSettings, samplingRate
    from qcnn.small_qsr import labels
    # Either regenerate waveform features or reuse the cached .npy files on disk.
    if int(args.waveform)==1:
        x_train, x_valid, y_train, y_valid = gen_features(labels, datasetPath, featurePath, PoolSize, waveformPath=waveformPath, portion=portion)
    else:
        print("Loading from disk...")
        x_train = np.load(f"{featurePath}/x_train_speech.npy")
        x_valid = np.load(f"{featurePath}/x_valid_speech.npy")
        y_train = np.load(f"{featurePath}/y_train_speech.npy")
        y_valid = np.load(f"{featurePath}/y_valid_speech.npy")
    exp = export(topic=TOPIC, identifier="waveformData", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Waveforms generated (T)/ loaded (F): {args.waveform}; Labels used: {labels}; FeaturePath: {featurePath}; PoolSize: {PoolSize}; WaveformPath: {waveformPath}; Portioning: {portion}, SamplingRate: {samplingRate}, {reportSettings()}")
    exp.setData(export.GENERICDATA, {"x_train":x_train, "x_valid":x_valid, "y_train":y_train, "y_valid":y_valid})
    exp.doExport()
    print(f"\n\n\n-----------------------\n\n\n")
    print(f"Generating Quantum Data @{time.time()}")
    print(f"\n\n\n-----------------------\n\n\n")
    # --quantum selects the preprocessing mode:
    #   -2: bypass entirely, 1: run quanvolution, -1: pixel/channel map only,
    #   anything else: reuse cached outputs from disk.
    # disable quanv and pix chan mal
    if int(args.quantum)==-2:
        q_train = x_train
        q_valid = x_valid
    # enable quanv
    elif int(args.quantum)==1:
        q_train, q_valid = gen_quantum(x_train, x_valid, kernelSize, output=quantumPath, poolSize=PoolSize)
    # pix chan map
    elif int(args.quantum)==-1:
        q_train, q_valid = gen_quantum(x_train, x_valid, kernelSize, output=quantumPath, poolSize=PoolSize, quanv=False)
    # load from disk
    else:
        print("Loading from disk...")
        q_train = np.load(f"{quantumPath}/quanv_train.npy")
        q_valid = np.load(f"{quantumPath}/quanv_valid.npy")
    exp = export(topic=TOPIC, identifier="quantumData", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Quantum data generated (T)/ loaded (F): {args.quantum}; FeaturePath: {quantumPath}; PoolSize: {PoolSize};")
    exp.setData(export.GENERICDATA, {"q_train":q_train, "q_valid":q_valid})
    exp.doExport()
    print(f"\n\n\n-----------------------\n\n\n")
    print(f"Starting Training @{time.time()}")
    print(f"\n\n\n-----------------------\n\n\n")
    from fitModel import fit_model
    if args.train:
        #if quanv completely disabled and no pix channel map
        if int(args.quantum)==-2 or q_train.shape[3]==1:
            print("using ablation")
            # pass quanv data for training and validation
            model, history = fit_model(q_train, y_train, q_valid, y_valid, checkpointsPath, epochs=epochs, batchSize=batchSize, ablation=True)
        else:
            # pass quanv data for training and validation
            model, history = fit_model(q_train, y_train, q_valid, y_valid, checkpointsPath, epochs=epochs, batchSize=batchSize, ablation=False)
        data_ix = time.strftime("%Y%m%d_%H%M")
        model.save(f"{modelsPath}/model_{time.time()}")
    else:
        # NOTE(review): if this branch ever ran, 'model' and 'history' would be
        # unbound and the export below would raise NameError -- verify.
        print("Training disabled")
    exp = export(topic=TOPIC, identifier="model", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Model trained (T)/ loaded (F): {args.train}; CheckpointsPath: {checkpointsPath}; ModelsPath: {modelsPath}")
    exp.setData(export.GENERICDATA, {"history_acc":history.history['accuracy'], "history_val_acc":history.history['val_accuracy'], "history_loss":history.history['loss'], "history_val_loss":history.history['val_loss']})
    exp.doExport()
232e24dd9b77c3ac3bb95d26721dda0ad49e7e9c | 3,330 | py | Python | sm64ex-nightly/tools/util/generate_audiofile_cpp.py | alex-free/sm64ex-creator | e7089df69fb43f266b2165078d94245b33b8e72a | [
"Intel",
"X11",
"Unlicense"
] | 2 | 2022-03-12T08:27:53.000Z | 2022-03-12T18:26:06.000Z | sm64ex-nightly/tools/util/generate_audiofile_cpp.py | alex-free/sm64ex-creator | e7089df69fb43f266b2165078d94245b33b8e72a | [
"Intel",
"X11",
"Unlicense"
] | null | null | null | sm64ex-nightly/tools/util/generate_audiofile_cpp.py | alex-free/sm64ex-creator | e7089df69fb43f266b2165078d94245b33b8e72a | [
"Intel",
"X11",
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import os
import re
import sys
file_list = [
'Features.h',
'Compiler.h',
'error.h',
'extended.h',
'compression.h',
'aupvinternal.h',
'aupvlist.h',
'audiofile.h',
'afinternal.h',
'byteorder.h',
'AudioFormat.h',
'debug.h',
'util.h',
'units.h',
'UUID.h',
'Shared.h',
'Buffer.h',
'File.h',
'FileHandle.h',
'Instrument.h',
'Track.h',
'Marker.h',
'Setup.h',
'Tag.h',
'PacketTable.h',
'pcm.h',
'g711.h',
'af_vfs.h',
'Raw.h',
'WAVE.h',
'SampleVision.h',
'modules/Module.h',
'modules/ModuleState.h',
'modules/SimpleModule.h',
'modules/FileModule.h',
'modules/RebufferModule.h',
'modules/BlockCodec.h',
'modules/BlockCodec.cpp',
'modules/FileModule.cpp',
'modules/G711.h',
'modules/G711.cpp',
'modules/Module.cpp',
'modules/ModuleState.cpp',
'modules/MSADPCM.h',
'modules/MSADPCM.cpp',
'modules/PCM.h',
'modules/PCM.cpp',
'modules/SimpleModule.cpp',
'modules/RebufferModule.cpp',
'AIFF.h',
'AIFF.cpp',
'AudioFormat.cpp',
'Buffer.cpp',
'File.cpp',
'FileHandle.cpp',
'Instrument.cpp',
'Loop.cpp',
'Marker.cpp',
'Miscellaneous.cpp',
'PacketTable.cpp',
'Raw.cpp',
'Setup.cpp',
'Track.cpp',
'UUID.cpp',
'WAVE.cpp',
'aes.cpp',
'af_vfs.cpp',
'aupv.c',
'compression.cpp',
'data.cpp',
'debug.cpp',
'error.c',
'extended.c',
'format.cpp',
'g711.c',
'openclose.cpp',
'pcm.cpp',
'query.cpp',
'units.cpp',
'util.cpp',
]
# Banner written at the top of the generated amalgamation file.
file_header = \
"""// libaudiofile b62c902
// https://github.com/mpruett/audiofile
// To simplify compilation, all files have been concatenated into one.
// Support for all formats except WAVE, AIFF(C) and RAW has been stripped out.
"""
# Preprocessor definitions prepended so the amalgamation builds without the
# original autoconf-generated config.h.
prepend_defs = \
"""#define HAVE_UNISTD_H 1
#if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
#endif
#include <stdlib.h>
"""
def banned(line):
    """Return True for source lines that must be dropped during concatenation."""
    markers = ('#pragma once', '#include "', '#include <config.h>')
    return any(marker in line for marker in markers)
def cat_file(fout, fin_name):
    """Append the contents of *fin_name* to *fout*.

    Lines matched by banned() are dropped, trailing whitespace is stripped,
    and a blank separator line is written after the file.
    """
    with open(fin_name) as fin:
        kept = [line.rstrip() for line in fin if not banned(line)]
    for line in kept:
        fout.write(line + '\n')
    fout.write('\n')
def combine_libaudiofile(fout_name, libaudiofile_path):
    """Write the single-file amalgamation of libaudiofile to *fout_name*.

    Emits the banner, the license text (as a C block comment), the
    replacement preprocessor definitions, and then every source file from
    file_list in order, each preceded by a marker comment.
    """
    with open(fout_name, 'w') as out:
        out.write(file_header + "\n")
        # License text wrapped in a C comment so the output stays compilable.
        out.write("/*\n")
        cat_file(out, os.path.join(libaudiofile_path, '../COPYING'))
        out.write("*/\n\n")
        out.write(prepend_defs + "\n")
        for rel_path in file_list:
            out.write(f"// file: {rel_path}\n")
            cat_file(out, os.path.join(libaudiofile_path, rel_path))
def main():
    """Tiny hand-rolled CLI: optional output filename and source directory."""
    argv = sys.argv
    if len(argv) > 1 and argv[1] in ('-h', '--help'):
        print('Usage: generate_audiofile_cpp.py [output_filename] [libaudiofile_src_dir]')
        print('Defaults: [output_filename = "audiofile.cpp"] [libaudiofile_src_dir = "./audiofile/libaudiofile"]')
        return
    args = argv[1:]
    fout_name = args[0] if len(args) >= 1 else 'audiofile.cpp'
    libaudiofile_path = args[1] if len(args) >= 2 else './audiofile/libaudiofile'
    combine_libaudiofile(fout_name, os.path.expanduser(libaudiofile_path))
main()
| 24.306569 | 114 | 0.592793 |
aac1f58291f84ce1b9db5fa70e3881ba8daad42b | 1,322 | py | Python | app/tests/api/test_forms.py | hndrewaall/league | 13737b4d0c6c813dbf125db8a57f48e5b3acd8fa | [
"MIT"
] | 4 | 2017-01-26T17:51:16.000Z | 2021-06-05T14:26:22.000Z | app/tests/api/test_forms.py | hndrewaall/league | 13737b4d0c6c813dbf125db8a57f48e5b3acd8fa | [
"MIT"
] | 190 | 2016-11-27T19:34:23.000Z | 2020-02-10T17:17:39.000Z | app/tests/api/test_forms.py | hndrewaall/league | 13737b4d0c6c813dbf125db8a57f48e5b3acd8fa | [
"MIT"
] | 14 | 2016-11-27T18:34:03.000Z | 2021-10-09T16:04:26.000Z | # -*- coding: utf-8 -*-
"""Test forms."""
import pytest
from league.dashboard.forms import GameCreateForm
class TestGameCreateForm:
    """Game create form."""
    # Parametrized over the cartesian product of outcome, handicap, komi,
    # season and episode; `players`, `season_choices` and `episode_choices`
    # are fixtures supplied by the test suite.
    @pytest.mark.parametrize('winner', ['white', 'black'])
    @pytest.mark.parametrize('handicap', [0, 8])
    @pytest.mark.parametrize('komi', [0, 7])
    @pytest.mark.parametrize('season', [1])
    @pytest.mark.parametrize('episode', [1])
    def test_validate_success(self, players, winner, handicap, komi, season,
                              episode, season_choices, episode_choices):
        """Create a valid game."""
        form = GameCreateForm(white_id=players[0].id,
                              black_id=players[1].id,
                              winner=winner,
                              handicap=handicap,
                              komi=komi,
                              season=season,
                              episode=episode)
        # Select-field choices must be populated before validate() can pass.
        player_choices = [(player.id, player.full_name) for player in players]
        form.white_id.choices = player_choices
        form.black_id.choices = player_choices
        form.season.choices = season_choices
        form.episode.choices = episode_choices
        assert form.validate() is True, ('Validation failed: {}'
                                        ''.format(form.errors))
790292a54e8eb8cdf5d33f844f48868af3da1b12 | 11,103 | py | Python | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "streamtube.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.streamtube.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font
constructor must be a dict or
an instance of plotly_study.graph_objs.streamtube.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.streamtube.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["colorsrc"] = v_font.ColorsrcValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["familysrc"] = v_font.FamilysrcValidator()
self._validators["size"] = v_font.SizeValidator()
self._validators["sizesrc"] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
| 34.268519 | 88 | 0.565973 |
75c9f4da20ffb98f77fece5d0c996890435b0ee6 | 2,307 | py | Python | backend/ideapros_llc_pop_st_32583/urls.py | crowdbotics-apps/ideapros-llc-pop-st-32583 | 544441bdbe62fad9416c4a302672c0b994682e33 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/ideapros_llc_pop_st_32583/urls.py | crowdbotics-apps/ideapros-llc-pop-st-32583 | 544441bdbe62fad9416c4a302672c0b994682e33 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/ideapros_llc_pop_st_32583/urls.py | crowdbotics-apps/ideapros-llc-pop-st-32583 | 544441bdbe62fad9416c4a302672c0b994682e33 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """ideapros_llc_pop_st_32583 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Admin site branding.
admin.site.site_header = "IdeaPros LLC - Pop Studio"
admin.site.site_title = "IdeaPros LLC - Pop Studio Admin Portal"
admin.site.index_title = "IdeaPros LLC - Pop Studio Admin"
# swagger
# drf-yasg schema view; the API docs require an authenticated session.
api_info = openapi.Info(
    title="IdeaPros LLC - Pop Studio API",
    default_version="v1",
    description="API documentation for IdeaPros LLC - Pop Studio App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA index for the root URL and, as a catch-all (must stay last),
# for any otherwise-unmatched route.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                TemplateView.as_view(template_name='index.html'))]
| 36.619048 | 87 | 0.713481 |
890d90b04a0f77e7bfb4de79616a922be9f292a0 | 1,322 | py | Python | h2o-py/tests/testdir_munging/unop/pyunit_scale.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_munging/unop/pyunit_scale.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_munging/unop/pyunit_scale.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def center_scale():
    """Smoke-test H2OFrame/vec .scale() with every center/scale flag combination."""
    iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))[0:4]
    # frame (default args)
    foo = iris.scale()
    # TODO: the below assertion fails. Should it?
    #assert abs(foo[0,0] - -0.8976739) < 1e-6 and abs(foo[0,1] - 1.01560199) < 1e-6 and abs(foo[0,2] - -1.335752) < 1e-6 \
    #    and abs(foo[0,3] - -1.311052) < 1e-6, "h2o differed from r. h2o got {0}, {1}, {2}, and {3}" \
    #    "".format(foo[0,0],foo[0,1],foo[0,2],foo[0,3])
    # frame (centers=True, scale=False)
    foo = iris.scale(center=True, scale=False)
    # frame (centers=False, scale=True)
    foo = iris.scale(center=False, scale=True)
    # frame (centers=False, scale=False)
    foo = iris.scale(center=False, scale=False)
    # vec (default args)
    foo = iris[0].scale()
    # vec (centers=True, scale=False)
    foo = iris[1].scale(center=True, scale=False)
    # vec (centers=False, scale=True)
    foo = iris[2].scale(center=False, scale=True)
    # vec (centers=False, scale=False)
    foo = iris[3].scale(center=False, scale=False)
# Run under the H2O test harness when executed directly; call directly when imported.
if __name__ == "__main__":
    pyunit_utils.standalone_test(center_scale)
else:
    center_scale()
| 26.979592 | 123 | 0.599092 |
54b1dedf5899db45ee3264e1bae8bddb8b4048d4 | 2,582 | py | Python | TDS_Image_Proj/code/process_image_dir.py | Tulsa-Data-Science/Playground | 0a2fbc9a321db2a5147959f405bd5af7ad2848c3 | [
"MIT"
] | 3 | 2018-05-07T23:48:36.000Z | 2018-08-30T00:14:37.000Z | TDS_Image_Proj/code/process_image_dir.py | Tulsa-Data-Science/Playground | 0a2fbc9a321db2a5147959f405bd5af7ad2848c3 | [
"MIT"
] | 2 | 2018-04-23T23:52:32.000Z | 2018-06-06T06:03:10.000Z | TDS_Image_Proj/code/process_image_dir.py | Tulsa-Data-Science/Playground | 0a2fbc9a321db2a5147959f405bd5af7ad2848c3 | [
"MIT"
] | 10 | 2018-04-22T21:44:09.000Z | 2018-06-26T00:17:54.000Z | #!/usr/bin/env python
import cv2
import glob
import numpy as np
import os
from PIL import Image
import sys
def processDir(input_dir):
    """Warp every sheet photo under *input_dir* and split it into cells.

    For each '<input_dir>/*/*.jpg' (skipping paths containing 'oldformat'):
    detect exactly four circular blobs (fiducial markers), warp the image to
    a fixed 600x600 view, save it to an 'out' subdirectory next to the
    source image, then cut a 5x5 grid of 100x100 cells (50px inset on each
    side) and save each cell as '<name>-<row>-<col>.jpg'.
    """
    # iterate through the files in the input directory with a '.jpg' extension
    # and skip anything with oldformat in its path
    for img in glob.iglob(input_dir + '/*/*.jpg'):
        if 'oldformat' not in img:
            print(img)
            # read the image from disk
            in_img = cv2.imread(img)
            # configure blob detector parameters
            params = cv2.SimpleBlobDetector_Params()
            # set it up to filter by minimum area of the blob
            params.filterByArea = True
            params.minArea = 250
            # and by minimum circularity so it hopefully gets only circles
            params.filterByCircularity = True
            params.minCircularity = 0.9
            # work around differences between opencv 2 and 3
            is_v2 = cv2.__version__.startswith('2.')
            if is_v2:
                detector = cv2.SimpleBlobDetector(params)
            else:
                detector = cv2.SimpleBlobDetector_create(params)
            # detect the circles
            keypoints = detector.detect(in_img)
            # Exactly four markers are required to define the perspective warp.
            if len(keypoints) != 4:
                print("Warning: found %d keypoints in '%s'" % (len(keypoints), img))
                continue
            # convert the keypoints to inputs for perspective transformation
            inpts = np.float32([[kp.pt[0], kp.pt[1]] for kp in keypoints])
            # outputs are fixed size, 600x600 pixels - keypoint order matters!
            # NOTE(review): assumes the detector returns the markers in a stable
            # order matching these corners -- verify on real data.
            outpts = np.float32([[600, 600], [0, 600], [600, 0], [0, 0]])
            # calculate the perspective transform matrix
            M = cv2.getPerspectiveTransform(inpts, outpts)
            # do the warp
            img_warp = cv2.warpPerspective(in_img, M, (600, 600))
            # write out the warped image
            out_dir = os.path.join(os.path.dirname(img), 'out')
            if not os.path.isdir(out_dir):
                os.mkdir(out_dir)
            cv2.imwrite(os.path.join(out_dir, os.path.basename(img)), img_warp)
            # write out the individual image cells
            print("Splitting: ", end="")
            cellsize = 100
            for col in range(0, 5):
                for row in range(0, 5):
                    print(".", end="")
                    #print("%d,%d" % (row + 1, col + 1), end=" ")
                    #print("Processing cell %s, %s" % (row + 1, col + 1))
                    cell = img_warp[row*cellsize+50:row*cellsize+cellsize+50, col*cellsize+50:col*cellsize+cellsize+50]
                    cv2.imwrite(os.path.join(out_dir, os.path.basename(img).replace('.jpg', '-%d-%d.jpg' % (row + 1, col + 1))), cell)
            print()
def usage():
    """Print a short command-line usage message."""
    print("Usage: %s <input directory>" % sys.argv[0])
if __name__ == '__main__':
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    input_dir = os.path.abspath(sys.argv[1])
    print("Processing images in input directory '%s'" % input_dir)
    processDir(input_dir)
| 30.376471 | 119 | 0.672734 |
5f0c050146a72c256ac032a96b88571c08022d64 | 1,772 | py | Python | source/_sample/pyopengl/program_manager.py | showa-yojyo/notebook | 82c15074c24d64a1dfcb70a526bc1deb2ecffe68 | [
"MIT"
] | 14 | 2016-04-13T08:10:02.000Z | 2021-04-19T09:42:51.000Z | source/_sample/pyopengl/program_manager.py | showa-yojyo/note | 5f262ecda3df132cb66206c465d16e174061d6b9 | [
"MIT"
] | 88 | 2017-09-27T15:07:05.000Z | 2019-10-02T04:05:03.000Z | source/_sample/pyopengl/program_manager.py | showa-yojyo/note | 5f262ecda3df132cb66206c465d16e174061d6b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""program_manager.py: Define class ProgramManager.
"""
import OpenGL.GL as GL
class ProgramManager(object):
    """OpenGL shader program manager.

    This class manages a program object and its shader objects.
    """

    def __init__(self):
        """Initialize an instance of class ProgramManager."""
        # 0 is OpenGL's "no program" handle.
        self.program_id = 0
        # Mapping of shader type -> GLSL source, set by setup().
        self.shader_sources = None
        # Mapping of shader type -> compiled shader object id.
        self.shader_ids = {}

    def setup(self, shader_sources):
        """Compile, attach and link the shaders, then activate the program.

        :param shader_sources: mapping of shader type (e.g. GL_VERTEX_SHADER)
            to GLSL source string; a falsy value makes this a no-op.
        :raises RuntimeError: if a shader fails to compile or the program
            fails to link. GL objects created before the failure are
            released first so the error path does not leak shader/program
            objects (the original code leaked them).
        """
        if not shader_sources:
            return

        shader_ids = {}
        for shader_type, source in shader_sources.items():
            shader = GL.glCreateShader(shader_type)
            GL.glShaderSource(shader, source)
            GL.glCompileShader(shader)
            if GL.glGetShaderiv(shader, GL.GL_COMPILE_STATUS) != GL.GL_TRUE:
                # Capture the log, then free the failed shader and every
                # previously compiled one before raising.
                log = GL.glGetShaderInfoLog(shader).decode()
                GL.glDeleteShader(shader)
                for created in shader_ids.values():
                    GL.glDeleteShader(created)
                raise RuntimeError(log)
            shader_ids[shader_type] = shader

        self.shader_sources = shader_sources
        self.shader_ids = shader_ids

        self.program_id = GL.glCreateProgram()
        for shader in shader_ids.values():
            GL.glAttachShader(self.program_id, shader)
        GL.glLinkProgram(self.program_id)
        if GL.glGetProgramiv(self.program_id, GL.GL_LINK_STATUS) != GL.GL_TRUE:
            # Release everything created so far before reporting the error.
            log = GL.glGetProgramInfoLog(self.program_id).decode()
            for shader in shader_ids.values():
                GL.glDetachShader(self.program_id, shader)
                GL.glDeleteShader(shader)
            GL.glDeleteProgram(self.program_id)
            self.program_id = 0
            self.shader_sources = None
            self.shader_ids = {}
            raise RuntimeError(log)

        GL.glUseProgram(self.program_id)

    def cleanup(self):
        """Detach and delete the shaders and the program.

        Resets internal state afterwards, so calling cleanup() twice is a
        safe no-op (the original code would have deleted stale handles).
        """
        if not self.shader_sources:
            return

        GL.glUseProgram(0)
        for shader in self.shader_ids.values():
            GL.glDetachShader(self.program_id, shader)
            GL.glDeleteShader(shader)
        GL.glDeleteProgram(self.program_id)
        self.program_id = 0
        self.shader_sources = None
        self.shader_ids = {}
| 30.551724 | 80 | 0.638826 |
e395f22d7ee63ed66e41922858b949d73de6f668 | 12,144 | py | Python | mvn/datasets/human36m.py | QuPengfei/learnable-triangulation-pytorch | 861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2 | [
"MIT"
] | 914 | 2019-05-15T10:54:09.000Z | 2022-03-24T19:20:33.000Z | mvn/datasets/human36m.py | QuPengfei/learnable-triangulation-pytorch | 861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2 | [
"MIT"
] | 129 | 2019-06-08T08:43:42.000Z | 2021-08-03T02:52:12.000Z | mvn/datasets/human36m.py | QuPengfei/learnable-triangulation-pytorch | 861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2 | [
"MIT"
] | 166 | 2019-05-17T03:05:01.000Z | 2022-03-10T18:02:18.000Z | import os
from collections import defaultdict
import pickle
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
from mvn.utils.multiview import Camera
from mvn.utils.img import get_square_bbox, resize_image, crop_image, normalize_image, scale_bbox
from mvn.utils import volumetric
class Human36MMultiViewDataset(Dataset):
"""
Human3.6M for multiview tasks.
"""
def __init__(self,
h36m_root='/Vol1/dbstore/datasets/Human3.6M/processed/',
labels_path='/Vol1/dbstore/datasets/Human3.6M/extra/human36m-multiview-labels-SSDbboxes.npy',
pred_results_path=None,
image_shape=(256, 256),
train=False,
test=False,
retain_every_n_frames_in_test=1,
with_damaged_actions=False,
cuboid_side=2000.0,
scale_bbox=1.5,
norm_image=True,
kind="mpii",
undistort_images=False,
ignore_cameras=[],
crop=True
):
"""
h36m_root:
Path to 'processed/' directory in Human3.6M
labels_path:
Path to 'human36m-multiview-labels.npy' generated by 'generate-labels-npy-multiview.py'
from https://github.sec.samsung.net/RRU8-VIOLET/human36m-preprocessing
retain_every_n_frames_in_test:
By default, there are 159 181 frames in training set and 26 634 in test (val) set.
With this parameter, test set frames will be evenly skipped frames so that the
test set size is `26634 // retain_every_n_frames_test`.
Use a value of 13 to get 2049 frames in test set.
with_damaged_actions:
If `True`, will include 'S9/[Greeting-2,SittingDown-2,Waiting-1]' in test set.
kind:
Keypoint format, 'mpii' or 'human36m'
ignore_cameras:
A list with indices of cameras to exclude (0 to 3 inclusive)
"""
assert train or test, '`Human36MMultiViewDataset` must be constructed with at least ' \
'one of `test=True` / `train=True`'
assert kind in ("mpii", "human36m")
self.h36m_root = h36m_root
self.labels_path = labels_path
self.image_shape = None if image_shape is None else tuple(image_shape)
self.scale_bbox = scale_bbox
self.norm_image = norm_image
self.cuboid_side = cuboid_side
self.kind = kind
self.undistort_images = undistort_images
self.ignore_cameras = ignore_cameras
self.crop = crop
self.labels = np.load(labels_path, allow_pickle=True).item()
n_cameras = len(self.labels['camera_names'])
assert all(camera_idx in range(n_cameras) for camera_idx in self.ignore_cameras)
train_subjects = ['S1', 'S5', 'S6', 'S7', 'S8']
test_subjects = ['S9', 'S11']
train_subjects = list(self.labels['subject_names'].index(x) for x in train_subjects)
test_subjects = list(self.labels['subject_names'].index(x) for x in test_subjects)
indices = []
if train:
mask = np.isin(self.labels['table']['subject_idx'], train_subjects, assume_unique=True)
indices.append(np.nonzero(mask)[0])
if test:
mask = np.isin(self.labels['table']['subject_idx'], test_subjects, assume_unique=True)
if not with_damaged_actions:
mask_S9 = self.labels['table']['subject_idx'] == self.labels['subject_names'].index('S9')
damaged_actions = 'Greeting-2', 'SittingDown-2', 'Waiting-1'
damaged_actions = [self.labels['action_names'].index(x) for x in damaged_actions]
mask_damaged_actions = np.isin(self.labels['table']['action_idx'], damaged_actions)
mask &= ~(mask_S9 & mask_damaged_actions)
indices.append(np.nonzero(mask)[0][::retain_every_n_frames_in_test])
self.labels['table'] = self.labels['table'][np.concatenate(indices)]
self.num_keypoints = 16 if kind == "mpii" else 17
assert self.labels['table']['keypoints'].shape[1] == 17, "Use a newer 'labels' file"
self.keypoints_3d_pred = None
if pred_results_path is not None:
pred_results = np.load(pred_results_path, allow_pickle=True)
keypoints_3d_pred = pred_results['keypoints_3d'][np.argsort(pred_results['indexes'])]
self.keypoints_3d_pred = keypoints_3d_pred[::retain_every_n_frames_in_test]
assert len(self.keypoints_3d_pred) == len(self), \
f"[train={train}, test={test}] {labels_path} has {len(self)} samples, but '{pred_results_path}' " + \
f"has {len(self.keypoints_3d_pred)}. Did you follow all preprocessing instructions carefully?"
def __len__(self):
    """Number of samples, i.e. rows of the filtered labels table."""
    return len(self.labels['table'])
def __getitem__(self, idx):
    """Assemble one multi-view sample for frame *idx*.

    Returns a dict with per-camera lists ('images', 'detections', 'cameras',
    'proj_matrices', optionally 'image_shapes_before_resize') plus the 3D
    ground-truth keypoints, the sample index and, if available, precomputed
    3D predictions.  Cameras listed in ``self.ignore_cameras`` or with an
    empty bounding box are skipped entirely.
    """
    sample = defaultdict(list)  # return value; default_factory removed before returning
    shot = self.labels['table'][idx]

    subject = self.labels['subject_names'][shot['subject_idx']]
    action = self.labels['action_names'][shot['action_idx']]
    frame_idx = shot['frame_idx']

    for camera_idx, camera_name in enumerate(self.labels['camera_names']):
        if camera_idx in self.ignore_cameras:
            continue

        # load bounding box, reordering TLBR to LTRB
        bbox = shot['bbox_by_camera_tlbr'][camera_idx][[1,0,3,2]]  # TLBR to LTRB
        bbox_height = bbox[2] - bbox[0]
        if bbox_height == 0:
            # convention: if the bbox is empty, then this view is missing
            continue

        # scale the bounding box (self.scale_bbox is set in __init__ — assumed a float factor)
        bbox = scale_bbox(bbox, self.scale_bbox)

        # load image; frame indices in filenames are 1-based, hence frame_idx+1
        image_path = os.path.join(
            self.h36m_root, subject, action, 'imageSequence' + '-undistorted' * self.undistort_images,
            camera_name, 'img_%06d.jpg' % (frame_idx+1))
        assert os.path.isfile(image_path), '%s doesn\'t exist' % image_path
        image = cv2.imread(image_path)

        # load camera parameters and keep them consistent with crop/resize below
        shot_camera = self.labels['cameras'][shot['subject_idx'], camera_idx]
        retval_camera = Camera(shot_camera['R'], shot_camera['t'], shot_camera['K'], shot_camera['dist'], camera_name)

        if self.crop:
            # crop image and update the camera intrinsics accordingly
            image = crop_image(image, bbox)
            retval_camera.update_after_crop(bbox)

        if self.image_shape is not None:
            # resize and update intrinsics; remember the pre-resize shape
            image_shape_before_resize = image.shape[:2]
            image = resize_image(image, self.image_shape)
            retval_camera.update_after_resize(image_shape_before_resize, self.image_shape)

            sample['image_shapes_before_resize'].append(image_shape_before_resize)

        if self.norm_image:
            image = normalize_image(image)

        sample['images'].append(image)
        sample['detections'].append(bbox + (1.0,))  # TODO add real confidences
        sample['cameras'].append(retval_camera)
        sample['proj_matrices'].append(retval_camera.projection)

    # 3D keypoints: pad a dummy confidence column of ones -> (num_keypoints, 4)
    sample['keypoints_3d'] = np.pad(
        shot['keypoints'][:self.num_keypoints],
        ((0,0), (0,1)), 'constant', constant_values=1.0)

    # build cuboid (disabled)
    # base_point = sample['keypoints_3d'][6, :3]
    # sides = np.array([self.cuboid_side, self.cuboid_side, self.cuboid_side])
    # position = base_point - sides / 2
    # sample['cuboids'] = volumetric.Cuboid3D(position, sides)

    # save sample's index
    sample['indexes'] = idx

    if self.keypoints_3d_pred is not None:
        sample['pred_keypoints_3d'] = self.keypoints_3d_pred[idx]

    # freeze the defaultdict so missing keys raise instead of silently appearing
    sample.default_factory = None
    return sample
def evaluate_using_per_pose_error(self, per_pose_error, split_by_subject):
    """Aggregate per-pose errors into mean scores per subject and action.

    Returns a dict mapping subject name (plus 'Average') to a dict mapping
    action name (plus 'Average') to the mean error over the selected frames;
    an action with no frames maps to NaN.  The two recorded trials of each
    action ('Xxx-1', 'Xxx-2') are merged into a single 'Xxx' entry.

    Note: ``split_by_subject`` is accepted for interface compatibility but
    is not consulted here.
    """
    def per_action_scores(dataset, errors, mask=None):
        # Default mask: every frame participates.
        if mask is None:
            mask = np.ones_like(errors, dtype=bool)

        totals = {
            'Average': {'total_loss': errors[mask].sum(), 'frame_count': np.count_nonzero(mask)}
        }

        action_names = dataset.labels['action_names']
        for idx in range(len(action_names)):
            selected = errors[(dataset.labels['table']['action_idx'] == idx) & mask]
            totals[action_names[idx]] = {
                'total_loss': selected.sum(), 'frame_count': len(selected)
            }

        # Fold the '-1'/'-2' trial pair of each action into one combined entry.
        trial_bases = [name[:-2] for name in action_names if name.endswith('-1')]
        for base in trial_bases:
            merged = {'total_loss': 0.0, 'frame_count': 0}
            for trial in (1, 2):
                trial_name = '%s-%d' % (base, trial)
                merged['total_loss' ] += totals[trial_name]['total_loss']
                merged['frame_count'] += totals[trial_name]['frame_count']
                del totals[trial_name]
            totals[base] = merged

        # Convert accumulated totals into mean errors (NaN for empty actions).
        return {
            name: (float('nan') if stats['frame_count'] == 0
                   else stats['total_loss'] / stats['frame_count'])
            for name, stats in totals.items()
        }

    scores = {'Average': per_action_scores(self, per_pose_error)}
    for idx in range(len(self.labels['subject_names'])):
        subject_mask = self.labels['table']['subject_idx'] == idx
        scores[self.labels['subject_names'][idx]] = \
            per_action_scores(self, per_pose_error, subject_mask)

    return scores
def evaluate(self, keypoints_3d_predicted, split_by_subject=False, transfer_cmu_to_human36m=False, transfer_human36m_to_human36m=False):
    """Compute absolute and root-relative MPJPE (mm) against the ground truth.

    Args:
        keypoints_3d_predicted: array of shape (n_samples, num_keypoints, 3),
            same shape as the stored ground truth.
        split_by_subject: forwarded to ``evaluate_using_per_pose_error``.
        transfer_cmu_to_human36m / transfer_human36m_to_human36m: evaluate on
            the 6 limb joints shared between skeleton layouts (cross-dataset
            transfer mode), using the first joint as root.

    Returns:
        (overall relative average error, full result dict with
        'per_pose_error' and 'per_pose_error_relative' score tables).

    Raises:
        ValueError: if the prediction shape does not match the ground truth.
    """
    keypoints_gt = self.labels['table']['keypoints'][:, :self.num_keypoints]
    if keypoints_3d_predicted.shape != keypoints_gt.shape:
        raise ValueError(
            '`keypoints_3d_predicted` shape should be %s, got %s' %
            (keypoints_gt.shape, keypoints_3d_predicted.shape))

    if transfer_cmu_to_human36m or transfer_human36m_to_human36m:
        # Restrict evaluation to the 6 limb joints shared between layouts.
        human36m_joints = [10, 11, 15, 14, 1, 4]
        cmu_joints = human36m_joints if transfer_human36m_to_human36m else [10, 8, 9, 7, 14, 13]

        keypoints_gt = keypoints_gt[:, human36m_joints]
        keypoints_3d_predicted = keypoints_3d_predicted[:, cmu_joints]

    # mean error per 16/17 joints in mm, for each pose
    per_pose_error = np.sqrt(((keypoints_gt - keypoints_3d_predicted) ** 2).sum(2)).mean(1)

    # Root joint for the relative metric.  Fix: the original expression was the
    # dead conditional `6 if self.kind == "mpii" else 6` — both branches yield
    # the pelvis index 6, so the constant is used directly.
    root_index = 0 if (transfer_cmu_to_human36m or transfer_human36m_to_human36m) else 6

    # relative mean error per 16/17 joints in mm, for each pose
    keypoints_gt_relative = keypoints_gt - keypoints_gt[:, root_index:root_index + 1, :]
    keypoints_3d_predicted_relative = keypoints_3d_predicted - keypoints_3d_predicted[:, root_index:root_index + 1, :]
    per_pose_error_relative = np.sqrt(((keypoints_gt_relative - keypoints_3d_predicted_relative) ** 2).sum(2)).mean(1)

    result = {
        'per_pose_error': self.evaluate_using_per_pose_error(per_pose_error, split_by_subject),
        'per_pose_error_relative': self.evaluate_using_per_pose_error(per_pose_error_relative, split_by_subject)
    }

    return result['per_pose_error_relative']['Average']['Average'], result
| 44.321168 | 140 | 0.615777 |
93d31b57e11df0cbb469a2ea87c01cd62fa1c1d4 | 2,286 | py | Python | stubs/micropython-v1_13-esp32/uos.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_13-esp32/uos.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_13-esp32/uos.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: 'uos' on micropython-v1.13-266-esp32
"""
# MCU: {'ver': 'v1.13-266', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.13.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.13.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '266', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.5.0
from typing import Any
def remove(*args) -> Any:
    """Stub for uos.remove(path); the real implementation lives in the MicroPython firmware."""
    ...
class VfsFat:
    """Type stub for the MicroPython FAT filesystem driver.

    Signatures only (auto-generated by Stubber); every method is implemented
    by the firmware at runtime, hence the ellipsis bodies.
    """
    def open(self, *args) -> Any:
        ...

    def remove(self, *args) -> Any:
        ...

    def chdir(self, *args) -> Any:
        ...

    def getcwd(self, *args) -> Any:
        ...

    def ilistdir(self, *args) -> Any:
        ...

    def mkdir(self, *args) -> Any:
        ...

    def mkfs(self, *args) -> Any:
        ...

    def mount(self, *args) -> Any:
        ...

    def rename(self, *args) -> Any:
        ...

    def rmdir(self, *args) -> Any:
        ...

    def stat(self, *args) -> Any:
        ...

    def statvfs(self, *args) -> Any:
        ...

    def umount(self, *args) -> Any:
        ...
class VfsLfs2:
    """Type stub for the MicroPython littlefs v2 filesystem driver.

    Same interface as VfsFat; signatures only, firmware-implemented.
    """
    def open(self, *args) -> Any:
        ...

    def remove(self, *args) -> Any:
        ...

    def chdir(self, *args) -> Any:
        ...

    def getcwd(self, *args) -> Any:
        ...

    def ilistdir(self, *args) -> Any:
        ...

    def mkdir(self, *args) -> Any:
        ...

    def mkfs(self, *args) -> Any:
        ...

    def mount(self, *args) -> Any:
        ...

    def rename(self, *args) -> Any:
        ...

    def rmdir(self, *args) -> Any:
        ...

    def stat(self, *args) -> Any:
        ...

    def statvfs(self, *args) -> Any:
        ...

    def umount(self, *args) -> Any:
        ...
# Module-level filesystem / terminal helpers of the `uos` module.
# Stubs only: all bodies are provided by the MicroPython firmware at runtime.
def chdir(*args) -> Any:
    ...

def dupterm(*args) -> Any:
    ...

def dupterm_notify(*args) -> Any:
    ...

def getcwd(*args) -> Any:
    ...

def ilistdir(*args) -> Any:
    ...

def listdir(*args) -> Any:
    ...

def mkdir(*args) -> Any:
    ...

def mount(*args) -> Any:
    ...

def rename(*args) -> Any:
    ...

def rmdir(*args) -> Any:
    ...

def stat(*args) -> Any:
    ...

def statvfs(*args) -> Any:
    ...

def umount(*args) -> Any:
    ...

def uname(*args) -> Any:
    ...

def urandom(*args) -> Any:
    ...
| 14.56051 | 294 | 0.451006 |
9243feb24e24bbc02065fe51884824955c6d22ec | 2,067 | py | Python | modules/pytket-cirq/setup.py | isobelhooper/pytket-extensions | 53e1f40844fff29814a599d70a61963c27f094f2 | [
"Apache-2.0"
] | null | null | null | modules/pytket-cirq/setup.py | isobelhooper/pytket-extensions | 53e1f40844fff29814a599d70a61963c27f094f2 | [
"Apache-2.0"
] | null | null | null | modules/pytket-cirq/setup.py | isobelhooper/pytket-extensions | 53e1f40844fff29814a599d70a61963c27f094f2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import os
from setuptools import setup, find_namespace_packages # type: ignore
# Load __extension_name__ / __extension_version__ from _metadata.py into a dict
# (standard setup.py pattern: exec the metadata module into a namespace).
metadata: dict = {}
with open("_metadata.py") as fp:
    exec(fp.read(), metadata)

# Ship a copy of the metadata module inside the installed package so the
# extension can report its own version at runtime.
shutil.copy(
    "_metadata.py",
    os.path.join("pytket", "extensions", "cirq", "_metadata.py"),
)

setup(
    name=metadata["__extension_name__"],
    version=metadata["__extension_version__"],
    author="Will Simmons",
    author_email="will.simmons@cambridgequantum.com",
    python_requires=">=3.7",
    url="https://github.com/CQCL/pytket-extensions",
    description="Extension for pytket, providing translation to and from the Cirq "
    "framework",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    license="Apache 2",
    packages=find_namespace_packages(include=["pytket.*"]),
    include_package_data=True,
    install_requires=["pytket ~= 0.11.0", "cirq ~= 0.11.0"],
    classifiers=[
        "Environment :: Console",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: Other/Proprietary License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft :: Windows",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
    ],
    zip_safe=False,
)
| 36.263158 | 83 | 0.685051 |
e7927438d5930b243ad6f6a9a3a53bcdc215fe29 | 27,068 | py | Python | QUANTAXIS/QACmd/__init__.py | nuswgg/QUANTAXIS | ccdb6116e1974f3a7c9d0e6635060bfb7f149b88 | [
"MIT"
] | 3 | 2020-10-20T07:48:52.000Z | 2022-02-11T05:47:34.000Z | QUANTAXIS/QACmd/__init__.py | nuswgg/QUANTAXIS | ccdb6116e1974f3a7c9d0e6635060bfb7f149b88 | [
"MIT"
] | null | null | null | QUANTAXIS/QACmd/__init__.py | nuswgg/QUANTAXIS | ccdb6116e1974f3a7c9d0e6635060bfb7f149b88 | [
"MIT"
] | 1 | 2020-03-10T11:01:25.000Z | 2020-03-10T11:01:25.000Z | # encoding: UTF-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cmd
import csv
import os
import shutil
import string
import sys
import platform
import subprocess
import requests
from QUANTAXIS.QACmd.runner import run_backtest, run
from QUANTAXIS.QAApplication.QAAnalysis import QA_backtest_analysis_backtest
from QUANTAXIS.QAUtil import QA_util_log_info, QA_Setting, QA_util_mongo_initial
from QUANTAXIS.QASU.main import (
QA_SU_save_stock_list,
QA_SU_save_stock_min,
QA_SU_save_stock_transaction,
QA_SU_save_index_transaction,
QA_SU_save_single_stock_min,
QA_SU_save_stock_xdxr,
QA_SU_save_stock_block,
QA_SU_save_stock_info,
QA_SU_save_stock_info_tushare,
QA_SU_save_stock_day,
QA_SU_save_single_stock_day,
QA_SU_save_index_day,
QA_SU_save_single_index_day,
QA_SU_save_index_min,
QA_SU_save_single_index_min,
QA_SU_save_future_list,
QA_SU_save_index_list,
QA_SU_save_etf_list,
QA_SU_save_etf_day,
QA_SU_save_single_etf_day,
QA_SU_save_etf_min,
QA_SU_save_single_etf_min,
QA_SU_save_financialfiles,
QA_SU_save_option_50etf_day,
QA_SU_save_option_50etf_min,
QA_SU_save_option_300etf_day,
QA_SU_save_option_300etf_min,
QA_SU_save_option_commodity_day,
QA_SU_save_option_commodity_min,
QA_SU_save_option_contract_list,
QA_SU_save_option_day_all,
QA_SU_save_option_min_all,
QA_SU_save_future_day,
QA_SU_save_future_min,
QA_SU_save_future_min_all,
QA_SU_save_future_day_all,
QA_SU_save_report_calendar_day,
QA_SU_save_report_calendar_his,
QA_SU_save_stock_divyield_day,
QA_SU_save_stock_divyield_his,
QA_SU_save_bond_day,
QA_SU_save_single_bond_day,
QA_SU_save_bond_list,
QA_SU_save_bond_min,
QA_SU_save_single_bond_min
)
from QUANTAXIS.QASU.save_binance import QA_SU_save_binance_symbol, QA_SU_save_binance_1hour, \
QA_SU_save_binance_1day, QA_SU_save_binance_1min, QA_SU_save_binance
from QUANTAXIS.QASU.save_bitfinex import QA_SU_save_bitfinex_symbol, QA_SU_save_bitfinex_1hour, \
QA_SU_save_bitfinex_1day, QA_SU_save_bitfinex_1min, QA_SU_save_bitfinex
from QUANTAXIS.QASU.save_bitmex import QA_SU_save_bitmex_symbol, QA_SU_save_bitmex
from QUANTAXIS.QASU.save_huobi import QA_SU_save_huobi_symbol, QA_SU_save_huobi_1hour, \
QA_SU_save_huobi_1day, QA_SU_save_huobi_1min, QA_SU_save_huobi, QA_SU_save_huobi_realtime
from QUANTAXIS.QASU.save_okex import QA_SU_save_okex_symbol, QA_SU_save_okex_1hour, \
QA_SU_save_okex_1day, QA_SU_save_okex_1min, QA_SU_save_okex
# 东方财富爬虫
from QUANTAXIS.QASU.main import (QA_SU_crawl_eastmoney)
from QUANTAXIS import __version__
class CLI(cmd.Cmd):
def __init__(self):
    """Initialise the interactive console and set the prompt."""
    cmd.Cmd.__init__(self)
    self.prompt = 'QUANTAXIS> '  # command-line prompt shown to the user

def do_shell(self, arg):
    """Run a shell command and print its stdout.

    NOTE(review): the string is passed verbatim to the system shell
    (shell=True) — intended for this interactive developer console, but
    unsafe if ever exposed to untrusted input.
    """
    print(">", arg)
    sub_cmd = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
    print(sub_cmd.communicate()[0])

def do_version(self, arg):
    """Print the installed QUANTAXIS version."""
    QA_util_log_info(__version__)

def help_version(self):
    print("syntax: version [message]",)
    print("-- prints a version message")
# @click.command()
# @click.option('--e', default=1, help='Number of greetings.')
def do_examples(self, arg):
    """Download the QADemo example project archive into the current directory."""
    QA_util_log_info('QUANTAXIS example')
    now_path = os.getcwd()
    #project_dir = os.path.dirname(os.path.abspath(__file__))
    data = requests.get(
        'https://codeload.github.com/quantaxis/QADemo/zip/master'
    )
    # NOTE(review): no HTTP status check — a failed request would write the
    # error page into QADEMO.zip.
    with open("{}{}QADEMO.zip".format(now_path, os.sep), "wb") as code:
        code.write(data.content)
    QA_util_log_info(
        'Successfully generate QADEMO in : {}, for more examples, please visit https://github.com/quantaxis/qademo'
        .format(now_path)
    )

def help_examples(self):
    print('make a sample backtest framework')
def do_download_updatex(self, arg):
    """Download the helper script ``update_x.py`` from the QUANTAXIS GitHub
    repository into the current working directory.

    Fix: the HTTP status is now verified before writing, so a failed request
    no longer silently writes an HTML error page into the script file.
    """
    now_path = os.getcwd()
    data = requests.get(
        'https://raw.githubusercontent.com/QUANTAXIS/QUANTAXIS/master/config/update_x.py'
    )
    data.raise_for_status()  # fail loudly instead of saving an error page
    with open("{}{}update_x.py".format(now_path, os.sep), "wb") as code:
        code.write(data.content)

def do_download_updateall(self, arg):
    """Download ``update_all.py`` into the current working directory.

    Same contract and same status-check fix as ``do_download_updatex``.
    """
    now_path = os.getcwd()
    data = requests.get(
        'https://raw.githubusercontent.com/QUANTAXIS/QUANTAXIS/master/config/update_all.py'
    )
    data.raise_for_status()  # fail loudly instead of saving an error page
    with open("{}{}update_all.py".format(now_path, os.sep), "wb") as code:
        code.write(data.content)
def do_drop_database(self, arg):
    """Drop all QUANTAXIS MongoDB databases (destructive, no confirmation)."""
    QA_util_mongo_initial()

def help_drop_database(self):
    print('drop quantaxis\'s databases')

def do_quit(self, arg):  # action executed by the quit command
    # NOTE(review): exits with status 1 even on a normal quit — confirm whether
    # callers rely on the non-zero code before changing it to 0.
    sys.exit(1)

def help_quit(self):  # help text for the quit command
    print("syntax: quit",)
    print("-- terminates the application")
def do_clean(self, arg):
    """Delete old backtest reports (back*csv) and log files (*log) from the
    current working directory.

    Fix: replaces the platform-branched ``os.popen('del …' / 'rm -rf …')``
    shell calls with a portable glob + os.remove loop — no shell is spawned,
    the Windows/Unix branch disappears, and deletion is synchronous instead
    of fire-and-forget.
    """
    import glob  # local import: only this command needs it

    for pattern in ('back*csv', '*log'):
        for path in glob.glob(pattern):
            try:
                os.remove(path)
            except OSError:
                # best-effort cleanup, mirroring the original silent behaviour
                pass

def help_clean(self):
    QA_util_log_info('Clean the old backtest reports and logs')
def do_exit(self, arg):  # action executed by the exit command
    # Same behaviour as do_quit: terminates the process with status 1.
    sys.exit(1)

def help_exit(self):
    print('syntax: exit')
    print("-- terminates the application")
def print_crawl_usage(self):
    """Print usage help for the `crawl` command (user-facing text, kept verbatim)."""
    print(
        "Usage: \n\
----------------------------------------------------------------------------------------------------------------------\n\
⌨️命令格式:crawl eastmoney zjlx 6位股票代码 : 抓取 东方财富 资金流向 ❤️鸣谢❤️ www.eastmoney.com 网页提供数据!\n\
⌨️命令格式:crawl jrj zjlx 6位股票代码 : 抓取 金融界 资金流向 ❤️鸣谢❤️ www.jrj.com.cn 网页提供数据!\n\
⌨️命令格式:crawl 10jqka funds 6位股票代码 : 抓取 同花顺 资金流向 ❤️鸣谢❤️ www.10jqka.com.cn 网页提供数据!\n\
-----------------------------------------------------------------------------------------------------------------------\n\
⌨️命令格式:crawl eastmoney zjlx all : 抓取 东方财富 所有股票资金流向 ❤️鸣谢❤️ www.eastmoney.com 网页提供数据!\n\
⌨️命令格式:crawl jrj zjlx all : 抓取 金融界 所有股票资金流向 ❤️鸣谢❤️ www.jrj.com.cn 网页提供数据!\n\
⌨️命令格式:crawl 10jqka funds all : 抓取 同花顺 所有股票资金流向 ❤️鸣谢❤️ www.10jqka.com.cn 网页提供数据!\n\
-----------------------------------------------------------------------------------------------------------------------\n\
@yutiansut\n\
@QUANTAXIS\n\
请访问 https://book.yutiansut.com/\n\
"
    )
def do_crawl(self, arg):
    """Dispatch the `crawl` sub-commands (money-flow web spiders).

    Only the eastmoney spider is implemented; the jrj and 10jqka branches
    print a "not implemented" notice.
    """
    if arg == '':
        self.print_crawl_usage()
    else:
        arg = arg.split(' ')
        if len(arg) == 3 and arg[0] == 'eastmoney' and arg[1] == 'zjlx' and arg[2] != 'all':
            # single-stock eastmoney money-flow crawl
            print(" 准备抓取东方财富资金流向数据 ")
            QA_SU_crawl_eastmoney(action=arg[1], stockCode=arg[2])
        elif len(arg) == 3 and arg[0] == 'jrj' and arg[1] == 'zjlx' and arg[2] != 'all':
            print("❌crawl jrj zjlx XXXXXX !没有实现")
        elif len(arg) == 3 and arg[0] == '10jqka' and arg[1] == 'funds' and arg[2] != 'all':
            print("❌crawl 10jqka funds XXXXXX !没有实现")
        elif len(arg) == 3 and arg[0] == 'eastmoney' and arg[1] == 'zjlx' and arg[2] == 'all':
            #print("❌crawl eastmoney zjlx all !没有实现")
            # full-market eastmoney money-flow crawl ('all' is passed through as the code)
            print(" 准备抓取东方财富资金流向数据 ")
            QA_SU_crawl_eastmoney(action=arg[1], stockCode=arg[2])
        elif len(arg) == 3 and arg[0] == 'jrj' and arg[1] == 'zjlx' and arg[2] == 'all':
            print("❌crawl jrj zjlx all !没有实现")
        elif len(arg) == 3 and arg[0] == '10jqka' and arg[1] == 'funds' and arg[2] == 'all':
            print("❌crawl 10jqka funds all !没有实现")
        else:
            print("❌crawl 命令格式不正确!")
            self.print_crawl_usage()
def print_save_usage(self):
    """Print usage help for the `save` command (user-facing text, kept verbatim)."""
    print(
        "Usage: \n\
命令格式:save all : save stock_day/xdxr/ index_day/ stock_list/index_list \n\
命令格式:save X|x : save stock_day/xdxr/min index_day/min etf_day/min stock_list/index_list/block \n\
命令格式:save day : save stock_day/xdxr index_day etf_day stock_list/index_list \n\
命令格式:save min : save stock_min/xdxr index_min etf_min stock_list/index_list \n\
命令格式: save future: save future_day/min/list \n\
命令格式: save ox: save option_contract_list/option_day/option_min/option_commodity_day/option_commodity_min \n\
命令格式: save transaction: save stock_transaction and index_transaction (Warning: Large Disk Space Required) \n\
------------------------------------------------------------ \n\
命令格式:save stock_day : 保存日线数据 \n\
命令格式:save single_stock_day : 保存单个股票日线数据 \n\
命令格式:save stock_xdxr : 保存日除权除息数据 \n\
命令格式:save stock_min : 保存分钟线数据 \n\
命令格式:save single_stock_min : 保存单个股票分钟线数据 \n\
命令格式:save index_day : 保存指数日线数据 \n\
命令格式:save index_min : 保存指数分钟线数据 \n\
命令格式:save single_index_min : 保存单个指数分钟线数据 \n\
命令格式:save future_day : 保存期货日线数据 \n\
命令格式:save future_min : 保存期货分钟线数据 \n\
命令格式:save etf_day : 保存ETF日线数据 \n\
命令格式:save single_etf_day : 保存单个ETF日线数据 \n\
命令格式:save etf_min : 保存ET分钟数据 \n\
命令格式:save stock_list : 保存股票列表 \n\
命令格式:save stock_block: 保存板块 \n\
命令格式:save stock_info : 保存tushare数据接口获取的股票列表 \n\
命令格式:save financialfiles : 保存高级财务数据(自1996年开始) \n\
命令格式:save option_contract_list 保存上市的期权合约信息(不包括已经过期摘牌的合约数据)\n\
命令格式:save 50etf_option_day : 保存上海证券交易所50ETF期权日线数据(不包括已经过期摘牌的数据) \n\
命令格式:save 50etf_option_min : 保存上海证券交易所50ETF期权分钟线数据(不包括已经过期摘牌的数据) \n\
命令格式:save 300etf_option_day : 保存上海证券交易所300ETF期权日线数据(不包括已经过期摘牌的数据) \n\
命令格式:save 300etf_option_min : 保存上海证券交易所300ETF期权分钟线数据(不包括已经过期摘牌的数据) \n\
命令格式:save option_commodity_day : 保存商品期权日线数据(不包括已经过期摘牌的数据) \n\
命令格式:save option_commodity_min : 保存商品期权分钟线数据(不包括已经过期摘牌的数据) \n\
命令格式:save option_day_all : 保存上海证券交易所所有期权日线数据(不包括已经过期摘牌的数据) \n\
命令格式:save option_min_all : 保存上海证券交易所所有期权分钟数据(不包括已经过期摘牌的数据) \n\
命令格式:save index_list : 保存指数列表 \n\
命令格式:save etf_list : 保存etf列表 \n\
命令格式:save future_list : 保存期货列表 \n\
命令格式:save bond_day : 保存债券日线数据 \n\
命令格式:save single_bond_day : 保存单个债券日线数据 \n\
命令格式:save bond_min : 保存债券分钟线数据 \n\
命令格式:save single_bond_min : 保存单个债券分钟线数据 \n\
命令格式:save bond_list : 保存债券列表 \n\
命令格式:save bitmex : 保存bitmex交易所日线\现货交易对小时线数据 \n\
命令格式:save binance : 保存币安交易所数据 \n\
命令格式:save binance all : 一次性保存币安交易所日/小时/30/15/5/1分钟线数据(耗时很长) \n\
命令格式:save binance 1day/1hour/1min : 单独保存币安交易所日/小时/分钟数据 \n\
命令格式:save bitfinex : 保存bitfinex交易所数据 \n\
命令格式:save bitfinex all : 一次性保存bitfinex交易所日/小时/30/15/5/1分钟线数据(耗时很长) \n\
命令格式:save bitfinex 1day/1hour/1min : 单独保存bitfinex交易所日/小时/分钟数据 \n\
命令格式:save huobi : 保存火币Pro交易所日/小时/分钟现货交易对数据 \n\
命令格式:save huobi all : 一次性保存火币Pro交易所日/小时/30/15/5/1分钟线数据(耗时很长) \n\
命令格式:save huobi 1day/1hour/1min/5min/15min/30min : 单独保存火币Pro交易所日/小时/分钟线数据 \n\
命令格式:save huobi realtime : 接收火币Pro交易所实时行情(仅排名前30的主要币种)\n\
命令格式:save okex : 保存OKEx交易所数据 \n\
命令格式:save okex all : 一次性保存OKEx交易所日/小时/30/15/5/1分钟线数据(耗时很长) \n\
命令格式:save okex 86400/3600/1800/900/300/60 : 单独保存OKEx交易所日/小时/30/15/5/1分钟数据 \n\
----------------------------------------------------------\n\
if you just want to save daily data just\n\
save all+ save stock_block+save stock_info, it about 1G data \n\
if you want to save save the fully data including min level \n\
save x + save stock_info \n \n\
@yutiansut\n\
@QUANTAXIS\n\
请访问 https://book.yutiansut.com/\n\
"
    )
def do_save(self, arg):
    """Dispatch the `save` sub-commands: download market data into MongoDB.

    The first token of *arg* selects a preset ('all', 'day', 'min', 'x',
    exchange names, ...); unknown tokens are resolved dynamically via
    ``eval("QA_SU_save_<token>('tdx')")`` in the final branch.
    """
    # The admin user is inserted here only for initialization; real user
    # registration happens in the webkit layer.
    if arg == '':
        self.print_save_usage()
    else:
        arg = arg.split(' ')
        if len(arg) == 1 and arg[0] == 'all':
            # daily-level preset: day bars, xdxr, lists and blocks
            if QA_Setting().client.quantaxis.user_list.find(
                    {'username': 'admin'}).count() == 0:
                QA_Setting().client.quantaxis.user_list.insert(
                    {
                        'username': 'admin',
                        'password': 'admin'
                    }
                )
            # TODO: accept 'ts' vs 'tdx' as a command argument
            # QA_SU_save_stock_day('ts')
            QA_SU_save_stock_day('tdx')
            QA_SU_save_stock_xdxr('tdx')
            # QA_SU_save_stock_min('tdx')
            QA_SU_save_index_day('tdx')
            # QA_SU_save_index_min('tdx')
            QA_SU_save_etf_list('tdx')
            # QA_SU_save_etf_day('tdx')
            # QA_SU_save_etf_min('tdx')
            QA_SU_save_index_list('tdx')
            QA_SU_save_stock_list('tdx')
            QA_SU_save_stock_block('tdx')
            # QA_SU_save_stock_info('tdx')
            # QA_SU_save_report_calendar_his()
            # QA_SU_save_stock_divyield_his()
        elif len(arg) == 1 and arg[0] == 'day':
            if QA_Setting().client.quantaxis.user_list.find(
                    {'username': 'admin'}).count() == 0:
                QA_Setting().client.quantaxis.user_list.insert(
                    {
                        'username': 'admin',
                        'password': 'admin'
                    }
                )
            QA_SU_save_stock_day('tdx')
            QA_SU_save_stock_xdxr('tdx')
            # QA_SU_save_stock_min('tdx')
            QA_SU_save_index_day('tdx')
            # QA_SU_save_index_min('tdx')
            QA_SU_save_etf_list('tdx')
            QA_SU_save_etf_day('tdx')
            # QA_SU_save_etf_min('tdx')
            QA_SU_save_index_list('tdx')
            QA_SU_save_stock_list('tdx')
            QA_SU_save_stock_block('tdx')
            # QA_SU_save_stock_divyield_day()
            # QA_SU_save_report_calendar_day()
        elif len(arg) == 1 and arg[0] == 'min':
            if QA_Setting().client.quantaxis.user_list.find(
                    {'username': 'admin'}).count() == 0:
                QA_Setting().client.quantaxis.user_list.insert(
                    {
                        'username': 'admin',
                        'password': 'admin'
                    }
                )
            # QA_SU_save_stock_day('tdx')
            QA_SU_save_stock_xdxr('tdx')
            QA_SU_save_stock_min('tdx')
            # QA_SU_save_index_day('tdx')
            QA_SU_save_index_min('tdx')
            QA_SU_save_etf_list('tdx')
            # QA_SU_save_etf_day('tdx')
            QA_SU_save_etf_min('tdx')
            QA_SU_save_stock_list('tdx')
            QA_SU_save_index_list('tdx')
            # QA_SU_save_stock_block('tdx')
        elif len(arg) == 1 and arg[0] == 'transaction':
            # tick data; requires a lot of disk space
            if QA_Setting().client.quantaxis.user_list.find(
                    {'username': 'admin'}).count() == 0:
                QA_Setting().client.quantaxis.user_list.insert(
                    {
                        'username': 'admin',
                        'password': 'admin'
                    }
                )
            QA_SU_save_index_transaction('tdx')
            QA_SU_save_stock_transaction('tdx')
            # QA_SU_save_stock_day('tdx')
            # QA_SU_save_stock_xdxr('tdx')
            # QA_SU_save_stock_min('tdx')
            # QA_SU_save_index_day('tdx')
            # QA_SU_save_index_min('tdx')
            # QA_SU_save_etf_list('tdx')
            # QA_SU_save_etf_day('tdx')
            # QA_SU_save_etf_min('tdx')
            # QA_SU_save_stock_list('tdx')
            # QA_SU_save_index_list('tdx')
            # QA_SU_save_stock_block('tdx')
        elif len(arg) == 1 and arg[0] in ['X', 'x']:
            # full preset: day + minute level for stocks / indexes / ETFs
            if QA_Setting().client.quantaxis.user_list.find(
                    {'username': 'admin'}).count() == 0:
                QA_Setting().client.quantaxis.user_list.insert(
                    {
                        'username': 'admin',
                        'password': 'admin'
                    }
                )
            QA_SU_save_stock_day('tdx')
            QA_SU_save_stock_xdxr('tdx')
            QA_SU_save_stock_min('tdx')
            QA_SU_save_index_day('tdx')
            QA_SU_save_index_min('tdx')
            QA_SU_save_etf_list('tdx')
            QA_SU_save_etf_day('tdx')
            QA_SU_save_etf_min('tdx')
            QA_SU_save_stock_list('tdx')
            QA_SU_save_index_list('tdx')
            QA_SU_save_stock_block('tdx')
            QA_SU_save_future_list('tdx')
            # QA_SU_save_stock_info('tdx')
        elif len(arg) == 1 and arg[0] == "binance":
            QA_SU_save_binance_symbol()
            QA_SU_save_binance_1day()
            QA_SU_save_binance_1hour()
            QA_SU_save_binance_1min()
        elif len(arg) == 2 and arg[0] == "binance":
            if (arg[1] == 'all'):
                QA_SU_save_binance_symbol()
                QA_SU_save_binance_1day()
                QA_SU_save_binance_1hour()
                QA_SU_save_binance('30m')
                QA_SU_save_binance('15m')
                QA_SU_save_binance('5m')
                QA_SU_save_binance_1min()
            else:
                frequency = arg[1]
                QA_SU_save_binance(frequency)
        elif len(arg) == 1 and arg[0] == "bitfinex":
            QA_SU_save_bitfinex_symbol()
            QA_SU_save_bitfinex_1day()
            QA_SU_save_bitfinex_1hour()
            QA_SU_save_bitfinex_1min()
        elif len(arg) == 2 and arg[0] == "bitfinex":
            if (arg[1] == 'all'):
                QA_SU_save_bitfinex_symbol()
                QA_SU_save_bitfinex_1day()
                QA_SU_save_bitfinex_1hour()
                QA_SU_save_bitfinex('30m')
                QA_SU_save_bitfinex('15m')
                QA_SU_save_bitfinex('5m')
                QA_SU_save_bitfinex_1min()
            else:
                frequency = arg[1]
                QA_SU_save_bitfinex(frequency)
        elif len(arg) == 1 and arg[0] == "bitmex":
            QA_SU_save_bitmex_symbol()
            QA_SU_save_bitmex('1d')
            QA_SU_save_bitmex('1h')
            QA_SU_save_bitmex('1m')
        elif len(arg) == 1 and arg[0] == "huobi":
            QA_SU_save_huobi_symbol()
            QA_SU_save_huobi_1day()
            QA_SU_save_huobi_1hour()
            QA_SU_save_huobi_1min()
        elif len(arg) == 2 and arg[0] == "huobi":
            if (arg[1] == 'realtime'):
                QA_SU_save_huobi_realtime()
            elif (arg[1] == 'all'):
                QA_SU_save_huobi_symbol()
                QA_SU_save_huobi_1day()
                QA_SU_save_huobi_1hour()
                QA_SU_save_huobi('30min')
                QA_SU_save_huobi('15min')
                QA_SU_save_huobi('5min')
                QA_SU_save_huobi_1min()
            else:
                frequency = arg[1]
                QA_SU_save_huobi(frequency)
        elif len(arg) == 1 and arg[0] == "okex":
            QA_SU_save_okex_symbol()
            QA_SU_save_okex_1day()
            QA_SU_save_okex_1hour()
            QA_SU_save_okex_1min()
        elif len(arg) == 2 and arg[0] == "okex":
            if (arg[1] == 'all'):
                QA_SU_save_okex_symbol()
                QA_SU_save_okex_1day()
                QA_SU_save_okex_1hour()
                QA_SU_save_okex('1800')
                QA_SU_save_okex('900')
                QA_SU_save_okex('300')
                QA_SU_save_okex_1min()
            else:
                frequency = arg[1]
                QA_SU_save_okex(frequency)
        elif len(arg) == 1 and arg[0] == "financialfiles":
            QA_SU_save_financialfiles()
        elif len(arg) == 1 and arg[0] == "future":
            QA_SU_save_future_day('tdx')
            QA_SU_save_future_min('tdx')
            QA_SU_save_future_list('tdx')
        elif len(arg) == 1 and arg[0] == "future_all":
            QA_SU_save_future_day_all('tdx')
            QA_SU_save_future_min_all('tdx')
            QA_SU_save_future_list('tdx')
        elif len(arg) == 1 and arg[0] == '50etf_option_day':
            QA_SU_save_option_50etf_day('tdx')
        elif len(arg) == 1 and arg[0] == '50etf_option_min':
            QA_SU_save_option_50etf_min('tdx')
        elif len(arg) == 1 and arg[0] == '300etf_option_day':
            QA_SU_save_option_300etf_day('tdx')
        elif len(arg) == 1 and arg[0] == '300etf_option_min':
            QA_SU_save_option_300etf_min('tdx')
        elif len(arg) == 1 and arg[0] == 'option_commodity_day':
            QA_SU_save_option_commodity_day('tdx')
        elif len(arg) == 1 and arg[0] == 'option_commodity_min':
            QA_SU_save_option_commodity_min('tdx')
        elif len(arg) == 1 and arg[0] in ['ox', 'OX', 'oX', 'Ox']:
            # option preset: contract list plus all ETF/commodity option bars
            QA_SU_save_option_contract_list('tdx')
            QA_SU_save_option_50etf_day('tdx')
            QA_SU_save_option_50etf_min('tdx')
            QA_SU_save_option_300etf_day('tdx')
            QA_SU_save_option_300etf_min('tdx')
            QA_SU_save_option_commodity_day('tdx')
            QA_SU_save_option_commodity_min('tdx')
        elif len(arg) == 2 and arg[0] == 'single_stock_day':
            QA_SU_save_single_stock_day(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_index_day':
            QA_SU_save_single_index_day(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_etf_day':
            QA_SU_save_single_etf_day(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_stock_min':
            QA_SU_save_single_stock_min(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_index_min':
            QA_SU_save_single_index_min(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_etf_min':
            QA_SU_save_single_etf_min(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_bond_day':
            QA_SU_save_single_bond_day(arg[1], 'tdx')
        elif len(arg) == 2 and arg[0] == 'single_bond_min':
            QA_SU_save_single_bond_min(arg[1], 'tdx')
        else:
            # fallback: resolve each token dynamically to a QA_SU_save_* call.
            # NOTE(review): eval() on console input + bare except — any failure
            # (including typos) is reported only as a usage error.
            for i in arg:
                if i == 'insert_user':
                    if QA_Setting().client.quantaxis.user_list.find(
                            {'username': 'admin'}).count() == 0:
                        QA_Setting().client.quantaxis.user_list.insert(
                            {
                                'username': 'admin',
                                'password': 'admin'
                            }
                        )
                else:
                    try:
                        eval("QA_SU_save_%s('tdx')" % (i))
                    except:
                        print("❌命令格式不正确!")
                        self.print_save_usage()
def help_save(self):
    QA_util_log_info('Save all the stock data from pytdx')

def do_fn(self, arg):
    """Evaluate *arg* as a Python expression and log the result.

    NOTE(review): eval() on raw console input — acceptable only because this
    is a local developer console; never expose it to untrusted input.
    """
    try:
        QA_util_log_info(eval(arg))
    except:
        # NOTE(review): prints the Exception *class*, not the caught error;
        # the actual failure detail is lost.
        print(Exception)

def do_help(self, arg):
    """List the available console commands."""
    QA_util_log_info("Possible commands are:")
    QA_util_log_info("save")
    QA_util_log_info("clean")
    QA_util_log_info("fn")
    QA_util_log_info("drop_database")
    QA_util_log_info("examples")
    QA_util_log_info("shell")
    QA_util_log_info("version")
    QA_util_log_info("quit")
    QA_util_log_info("exit")
    QA_util_log_info('MORE EXAMPLE on https://github.com/QUANTAXIS/QADemo')

def help(self):
    # NOTE(review): not a cmd.Cmd hook (no do_/help_ prefix); appears unused.
    QA_util_log_info('fn+methods name')

def do_ls(self, arg):
    """Print the directory this module is installed in."""
    QA_util_log_info(os.path.dirname(os.path.abspath(__file__)))
def sourcecpy(src, des):
    """Recursively copy the contents of directory *src* into existing directory *des*.

    Files directly under *src* are copied into *des*; each sub-directory is
    copied as ``des/<name>`` via ``shutil.copytree``.  Exits the process with
    status 1 if either directory is missing (original behaviour).

    Fixes:
      * the existence check tested *src* twice and never tested *des*;
      * *des* was re-bound inside the loop (``des = os.path.join(des, name)``),
        so a second sub-directory was wrongly nested inside the first;
      * the stray ``os.chdir(src)`` side effect is gone — paths are built
        explicitly instead.
    """
    src = os.path.normpath(src)
    des = os.path.normpath(des)
    if not os.path.exists(src) or not os.path.exists(des):
        print("folder is not exist")
        sys.exit(1)

    for name in os.listdir(src):
        source = os.path.join(src, name)
        if os.path.isfile(source):
            shutil.copy(source, des)      # file -> directly into des
        elif os.path.isdir(source):
            shutil.copytree(source, os.path.join(des, name))  # dir -> des/<name>
# Create a CLI instance and run it.
def QA_cmd():
    """Entry point: start the interactive QUANTAXIS console loop."""
    cli = CLI()
    cli.cmdloop()
| 42.761453 | 134 | 0.549394 |
e540b434096f8f26f82c04b0b20b95f1132384eb | 9,234 | py | Python | notion/markdown.py | esnaultdev/notion-py | a6541fc5ae209885fbcbc7e023b1e68a0f213c96 | [
"MIT"
] | 1 | 2021-04-14T13:57:53.000Z | 2021-04-14T13:57:53.000Z | notion/markdown.py | onyxim/notion-py | afa9baeabdabc58849a6d8b80f3b0df12f7cfa27 | [
"MIT"
] | null | null | null | notion/markdown.py | onyxim/notion-py | afa9baeabdabc58849a6d8b80f3b0df12f7cfa27 | [
"MIT"
] | null | null | null | import commonmark
import re
import html
from xml.dom import minidom
from commonmark.dump import prepare
# Characters treated as delimiters when deciding where markdown formatting
# markers may start/end (ASCII punctuation plus Unicode whitespace).
# NOTE(review): "☃" is included because it is used as an internal placeholder
# for emphasis markers — presumably chosen as a character unlikely to occur
# in real documents.
delimiters = {
    "!",
    '"',
    "#",
    "$",
    "%",
    "&",
    "'",
    "(",
    ")",
    "*",
    "+",
    ",",
    "-",
    ".",
    "/",
    ":",
    ";",
    "<",
    "=",
    ">",
    "?",
    "@",
    "[",
    "\\",
    "]",
    "^",
    "_",
    "`",
    "{",
    "|",
    "}",
    "~",
    "☃",
    " ",
    "\t",
    "\n",
    "\x0b",
    "\x0c",
    "\r",
    "\x1c",
    "\x1d",
    "\x1e",
    "\x1f",
    "\x85",
    "\xa0",
    "\u1680",
    "\u2000",
    "\u2001",
    "\u2002",
    "\u2003",
    "\u2004",
    "\u2005",
    "\u2006",
    "\u2007",
    "\u2008",
    "\u2009",
    "\u200a",
    "\u2028",
    "\u2029",
    "\u202f",
    "\u205f",
    "\u3000",
}

# Notion inline-format codes -> markdown delimiter strings
# (i=italic, b=bold use the "☃" placeholder; s=strikethrough, c=code, e=equation).
_NOTION_TO_MARKDOWN_MAPPER = {"i": "☃", "b": "☃☃", "s": "~~", "c": "`", "e": "$$"}

# Order in which formats are applied/nested when rendering.
FORMAT_PRECEDENCE = ["s", "b", "i", "a", "c", "e"]
def _extract_text_and_format_from_ast(item):
if item["type"] == "html_inline":
if item.get("literal", "") == "<s>":
return "", ("s",)
if item.get("literal", "").startswith('<latex'):
elem = minidom.parseString(item.get("literal", "") + '</latex>').documentElement
equation = elem.attributes['equation'].value
return "", ("e", equation)
if item["type"] == "emph":
return item.get("literal", ""), ("i",)
if item["type"] == "strong":
return item.get("literal", ""), ("b",)
if item["type"] == "code":
return item.get("literal", ""), ("c",)
if item["type"] == "link":
return item.get("literal", ""), ("a", item.get("destination", "#"))
return item.get("literal", ""), ()
def _get_format(notion_segment, as_set=False):
if len(notion_segment) == 1:
if as_set:
return set()
else:
return []
else:
if as_set:
return set([tuple(f) for f in notion_segment[1]])
else:
return notion_segment[1]
def markdown_to_notion(markdown):
    """Convert a markdown string into notion's rich-text segment list.

    Strikethrough (``~~``) and ``$$...$$`` latex spans are pre-translated
    into HTML-ish tags before parsing, because commonmark supports neither.
    Returns a list of ``[text]`` / ``[text, [format, ...]]`` segments.
    """
    if not isinstance(markdown, str):
        markdown = str(markdown)
    # commonmark doesn't support strikethrough, so we need to handle it ourselves
    while markdown.count("~~") >= 2:
        markdown = markdown.replace("~~", "<s>", 1)
        markdown = markdown.replace("~~", "</s>", 1)
    # commonmark doesn't support latex blocks, so we need to handle it ourselves
    def handle_latex(match):
        return f'<latex equation="{html.escape(match.group(0)[2:-2])}">\u204d</latex>'
    # Match a non-escaped, balanced $$...$$ pair (shortest span).
    markdown = re.sub(r'(?<!\\\\|\$\$)(?:\\\\)*((\$\$)+)(?!(\$\$))(.+?)(?<!(\$\$))\1(?!(\$\$))', handle_latex, markdown)
    # we don't want to touch dashes, so temporarily replace them here
    markdown = markdown.replace("-", "⸻")
    parser = commonmark.Parser()
    ast = prepare(parser.parse(markdown))
    # `format` tracks the set of currently-open formats while walking the AST.
    format = set()
    notion = []
    for section in ast:
        _, ended_format = _extract_text_and_format_from_ast(section)
        if ended_format and ended_format in format:
            format.remove(ended_format)
        if section["type"] == "paragraph":
            notion.append(["\n\n"])
        for item in section.get("children", []):
            literal, new_format = _extract_text_and_format_from_ast(item)
            if new_format:
                format.add(new_format)
            if item["type"] == "html_inline" and literal == "</s>":
                format.remove(("s",))
                literal = ""
            if item["type"] == "html_inline" and literal == "</latex>":
                # Close only the first open latex ("e") format.
                for f in filter(lambda f: f[0] == 'e', format):
                    format.remove(f)
                    break
                literal = ""
            if item["type"] == "softbreak":
                literal = "\n"
            if literal:
                notion.append(
                    [literal, [list(f) for f in sorted(format)]]
                    if format
                    else [literal]
                )
            # in the ast format, code blocks are meant to be immediately self-closing
            if ("c",) in format:
                format.remove(("c",))
    # remove any trailing newlines from automatic closing paragraph markers
    if notion:
        notion[-1][0] = notion[-1][0].rstrip("\n")
    # consolidate any adjacent text blocks with identical styles
    consolidated = []
    for item in notion:
        if consolidated and _get_format(consolidated[-1], as_set=True) == _get_format(
            item, as_set=True
        ):
            consolidated[-1][0] += item[0]
        elif item[0]:
            consolidated.append(item)
    return cleanup_dashes(consolidated)
# Matches the temporary dash placeholder (and its URL-encoded form) that
# markdown_to_notion substitutes for "-" before parsing. Compiled once at
# module load instead of on every (recursive) call.
_DASH_PLACEHOLDER_RE = re.compile('⸻|%E2%B8%BB')


def cleanup_dashes(thing):
    """Recursively restore '-' characters in a nested list/str structure.

    Lists are rewritten in place (and returned); strings are returned with
    every dash placeholder replaced by '-'; anything else passes through
    unchanged.
    """
    # Idiom fix: use isinstance() rather than `type(x) is ...` checks.
    if isinstance(thing, list):
        for index, value in enumerate(thing):
            thing[index] = cleanup_dashes(value)
    elif isinstance(thing, str):
        return _DASH_PLACEHOLDER_RE.sub('-', thing)
    return thing
def notion_to_markdown(notion):
    """Convert notion's rich-text segment list back into a markdown string.

    Emphasis markers are first emitted with the "☃" placeholder, then each
    chunk is rewritten with underscores or asterisks so adjacent formatted
    chunks stay parseable markdown.
    """
    markdown_chunks = []
    use_underscores = True
    for item in notion or []:
        markdown = ""
        text = item[0]
        format = item[1] if len(item) == 2 else []
        # Split the text into leading whitespace / core / trailing whitespace
        # so format markers hug the non-whitespace core.
        match = re.match(
            "^(?P<leading>\s*)(?P<stripped>(\s|.)*?)(?P<trailing>\s*)$", text
        )
        if not match:
            raise Exception("Unable to extract text from: %r" % text)
        leading_whitespace = match.groupdict()["leading"]
        stripped = match.groupdict()["stripped"]
        trailing_whitespace = match.groupdict()["trailing"]
        markdown += leading_whitespace
        sorted_format = sorted(
            format,
            key=lambda x: FORMAT_PRECEDENCE.index(x[0])
            if x[0] in FORMAT_PRECEDENCE
            else -1,
        )
        # Opening markers, outermost first.
        for f in sorted_format:
            if f[0] in _NOTION_TO_MARKDOWN_MAPPER:
                if stripped:
                    markdown += _NOTION_TO_MARKDOWN_MAPPER[f[0]]
            if f[0] == "a":
                markdown += "["
        # Check whether a format replaces the content (latex equations do).
        content_changed = False
        for f in sorted_format:
            if f[0] == 'e':
                markdown += f[1]
                content_changed = True
        if not content_changed:
            markdown += stripped
        # Closing markers, innermost first.
        for f in reversed(sorted_format):
            if f[0] in _NOTION_TO_MARKDOWN_MAPPER:
                if stripped:
                    markdown += _NOTION_TO_MARKDOWN_MAPPER[f[0]]
            if f[0] == "a":
                markdown += "]({})".format(f[1])
        markdown += trailing_whitespace
        # to make it parseable, add a space after if it combines code/links and emphasis formatting
        format_types = [f[0] for f in format]
        if (
            ("c" in format_types or "a" in format_types)
            and ("b" in format_types or "i" in format_types)
            and not trailing_whitespace
        ):
            markdown += " "
        markdown_chunks.append(markdown)
    # use underscores as needed to separate adjacent chunks to avoid ambiguous runs of asterisks
    full_markdown = ""
    last_used_underscores = False
    for i in range(len(markdown_chunks)):
        prev = markdown_chunks[i - 1] if i > 0 else ""
        curr = markdown_chunks[i]
        next = markdown_chunks[i + 1] if i < len(markdown_chunks) - 1 else ""
        prev_ended_in_delimiter = not prev or prev[-1] in delimiters
        next_starts_with_delimiter = not next or next[0] in delimiters
        if (
            prev_ended_in_delimiter
            and next_starts_with_delimiter
            and not last_used_underscores
            and curr.startswith("☃")
            and curr.endswith("☃")
        ):
            # "☃☃" means strong (2 markers); a single "☃" means emphasis.
            if curr[1] == "☃":
                count = 2
            else:
                count = 1
            curr = "_" * count + curr[count:-count] + "_" * count
            last_used_underscores = True
        else:
            last_used_underscores = False
        final_markdown = curr.replace("☃", "*")
        # to make it parseable, convert emphasis/strong combinations to use a mix of _ and *
        if "***" in final_markdown:
            final_markdown = final_markdown.replace("***", "**_", 1)
            final_markdown = final_markdown.replace("***", "_**", 1)
        full_markdown += final_markdown
    return full_markdown
def notion_to_plaintext(notion, client=None):
    """Flatten notion rich-text segments to plain text, dropping formatting.

    Inline page/user mentions (the "‣" character) are rendered as
    ``page:<id>`` / ``user:<id>``, or resolved to their titles/names when a
    ``client`` is supplied.
    """
    chunks = []
    for segment in notion or []:
        text = segment[0]
        formats = segment[1] if len(segment) == 2 else []
        if text != "‣":
            chunks.append(text)
            continue
        # A mention: render every page/user link it carries; the "‣"
        # character itself is never emitted.
        for fmt in formats:
            kind = fmt[0]
            if kind == "p":  # page link
                if client is None:
                    chunks.append("page:" + fmt[1])
                else:
                    chunks.append(client.get_block(fmt[1]).title_plaintext)
            elif kind == "u":  # user link
                if client is None:
                    chunks.append("user:" + fmt[1])
                else:
                    chunks.append(client.get_user(fmt[1]).full_name)
    return "".join(chunks)
def plaintext_to_notion(plaintext):
    """Wrap a plain string in notion's rich-text segment-list format."""
    segment = [plaintext]
    return [segment]
| 26.610951 | 120 | 0.507797 |
bd2bc694699f6f1209aaa8da21fa1af19f26395a | 1,504 | py | Python | tests/test_map3d.py | zkytony/thortils | 07ddfa6f6d09662094ba39343f89ba124c250e03 | [
"MIT"
] | null | null | null | tests/test_map3d.py | zkytony/thortils | 07ddfa6f6d09662094ba39343f89ba124c250e03 | [
"MIT"
] | null | null | null | tests/test_map3d.py | zkytony/thortils | 07ddfa6f6d09662094ba39343f89ba124c250e03 | [
"MIT"
] | null | null | null | import time
import thortils as tt
from thortils import constants
from thortils.controller import launch_controller, thor_controller_param
from thortils.map3d import Map3D, Mapper3D
from thortils.utils.visual import GridMapVisualizer
from thortils.agent import thor_reachable_positions
def test_mapper(scene, floor_cut=0.1):
    """Map a THOR scene and visually sanity-check the resulting grid map.

    Launches a controller for `scene`, builds a 3D map by automating the
    agent (20 stops, 1.5 separation), projects it into a 2D grid map, then
    overlays the controller's reachable positions on the rendered map for a
    5-second visual comparison before cleaning up.

    `floor_cut` is forwarded to Mapper3D.get_grid_map; its exact semantics
    aren't visible here -- presumably a floor-height cutoff, TODO confirm.
    """
    controller = launch_controller({**constants.CONFIG, **{'scene': scene}})
    mapper = Mapper3D(controller)
    mapper.automate(num_stops=20, sep=1.5)
    grid_map = mapper.get_grid_map(floor_cut=floor_cut, debug=False)
    # Visualize reachable positions obtained from controller
    reachable_positions = thor_reachable_positions(controller)
    highlights = []
    for thor_pos in reachable_positions:
        highlights.append(grid_map.to_grid_pos(*thor_pos))
    # show grid map
    viz = GridMapVisualizer(grid_map=grid_map, res=30)
    img = viz.render()
    img = viz.highlight(img, highlights,
                        color=(25, 214, 224), show_progress=True)
    viz.show_img(img)
    time.sleep(5)
    viz.on_cleanup()
    controller.stop()
if __name__ == "__main__":
    # Run the mapping smoke test across a spread of FloorPlan scenes.
    # FloorPlan201 uses a larger floor_cut; the reason isn't recorded
    # here -- TODO confirm why it needs 0.3.
    test_mapper("FloorPlan2")
    test_mapper("FloorPlan1")
    test_mapper("FloorPlan3")
    test_mapper("FloorPlan4")
    test_mapper("FloorPlan201", floor_cut=0.3)
    test_mapper("FloorPlan202")
    test_mapper("FloorPlan301")
    test_mapper("FloorPlan302")
    test_mapper("FloorPlan303")
    test_mapper("FloorPlan401")
    test_mapper("FloorPlan402")
    test_mapper("FloorPlan403")
46001615dcae8007152b34c028b27d63999abc85 | 2,509 | py | Python | icon_validator/rules/plugin_validators/icon_validator.py | rapid7/icon-integrations-validators | 673e588f8c6aa02bdb6c5e82556fdc59fe3a7280 | [
"MIT"
] | 6 | 2020-11-10T03:07:00.000Z | 2022-02-24T18:07:57.000Z | icon_validator/rules/plugin_validators/icon_validator.py | rapid7/icon-integrations-validators | 673e588f8c6aa02bdb6c5e82556fdc59fe3a7280 | [
"MIT"
] | 17 | 2020-01-21T16:02:04.000Z | 2022-01-12T15:11:26.000Z | icon_validator/rules/plugin_validators/icon_validator.py | rapid7/icon-integrations-validators | 673e588f8c6aa02bdb6c5e82556fdc59fe3a7280 | [
"MIT"
] | 2 | 2020-12-26T11:33:23.000Z | 2021-09-30T22:22:43.000Z | import os
from pathlib import Path
import filetype
from icon_validator.rules.validator import KomandPluginValidator
from icon_validator.exceptions import ValidationException
class IconValidator(KomandPluginValidator):
    """Validates a plugin's icon.png and extension.png image files.

    Checks that icon.png exists, is under 70KB, and really is a PNG, and
    that a non-empty extension.png is present.
    """

    def validate(self, plugin_spec):
        """Run every icon/extension-image check; raises ValidationException on failure."""
        IconValidator.check_icon_file_exists(plugin_spec)
        IconValidator.check_icon_less_than_equal_70kb(plugin_spec)
        IconValidator.check_if_icon_is_png(plugin_spec)
        IconValidator.check_if_extension_image_file_exists(plugin_spec)
        IconValidator.check_extension_image_file_is_nonzero_size(plugin_spec)

    @staticmethod
    def check_icon_file_exists(plugin_spec):
        """Raise ValidationException if icon.png is missing from the plugin."""
        icon_file = Path(plugin_spec.directory) / "icon.png"
        if not icon_file.is_file():
            raise ValidationException("icon.png file not included in plugin.")

    @staticmethod
    def check_icon_less_than_equal_70kb(plugin_spec):
        """Raise ValidationException if icon.png is 70,000 bytes or larger."""
        icon_file = plugin_spec.directory + "/" + "icon.png"
        info = os.stat(icon_file)
        if info.st_size >= 70000:
            raise ValidationException(f"Included icon ({info.st_size}) file exceeds maximum size limitation of 70Kb.")

    @staticmethod
    def check_if_icon_is_png(plugin_spec):
        """Raise ValidationException unless icon.png's detected file type is PNG."""
        icon_file = plugin_spec.directory + "/" + "icon.png"
        kind = filetype.guess(icon_file)
        # Bug fix: filetype.guess() returns None for unrecognized data, which
        # previously crashed with AttributeError instead of reporting a
        # validation error.
        if kind is None or kind.extension != "png":
            extension = kind.extension if kind is not None else "unknown"
            raise ValidationException(f"Included icon file ({extension}) is not 'PNG'.")

    @staticmethod
    def check_if_extension_image_file_exists(plugin_spec):
        """Raise ValidationException if extension.png is missing."""
        extension_image_file = f"{plugin_spec.directory}/extension.png"
        file_item = Path(extension_image_file)
        if not file_item.is_file():
            raise ValidationException(
                "extension.png file not included in plugin. Please include a color PNG image of a logo for this vendor or product.")

    @staticmethod
    def check_extension_image_file_is_nonzero_size(plugin_spec):
        """Raise ValidationException if extension.png is zero bytes."""
        extension_image_file = f"{plugin_spec.directory}/extension.png"
        image_file = os.stat(extension_image_file)
        if not image_file.st_size > 0:
            raise ValidationException(
                "Extension image file is size zero. Please include a color PNG image of a logo for this vendor or product.")
| 38.015152 | 132 | 0.704265 |
e0d24bdd7d77fa38bc7454aae4f74dcd800899c5 | 265 | py | Python | mumbaihackathon_in/mumbai_hackathon/doctype/team_project_url/team_project_url.py | Mumbaikar007/mumbaihackathon_in | 261a4340862cb884dca0a6b0a513da47ba26caa6 | [
"MIT"
] | null | null | null | mumbaihackathon_in/mumbai_hackathon/doctype/team_project_url/team_project_url.py | Mumbaikar007/mumbaihackathon_in | 261a4340862cb884dca0a6b0a513da47ba26caa6 | [
"MIT"
] | null | null | null | mumbaihackathon_in/mumbai_hackathon/doctype/team_project_url/team_project_url.py | Mumbaikar007/mumbaihackathon_in | 261a4340862cb884dca0a6b0a513da47ba26caa6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Neil Lasrado and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class TeamProjectURL(Document):
	"""Frappe Document for a team's project URL; no custom server-side logic."""
	pass
| 24.090909 | 51 | 0.777358 |
1d12e18406e5d771ad6ce49de3f060b5d87959a1 | 789 | py | Python | var/spack/repos/builtin/packages/r-kknn/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/r-kknn/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/r-kknn/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RKknn(RPackage):
    """Weighted k-Nearest Neighbors for Classification, Regression and
    Clustering."""
    homepage = "https://cloud.r-project.org/package=kknn"
    url      = "https://cloud.r-project.org/src/contrib/kknn_1.3.1.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/kknn"
    version('1.3.1', sha256='22840e70ec2afa40371e274b583634c8f6d27149a87253ee411747d5db78f3db')
    # kknn requires R >= 2.10 plus igraph and Matrix at run time.
    # (A stray "Python 3 is not supported" comment copy-pasted from another
    # package was removed -- it does not apply to an R package.)
    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-igraph@1.0:', type=('build', 'run'))
    depends_on('r-matrix', type=('build', 'run'))
d4721f47df851152506797fb9590416b4212bcb1 | 2,805 | py | Python | houdini/houdini_client/auth_backend.py | TrianglePlusPlus/houdini | 292b1fb395fc34dbefa8f891cc94bb811f5805bb | [
"MIT"
] | 2 | 2017-09-25T00:30:22.000Z | 2021-02-04T22:11:54.000Z | houdini/houdini_client/auth_backend.py | TrianglePlusPlus/houdini | 292b1fb395fc34dbefa8f891cc94bb811f5805bb | [
"MIT"
] | 11 | 2016-12-29T22:05:57.000Z | 2020-06-05T17:23:10.000Z | houdini/houdini_client/auth_backend.py | TrianglePlusPlus/houdini | 292b1fb395fc34dbefa8f891cc94bb811f5805bb | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import check_password
from django.utils import timezone
from datetime import datetime
from enum import Enum
import jwt
import pytz
import requests
User = get_user_model()
def authenticate_jwt(jwt_string, app_secret):
    """Decode and verify a JWT; return its payload dict, or None if the
    signature/encoding is invalid."""
    try:
        return jwt.decode(jwt_string, app_secret)
    except jwt.DecodeError:
        return None
def is_logged_in(request):
    """True if the session's 'logged_in_since' timestamp (ISO-8601, UTC) is
    more recent than settings.TIME_TO_LIVE ago; False when absent."""
    stamp = request.session.get('logged_in_since')
    if not stamp:
        return False
    logged_in_since = pytz.utc.localize(
        datetime.strptime(stamp, settings.ISO_8601))
    return (timezone.now() - logged_in_since) < settings.TIME_TO_LIVE
# Distinguishes why authentication failed: the remote Houdini server rejected
# the credentials (server_failure) vs. the remote accepted them but no matching
# local User row exists (local_failure).
FailureType = Enum('FailureType', 'server_failure local_failure')
class RemoteServerAuthBackend(ModelBackend):
    """Django auth backend that delegates credential checks to a remote
    Houdini server, then resolves the user against the local User table.
    """
    def authenticate(self, email=None, password=None, response=None):
        """Verify credentials against the Houdini login endpoint.

        Returns the local User on success, else None. When a `response`
        dict is supplied it is populated with 'success' or 'failure_type'
        (a FailureType) plus the raw 'http_response' for callers to inspect.
        """
        if response is None:
            response = {}
        # make a JWT jwt_string of data signed with app_secret
        jwt_string = jwt.encode({
            "email": email,
            "password": password
        }, settings.HOUDINI_SECRET)
        # POST it to the login endpoint
        r = requests.post(
            settings.HOUDINI_SERVER + "/endpoints/login",
            # TODO: cert and verify will change in production
            # cert isn't necessary since we have verify=False, but we will leave it
            # as a placeholder for when we are in production with Let's Encrypt
            cert=settings.SSL_DEV_CERT_KEY,
            verify=False,
            # TODO: ^only in development!!!
            data={
                "app_key": settings.HOUDINI_KEY,
                "jwt_string": jwt_string
            })
        # if we were successfully logged in
        if r.status_code == 200:
            try:
                user = User.objects.get(email=email)
                response['success'] = True
                response['http_response'] = r
                return user
            except User.DoesNotExist:
                # Remote accepted the credentials but no local account exists.
                response['failure_type'] = FailureType.local_failure
                response['http_response'] = r
                return None
        else:
            response['failure_type'] = FailureType.server_failure
            response['http_response'] = r
            return None
    def get_user(self, user_id, request=None):
        """Return the User for `user_id`, or None if missing.

        If `request` is given, additionally require a live session
        (see is_logged_in); a stale session yields None.
        """
        try:
            user = User.objects.get(pk=user_id)
            if request:
                if not is_logged_in(request):
                    return None
            return user
        except User.DoesNotExist:
            return None
8d01cc69680492c531089f17afbbed11eff02948 | 4,284 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/stat/rewrite/rewritepolicy_stats.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/stat/rewrite/rewritepolicy_stats.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/stat/rewrite/rewritepolicy_stats.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rewritepolicy_stats(base_resource):
    """Statistics for rewrite policy resource.

    Wraps the NITRO "rewritepolicy" stat endpoint: holds the policy name,
    the clearstats option, and the hit counters returned by the appliance.

    Idiom fix vs. the generated original: the pervasive no-op
    ``try: ... except Exception as e: raise e`` wrappers (which only
    obscured tracebacks) have been removed; behavior is unchanged.
    """

    def __init__(self):
        self._name = ""
        self._clearstats = ""
        self._pipolicyhits = 0
        self._pipolicyhitsrate = 0
        self._pipolicyundefhits = 0
        self._pipolicyundefhitsrate = 0

    @property
    def name(self):
        """Name of the rewrite policy.<br/>Minimum length = 1."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the name of the rewrite policy."""
        self._name = name

    @property
    def clearstats(self):
        """Clear the statistics / counters.<br/>Possible values = basic, full."""
        return self._clearstats

    @clearstats.setter
    def clearstats(self, clearstats):
        """Set the clear-statistics option (see Clearstats for values)."""
        self._clearstats = clearstats

    @property
    def pipolicyhitsrate(self):
        """Rate (/s) counter for pipolicyhits."""
        return self._pipolicyhitsrate

    @property
    def pipolicyundefhitsrate(self):
        """Rate (/s) counter for pipolicyundefhits."""
        return self._pipolicyundefhitsrate

    @property
    def pipolicyhits(self):
        """Number of hits on the policy."""
        return self._pipolicyhits

    @property
    def pipolicyundefhits(self):
        """Number of undef hits on the policy."""
        return self._pipolicyundefhits

    def _get_nitro_response(self, service, response):
        """Convert a raw NITRO response into resource objects.

        Raises nitro_exception on error responses (and clears the session
        on error code 444); returns the parsed resource array otherwise.
        """
        result = service.payload_formatter.string_to_resource(
            rewritepolicy_response, response,
            self.__class__.__name__.replace('_stats', ''))
        if result.errorcode != 0:
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                # Only severity == "ERROR" is fatal; warnings pass through.
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.rewritepolicy

    def _get_object_name(self):
        """Return the value of the object identifier argument (name), or None."""
        if self.name:
            return str(self.name)
        return None

    @classmethod
    def get(cls, service, name="", option_=""):
        """Fetch statistics for all rewritepolicy resources, or for the one
        named `name` when given."""
        obj = rewritepolicy_stats()
        if not name:
            return obj.stat_resources(service, option_)
        obj.name = name
        return obj.stat_resource(service, option_)

    class Clearstats:
        # Allowed values for the clearstats option.
        basic = "basic"
        full = "full"
class rewritepolicy_response(base_response):
    """Response envelope for a NITRO rewritepolicy stats call."""

    def __init__(self, length=1):
        # `length` pre-sizes the result list with empty stat objects.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Fix: the generated original assigned self.rewritepolicy twice
        # (first to [], then to this list); a single assignment suffices.
        self.rewritepolicy = [rewritepolicy_stats() for _ in range(length)]
| 26.121951 | 136 | 0.709617 |
912622f2c7b1b67e32e16a1c2e4e4074211af43f | 385 | py | Python | textattack/models/helpers/__init__.py | chong-z/TextAttack | 9842160b558db2118365770029be70782327a40a | [
"MIT"
] | null | null | null | textattack/models/helpers/__init__.py | chong-z/TextAttack | 9842160b558db2118365770029be70782327a40a | [
"MIT"
] | null | null | null | textattack/models/helpers/__init__.py | chong-z/TextAttack | 9842160b558db2118365770029be70782327a40a | [
"MIT"
] | null | null | null | # Helper stuff, like embeddings.
from . import utils
from .glove_embedding_layer import GloveEmbeddingLayer, GloveLikeEmbeddingLayer
# Helper modules.
from .bert_for_classification import BERTForClassification
from .lstm_for_classification import LSTMForClassification
from .t5_for_text_to_text import T5ForTextToText
from .word_cnn_for_classification import WordCNNForClassification
| 38.5 | 79 | 0.880519 |
ffb0acac5b902323e7bbc9998c59f8fbe916f5ac | 3,443 | py | Python | simpleTest.py | egerland/masters | 410fd5c877e8f981f0fbbc5f7ee1506d6517dc8d | [
"MIT"
] | null | null | null | simpleTest.py | egerland/masters | 410fd5c877e8f981f0fbbc5f7ee1506d6517dc8d | [
"MIT"
] | null | null | null | simpleTest.py | egerland/masters | 410fd5c877e8f981f0fbbc5f7ee1506d6517dc8d | [
"MIT"
] | null | null | null | # Copyright 2006-2017 Coppelia Robotics GmbH. All rights reserved.
# marc@coppeliarobotics.com
# www.coppeliarobotics.com
#
# -------------------------------------------------------------------
# THIS FILE IS DISTRIBUTED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. THE USER WILL USE IT AT HIS/HER OWN RISK. THE ORIGINAL
# AUTHORS AND COPPELIA ROBOTICS GMBH WILL NOT BE LIABLE FOR DATA LOSS,
# DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING OR
# MISUSING THIS SOFTWARE.
#
# You are free to use/modify/distribute this file for whatever purpose!
# -------------------------------------------------------------------
#
# This file was automatically created for V-REP release V3.4.0 rev. 1 on April 5th 2017
# Make sure to have the server side running in V-REP:
# in a child script of a V-REP scene, add following command
# to be executed just once, at simulation start:
#
# simExtRemoteApiStart(19999)
#
# then start simulation, and run this program.
#
# IMPORTANT: for each successful call to simxStart, there
# should be a corresponding call to simxFinish at the end!
import traceback
try:
from vrepAPI import vrep
except:
print(traceback.format_exc())
print ('--------------------------------------------------------------')
print ('"vrep.py" could not be imported. This means very probably that')
print ('either "vrep.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "vrep.py"')
print ('--------------------------------------------------------------')
print ('')
raise
import time
print ('Program started')
# Demo flow: connect to the local V-REP remote API server, make one blocking
# call, stream a value for 5 seconds, send a message, then disconnect.
vrep.simxFinish(-1) # just in case, close all opened connections
clientID=vrep.simxStart('127.0.0.1',19999,True,True,5000,5) # Connect to V-REP
if clientID!=-1:
    print ('Connected to remote API server')
    # Now try to retrieve data in a blocking fashion (i.e. a service call):
    res,objs=vrep.simxGetObjects(clientID,vrep.sim_handle_all,vrep.simx_opmode_blocking)
    if res==vrep.simx_return_ok:
        print ('Number of objects in the scene: ',len(objs))
    else:
        print ('Remote API function call returned with error code: ',res)
    time.sleep(2)
    # Now retrieve streaming data (i.e. in a non-blocking fashion):
    startTime=time.time()
    vrep.simxGetIntegerParameter(clientID,vrep.sim_intparam_mouse_x,vrep.simx_opmode_streaming) # Initialize streaming
    while time.time()-startTime < 5:
        returnCode,data=vrep.simxGetIntegerParameter(clientID,vrep.sim_intparam_mouse_x,vrep.simx_opmode_buffer) # Try to retrieve the streamed data
        if returnCode==vrep.simx_return_ok: # After initialization of streaming, it will take a few ms before the first value arrives, so check the return code
            print ('Mouse position x: ',data) # Mouse position x is actualized when the cursor is over V-REP's window
        time.sleep(0.005)
    # Now send some data to V-REP in a non-blocking fashion:
    vrep.simxAddStatusbarMessage(clientID,'Hello V-REP!',vrep.simx_opmode_oneshot)
    # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
    vrep.simxGetPingTime(clientID)
    # Now close the connection to V-REP:
    vrep.simxFinish(clientID)
else:
    print ('Failed connecting to remote API server')
print ('Program ended')
d50b45d41a1ab378540be0be78746402bbe27ead | 335 | py | Python | build/catkin_generated/order_packages.py | hyu-nani/ydlidar_ws | 56316db999c057c4315a20ba8277826d6a043120 | [
"MIT"
] | 1 | 2021-11-08T12:24:24.000Z | 2021-11-08T12:24:24.000Z | build/catkin_generated/order_packages.py | hyu-nani/ydlidar_ws | 56316db999c057c4315a20ba8277826d6a043120 | [
"MIT"
] | null | null | null | build/catkin_generated/order_packages.py | hyu-nani/ydlidar_ws | 56316db999c057c4315a20ba8277826d6a043120 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = '/home/pls/ydlidar_ws/src'
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
underlay_workspaces = '/home/pls/ydlidar_ws/devel'.split(';') if '/home/pls/ydlidar_ws/devel' != '' else []
| 55.833333 | 107 | 0.680597 |
8bf6ceddcc5dd49d53e934dbcb22f8cb5b0c02a2 | 1,645 | py | Python | internal/notes/builtin-SAVE/packages/scons/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/scons/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/scons/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Scons(PythonPackage):
"""SCons is a software construction tool"""
homepage = "http://scons.org"
url = "https://pypi.io/packages/source/s/scons/scons-2.5.1.tar.gz"
version('2.5.1', '3eac81e5e8206304a9b4683c57665aa4')
version('2.5.0', 'bda5530a70a41a7831d83c8b191c021e')
# Python 3 is not supported
depends_on('python@:2.8', type=('build', 'run'))
| 42.179487 | 78 | 0.669909 |
bcf5dabbb48a04f3db5a9d99101d7e39ffe2e031 | 544 | py | Python | awards/form.py | UmurerwaDivine/Awards | 7017466033fda36b3de6aa2e8d130f1b9e9ac9b8 | [
"MIT"
] | null | null | null | awards/form.py | UmurerwaDivine/Awards | 7017466033fda36b3de6aa2e8d130f1b9e9ac9b8 | [
"MIT"
] | null | null | null | awards/form.py | UmurerwaDivine/Awards | 7017466033fda36b3de6aa2e8d130f1b9e9ac9b8 | [
"MIT"
] | null | null | null | from django import forms
from .models import Profile,Pic
from django.contrib.auth.forms import AuthenticationForm
class ProfileForm(forms.ModelForm):
    # NOTE(review): this class-level `model = Profile` has no effect on a
    # ModelForm -- Django only reads the model from an inner `class Meta:`
    # (compare ProfileUploadForm below). As written, instantiating this form
    # will most likely fail with "ModelForm has no model class specified".
    # TODO: confirm intent and move the model into a Meta declaration.
    model = Profile
    username = forms.CharField(label='Username',max_length = 30)
    bio = forms.CharField(label='Image Caption',max_length=500)
    profile_pic = forms.ImageField(label = 'Image Field')
class ProfileUploadForm(forms.ModelForm):
    """ModelForm over Profile exposing every field except the owning user."""
    class Meta:
        model = Profile
        # `user` is excluded -- presumably set by the view; verify caller.
        exclude = ['user']
class ImageForm(forms.ModelForm):
    """ModelForm over Pic; user and pub_date are excluded (presumably
    filled in server-side -- verify against the view)."""
    class Meta:
        model = Pic
        exclude = ['user','pub_date']
5ffdba068d9a031f0114ea29ef5212e208459cb3 | 450 | py | Python | nmtpytorch/layers/__init__.py | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 | [
"MIT"
] | null | null | null | nmtpytorch/layers/__init__.py | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 | [
"MIT"
] | null | null | null | nmtpytorch/layers/__init__.py | toshohirasawa/mmt-with-monolingual-data | 3f80f3a1807e1a837ef82d75917c1cf581270b84 | [
"MIT"
] | 1 | 2020-07-22T19:25:53.000Z | 2020-07-22T19:25:53.000Z | # Basic layers
from .ff import FF
from .fusion import Fusion
from .flatten import Flatten
from .seq_conv import SequenceConvolution
from .rnninit import RNNInitializer
from .max_margin import MaxMargin
from .embedding import get_partial_embedding_layer
# Attention layers
from .attention import *
# ZSpace layers
from .z import ZSpace
from .z_att import ZSpaceAtt
# Encoder layers
from .encoders import *
# Decoder layers
from .decoders import *
| 20.454545 | 50 | 0.806667 |
5a0d30a2da75ed965b105d380213ae523e46f906 | 4,584 | py | Python | oauth_dropins/dropbox.py | ravenscroftj/oauth-dropins | 59cc4bfc8157142249c5eb561b1f665da560e6c1 | [
"Unlicense"
] | null | null | null | oauth_dropins/dropbox.py | ravenscroftj/oauth-dropins | 59cc4bfc8157142249c5eb561b1f665da560e6c1 | [
"Unlicense"
] | null | null | null | oauth_dropins/dropbox.py | ravenscroftj/oauth-dropins | 59cc4bfc8157142249c5eb561b1f665da560e6c1 | [
"Unlicense"
] | null | null | null | """Dropbox OAuth drop-in.
Standard OAuth 2.0 flow. Docs:
https://www.dropbox.com/developers/documentation/http/overview
https://www.dropbox.com/developers/documentation/http/documentation#authorization
"""
import logging
import urllib.parse, urllib.request
from flask import request
from google.cloud import ndb
from . import views, models
from .webutil import flask_util, util
from .webutil.util import json_dumps, json_loads
# App credentials, read from files in the app's root directory.
DROPBOX_APP_KEY = util.read('dropbox_app_key')
DROPBOX_APP_SECRET = util.read('dropbox_app_secret')
# OAuth2 authorization endpoint; %-style params are filled in by
# Start.redirect_url.
GET_AUTH_CODE_URL = '&'.join((
  'https://www.dropbox.com/1/oauth2/authorize?'
  'response_type=code',
  'client_id=%(client_id)s',
  'redirect_uri=%(redirect_uri)s',
  'state=%(state)s',
))
# Token-exchange endpoint (authorization code -> access token).
GET_ACCESS_TOKEN_URL = '&'.join((
  'https://api.dropbox.com/1/oauth2/token?',
  'grant_type=authorization_code',
  'code=%(code)s',
  'client_id=%(client_id)s',
  'client_secret=%(client_secret)s',
  'redirect_uri=%(redirect_uri)s',
))
class DropboxAuth(models.BaseAuth):
    """An authenticated Dropbox user or page.

    Provides methods that return information about this user (or page) and
    make OAuth-signed requests to Dropbox's HTTP-based APIs. OAuth
    credentials are stored in the datastore; see models.BaseAuth for usage
    details.

    Implements urlopen() but not api().
    """
    # OAuth2 bearer token for this Dropbox account.
    access_token_str = ndb.StringProperty(required=True)

    def site_name(self):
        """Return the human-readable name of this service."""
        return 'Dropbox'

    def user_display_name(self):
        """Return the Dropbox user id."""
        return self.key_id()

    def access_token(self):
        """Return the OAuth access token string."""
        return self.access_token_str

    def urlopen(self, url, **kwargs):
        """Open *url* with this user's bearer token attached.

        Any exception is first passed to util.interpret_http_exception()
        (for logging/translation) and then re-raised unchanged.
        """
        req = urllib.request.Request(
            url, headers={'Authorization': f'Bearer {self.access_token_str}'})
        try:
            return util.urlopen(req, **kwargs)
        except BaseException as e:
            util.interpret_http_exception(e)
            raise
class DropboxCsrf(ndb.Model):
    """Stores a CSRF token for the Dropbox OAuth2 flow."""
    # Random CSRF token; appears unused in this module — the datastore id
    # is what actually round-trips through the OAuth ``state`` parameter.
    token = ndb.StringProperty(required=False)
    # Opaque application state to restore after the OAuth dance completes.
    state = ndb.TextProperty(required=False)
class Start(views.Start):
    """Starts Dropbox auth. Requests an auth code and expects a redirect back.
    """
    NAME = 'dropbox'
    LABEL = 'Dropbox'

    def redirect_url(self, state=None):
        """Return the Dropbox URL to send the user to for authorization.

        Persists *state* in a DropboxCsrf entity and round-trips that
        entity's id through the OAuth ``state`` parameter as
        ``'<state>|<csrf id>'`` (split back apart in Callback).

        NOTE(review): *state* is embedded un-escaped here, while Callback
        runs unquote_plus() on it — a state containing '+' or '%' would be
        mangled; confirm upstream guarantees about its contents.
        """
        assert DROPBOX_APP_KEY and DROPBOX_APP_SECRET, (
            "Please fill in the dropbox_app_key and dropbox_app_secret files in "
            "your app's root directory.")
        csrf_key = DropboxCsrf(state=state).put()
        return GET_AUTH_CODE_URL % {
            'client_id': DROPBOX_APP_KEY,
            'redirect_uri': urllib.parse.quote_plus(self.to_url(state=state)),
            'state': f'{state}|{csrf_key.id()}',
        }

    @classmethod
    def button_html(cls, *args, **kwargs):
        # Dropbox-branded styling for the generic start button.
        return super(cls, cls).button_html(
            *args, input_style='background-color: #EEEEEE; padding: 10px', **kwargs)
class Callback(views.Callback):
    """The auth callback. Fetches an access token, stores it, and redirects home.
    """
    def dispatch_request(self):
        """Handle Dropbox's OAuth redirect back to us.

        Exchanges the auth code for an access token, stores a DropboxAuth
        entity, and finishes with the state saved by Start.

        NOTE(review): flask_util.error() is assumed to abort the request
        (raise); otherwise csrf_id/csrf could be used unbound/None below —
        confirm against webutil.flask_util.
        """
        state = request.values['state']

        # handle errors
        error = request.values.get('error')
        error_reason = urllib.parse.unquote_plus(request.values.get('error_reason', ''))
        if error or error_reason:
            if error == 'access_denied':
                # User declined: finish the flow without credentials.
                logging.info(f'User declined: {error_reason}')
                return self.finish(None, state=state)
            else:
                flask_util.error(' '.join((error, error_reason)))

        # lookup the CSRF token
        # Start encoded the state as '<state>|<csrf id>'; the last segment
        # is the datastore id of the DropboxCsrf entity.
        try:
            csrf_id = int(urllib.parse.unquote_plus(state).split('|')[-1])
        except (ValueError, TypeError):
            flask_util.error(f'Invalid state value {state!r}')

        csrf = DropboxCsrf.get_by_id(csrf_id)
        if not csrf:
            flask_util.error(f'No CSRF token for id {csrf_id}')

        # request an access token
        data = {
            'client_id': DROPBOX_APP_KEY,
            'client_secret': DROPBOX_APP_SECRET,
            'code': request.values['code'],
            'redirect_uri': request.base_url,
        }
        try:
            # data=b'' forces a POST, as the token endpoint requires.
            resp = util.urlopen(GET_ACCESS_TOKEN_URL % data, data=b'').read()
        except BaseException as e:
            util.interpret_http_exception(e)
            raise

        try:
            data = json_loads(resp)
        except (ValueError, TypeError):
            logging.error(f'Bad response:\n{resp}', exc_info=True)
            flask_util.error('Bad Dropbox response to access token request')

        logging.info(f"Storing new Dropbox account: {data['uid']}")
        auth = DropboxAuth(id=data['uid'], access_token_str=data['access_token'])
        auth.put()
        return self.finish(auth, state=csrf.state)
| 30.357616 | 84 | 0.694154 |
51289fc4b8d99cdb7ba2a921b856a287be29b95c | 7,398 | py | Python | core/modules/models/seg/deeplab/decoder.py | FelixFu520/DAO | ac30bad4503408e771bc28c77dd8a20c18c15a05 | [
"MIT"
] | null | null | null | core/modules/models/seg/deeplab/decoder.py | FelixFu520/DAO | ac30bad4503408e771bc28c77dd8a20c18c15a05 | [
"MIT"
] | null | null | null | core/modules/models/seg/deeplab/decoder.py | FelixFu520/DAO | ac30bad4503408e771bc28c77dd8a20c18c15a05 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author:FelixFu
# @Date: 2021.4.14
# @GitHub:https://github.com/felixfu520
# @Copy From:
"""
BSD 3-Clause License
Copyright (c) Soumith Chintala 2016,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from torch import nn
from torch.nn import functional as F
__all__ = ["DeepLabV3Decoder", "DeepLabV3PlusDecoder"]
class DeepLabV3Decoder(nn.Sequential):
    """DeepLabV3 decoder head: ASPP followed by a 3x3 conv-BN-ReLU block.

    Only the deepest encoder feature map is consumed.
    """

    def __init__(self, in_channels, out_channels=256, atrous_rates=(12, 24, 36)):
        layers = (
            ASPP(in_channels, out_channels, atrous_rates),
            nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        super().__init__(*layers)
        self.out_channels = out_channels

    def forward(self, *features):
        # Decode only the last (deepest) feature map.
        return super().forward(features[-1])
class DeepLabV3PlusDecoder(nn.Module):
    """DeepLabV3+ decoder: ASPP on the deepest features plus a skip branch.

    The ASPP output is upsampled and concatenated with a 1x1-projected
    high-resolution feature map, then fused by a separable-conv block.
    """

    def __init__(
        self,
        encoder_channels,
        out_channels=256,
        atrous_rates=(12, 24, 36),
        output_stride=16,
    ):
        super().__init__()
        if output_stride not in {8, 16}:
            raise ValueError(
                "Output stride should be 8 or 16, got {}.".format(output_stride))
        self.out_channels = out_channels
        self.output_stride = output_stride

        # Separable ASPP over the deepest encoder features.
        self.aspp = nn.Sequential(
            ASPP(encoder_channels[-1], out_channels, atrous_rates, separable=True),
            SeparableConv2d(out_channels, out_channels, kernel_size=3,
                            padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

        # Bring the ASPP output up to the skip branch's resolution.
        self.up = nn.UpsamplingBilinear2d(
            scale_factor=2 if output_stride == 8 else 4)

        # 1x1 projection of the high-res skip features; 48 output channels
        # as proposed by the DeepLabV3+ authors.
        skip_in_channels = encoder_channels[-4]
        skip_out_channels = 48
        self.block1 = nn.Sequential(
            nn.Conv2d(skip_in_channels, skip_out_channels, kernel_size=1,
                      bias=False),
            nn.BatchNorm2d(skip_out_channels),
            nn.ReLU(),
        )

        # Fuse upsampled ASPP features with the projected skip features.
        self.block2 = nn.Sequential(
            SeparableConv2d(skip_out_channels + out_channels, out_channels,
                            kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, *features):
        decoded = self.up(self.aspp(features[-1]))
        skip = self.block1(features[-4])
        return self.block2(torch.cat([decoded, skip], dim=1))
class ASPPConv(nn.Sequential):
    """3x3 atrous conv -> BatchNorm -> ReLU, padded to keep spatial size."""

    def __init__(self, in_channels, out_channels, dilation):
        conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            padding=dilation,  # padding == dilation keeps H and W unchanged
            dilation=dilation,
            bias=False,  # bias is redundant before BatchNorm
        )
        super().__init__(conv, nn.BatchNorm2d(out_channels), nn.ReLU())
class ASPPSeparableConv(nn.Sequential):
    """Depthwise-separable 3x3 atrous conv -> BatchNorm -> ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        sep = SeparableConv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            padding=dilation,  # keeps spatial size for any dilation
            dilation=dilation,
            bias=False,
        )
        super().__init__(sep, nn.BatchNorm2d(out_channels), nn.ReLU())
class ASPPPooling(nn.Sequential):
    """Image-level pooling branch of ASPP.

    Global-average-pools to 1x1, projects channels with a 1x1 conv, then
    upsamples back to the input's spatial size.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, x):
        height_width = x.shape[-2:]
        pooled = x
        for module in self:
            pooled = module(pooled)
        # Restore the original spatial resolution.
        return F.interpolate(pooled, size=height_width, mode='bilinear',
                             align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling.

    Runs five parallel branches (a 1x1 conv, three atrous convs at the
    given rates, and image-level pooling), concatenates their outputs, and
    projects back down to *out_channels*.
    """

    def __init__(self, in_channels, out_channels, atrous_rates, separable=False):
        super(ASPP, self).__init__()
        rate1, rate2, rate3 = tuple(atrous_rates)
        conv_cls = ASPPSeparableConv if separable else ASPPConv

        branches = [
            # 1x1 conv branch.
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
            ),
            conv_cls(in_channels, out_channels, rate1),
            conv_cls(in_channels, out_channels, rate2),
            conv_cls(in_channels, out_channels, rate3),
            ASPPPooling(in_channels, out_channels),
        ]
        self.convs = nn.ModuleList(branches)

        # Fuse the five concatenated branch outputs back to out_channels.
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
        )

    def forward(self, x):
        pyramid = [branch(x) for branch in self.convs]
        return self.project(torch.cat(pyramid, dim=1))
class SeparableConv2d(nn.Sequential):
    """Depthwise-separable convolution.

    A depthwise conv (one spatial filter per input channel, via
    groups=in_channels) followed by a 1x1 pointwise conv that mixes
    channels. Only the pointwise conv may carry a bias.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        bias=True,
    ):
        depthwise = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,  # one filter per channel
            bias=False,
        )
        pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                              bias=bias)
        super().__init__(depthwise, pointwise)
| 33.627273 | 94 | 0.62206 |
522765b438d4b64650ddaffd86f5a23d58c0f190 | 894 | py | Python | plugins/20.py | akhialomgir/auto-derby | 94248ed70e8d83920ca93b62329bccb58bdc78ae | [
"MIT"
] | null | null | null | plugins/20.py | akhialomgir/auto-derby | 94248ed70e8d83920ca93b62329bccb58bdc78ae | [
"MIT"
] | null | null | null | plugins/20.py | akhialomgir/auto-derby | 94248ed70e8d83920ca93b62329bccb58bdc78ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import auto_derby
from auto_derby import single_mode, mathtools
class Training(single_mode.Training):
    """Training scorer that refuses risky trainings.

    Estimates the success rate from current vitality and scores a training
    0 when that estimate drops below 80%, so it is never chosen.
    """

    def score(self, ctx: single_mode.Context) -> float:
        """Return the training score, or 0 when failure risk is too high."""
        # Interpolation tables map vitality (scaled to 0..10000) to an
        # estimated success rate; wisdom trainings use a more lenient table.
        if self.wisdom > 0:
            table = (
                (0, 0.15),
                (1500, 0.3),
                (4000, 1.0),
            )
        else:
            table = (
                (0, 0.01),
                (1500, 0.2),
                (3000, 0.5),
                (5000, 0.85),
                (7000, 1.0),
            )
        success_rate = mathtools.interpolate(int(ctx.vitality * 10000), table)
        if success_rate < 0.8:
            return 0
        return super().score(ctx)
class Plugin(auto_derby.Plugin):
    """auto-derby plugin that swaps in the vitality-aware Training scorer."""

    def install(self) -> None:
        """Install the plugin by overriding the single-mode training class."""
        auto_derby.config.single_mode_training_class = Training
# Register this module as an auto-derby plugin under its module name.
auto_derby.plugin.register(__name__, Plugin())
| 22.35 | 63 | 0.493289 |
25f56fd853fc6ed52990b5af57aabba6a18ed6df | 704 | py | Python | app/events/client/commands/renameAll.py | Hacker-1202/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 14 | 2021-11-05T11:27:25.000Z | 2022-02-28T02:04:32.000Z | app/events/client/commands/renameAll.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 2 | 2022-01-24T22:00:44.000Z | 2022-01-31T13:13:27.000Z | app/events/client/commands/renameAll.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 5 | 2022-01-02T13:33:17.000Z | 2022-02-26T13:09:50.000Z | import discord
import asyncio
from discord.ext import commands
from app.vars.client import client
from app.helpers import Notify
from app.filesystem import ignore
@client.command()
@commands.guild_only()
@commands.has_permissions(manage_nicknames=True)
async def renameAll(ctx, *, nick: str):
    """Rename every member of the current guild to *nick*.

    Does nothing when the guild is on the ignore list. Members the client
    is not allowed to rename (e.g. the guild owner or members with a
    higher role) are skipped instead of aborting the whole command, which
    previously died on the first discord.Forbidden error.
    """
    notify = Notify(ctx=ctx, title = 'Renaming All Members...')
    notify.prepair()
    if str(ctx.guild.id) in ignore.getIgnore():
        notify.error(content='The server {} is being ignored'.format(ctx.guild.name))
        return
    for member in ctx.guild.members:
        try:
            await member.edit(nick=nick)
        except discord.HTTPException:
            # Missing permission / role hierarchy — skip and keep going.
            continue
    notify.success(content=f"All members have been successfully renamed to { nick }")
0cc81fb810176cad0758a62dcf12de8793946109 | 37,768 | py | Python | heat/rpc/client.py | steveb/heat | e5202ef4540887386c4cde10449d97611f90d927 | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/rpc/client.py | steveb/heat | e5202ef4540887386c4cde10449d97611f90d927 | [
"Apache-2.0"
] | null | null | null | heat/rpc/client.py | steveb/heat | e5202ef4540887386c4cde10449d97611f90d927 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the heat engine RPC API."""
from oslo_utils import reflection
from heat.common import messaging
from heat.rpc import api as rpc_api
class EngineClient(object):
"""Client side of the heat engine rpc API.
API version history::
1.0 - Initial version.
1.1 - Add support_status argument to list_resource_types()
1.4 - Add support for service list
1.9 - Add template_type option to generate_template()
1.10 - Add support for software config list
1.11 - Add support for template versions list
1.12 - Add with_detail option for stack resources list
1.13 - Add support for template functions list
1.14 - Add cancel_with_rollback option to stack_cancel_update
1.15 - Add preview_update_stack() call
1.16 - Adds version, type_name to list_resource_types()
1.17 - Add files to validate_template
1.18 - Add show_nested to validate_template
1.19 - Add show_output and list_outputs for returning stack outputs
1.20 - Add resolve_outputs to stack show
1.21 - Add deployment_id to create_software_deployment
1.22 - Add support for stack export
1.23 - Add environment_files to create/update/preview/validate
1.24 - Adds ignorable_errors to validate_template
1.25 - list_stack_resource filter update
1.26 - Add mark_unhealthy
1.27 - Add check_software_deployment
1.28 - Add environment_show call
1.29 - Add template_id to create_stack/update_stack
"""
BASE_RPC_API_VERSION = '1.0'
    def __init__(self):
        # RPC client bound to the heat-engine topic at the base API version;
        # higher per-call versions are applied via prepare() in call()/cast().
        self._client = messaging.get_rpc_client(
            topic=rpc_api.ENGINE_TOPIC,
            version=self.BASE_RPC_API_VERSION)
@staticmethod
def make_msg(method, **kwargs):
return method, kwargs
def call(self, ctxt, msg, version=None, timeout=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
if timeout is not None:
client = client.prepare(timeout=timeout)
return client.call(ctxt, method, **kwargs)
def cast(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.cast(ctxt, method, **kwargs)
def local_error_name(self, error):
"""Returns the name of the error with any _Remote postfix removed.
:param error: Remote raised error to derive the name from.
"""
error_name = reflection.get_class_name(error, fully_qualified=False)
return error_name.split('_Remote')[0]
def ignore_error_named(self, error, name):
"""Raises the error unless its local name matches the supplied name.
:param error: Remote raised error to derive the local name from.
:param name: Name to compare local name to.
"""
if self.local_error_name(error) != name:
raise error
def identify_stack(self, ctxt, stack_name):
"""Returns the full stack identifier for a single, live stack.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('identify_stack',
stack_name=stack_name))
def list_stacks(self, ctxt, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
"""Returns attributes of all stacks.
It supports pagination (``limit`` and ``marker``), sorting
(``sort_keys`` and ``sort_dir``) and filtering (``filters``) of the
results.
:param ctxt: RPC context.
:param limit: the number of stacks to list (integer or string)
:param marker: the ID of the last item in the previous page
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc')
:param filters: a dict with attribute:value to filter the list
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, show soft-deleted stacks
:param show_nested: if true, show nested stacks
:param show_hidden: if true, show hidden stacks
:param tags: show stacks containing these tags, combine multiple
tags using the boolean AND expression
:param tags_any: show stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: show stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: show stacks not containing these tags, combine
multiple tags using the boolean OR expression
:returns: a list of stacks
"""
return self.call(ctxt,
self.make_msg('list_stacks', limit=limit,
sort_keys=sort_keys, marker=marker,
sort_dir=sort_dir, filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags, tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
def count_stacks(self, ctxt, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
"""Returns the number of stacks that match the given filters.
:param ctxt: RPC context.
:param filters: a dict of ATTR:VALUE to match against stacks
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, count will include the deleted stacks
:param show_nested: if true, count will include nested stacks
:param show_hidden: if true, count will include hidden stacks
:param tags: count stacks containing these tags, combine multiple tags
using the boolean AND expression
:param tags_any: count stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: count stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: count stacks not containing these tags, combine
multiple tags using the boolean OR expression
:returns: an integer representing the number of matched stacks
"""
return self.call(ctxt, self.make_msg('count_stacks',
filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags,
tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
def show_stack(self, ctxt, stack_identity, resolve_outputs=True):
"""Returns detailed information about one or all stacks.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to show, or None to
show all
:param resolve_outputs: If True, stack outputs will be resolved
"""
return self.call(ctxt, self.make_msg('show_stack',
stack_identity=stack_identity,
resolve_outputs=resolve_outputs),
version='1.20')
def preview_stack(self, ctxt, stack_name, template, params, files,
args, environment_files=None):
"""Simulates a new stack using the provided template.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self.call(ctxt,
self.make_msg('preview_stack', stack_name=stack_name,
template=template,
params=params, files=files,
environment_files=environment_files,
args=args),
version='1.23')
def create_stack(self, ctxt, stack_name, template, params, files,
args, environment_files=None):
"""Creates a new stack using the template provided.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self._create_stack(ctxt, stack_name, template, params, files,
args, environment_files=environment_files)
def _create_stack(self, ctxt, stack_name, template, params, files,
args, environment_files=None,
owner_id=None, nested_depth=0, user_creds_id=None,
stack_user_project_id=None, parent_resource_name=None,
template_id=None):
"""Internal interface for engine-to-engine communication via RPC.
Allows some additional options which should not be exposed to users via
the API:
:param owner_id: parent stack ID for nested stacks
:param nested_depth: nested depth for nested stacks
:param user_creds_id: user_creds record for nested stack
:param stack_user_project_id: stack user project for nested stack
:param parent_resource_name: the parent resource name
:param template_id: the ID of a pre-stored template in the DB
"""
return self.call(
ctxt, self.make_msg('create_stack', stack_name=stack_name,
template=template,
params=params, files=files,
environment_files=environment_files,
args=args, owner_id=owner_id,
nested_depth=nested_depth,
user_creds_id=user_creds_id,
stack_user_project_id=stack_user_project_id,
parent_resource_name=parent_resource_name,
template_id=template_id),
version='1.29')
def update_stack(self, ctxt, stack_identity, template, params,
files, args, environment_files=None):
"""Updates an existing stack based on the provided template and params.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self._update_stack(ctxt, stack_identity, template, params,
files, args,
environment_files=environment_files)
def _update_stack(self, ctxt, stack_identity, template, params,
files, args, environment_files=None,
template_id=None):
"""Internal interface for engine-to-engine communication via RPC.
Allows an additional option which should not be exposed to users via
the API:
:param template_id: the ID of a pre-stored template in the DB
"""
return self.call(ctxt,
self.make_msg('update_stack',
stack_identity=stack_identity,
template=template,
params=params,
files=files,
environment_files=environment_files,
args=args,
template_id=template_id),
version='1.29')
def preview_update_stack(self, ctxt, stack_identity, template, params,
files, args, environment_files=None):
"""Returns the resources that would be changed in an update.
Based on the provided template and parameters.
Requires RPC version 1.15 or above.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you wish to update.
:param template: New template for the stack.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
"""
return self.call(ctxt,
self.make_msg('preview_update_stack',
stack_identity=stack_identity,
template=template,
params=params,
files=files,
environment_files=environment_files,
args=args,
),
version='1.23')
def validate_template(self, ctxt, template, params=None, files=None,
environment_files=None, show_nested=False,
ignorable_errors=None):
"""Uses the stack parser to check the validity of a template.
:param ctxt: RPC context.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment/template.
:param environment_files: ordered list of environment file names
included in the files dict
:param show_nested: if True nested templates will be validated
:param ignorable_errors: List of error_code to be ignored as part of
validation
"""
return self.call(ctxt, self.make_msg(
'validate_template',
template=template,
params=params,
files=files,
show_nested=show_nested,
environment_files=environment_files,
ignorable_errors=ignorable_errors),
version='1.24')
def authenticated_to_backend(self, ctxt):
"""Validate the credentials in the RPC context.
Verify that the credentials in the RPC context are valid for the
current cloud backend.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('authenticated_to_backend'))
def get_template(self, ctxt, stack_identity):
"""Get the template.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to see.
"""
return self.call(ctxt, self.make_msg('get_template',
stack_identity=stack_identity))
def get_environment(self, context, stack_identity):
"""Returns the environment for an existing stack.
:param context: RPC context
:param stack_identity: identifies the stack
:rtype: dict
"""
return self.call(context,
self.make_msg('get_environment',
stack_identity=stack_identity),
version='1.28')
def delete_stack(self, ctxt, stack_identity, cast=True):
"""Deletes a given stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to delete.
:param cast: cast the message or use call (default: True)
"""
rpc_method = self.cast if cast else self.call
return rpc_method(ctxt,
self.make_msg('delete_stack',
stack_identity=stack_identity))
def abandon_stack(self, ctxt, stack_identity):
"""Deletes a given stack but resources would not be deleted.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to abandon.
"""
return self.call(ctxt,
self.make_msg('abandon_stack',
stack_identity=stack_identity))
def list_resource_types(self,
ctxt,
support_status=None,
type_name=None,
heat_version=None):
"""Get a list of valid resource types.
:param ctxt: RPC context.
:param support_status: Support status of resource type
:param type_name: Resource type's name (regular expression allowed)
:param version: Heat version
"""
return self.call(ctxt, self.make_msg('list_resource_types',
support_status=support_status,
type_name=type_name,
heat_version=heat_version),
version='1.16')
def list_template_versions(self, ctxt):
"""Get a list of available template versions.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('list_template_versions'),
version='1.11')
def list_template_functions(self, ctxt, template_version):
"""Get a list of available functions in a given template.
:param ctxt: RPC context
:param template_name : name of the template which function list you
want to get
"""
return self.call(ctxt, self.make_msg(
'list_template_functions', template_version=template_version),
version='1.13')
def resource_schema(self, ctxt, type_name):
"""Get the schema for a resource type.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('resource_schema',
type_name=type_name))
def generate_template(self, ctxt, type_name, template_type='cfn'):
"""Generate a template based on the specified type.
:param ctxt: RPC context.
:param type_name: The resource type name to generate a template for.
:param template_type: the template type to generate, cfn or hot.
"""
return self.call(ctxt, self.make_msg('generate_template',
type_name=type_name,
template_type=template_type),
version='1.9')
def list_events(self, ctxt, stack_identity, filters=None, limit=None,
marker=None, sort_keys=None, sort_dir=None,):
"""Lists all events associated with a given stack.
It supports pagination (``limit`` and ``marker``),
sorting (``sort_keys`` and ``sort_dir``) and filtering(filters)
of the results.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to get events for
:param filters: a dict with attribute:value to filter the list
:param limit: the number of events to list (integer or string)
:param marker: the ID of the last event in the previous page
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc').
"""
return self.call(ctxt, self.make_msg('list_events',
stack_identity=stack_identity,
filters=filters,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir))
def describe_stack_resource(self, ctxt, stack_identity, resource_name,
with_attr=False):
"""Get detailed resource information about a particular resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
"""
return self.call(ctxt,
self.make_msg('describe_stack_resource',
stack_identity=stack_identity,
resource_name=resource_name,
with_attr=with_attr),
version='1.2')
def find_physical_resource(self, ctxt, physical_resource_id):
"""Return an identifier for the resource.
:param ctxt: RPC context.
:param physcial_resource_id: The physical resource ID to look up.
"""
return self.call(ctxt,
self.make_msg(
'find_physical_resource',
physical_resource_id=physical_resource_id))
def describe_stack_resources(self, ctxt, stack_identity, resource_name):
"""Get detailed resource information about one or more resources.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
"""
return self.call(ctxt, self.make_msg('describe_stack_resources',
stack_identity=stack_identity,
resource_name=resource_name))
def list_stack_resources(self, ctxt, stack_identity,
nested_depth=0, with_detail=False,
filters=None):
"""List the resources belonging to a stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param nested_depth: Levels of nested stacks of which list resources.
:param with_detail: show detail for resources in list.
:param filters: a dict with attribute:value to search the resources
"""
return self.call(ctxt,
self.make_msg('list_stack_resources',
stack_identity=stack_identity,
nested_depth=nested_depth,
with_detail=with_detail,
filters=filters),
version='1.25')
def stack_suspend(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_suspend',
stack_identity=stack_identity))
def stack_resume(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_resume',
stack_identity=stack_identity))
def stack_check(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_check',
stack_identity=stack_identity))
def stack_cancel_update(self, ctxt, stack_identity,
cancel_with_rollback=True):
return self.call(ctxt,
self.make_msg(
'stack_cancel_update',
stack_identity=stack_identity,
cancel_with_rollback=cancel_with_rollback),
version='1.14')
def resource_signal(self, ctxt, stack_identity, resource_name, details,
sync_call=False):
"""Generate an alarm on the resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
:param details: the details of the signal.
"""
return self.call(ctxt, self.make_msg('resource_signal',
stack_identity=stack_identity,
resource_name=resource_name,
details=details,
sync_call=sync_call),
version='1.3')
def resource_mark_unhealthy(self, ctxt, stack_identity, resource_name,
mark_unhealthy, resource_status_reason=None):
"""Mark the resource as unhealthy or healthy.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
:param mark_unhealthy: indicates whether the resource is unhealthy.
:param resource_status_reason: reason for health change.
"""
return self.call(
ctxt,
self.make_msg('resource_mark_unhealthy',
stack_identity=stack_identity,
resource_name=resource_name,
mark_unhealthy=mark_unhealthy,
resource_status_reason=resource_status_reason),
version='1.26')
def create_watch_data(self, ctxt, watch_name, stats_data):
"""Creates data for CloudWatch and WaitConditions.
This could be used by CloudWatch and WaitConditions and treat HA
service events like any other CloudWatch.
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm
:param stats_data: The data to post.
"""
return self.call(ctxt, self.make_msg('create_watch_data',
watch_name=watch_name,
stats_data=stats_data))
def show_watch(self, ctxt, watch_name):
"""Returns the attributes of one watch/alarm.
The show_watch method returns the attributes of one watch
or all watches if no watch_name is passed.
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch',
watch_name=watch_name))
def show_watch_metric(self, ctxt, metric_namespace=None, metric_name=None):
"""Returns the datapoints for a metric.
The show_watch_metric method returns the datapoints associated
with a specified metric, or all metrics if no metric_name is passed.
:param ctxt: RPC context.
:param metric_namespace: Name of the namespace you want to see,
or None to see all
:param metric_name: Name of the metric you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch_metric',
metric_namespace=metric_namespace,
metric_name=metric_name))
def set_watch_state(self, ctxt, watch_name, state):
"""Temporarily set the state of a given watch.
:param ctxt: RPC context.
:param watch_name: Name of the watch
:param state: State (must be one defined in WatchRule class)
"""
return self.call(ctxt, self.make_msg('set_watch_state',
watch_name=watch_name,
state=state))
def get_revision(self, ctxt):
return self.call(ctxt, self.make_msg('get_revision'))
def show_software_config(self, cnxt, config_id):
return self.call(cnxt, self.make_msg('show_software_config',
config_id=config_id))
def list_software_configs(self, cnxt, limit=None, marker=None,
tenant_safe=True):
return self.call(cnxt,
self.make_msg('list_software_configs',
limit=limit,
marker=marker,
tenant_safe=tenant_safe),
version='1.10')
def create_software_config(self, cnxt, group, name, config,
inputs=None, outputs=None, options=None):
inputs = inputs or []
outputs = outputs or []
options = options or {}
return self.call(cnxt, self.make_msg('create_software_config',
group=group,
name=name,
config=config,
inputs=inputs,
outputs=outputs,
options=options))
def delete_software_config(self, cnxt, config_id):
return self.call(cnxt, self.make_msg('delete_software_config',
config_id=config_id))
def list_software_deployments(self, cnxt, server_id=None):
return self.call(cnxt, self.make_msg('list_software_deployments',
server_id=server_id))
def metadata_software_deployments(self, cnxt, server_id):
return self.call(cnxt, self.make_msg('metadata_software_deployments',
server_id=server_id))
def show_software_deployment(self, cnxt, deployment_id):
return self.call(cnxt, self.make_msg('show_software_deployment',
deployment_id=deployment_id))
def check_software_deployment(self, cnxt, deployment_id, timeout):
return self.call(cnxt, self.make_msg('check_software_deployment',
deployment_id=deployment_id,
timeout=timeout),
timeout=timeout, version='1.27')
def create_software_deployment(self, cnxt, server_id, config_id=None,
input_values=None, action='INIT',
status='COMPLETE', status_reason='',
stack_user_project_id=None,
deployment_id=None):
input_values = input_values or {}
return self.call(cnxt, self.make_msg(
'create_software_deployment',
server_id=server_id,
config_id=config_id,
deployment_id=deployment_id,
input_values=input_values,
action=action,
status=status,
status_reason=status_reason,
stack_user_project_id=stack_user_project_id))
def update_software_deployment(self, cnxt, deployment_id,
config_id=None, input_values=None,
output_values=None, action=None,
status=None, status_reason=None,
updated_at=None):
return self.call(
cnxt, self.make_msg('update_software_deployment',
deployment_id=deployment_id,
config_id=config_id,
input_values=input_values,
output_values=output_values,
action=action,
status=status,
status_reason=status_reason,
updated_at=updated_at),
version='1.5')
def delete_software_deployment(self, cnxt, deployment_id):
return self.call(cnxt, self.make_msg('delete_software_deployment',
deployment_id=deployment_id))
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at=None):
return self.call(
cnxt, self.make_msg('signal_software_deployment',
deployment_id=deployment_id,
details=details,
updated_at=updated_at),
version='1.6')
def stack_snapshot(self, ctxt, stack_identity, name):
return self.call(ctxt, self.make_msg('stack_snapshot',
stack_identity=stack_identity,
name=name))
def show_snapshot(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('show_snapshot',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def delete_snapshot(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('delete_snapshot',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def stack_list_snapshots(self, cnxt, stack_identity):
return self.call(cnxt, self.make_msg('stack_list_snapshots',
stack_identity=stack_identity))
def stack_restore(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('stack_restore',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def list_services(self, cnxt):
return self.call(cnxt, self.make_msg('list_services'), version='1.4')
def list_outputs(self, cntx, stack_identity):
return self.call(cntx, self.make_msg('list_outputs',
stack_identity=stack_identity),
version='1.19')
def show_output(self, cntx, stack_identity, output_key):
return self.call(cntx, self.make_msg('show_output',
stack_identity=stack_identity,
output_key=output_key),
version='1.19')
def export_stack(self, ctxt, stack_identity):
"""Exports the stack data in JSON format.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to export.
"""
return self.call(ctxt,
self.make_msg('export_stack',
stack_identity=stack_identity),
version='1.22')
| 45.834951 | 79 | 0.558092 |
37b688f3902462b53ad80ad2d77cbe26e2f0b2cb | 730 | py | Python | final_project/server.py | thisdotthis/xzceb-flask_eng_fr | 0a385fe9ba5fdf0e7cf284259bf0d65e44f5815a | [
"Apache-2.0"
] | null | null | null | final_project/server.py | thisdotthis/xzceb-flask_eng_fr | 0a385fe9ba5fdf0e7cf284259bf0d65e44f5815a | [
"Apache-2.0"
] | null | null | null | final_project/server.py | thisdotthis/xzceb-flask_eng_fr | 0a385fe9ba5fdf0e7cf284259bf0d65e44f5815a | [
"Apache-2.0"
] | null | null | null | from machinetranslation.translator import english_to_french, french_to_english
from flask import Flask, render_template, request
import json
# Single Flask application instance; templates are loaded from ./templates.
app = Flask(__name__,template_folder='templates')
@app.route("/englishToFrench")
def englishToFrench():
    """Translate the 'textToTranslate' query parameter from English to French.

    Returns the translated text, or an explanatory message when the
    parameter is missing or empty (previously None was passed straight
    into the translator).
    """
    textToTranslate = request.args.get('textToTranslate')
    if not textToTranslate:
        # Guard against a missing/empty query parameter.
        return "No text provided to translate"
    translatedText = english_to_french(textToTranslate)
    return translatedText
@app.route("/frenchToEnglish")
def frenchToEnglish():
    """Translate the 'textToTranslate' query parameter from French to English.

    Returns the translated text, or an explanatory message when the
    parameter is missing or empty (previously None was passed straight
    into the translator).
    """
    textToTranslate = request.args.get('textToTranslate')
    if not textToTranslate:
        # Guard against a missing/empty query parameter.
        return "No text provided to translate"
    translatedText = french_to_english(textToTranslate)
    return translatedText
@app.route("/")
def renderIndexPage():
    """Serve the application's landing page."""
    template_name = 'index.html'
    return render_template(template_name)
# Run the Flask development server on all interfaces when executed directly.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=3000)
| 29.2 | 78 | 0.769863 |
0326d0078bbb1b796ae4fad156dd6c8b7eb8efa0 | 1,976 | py | Python | examples/lammps/melting/lammps-4nodes.py | tpeterka/decaf | ad6ad823070793bfd7fc8d9384d5475f7cf20848 | [
"BSD-3-Clause"
] | 1 | 2019-05-10T02:50:50.000Z | 2019-05-10T02:50:50.000Z | examples/lammps/melting/lammps-4nodes.py | tpeterka/decaf | ad6ad823070793bfd7fc8d9384d5475f7cf20848 | [
"BSD-3-Clause"
] | 2 | 2020-10-28T03:44:51.000Z | 2021-01-18T19:49:33.000Z | examples/lammps/melting/lammps-4nodes.py | tpeterka/decaf | ad6ad823070793bfd7fc8d9384d5475f7cf20848 | [
"BSD-3-Clause"
] | 2 | 2018-08-31T14:02:47.000Z | 2020-04-17T16:01:54.000Z | # a small 4-node example
# input file
infile = 'in.melt'
# --- include the following 4 lines each time ---
import networkx as nx
import os
import imp
# Load the decaf workflow helper module from the installed DECAF prefix.
wf = imp.load_source('workflow', os.environ['DECAF_PREFIX'] + '/python/decaf.py')
# --- set your options here ---
# path to .so module for dataflow callback functions
mod_path = os.environ['DECAF_PREFIX'] + '/examples/lammps/melting/mod_lammps.so'
# define workflow graph
# 4-node workflow
#
#   print1 (1 proc)
#  /
# lammps (4 procs)
#  \
#   print2 (1 proc) - print3 (1 proc)
#
# entire workflow takes 10 procs (1 link proc between each producer consumer pair)
#
# --- Graph definition ---
lammps = wf.Node("lammps", start_proc=0, nprocs=4, func='lammps', cmdline='./lammps')
outPort0 = lammps.addOutputPort("out")
print1 = wf.Node("print1", start_proc=5, nprocs=1, func='print', cmdline='./lammps')
inPort1 = print1.addInputPort("in")
print2 = wf.Node("print2", start_proc=7, nprocs=1, func='print2', cmdline='./lammps')
inPort2 = print2.addInputPort("in")
outPort2 = print2.addOutputPort("out")
print3 = wf.Node("print3", start_proc=9, nprocs=1, func='print', cmdline='./lammps')
inPort3 = print3.addInputPort("in")
# Each Edge reserves one dedicated dataflow (link) process between a
# producer output port and a consumer input port.
link1 = wf.Edge(lammps.getOutputPort("out"), print1.getInputPort("in"), start_proc=4, nprocs=1, func='dflow',
                path=mod_path, prod_dflow_redist='count', dflow_con_redist='count', cmdline='./lammps')
link2 = wf.Edge(lammps.getOutputPort("out"), print2.getInputPort("in"), start_proc=6, nprocs=1, func='dflow',
                path=mod_path, prod_dflow_redist='count', dflow_con_redist='count', cmdline='./lammps')
link3 = wf.Edge(print2.getOutputPort("out"), print3.getInputPort("in"), start_proc=8, nprocs=1, func='dflow',
                path=mod_path, prod_dflow_redist='count', dflow_con_redist='count', cmdline='./lammps')
# --- convert the nx graph into a workflow data structure and run the workflow ---
wf.processGraph("lammps",infile)
| 35.285714 | 112 | 0.682186 |
9f8d958020c4f04ba7e69f7e2a2509bc31f978c0 | 462 | py | Python | my/core/time.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | my/core/time.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | my/core/time.py | thetomcraig/HPI | 5eecd8721dc0cbfc68040106bb7b540b1567dff3 | [
"MIT"
] | null | null | null | from functools import lru_cache
from datetime import datetime, tzinfo
import pytz # type: ignore
# https://gist.github.com/edwardabraham/8680198
# Maps timezone abbreviations (as reported for "now") to pytz timezone
# objects.  NOTE(review): abbreviations are ambiguous and DST-dependent,
# so later entries in pytz.all_timezones silently win -- confirm this is
# acceptable for the callers.
tz_lookup = {
    pytz.timezone(x).localize(datetime.now()).tzname(): pytz.timezone(x)
    for x in pytz.all_timezones
}
tz_lookup['UTC'] = pytz.utc  # ugh. otherwise it'z Zulu...
# TODO dammit, lru_cache interferes with mypy?
@lru_cache(maxsize=None)
def abbr_to_timezone(abbr: str) -> tzinfo:
    """Resolve a timezone abbreviation (e.g. 'UTC') to its tzinfo object."""
    return tz_lookup[abbr]
| 25.666667 | 72 | 0.735931 |
90bde287e2ef4714327514da06e86d797ba92dcc | 6,608 | py | Python | tests/test_project.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | [
"BSD-3-Clause"
] | 1,582 | 2015-01-05T02:41:44.000Z | 2022-03-30T20:03:22.000Z | tests/test_project.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | [
"BSD-3-Clause"
] | 66 | 2015-01-23T07:58:04.000Z | 2021-11-12T02:23:27.000Z | tests/test_project.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | [
"BSD-3-Clause"
] | 347 | 2015-01-05T07:47:07.000Z | 2021-09-20T21:22:32.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from datetime import datetime
from nose.tools import ok_
from tests.base import TestCase
from vilya.config import DOMAIN
from vilya.models.project import CodeDoubanProject
from vilya.libs.text import gravatar_url
from vilya.libs.permdir import get_repo_root
class TestProject(TestCase):
    """Integration tests for CodeDoubanProject: creation/deletion,
    metadata, permissions, fork & watch counters, transfer, rename and
    the can_push flag.  Tests create real bare git repositories under
    get_repo_root() and clean up with project.delete() where needed.
    """
    def test_create_project(self):
        project_name = "project"
        project = CodeDoubanProject.add(
            project_name, owner_id="test1", summary="test", product="fire")
        git_path = os.path.join(get_repo_root(), '%s.git' % project_name)
        # Creating a project must materialize the bare repo on disk.
        ok_(os.path.exists(git_path))
        project.delete()
    # Hooks are disabled for local development, so this test is commented out.
    # def test_create_project_with_hook(self):
    #     project_name = "project2"
    #     project = CodeDoubanProject.add(
    #         project_name, owner_id="test1", summary="test", product="fire")
    #     hookfile_path = "%s/hooks/post-receive" % project.git_real_path
    #     ok_(os.path.exists(hookfile_path))
    #     project.delete()
    def test_project_meta_dict(self):
        project_name = "project3"
        owner_id = "testuser"
        summary = "a summary"
        product = "fire"
        project = CodeDoubanProject.add(
            project_name, owner_id, summary, product)
        # hookfile_path = "%s/hooks/post-receive" % project.git_real_path
        project = CodeDoubanProject.get_by_name(project_name)
        # Expected payload of project.get_info() for a freshly created project.
        data = {
            'url': "%s/%s" % (DOMAIN, project_name),
            'name': project_name,
            'description': summary,
            'product': product,
            'committers_count': 0,
            'forked_count': 0,
            'open_issues_count': 0,
            'open_tickets_count': 0,
            'watched_count': 0,
            'owner': {
                'name': owner_id,
                'avatar': gravatar_url(owner_id + '@douban.com'),
            },
        }
        commits = project.git.get_revisions("HEAD~1", "HEAD")
        if commits:
            data['last_commit'] = commits[0]
        ok_(project.get_info() == data)
        project.delete()
    def test_project_validate(self):
        # NOTE(review): the positional-argument order differs between these
        # two constructions (summary/owner/date vs owner/date/summary) --
        # one of them is probably wrong; confirm against
        # CodeDoubanProject.__init__.
        noname_project = CodeDoubanProject(
            108, '', "test1", "testuser", datetime.now(), "fire",
            '/fake_path', '/fake_path')
        ok_project = CodeDoubanProject(
            108, 'project6', "testuser", datetime.now(), "test",
            "fire", '/fake_path', '/fake_path')
        # A project without a name must fail validation; a named one passes.
        ok_(bool(noname_project.validate()))
        ok_(not bool(ok_project.validate()))
    def test_permissions_check(self):
        project_name = "project4"
        project = CodeDoubanProject.add(project_name, owner_id="admin_user",
                                        summary="test", product="fire")
        ok_(project.is_admin("admin_user"))
        ok_(not project.is_admin("other_user"))
        project.delete()
    def test_delete_project(self):
        project_name = "project5"
        project = CodeDoubanProject.add(project_name, owner_id="admin_user",
                                        summary="test", product="fire")
        git_path = os.path.join(get_repo_root(), '%s.git' % project_name)
        ok_(os.path.isdir(git_path))
        project.delete()
        # Deleting the project must also remove the repo directory.
        ok_(not os.path.exists(git_path))
    def test_fork_and_watch_project(self):
        p6 = CodeDoubanProject.add('project6', owner_id="admin_user",
                                   summary="test", product="fire")
        p7 = CodeDoubanProject.add('project7', owner_id="other_user",
                                   summary="test", product="fire")
        fork_count = CodeDoubanProject.get_forked_count(p6.id)
        p6fork = p6.fork('project6_other_user', 'other_user')
        fork_count2 = CodeDoubanProject.get_forked_count(p6.id)
        ok_(fork_count2 == fork_count + 1)
        ok_(CodeDoubanProject.get_forked_count(p6fork.id) == 0)
        # Forking a fork increments both the root's and the fork's counters.
        p6fork2 = p6fork.fork('project6_fork_other_user', 'other_user')
        ok_(CodeDoubanProject.get_forked_count(p6.id) == fork_count + 2)
        ok_(CodeDoubanProject.get_forked_count(p6fork.id) == 1)
        ok_(CodeDoubanProject.get_forked_count(p6fork2.id) == 0)
        watch_count = CodeDoubanProject.get_watched_count(p7.id)
        CodeDoubanProject.add_watch(p7.id, 'admin_user')
        watch_count2 = CodeDoubanProject.get_watched_count(p7.id)
        ok_(watch_count2 == watch_count + 1)
        ok_(len(p7.get_watch_users()) == watch_count2)
        p6.delete()
        p7.delete()
    def test_transfer_project(self):
        pname1 = 'project6'
        pname2 = 'project7'
        proj_owner = 'admin_user'
        to_user = 'testuser1'
        p = CodeDoubanProject.add(pname1, owner_id=proj_owner,
                                  summary="test", product="fire")
        _ = CodeDoubanProject.add(pname2, owner_id=proj_owner,
                                  summary="test", product="fire")
        p.transfer_to(to_user)
        # Only the transferred project changes owner.
        p1 = CodeDoubanProject.get_by_name(pname1)
        assert p1.owner_id == to_user
        p2 = CodeDoubanProject.get_by_name(pname2)
        assert p2.owner_id == proj_owner
    def test_rename_project(self):
        pname1 = 'project8'
        pname2 = 'project9'
        proj_owner = 'admin_user'
        p = CodeDoubanProject.add(pname1, owner_id=proj_owner,
                                  summary="test", product="fire")
        p.rename(pname2)
        assert p.name == pname2
        # Renaming must also move the repo directory on disk.
        git_path = os.path.join(get_repo_root(), '%s.git' % pname2)
        ok_(os.path.exists(git_path))
    def test_rename_bad_project(self):
        pname1 = 'project10'
        pname2 = '/dad13/'
        proj_owner = 'admin_user'
        p = CodeDoubanProject.add(pname1, owner_id=proj_owner,
                                  summary="test", product="fire")
        # Invalid target names are rejected and nothing moves on disk.
        assert p.rename(pname2) is False
        git_path = os.path.join(get_repo_root(), '%s.git' % pname1)
        ok_(os.path.exists(git_path))
    def test_update_can_push(self):
        project_name = "project11"
        owner_id = "testuser"
        summary = "a summary"
        product = "fire"
        CodeDoubanProject.add(project_name,
                              owner_id,
                              summary,
                              product)
        p = CodeDoubanProject.get_by_name('project11')
        assert p.can_push == 1
        # Toggling can_push persists as 0/1 in the reloaded project.
        p.update_can_push(False)
        p = CodeDoubanProject.get_by_name('project11')
        assert p.can_push == 0
        p.update_can_push(True)
        p = CodeDoubanProject.get_by_name('project11')
        assert p.can_push == 1
| 37.76 | 76 | 0.597306 |
3fcdc57906c214bdc8179c55b576e2e9e8d80973 | 19,611 | py | Python | python/paddle/fluid/tests/unittests/test_dist_base.py | tianjianhe/Paddle | 2b11c710b3dddf07873fefaaa3758349d2396e88 | [
"Apache-2.0"
] | 2 | 2019-04-03T05:36:17.000Z | 2020-04-29T03:38:54.000Z | python/paddle/fluid/tests/unittests/test_dist_base.py | tianjianhe/Paddle | 2b11c710b3dddf07873fefaaa3758349d2396e88 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_dist_base.py | tianjianhe/Paddle | 2b11c710b3dddf07873fefaaa3758349d2396e88 | [
"Apache-2.0"
] | 3 | 2019-01-07T06:50:29.000Z | 2019-03-13T08:48:23.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import time
import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler
# Number of training steps each process runs; the per-step losses are
# compared between the local run and the distributed runs.
RUN_STEP = 10
# Fallback minibatch size used when no --batch_size is given.
DEFAULT_BATCH_SIZE = 2
class TestDistRunnerBase(object):
    """Base class for the subprocess entry points of distributed-training
    tests.  Subclasses implement get_model(); runtime_main() dispatches a
    spawned process to run_pserver() or run_trainer().
    """
    def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")
    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False):
        # NOTE: import fluid until runtime, or else forking processes will cause error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode)
        return t
    def run_pserver(self, args):
        # Runs a parameter-server process; exe.run(pserver_prog) blocks
        # until the parent test terminates this process.
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize
        t = self.get_transpiler(args.trainer_id,
                                fluid.default_main_program(), args.endpoints,
                                args.trainers, args.sync_mode, args.dc_asgd)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        exe.run(pserver_prog)
    def run_trainer(self, args):
        # Builds the model, optionally transpiles it for pserver/nccl2 mode,
        # trains RUN_STEP steps and writes the pickled loss list to stdout.
        self.lr = args.lr
        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size)
        if args.mem_opt:
            fluid.memory_optimize(fluid.default_main_program(), skip_grads=True)
        if args.update_method == "pserver":
            t = self.get_transpiler(args.trainer_id,
                                    fluid.default_main_program(),
                                    args.endpoints, args.trainers,
                                    args.sync_mode, args.dc_asgd)
            trainer_prog = t.get_trainer_program()
        elif args.update_method == "nccl2":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            trainer_prog = fluid.default_main_program()
        else:
            # "local" mode: run the untranspiled program.
            trainer_prog = fluid.default_main_program()
        if args.use_cuda:
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        strategy = fluid.ExecutionStrategy()
        strategy.num_threads = 1
        strategy.allow_op_delay = False
        build_stra = fluid.BuildStrategy()
        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(
                len(pass_builder.all_passes()) - 2, "multi_batch_merge_pass")
            mypass.set_int("num_repeats", args.batch_merge_repeat)
        if args.update_method == "nccl2":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=strategy)
        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]
        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()
        def get_data():
            # In distributed mode each of the two trainers takes every
            # other sample (offset % 2 == trainer_id) so they split the batch.
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch
        out_losses = []
        for _ in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
        # Emit the pickled per-step losses on stdout; the parent test
        # process unpickles them for comparison.
        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))
def runtime_main(test_class):
    """Subprocess entry point for distributed-training tests.

    Parses the command line describing this process's role in the test
    cluster, instantiates ``test_class`` and dispatches to its
    ``run_pserver`` or ``run_trainer`` method.
    """
    arg_parser = argparse.ArgumentParser(description='Run dist test.')
    arg_parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    arg_parser.add_argument('--endpoints', type=str, required=False, default="")
    arg_parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local"])
    arg_parser.add_argument('--trainer_id', type=int, required=False, default=0)
    arg_parser.add_argument('--trainers', type=int, required=False, default=1)
    arg_parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    arg_parser.add_argument('--sync_mode', action='store_true')
    arg_parser.add_argument('--mem_opt', action='store_true')
    arg_parser.add_argument('--use_cuda', action='store_true')
    arg_parser.add_argument('--use_reduce', action='store_true')
    arg_parser.add_argument('--dc_asgd', action='store_true')
    arg_parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    arg_parser.add_argument('--batch_size', required=False, type=int, default=2)
    arg_parser.add_argument('--lr', required=False, type=float, default=0.001)
    arg_parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)

    parsed_args = arg_parser.parse_args()
    model = test_class()
    # Only a pserver-role process using the pserver update method acts as a
    # parameter server; everything else (including nccl2/local) trains.
    if parsed_args.role == "pserver" and parsed_args.update_method == "pserver":
        model.run_pserver(parsed_args)
    else:
        model.run_trainer(parsed_args)
import paddle.compat as cpt
import socket
from contextlib import closing
class TestDistBase(unittest.TestCase):
    """Drives a local training run and a 2-trainer distributed run (pserver
    or nccl2 mode) in subprocesses, then compares the per-step losses.
    Subclasses configure the scenario in _setup_config().
    """
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")
    def _after_setup_config(self):
        # Resolve the CUDA/CPU decision after subclass configuration ran.
        if self._enforce_place == "CPU":
            self.__use_cuda = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._sync_mode = True
        self._enforce_place = None
        self._mem_opt = False
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._lr = 0.001
        self._setup_config()
        self._after_setup_config()
    def _find_free_port(self):
        # Bind to port 0 to let the OS pick a free port; _port_set keeps
        # ports unique within this test instance.
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]
        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port
    def start_pserver(self, model_file, check_error_log, required_envs):
        # Launch the two parameter-server subprocesses; stderr goes to
        # /tmp/ps{0,1}_err.log.
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"
        ps0_cmd = ps_cmd % \
            (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
             self._trainers)
        ps1_cmd = ps_cmd % \
            (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
             self._trainers)
        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"
        if self._mem_opt:
            ps0_cmd += " --mem_opt"
            ps1_cmd += " --mem_opt"
        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open("/tmp/ps0_err.log", "wb")
        ps1_pipe = open("/tmp/ps1_err.log", "wb")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)
        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe
    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1):
        # Single-process reference run; returns the unpickled loss list.
        cmd = "%s %s --role trainer --lr %f" % (self._python_interp, model,
                                                self._lr)
        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {"CUDA_VISIBLE_DEVICES": "0"}
        else:
            env_local = {'CPU_NUM': '1'}
        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))
        if check_error_log:
            err_log = open("/tmp/trainer.err.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)
        local_out, local_err = local_proc.communicate()
        if check_error_log:
            err_log.close()
        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))
        return pickle.loads(local_out)
    def _run_cluster(self, model, envs, check_error_log):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model,
                                                          check_error_log, envs)
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        tr_cmd = "%s %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"
        tr0_cmd = tr_cmd % \
            (self._python_interp, model, self._ps_endpoints,
             0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
            (self._python_interp, model, self._ps_endpoints,
             1, ps1_ep, self._trainers, self._lr)
        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._mem_opt:
            tr0_cmd += " --mem_opt"
            tr1_cmd += " --mem_opt"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}
        env0.update(envs)
        env1.update(envs)
        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)
        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break
        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()
        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()
        # Pservers run forever; stop them once both trainers finished.
        ps0.terminate()
        ps1.terminate()
        # print server log
        with open("/tmp/ps0_err.log", "r") as fn:
            sys.stderr.write("ps0 stderr: %s\n" % fn.read())
        with open("/tmp/ps1_err.log", "r") as fn:
            sys.stderr.write("ps1 stderr: %s\n" % fn.read())
        # print log
        if stat0 == 0:
            sys.stderr.write('trainer 0 stdout: %s\n' % pickle.loads(tr0_out))
        with open("/tmp/tr0_err.log", "r") as fn:
            sys.stderr.write('trainer 0 stderr: %s\n' % fn.read())
        if stat1 == 0:
            sys.stderr.write('trainer 1 stdout: %s\n' % pickle.loads(tr1_out))
        with open("/tmp/tr1_err.log", "r") as fn:
            sys.stderr.write('trainer 1 stderr: %s\n' % fn.read())
        return pickle.loads(tr0_out), pickle.loads(tr1_out)
    def _run_cluster_nccl2(self, model, envs, check_error_log):
        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        w0_ep, w1_ep = worker_endpoints
        tr_cmd = "%s %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method nccl2 --lr %f"
        tr0_cmd = tr_cmd % \
            (self._python_interp, model, self._ps_endpoints,
             0, w0_ep, self._lr)
        tr1_cmd = tr_cmd % \
            (self._python_interp, model, self._ps_endpoints,
             1, w1_ep, self._lr)
        if self._mem_opt:
            tr0_cmd += " --mem_opt"
            tr1_cmd += " --mem_opt"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}
        env0.update(envs)
        env1.update(envs)
        print("tr0_cmd:{}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd:{}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)
        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()
        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()
        # print log
        sys.stderr.write('trainer 0 stderr: %s\n' % tr0_err)
        sys.stderr.write('trainer 1 stderr: %s\n' % tr1_err)
        sys.stderr.write('trainer 0 stdout: %s\n' % tr0_out)
        sys.stderr.write('trainer 1 stdout: %s\n' % tr1_out)
        return pickle.loads(tr0_out), pickle.loads(tr1_out)
    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
        # NOTE(review): mutable default argument; harmless here because
        # need_envs is only read, never mutated.
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "FLAGS_cudnn_deterministic": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1"
        }
        required_envs.update(need_envs)
        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"
        local_losses\
            = self._run_local(model_file, required_envs,
                              check_error_log)
        if self._nccl2_mode:
            tr0_losses, tr1_losses = self._run_cluster_nccl2(
                model_file, required_envs, check_error_log)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log)
        # The mean of the two trainers' losses must track the local run
        # step by step within 'delta'.
        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)
| 36.249538 | 138 | 0.56866 |
2c2eabcc9b525cd6f26dce7620922e5abc941e5d | 1,588 | py | Python | processextra.py | juhokokkala/matern32-poisson-sde-stan | 9907b4697b2cc7c735776ba7577ecc5add1e33b5 | [
"MIT"
] | 1 | 2020-03-16T17:55:02.000Z | 2020-03-16T17:55:02.000Z | processextra.py | juhokokkala/matern32-poisson-sde-stan | 9907b4697b2cc7c735776ba7577ecc5add1e33b5 | [
"MIT"
] | null | null | null | processextra.py | juhokokkala/matern32-poisson-sde-stan | 9907b4697b2cc7c735776ba7577ecc5add1e33b5 | [
"MIT"
] | 1 | 2019-10-06T13:52:36.000Z | 2019-10-06T13:52:36.000Z | ################################################################################
# Copyright (C) 2016 Juho Kokkala
#
# This file is licensed under the MIT License.
################################################################################
"""Script for processing the results of the extra test about numeric errors"""
import csv
import numpy as np
from matplotlib import pyplot as plt
def read_output(file):
    """Read a CmdStan CSV output file.

    Comment lines (starting with '#') and blank rows are skipped; the
    first remaining row is the header and every subsequent parseable row
    is converted to floats. Malformed rows (e.g. from an interrupted
    sampler run) are silently dropped, matching CmdStan's loose format.

    Parameters
    ----------
    file : str
        Path to the CmdStan output CSV file.

    Returns
    -------
    header : list of str
        Column names from the header row (empty list if none found).
    data : numpy.ndarray
        2-D array of sampled values, one row per saved iteration.
    """
    header = []
    data = []
    headerfound = False
    # Context manager guarantees the file handle is closed (the original
    # left it open for the lifetime of the process).
    with open(file, 'rt') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        for row in csvreader:
            # Skip blank lines and CmdStan '#' comment/adaptation lines.
            # NOTE: '==' comparison, not 'is' -- identity comparison with a
            # string literal only worked by CPython interning accident.
            if len(row) == 0 or row[0].startswith('#'):
                continue
            if not headerfound:
                header = row
                headerfound = True
            else:
                try:
                    data.append([float(i) for i in row])
                except ValueError:
                    # Tolerate erroneous/truncated rows in the file.
                    pass
    data = np.array(data)
    return header, data
## Load data, check that lp__ is the first
# Compare the basic-GP fit against the SDE formulation; presumably both
# CSVs come from CmdStan runs of the same model -- TODO confirm.
header0,data0 = read_output("basicGP_extra.csv")
header1,data1 = read_output("SDE_extra.csv")
# Sanity check: column 0 is expected to be 'lp__' in both outputs.
print(header0[0])
print(header1[0])
##
# Plot the log-posterior trace of chain steps 11..200 for both variants
# (the first 10 steps are dropped, apparently as early-chain transient).
plt.plot(np.arange(11,201),data0[10:200,0],'b-')
plt.plot(np.arange(11,201),data1[10:200,0],'r--')
plt.ylabel('lp__')
plt.xlabel('Chain step')
plt.legend(['basic GP','SDE'])
plt.show()
0bfee9a3caadd126a21565ba95e70e159c45b255 | 3,701 | py | Python | cekit/descriptor/image.py | crobby/cekit | 129aeeeb0eab6ff445c4a3dfe7b9be5d190cceb0 | [
"MIT"
] | null | null | null | cekit/descriptor/image.py | crobby/cekit | 129aeeeb0eab6ff445c4a3dfe7b9be5d190cceb0 | [
"MIT"
] | null | null | null | cekit/descriptor/image.py | crobby/cekit | 129aeeeb0eab6ff445c4a3dfe7b9be5d190cceb0 | [
"MIT"
] | null | null | null | import copy
import os
import yaml
from cekit.descriptor import Descriptor, Label, Env, Port, Run, Modules, \
Packages, Osbs, Volume, Resource, ExecuteContainer
from cekit.version import version as cekit_version
_image_schema = yaml.safe_load("""
map:
name: {type: str, required: True}
version: {type: text, required: True}
schema_version: {type: int}
release: {type: text}
from: {type: str}
description: {type: text}
labels: {type: any}
envs: {type: any}
ports: {type: any}
run: {type: any}
artifacts: {type: any}
modules: {type: any}
packages: {type: any}
osbs: {type: any}
volumes: {type: any}""")
def get_image_schema():
    """Return an independent deep copy of the base image schema.

    Callers may mutate the returned mapping without affecting the
    module-level ``_image_schema`` template.
    """
    schema_copy = copy.deepcopy(_image_schema)
    return schema_copy
class Image(Descriptor):
    """Descriptor for a container image definition.

    Wraps a parsed image descriptor and converts its raw sections
    (labels, envs, ports, artifacts, modules, ...) into typed
    descriptor objects via _prepare().
    """

    def __init__(self, descriptor, artifact_dir):
        # Directory used to resolve artifact resources.
        self._artifact_dir = artifact_dir
        self.path = artifact_dir
        self.schemas = [_image_schema.copy()]

        super(Image, self).__init__(descriptor)
        # Keys that must always come from this descriptor and are never
        # merged in from overrides.
        self.skip_merging = ['description',
                             'version',
                             'name',
                             'release']
        self._prepare()
        # Placeholder 'execute' section; presumably replaced later when
        # modules contribute real execute steps -- TODO confirm.
        self._descriptor['execute'] = ExecuteContainer([{'name': 'noop'}], 'Image')

    def _prepare(self):
        """Updates self._descriptor with objects and prepare sane label"""

        self._descriptor['labels'] = self._descriptor.get('labels', [])
        # we will persist cekit version in a label here, so we know which version of cekit
        # was used to build the image
        self['labels'].extend([{'name': 'org.concrt.version',
                                'value': cekit_version},
                               {'name': 'io.cekit.version',
                                'value': cekit_version}])

        # The description key available in image descriptor's
        # root is added as labels to the image
        key = 'description'

        # If we define the label in the image descriptor
        # we should *not* override it with value from
        # the root's key
        if key in self._descriptor and not self.label(key):
            value = self._descriptor[key]
            self._descriptor['labels'].append({'name': key, 'value': value})

        # Last - if there is no 'summary' label added to image descriptor
        # we should use the value of the 'description' key and create
        # a 'summary' label with it's content. If there is even that
        # key missing - we should not add anything.
        description = self.label('description')

        if not self.label('summary') and description:
            self._descriptor['labels'].append(
                {'name': 'summary', 'value': description['value']})

        # Convert each raw section into its typed descriptor object.
        self._descriptor['labels'] = [Label(x) for x in self._descriptor.get('labels', [])]
        self._descriptor['envs'] = [Env(x) for x in self._descriptor.get('envs', [])]
        self._descriptor['ports'] = [Port(x) for x in self._descriptor.get('ports', [])]
        if 'run' in self._descriptor:
            self._descriptor['run'] = Run(self._descriptor['run'])
        self._descriptor['artifacts'] = [Resource(a, directory=self._artifact_dir)
                                         for a in self._descriptor.get('artifacts', [])]
        if 'modules' in self._descriptor:
            self._descriptor['modules'] = Modules(self._descriptor['modules'], self.path)
        if 'packages' in self._descriptor:
            self._descriptor['packages'] = Packages(self._descriptor['packages'])
        if 'osbs' in self._descriptor:
            self._descriptor['osbs'] = Osbs(self._descriptor['osbs'])
        self._descriptor['volumes'] = [Volume(x) for x in self._descriptor.get('volumes', [])]
| 40.67033 | 94 | 0.600919 |
20093b291281c6dc9c5554c762be6181e3967908 | 64,037 | py | Python | salt/transport/tcp.py | eiginn/salt | fae5bc757ad0f1716483ce7ae180b451545c2058 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | salt/transport/tcp.py | eiginn/salt | fae5bc757ad0f1716483ce7ae180b451545c2058 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | salt/transport/tcp.py | eiginn/salt | fae5bc757ad0f1716483ce7ae180b451545c2058 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
"""
import errno
import logging
import os
import queue
import socket
import threading
import time
import traceback
import urllib.parse
import salt.crypt
import salt.exceptions
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.iostream
import salt.ext.tornado.netutil
import salt.ext.tornado.tcpclient
import salt.ext.tornado.tcpserver
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.transport.ipc
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.utils.versions
from salt.exceptions import SaltClientError, SaltReqTimeoutError
from salt.transport import iter_transport_opts
# Prefer M2Crypto for RSA; HAS_M2 selects the decrypt path used in
# crypted_transfer_decode_dictentry below.
try:
    from M2Crypto import RSA

    HAS_M2 = True
except ImportError:
    HAS_M2 = False
# Fall back from PyCryptodome to legacy PyCrypto if needed.
try:
    from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
    from Crypto.Cipher import PKCS1_OAEP  # nosec
# On Windows sockets cannot be shared across forked workers, so a
# dedicated load-balancer process accepts connections and hands them out.
if salt.utils.platform.is_windows():
    USE_LOAD_BALANCER = True
else:
    USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
    import threading
    import multiprocessing
    import salt.ext.tornado.util

    from salt.utils.process import SignalHandlingProcess

log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
"""
Ensure that TCP keepalives are set for the socket.
"""
if hasattr(socket, "SO_KEEPALIVE"):
if opts.get("tcp_keepalive", False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "SOL_TCP"):
if hasattr(socket, "TCP_KEEPIDLE"):
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)
)
if hasattr(socket, "TCP_KEEPCNT"):
tcp_keepalive_cnt = opts.get("tcp_keepalive_cnt", -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)
)
if hasattr(socket, "TCP_KEEPINTVL"):
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP,
socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl),
)
if hasattr(socket, "SIO_KEEPALIVE_VALS"):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(
socket.SIO_KEEPALIVE_VALS,
(
1,
int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000),
),
)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
if USE_LOAD_BALANCER:

    class LoadBalancerServer(SignalHandlingProcess):
        """
        Raw TCP server which runs in its own process and will listen
        for incoming connections. Each incoming connection will be
        sent via multiprocessing queue to the workers.
        Since the queue is shared amongst workers, only one worker will
        handle a given connection.
        """

        # TODO: opts!
        # Based on default used in salt.ext.tornado.netutil.bind_sockets()
        backlog = 128

        def __init__(self, opts, socket_queue, **kwargs):
            """
            :param dict opts: master config; 'interface' and 'ret_port'
                determine the bind address.
            :param multiprocessing.Queue socket_queue: accepted
                (connection, address) pairs are pushed here for workers.
            """
            super().__init__(**kwargs)
            self.opts = opts
            self.socket_queue = socket_queue
            self._socket = None

        def close(self):
            # Shut down and release the listening socket, if bound.
            if self._socket is not None:
                self._socket.shutdown(socket.SHUT_RDWR)
                self._socket.close()
                self._socket = None

        # pylint: disable=W1701
        def __del__(self):
            self.close()

        # pylint: enable=W1701

        def run(self):
            """
            Start the load balancer
            """
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            # Blocking accept loop -- this process does nothing else.
            self._socket.setblocking(1)
            self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
            self._socket.listen(self.backlog)

            while True:
                try:
                    # Wait for a connection to occur since the socket is
                    # blocking.
                    connection, address = self._socket.accept()
                    # Wait for a free slot to be available to put
                    # the connection into.
                    # Sockets are picklable on Windows in Python 3.
                    self.socket_queue.put((connection, address), True, None)
                except OSError as e:
                    # ECONNABORTED indicates that there was a connection
                    # but it was closed while still in the accept queue.
                    # (observed on FreeBSD).
                    if (
                        salt.ext.tornado.util.errno_from_exception(e)
                        == errno.ECONNABORTED
                    ):
                        continue
                    raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
    """
    Encapsulate sending routines to tcp.

    NOTE(review): the original docstring claimed this class returns a
    singleton; nothing in this implementation enforces that -- presumably
    handled by a factory/cache elsewhere. TODO confirm.
    """

    # Method names wrapped for async execution by SyncWrapper machinery.
    async_methods = [
        "crypted_transfer_decode_dictentry",
        "_crypted_transfer",
        "_uncrypted_transfer",
        "send",
    ]
    close_methods = [
        "close",
    ]

    def __init__(self, opts, **kwargs):
        """
        :param dict opts: minion config; 'master_uri' locates the master.
        Recognized kwargs: master_uri, crypt ('aes' or 'clear'), io_loop,
        resolver.
        """
        self.opts = dict(opts)
        if "master_uri" in kwargs:
            self.opts["master_uri"] = kwargs["master_uri"]

        # crypt defaults to 'aes'
        self.crypt = kwargs.get("crypt", "aes")

        self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()

        # 'clear' channels skip key negotiation entirely.
        if self.crypt != "clear":
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)

        resolver = kwargs.get("resolver")

        parse = urllib.parse.urlparse(self.opts["master_uri"])
        # rsplit handles hosts that themselves contain ':' (e.g. IPv6).
        master_host, master_port = parse.netloc.rsplit(":", 1)
        self.master_addr = (master_host, int(master_port))
        self._closing = False
        self.message_client = SaltMessageClientPool(
            self.opts,
            args=(
                self.opts,
                master_host,
                int(master_port),
            ),
            kwargs={
                "io_loop": self.io_loop,
                "resolver": resolver,
                "source_ip": self.opts.get("source_ip"),
                "source_port": self.opts.get("source_ret_port"),
            },
        )

    def close(self):
        """Tear down the underlying message client pool (idempotent)."""
        if self._closing:
            return
        log.debug("Closing %s instance", self.__class__.__name__)
        self._closing = True
        self.message_client.close()

    # pylint: disable=W1701
    def __del__(self):
        try:
            self.close()
        except OSError as exc:
            if exc.errno != errno.EBADF:
                # If its not a bad file descriptor error, raise
                raise

    # pylint: enable=W1701

    def _package_load(self, load):
        # Wrap the (possibly encrypted) load with its encryption marker.
        return {
            "enc": self.crypt,
            "load": load,
        }

    @salt.ext.tornado.gen.coroutine
    def crypted_transfer_decode_dictentry(
        self, load, dictkey=None, tries=3, timeout=60
    ):
        """
        Send an encrypted request and decrypt a single returned dict entry.

        The master replies with an RSA-encrypted session key under 'key';
        ret[dictkey] is decrypted with that session key and returned.
        """
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        ret = yield self.message_client.send(
            self._package_load(self.auth.crypticle.dumps(load)),
            timeout=timeout,
            tries=tries,
        )
        key = self.auth.get_keys()
        # Decrypt the session key with whichever RSA backend is available.
        if HAS_M2:
            aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
        else:
            cipher = PKCS1_OAEP.new(key)
            aes = cipher.decrypt(ret["key"])
        pcrypt = salt.crypt.Crypticle(self.opts, aes)
        data = pcrypt.loads(ret[dictkey])
        data = salt.transport.frame.decode_embedded_strs(data)
        raise salt.ext.tornado.gen.Return(data)

    @salt.ext.tornado.gen.coroutine
    def _crypted_transfer(self, load, tries=3, timeout=60):
        """
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        Indeed, we can fail too early in case of a master restart during a
        minion state execution call
        """

        @salt.ext.tornado.gen.coroutine
        def _do_transfer():
            data = yield self.message_client.send(
                self._package_load(self.auth.crypticle.dumps(load)),
                timeout=timeout,
                tries=tries,
            )
            # we may not have always data
            # as for example for saltcall ret submission, this is a blind
            # communication, we do not subscribe to return events, we just
            # upload the results to the master
            if data:
                data = self.auth.crypticle.loads(data)
                data = salt.transport.frame.decode_embedded_strs(data)
            raise salt.ext.tornado.gen.Return(data)

        if not self.auth.authenticated:
            yield self.auth.authenticate()
        try:
            ret = yield _do_transfer()
            raise salt.ext.tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            # Re-auth once (master may have restarted) and retry.
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise salt.ext.tornado.gen.Return(ret)

    @salt.ext.tornado.gen.coroutine
    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        """Send a request without encryption (crypt == 'clear')."""
        ret = yield self.message_client.send(
            self._package_load(load),
            timeout=timeout,
            tries=tries,
        )

        raise salt.ext.tornado.gen.Return(ret)

    @salt.ext.tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        """
        Send a request, return a future which will complete when we send the message
        """
        try:
            if self.crypt == "clear":
                ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
            else:
                ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
        except salt.ext.tornado.iostream.StreamClosedError:
            # Convert to 'SaltClientError' so that clients can handle this
            # exception more appropriately.
            raise SaltClientError("Connection to master lost")
        raise salt.ext.tornado.gen.Return(ret)
class AsyncTCPPubChannel(
    salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
    """
    Minion-side subscriber for the master's TCP publish port.

    Connects to the master's publish port, registers the minion id, and
    dispatches decrypted publish payloads to a registered callback.
    """

    # Method names wrapped for async execution by SyncWrapper machinery.
    async_methods = [
        "send_id",
        "connect_callback",
        "connect",
    ]
    close_methods = [
        "close",
    ]

    def __init__(self, opts, **kwargs):
        self.opts = opts
        self.crypt = kwargs.get("crypt", "aes")
        self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
        self.connected = False
        self._closing = False
        # Tracks whether we have connected before; used to force re-auth
        # and fire a 'start' event on reconnects.
        self._reconnected = False
        self.message_client = None
        self.event = salt.utils.event.get_event("minion", opts=self.opts, listen=False)

    def close(self):
        """Close the message client and local event interface (idempotent)."""
        if self._closing:
            return
        self._closing = True
        if self.message_client is not None:
            self.message_client.close()
            self.message_client = None
        if self.event is not None:
            self.event.destroy()
            self.event = None

    # pylint: disable=W1701
    def __del__(self):
        self.close()

    # pylint: enable=W1701

    def _package_load(self, load):
        # Wrap the load with its encryption marker.
        return {
            "enc": self.crypt,
            "load": load,
        }

    @salt.ext.tornado.gen.coroutine
    def send_id(self, tok, force_auth):
        """
        Send the minion id to the master so that the master may better
        track the connection state of the minion.
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        """
        load = {"id": self.opts["id"], "tok": tok}

        @salt.ext.tornado.gen.coroutine
        def _do_transfer():
            msg = self._package_load(self.auth.crypticle.dumps(load))
            package = salt.transport.frame.frame_msg(msg, header=None)
            yield self.message_client.write_to_stream(package)
            raise salt.ext.tornado.gen.Return(True)

        if force_auth or not self.auth.authenticated:
            count = 0
            # A negative retry count means retry forever.
            while (
                count <= self.opts["tcp_authentication_retries"]
                or self.opts["tcp_authentication_retries"] < 0
            ):
                try:
                    yield self.auth.authenticate()
                    break
                except SaltClientError as exc:
                    log.debug(exc)
                    count += 1
        try:
            ret = yield _do_transfer()
            raise salt.ext.tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise salt.ext.tornado.gen.Return(ret)

    @salt.ext.tornado.gen.coroutine
    def connect_callback(self, result):
        """Invoked by the message client once the TCP connection is up."""
        if self._closing:
            return
        # Force re-auth on reconnect since the master
        # may have been restarted
        yield self.send_id(self.tok, self._reconnected)
        self.connected = True
        self.event.fire_event({"master": self.opts["master"]}, "__master_connected")
        if self._reconnected:
            # On reconnects, fire a master event to notify that the minion is
            # available.
            if self.opts.get("__role") == "syndic":
                data = "Syndic {} started at {}".format(self.opts["id"], time.asctime())
                tag = salt.utils.event.tagify([self.opts["id"], "start"], "syndic")
            else:
                data = "Minion {} started at {}".format(self.opts["id"], time.asctime())
                tag = salt.utils.event.tagify([self.opts["id"], "start"], "minion")
            load = {
                "id": self.opts["id"],
                "cmd": "_minion_event",
                "pretag": None,
                "tok": self.tok,
                "data": data,
                "tag": tag,
            }
            req_channel = salt.utils.asynchronous.SyncWrapper(
                AsyncTCPReqChannel,
                (self.opts,),
                loop_kwarg="io_loop",
            )
            try:
                req_channel.send(load, timeout=60)
            except salt.exceptions.SaltReqTimeoutError:
                log.info(
                    "fire_master failed: master could not be contacted. Request timed"
                    " out."
                )
            except Exception:  # pylint: disable=broad-except
                log.info("fire_master failed: %s", traceback.format_exc())
            finally:
                # SyncWrapper will call either close() or destroy(), whichever is available
                del req_channel
        else:
            self._reconnected = True

    def disconnect_callback(self):
        """Invoked by the message client when the TCP connection drops."""
        if self._closing:
            return
        self.connected = False
        self.event.fire_event({"master": self.opts["master"]}, "__master_disconnected")

    @salt.ext.tornado.gen.coroutine
    def connect(self):
        """Authenticate to the master and connect to its publish port."""
        try:
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
            self.tok = self.auth.gen_token(b"salt")
            if not self.auth.authenticated:
                yield self.auth.authenticate()
            if self.auth.authenticated:
                # if this is changed from the default, we assume it was intentional
                if int(self.opts.get("publish_port", 4505)) != 4505:
                    self.publish_port = self.opts.get("publish_port")
                # else take the relayed publish_port master reports
                else:
                    self.publish_port = self.auth.creds["publish_port"]

                self.message_client = SaltMessageClientPool(
                    self.opts,
                    args=(self.opts, self.opts["master_ip"], int(self.publish_port)),
                    kwargs={
                        "io_loop": self.io_loop,
                        "connect_callback": self.connect_callback,
                        "disconnect_callback": self.disconnect_callback,
                        "source_ip": self.opts.get("source_ip"),
                        "source_port": self.opts.get("source_publish_port"),
                    },
                )
                yield self.message_client.connect()  # wait for the client to be connected
                self.connected = True
        # TODO: better exception handling...
        except KeyboardInterrupt:  # pylint: disable=try-except-raise
            raise
        except Exception as exc:  # pylint: disable=broad-except
            if "-|RETRY|-" not in str(exc):
                raise SaltClientError(
                    "Unable to sign_in to master: {}".format(exc)
                )  # TODO: better error message

    def on_recv(self, callback):
        """
        Register an on_recv callback
        """
        if callback is None:
            return self.message_client.on_recv(callback)

        @salt.ext.tornado.gen.coroutine
        def wrap_callback(body):
            if not isinstance(body, dict):
                # TODO: For some reason we need to decode here for things
                # to work. Fix this.
                body = salt.utils.msgpack.loads(body)
                body = salt.transport.frame.decode_embedded_strs(body)
            ret = yield self._decode_payload(body)
            callback(ret)

        return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(
    salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
    """
    Server side of the TCP request/reply channel.

    Binds the master's ``ret_port`` (directly, or via the Windows load
    balancer) and dispatches incoming request payloads to the payload
    handler installed in post_fork().
    """

    # TODO: opts!
    backlog = 5

    def __init__(self, opts):
        salt.transport.server.ReqServerChannel.__init__(self, opts)
        self._socket = None
        self.req_server = None

    @property
    def socket(self):
        # The raw listening socket (None until pre_fork/post_fork bind it).
        return self._socket

    def close(self):
        """Shut down the listening socket and message server (idempotent)."""
        if self._socket is not None:
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
            except OSError as exc:
                if exc.errno == errno.ENOTCONN:
                    # We may try to shutdown a socket which is already disconnected.
                    # Ignore this condition and continue.
                    pass
                else:
                    raise
            if self.req_server is None:
                # We only close the socket if we don't have a req_server instance.
                # If we did, because the req_server is also handling this socket, when we call
                # req_server.stop(), tornado will give us an AssertionError because it's trying to
                # match the socket.fileno() (after close it's -1) to the fd it holds on it's _sockets cache
                # so it can remove the socket from the IOLoop handlers
                self._socket.close()
            self._socket = None
        if self.req_server is not None:
            try:
                self.req_server.close()
            except OSError as exc:
                if exc.errno != 9:
                    raise
                log.exception(
                    "TCPReqServerChannel close generated an exception: %s", str(exc)
                )
            self.req_server = None

    # pylint: disable=W1701
    def __del__(self):
        self.close()

    # pylint: enable=W1701

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def pre_fork(self, process_manager):
        """
        Pre-fork we need to create the zmq router device
        """
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        if USE_LOAD_BALANCER:
            self.socket_queue = multiprocessing.Queue()
            process_manager.add_process(
                LoadBalancerServer,
                args=(self.opts, self.socket_queue),
                name="LoadBalancerServer",
            )
        elif not salt.utils.platform.is_windows():
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(0)
            self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))

    def post_fork(self, payload_handler, io_loop):
        """
        After forking we need to create all of the local sockets to listen to the
        router

        payload_handler: function to call with your payloads
        """
        if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
            log.info(
                "setting Publish daemon niceness to %i",
                self.opts["pub_server_niceness"],
            )
            os.nice(self.opts["pub_server_niceness"])

        self.payload_handler = payload_handler
        self.io_loop = io_loop
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if USE_LOAD_BALANCER:
                self.req_server = LoadBalancerWorker(
                    self.socket_queue,
                    self.handle_message,
                    ssl_options=self.opts.get("ssl"),
                )
            else:
                if salt.utils.platform.is_windows():
                    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    _set_tcp_keepalive(self._socket, self.opts)
                    self._socket.setblocking(0)
                    self._socket.bind(
                        (self.opts["interface"], int(self.opts["ret_port"]))
                    )
                self.req_server = SaltMessageServer(
                    self.handle_message,
                    ssl_options=self.opts.get("ssl"),
                    io_loop=self.io_loop,
                )
                self.req_server.add_socket(self._socket)
                self._socket.listen(self.backlog)
        salt.transport.mixins.auth.AESReqServerMixin.post_fork(
            self, payload_handler, io_loop
        )

    @salt.ext.tornado.gen.coroutine
    def handle_message(self, stream, header, payload):
        """
        Handle incoming messages from underlying tcp streams
        """
        try:
            try:
                payload = self._decode_payload(payload)
            except Exception:  # pylint: disable=broad-except
                stream.write(salt.transport.frame.frame_msg("bad load", header=header))
                raise salt.ext.tornado.gen.Return()

            # TODO helper functions to normalize payload?
            if not isinstance(payload, dict) or not isinstance(
                payload.get("load"), dict
            ):
                yield stream.write(
                    salt.transport.frame.frame_msg(
                        "payload and load must be a dict", header=header
                    )
                )
                raise salt.ext.tornado.gen.Return()

            try:
                id_ = payload["load"].get("id", "")
                if "\0" in id_:
                    log.error("Payload contains an id with a null byte: %s", payload)
                    # BUGFIX: IOStream has no send(); use write().
                    stream.write(
                        salt.payload.dumps("bad load: id contains a null byte")
                    )
                    raise salt.ext.tornado.gen.Return()
            except TypeError:
                log.error("Payload contains non-string id: %s", payload)
                # BUGFIX: IOStream has no send(); use write().
                stream.write(
                    salt.payload.dumps("bad load: id {} is not a string".format(id_))
                )
                raise salt.ext.tornado.gen.Return()

            # intercept the "_auth" commands, since the main daemon shouldn't know
            # anything about our key auth
            if (
                payload["enc"] == "clear"
                and payload.get("load", {}).get("cmd") == "_auth"
            ):
                yield stream.write(
                    salt.transport.frame.frame_msg(
                        self._auth(payload["load"]), header=header
                    )
                )
                raise salt.ext.tornado.gen.Return()

            # TODO: test
            try:
                ret, req_opts = yield self.payload_handler(payload)
            except Exception:  # pylint: disable=broad-except
                # always attempt to return an error to the minion
                # (bytes: IOStream.write() rejects str)
                stream.write(b"Some exception handling minion payload")
                log.error(
                    "Some exception handling a payload from minion", exc_info=True
                )
                stream.close()
                raise salt.ext.tornado.gen.Return()

            # 'fun' in req_opts selects how the reply is protected.
            req_fun = req_opts.get("fun", "send")
            if req_fun == "send_clear":
                stream.write(salt.transport.frame.frame_msg(ret, header=header))
            elif req_fun == "send":
                stream.write(
                    salt.transport.frame.frame_msg(
                        self.crypticle.dumps(ret), header=header
                    )
                )
            elif req_fun == "send_private":
                stream.write(
                    salt.transport.frame.frame_msg(
                        self._encrypt_private(
                            ret,
                            req_opts["key"],
                            req_opts["tgt"],
                        ),
                        header=header,
                    )
                )
            else:
                log.error("Unknown req_fun %s", req_fun)
                # always attempt to return an error to the minion
                # (bytes: IOStream.write() rejects str)
                stream.write(b"Server-side exception handling payload")
                stream.close()
        except salt.ext.tornado.gen.Return:
            raise
        except salt.ext.tornado.iostream.StreamClosedError:
            # Stream was closed. This could happen if the remote side
            # closed the connection on its end (eg in a timeout or shutdown
            # situation).
            log.error("Connection was unexpectedly closed", exc_info=True)
        except Exception as exc:  # pylint: disable=broad-except
            # Absorb any other exceptions
            log.error("Unexpected exception occurred: %s", exc, exc_info=True)

        raise salt.ext.tornado.gen.Return()
class SaltMessageServer(salt.ext.tornado.tcpserver.TCPServer):
    """
    Raw TCP server which will receive all of the TCP streams and re-assemble
    messages that are sent through to us
    """

    def __init__(self, message_handler, *args, **kwargs):
        """
        :param message_handler: coroutine called as
            ``message_handler(stream, header, body)`` for every framed
            message received on any client stream.
        """
        io_loop = (
            kwargs.pop("io_loop", None) or salt.ext.tornado.ioloop.IOLoop.current()
        )
        self._closing = False
        super().__init__(*args, **kwargs)
        self.io_loop = io_loop
        # Open (stream, address) pairs for connected clients.
        self.clients = []
        self.message_handler = message_handler

    @salt.ext.tornado.gen.coroutine
    def handle_stream(self, stream, address):
        """
        Handle incoming streams and add messages to the incoming queue
        """
        log.trace("Req client %s connected", address)
        self.clients.append((stream, address))
        unpacker = salt.utils.msgpack.Unpacker()
        try:
            while True:
                wire_bytes = yield stream.read_bytes(4096, partial=True)
                unpacker.feed(wire_bytes)
                # The unpacker yields each complete msgpack frame as it
                # becomes available; partial frames wait for more bytes.
                for framed_msg in unpacker:
                    framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
                    header = framed_msg["head"]
                    self.io_loop.spawn_callback(
                        self.message_handler, stream, header, framed_msg["body"]
                    )
        except salt.ext.tornado.iostream.StreamClosedError:
            log.trace("req client disconnected %s", address)
            self.remove_client((stream, address))
        except Exception as e:  # pylint: disable=broad-except
            log.trace("other master-side exception: %s", e)
            self.remove_client((stream, address))
            stream.close()

    def remove_client(self, client):
        """Forget a (stream, address) pair; tolerate it already being gone."""
        try:
            self.clients.remove(client)
        except ValueError:
            log.trace("Message server client was not in list to remove")

    def shutdown(self):
        """
        Shutdown the whole server
        """
        salt.utils.versions.warn_until(
            "Phosphorus",
            "Please stop calling {0}.{1}.shutdown() and instead call {0}.{1}.close()".format(
                __name__, self.__class__.__name__
            ),
        )
        self.close()

    def close(self):
        """
        Close the server
        """
        if self._closing:
            return
        self._closing = True
        # BUGFIX: iterate over a snapshot -- remove_client() mutates
        # self.clients, and mutating the list while iterating it skips
        # every other client (leaving streams open on shutdown).
        for item in list(self.clients):
            client, address = item
            client.close()
            self.remove_client(item)
        try:
            self.stop()
        except OSError as exc:
            if exc.errno != 9:
                raise
if USE_LOAD_BALANCER:

    class LoadBalancerWorker(SaltMessageServer):
        """
        This will receive TCP connections from 'LoadBalancerServer' via
        a multiprocessing queue.
        Since the queue is shared amongst workers, only one worker will handle
        a given connection.
        """

        def __init__(self, socket_queue, message_handler, *args, **kwargs):
            # A dedicated thread drains the shared queue and hands each
            # accepted socket to the tornado IO loop of this worker.
            super().__init__(message_handler, *args, **kwargs)
            self.socket_queue = socket_queue
            self._stop = threading.Event()
            self.thread = threading.Thread(target=self.socket_queue_thread)
            self.thread.start()

        def stop(self):
            # Deprecated alias for close(); kept for backwards compat.
            salt.utils.versions.warn_until(
                "Phosphorus",
                "Please stop calling {0}.{1}.stop() and instead call {0}.{1}.close()".format(
                    __name__, self.__class__.__name__
                ),
            )
            self.close()

        def close(self):
            # Signal the queue-draining thread, wait for it, then close
            # the underlying message server.
            self._stop.set()
            self.thread.join()
            super().close()

        def socket_queue_thread(self):
            """Blocking loop: pull accepted sockets off the shared queue."""
            try:
                while True:
                    try:
                        client_socket, address = self.socket_queue.get(True, 1)
                    except queue.Empty:
                        # Timeout lets us periodically check the stop flag.
                        if self._stop.is_set():
                            break
                        continue
                    # 'self.io_loop' initialized in super class
                    # 'salt.ext.tornado.tcpserver.TCPServer'.
                    # 'self._handle_connection' defined in same super class.
                    self.io_loop.spawn_callback(
                        self._handle_connection, client_socket, address
                    )
            except (KeyboardInterrupt, SystemExit):
                pass
class TCPClientKeepAlive(salt.ext.tornado.tcpclient.TCPClient):
    """
    TCPClient subclass whose streams are created with the TCP keepalive
    options from ``opts`` applied (see ``_set_tcp_keepalive``).
    """

    def __init__(self, opts, resolver=None):
        self.opts = opts
        super().__init__(resolver=resolver)

    def _create_stream(
        self, max_buffer_size, af, addr, **kwargs
    ):  # pylint: disable=unused-argument,arguments-differ
        """
        Build the IOStream for a new outgoing connection.

        Tornado 4.5 added the kwargs 'source_ip' and 'source_port';
        ``**kwargs`` swallows these (and any future additions) to stay
        compatible across versions. The connection is always made in
        plaintext -- SSL wrapping, if needed, happens after the first
        connection has completed.
        """
        raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _set_tcp_keepalive(raw_sock, self.opts)
        iostream = salt.ext.tornado.iostream.IOStream(
            raw_sock, max_buffer_size=max_buffer_size
        )
        connect_future = iostream.connect(addr)
        # Tornado < 5 expects only the future; >= 5 expects (stream, future).
        if salt.ext.tornado.version_info < (5,):
            return connect_future
        return iostream, connect_future
class SaltMessageClientPool(salt.transport.MessageClientPool):
    """
    Pool of SaltMessageClient instances. Writes are routed to the member
    with the shortest outgoing queue so one slow socket does not block
    every send.
    """

    def __init__(self, opts, args=None, kwargs=None):
        super().__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    # pylint: disable=W1701
    def __del__(self):
        self.close()

    # pylint: enable=W1701

    def close(self):
        """Close every pooled client and drop all references to them."""
        for client in self.message_clients:
            client.close()
        self.message_clients = []

    @salt.ext.tornado.gen.coroutine
    def connect(self):
        """Start every client's connect and wait for all of them at once."""
        pending = [client.connect() for client in self.message_clients]
        yield pending
        raise salt.ext.tornado.gen.Return(None)

    def on_recv(self, *args, **kwargs):
        """Register the receive callback on every pooled client."""
        for client in self.message_clients:
            client.on_recv(*args, **kwargs)

    def _least_busy(self):
        # Pick the client with the fewest queued messages (stable order).
        ranked = sorted(self.message_clients, key=lambda c: len(c.send_queue))
        return ranked[0]

    def send(self, *args, **kwargs):
        """Send through the least-loaded pooled client."""
        return self._least_busy().send(*args, **kwargs)

    def write_to_stream(self, *args, **kwargs):
        """Write raw bytes on the least-loaded client's stream."""
        return self._least_busy()._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient:
"""
Low-level message sending client
"""
    def __init__(
        self,
        opts,
        host,
        port,
        io_loop=None,
        resolver=None,
        connect_callback=None,
        disconnect_callback=None,
        source_ip=None,
        source_port=None,
    ):
        """
        :param dict opts: config dict (reads 'tcp_reconnect_backoff').
        :param str host: remote host to connect to.
        :param int port: remote TCP port.
        :param connect_callback: called once a connection is established.
        :param disconnect_callback: called when the connection drops.
        """
        self.opts = opts
        self.host = host
        self.port = port
        self.source_ip = source_ip
        self.source_port = source_port
        self.connect_callback = connect_callback
        self.disconnect_callback = disconnect_callback

        self.io_loop = io_loop or salt.ext.tornado.ioloop.IOLoop.current()

        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)

        # Monotonically increasing message id, wrapped before 2**31.
        self._mid = 1
        self._max_messages = int((1 << 31) - 2)  # number of IDs before we wrap
        # TODO: max queue size
        self.send_queue = []  # queue of messages to be sent
        self.send_future_map = {}  # mapping of request_id -> Future
        self.send_timeout_map = {}  # request_id -> timeout_callback

        self._read_until_future = None
        self._on_recv = None
        self._closing = False
        # Kick off the initial connection and the background reader.
        self._connecting_future = self.connect()
        self._stream_return_future = salt.ext.tornado.concurrent.Future()
        self.io_loop.spawn_callback(self._stream_return)

        self.backoff = opts.get("tcp_reconnect_backoff", 1)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "_stream") and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = salt.ext.tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
if (
self.io_loop
!= salt.ext.tornado.ioloop.IOLoop.current(instance=False)
or not self._stream_return_future.done()
):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self._stop_io_loop(),
)
self.io_loop.start()
except Exception as e: # pylint: disable=broad-except
log.info("Exception caught in SaltMessageClient.close: %s", str(e))
finally:
orig_loop.make_current()
self._tcp_client.close()
self.io_loop = None
self._read_until_future = None
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def connect(self):
"""
Ask for this client to reconnect to the origin
"""
if hasattr(self, "_connecting_future") and not self._connecting_future.done():
future = self._connecting_future
else:
future = salt.ext.tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
@salt.ext.tornado.gen.coroutine
def _connect(self):
"""
Try to connect for the rest of time!
"""
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if salt.ext.tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {
"source_ip": self.source_ip,
"source_port": self.source_port,
}
else:
log.warning(
"If you need a certain source IP/port, consider upgrading"
" Tornado >= 4.5"
)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(
self.host, self.port, ssl_options=self.opts.get("ssl"), **kwargs
)
self._connecting_future.set_result(True)
break
except Exception as exc: # pylint: disable=broad-except
log.warning(
"TCP Message Client encountered an exception while connecting to"
" %s:%s: %r, will reconnect in %d seconds",
self.host,
self.port,
exc,
self.backoff,
)
yield salt.ext.tornado.gen.sleep(self.backoff)
# self._connecting_future.set_exception(exc)
@salt.ext.tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(
4096, partial=True
)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg["head"]
body = framed_msg["body"]
message_id = header.get("mid")
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error(
"Got response for message_id %s that we are not"
" tracking",
message_id,
)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug(
"tcp stream to %s:%s closed, unable to recv",
self.host,
self.port,
)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if "detect_mode" in self.opts:
log.info(
"There was an error trying to use TCP transport; "
"attempting to fallback to another transport"
)
else:
raise SaltClientError
except Exception as e: # pylint: disable=broad-except
log.error("Exception parsing response", exc_info=True)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@salt.ext.tornado.gen.coroutine
def _stream_send(self):
while (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except salt.ext.tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception("Unable to find available messageid")
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
"""
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id, msg):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
future = self.send_future_map.pop(message_id)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
if future.attempts < future.tries:
future.attempts += 1
log.debug(
"SaltReqTimeoutError, retrying. (%s/%s)",
future.attempts,
future.tries,
)
self.send(
msg,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError("Message timed out"))
def send(self, msg, timeout=None, callback=None, raw=False, future=None, tries=3):
"""
Send given message, and return a future
"""
message_id = self._message_id()
header = {"mid": message_id}
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message_id, msg
)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append(
(message_id, salt.transport.frame.frame_msg(msg, header=header))
)
return future
class Subscriber:
    """
    Client object for use with the TCP publisher server
    """

    def __init__(self, stream, address):
        # IOStream for this subscriber plus its peer address.
        self.stream = stream
        self.address = address
        self._closing = False
        self._read_until_future = None
        # Minion id; filled in once the connecting minion authenticates.
        self.id_ = None

    def close(self):
        """Close the subscriber's stream exactly once (idempotent)."""
        if self._closing:
            return
        self._closing = True
        if self.stream.closed():
            return
        self.stream.close()
        pending_read = self._read_until_future
        if pending_read is not None and pending_read.done():
            # Retrieve the (expected) StreamClosedError now so tornado does
            # not log '[ERROR ] Future exception was never retrieved' for
            # the read that was in flight when we closed the stream.
            pending_read.exception()

    # pylint: disable=W1701
    def __del__(self):
        self.close()

    # pylint: enable=W1701
class PubServer(salt.ext.tornado.tcpserver.TCPServer):
    """
    TCP publisher

    Accepts subscriber (minion) connections, tracks which minion ids are
    present, and fans published payloads out to the connected clients.
    """

    def __init__(self, opts, io_loop=None):
        super().__init__(ssl_options=opts.get("ssl"))
        self.io_loop = io_loop
        self.opts = opts
        self._closing = False
        self.clients = set()
        self.aes_funcs = salt.master.AESFuncs(self.opts)
        # minion id -> set of Subscriber objects currently connected as it
        self.present = {}
        self.event = None
        self.presence_events = False
        if self.opts.get("presence_events", False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != "tcp":
                    tcp_only = False
            if tcp_only:
                # Only when the transport is TCP only, the presence events will
                # be handled here. Otherwise, it will be handled in the
                # 'Maintenance' process.
                self.presence_events = True
        if self.presence_events:
            self.event = salt.utils.event.get_event(
                "master", opts=self.opts, listen=False
            )
        else:
            self.event = None

    def close(self):
        """Release the event bus and AES helpers; idempotent."""
        if self._closing:
            return
        self._closing = True
        if self.event is not None:
            self.event.destroy()
            self.event = None
        if self.aes_funcs is not None:
            self.aes_funcs.destroy()
            self.aes_funcs = None

    # pylint: disable=W1701
    def __del__(self):
        self.close()

    # pylint: enable=W1701
    def _add_client_present(self, client):
        # Record *client* under its minion id and, when presence events are
        # enabled and the id is new, announce the change on the event bus.
        id_ = client.id_
        if id_ in self.present:
            clients = self.present[id_]
            clients.add(client)
        else:
            self.present[id_] = {client}
            if self.presence_events:
                data = {"new": [id_], "lost": []}
                self.event.fire_event(
                    data, salt.utils.event.tagify("change", "presence")
                )
                data = {"present": list(self.present.keys())}
                self.event.fire_event(
                    data, salt.utils.event.tagify("present", "presence")
                )

    def _remove_client_present(self, client):
        # Drop *client* from the presence map, firing presence events when
        # its minion id has no remaining connections.
        id_ = client.id_
        if id_ is None or id_ not in self.present:
            # This is possible if _remove_client_present() is invoked
            # before the minion's id is validated.
            return
        clients = self.present[id_]
        if client not in clients:
            # Since _remove_client_present() is potentially called from
            # _stream_read() and/or publish_payload(), it is possible for
            # it to be called twice, in which case we will get here.
            # This is not an abnormal case, so no logging is required.
            return
        clients.remove(client)
        if len(clients) == 0:
            del self.present[id_]
            if self.presence_events:
                data = {"new": [], "lost": [id_]}
                self.event.fire_event(
                    data, salt.utils.event.tagify("change", "presence")
                )
                data = {"present": list(self.present.keys())}
                self.event.fire_event(
                    data, salt.utils.event.tagify("present", "presence")
                )

    @salt.ext.tornado.gen.coroutine
    def _stream_read(self, client):
        """
        Per-subscriber read loop: the only expected inbound traffic is an
        aes-encrypted 'id' payload, which is verified via AESFuncs and then
        used to mark the minion present.
        """
        unpacker = salt.utils.msgpack.Unpacker()
        while not self._closing:
            try:
                client._read_until_future = client.stream.read_bytes(4096, partial=True)
                wire_bytes = yield client._read_until_future
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
                    body = framed_msg["body"]
                    if body["enc"] != "aes":
                        # We only accept 'aes' encoded messages for 'id'
                        continue
                    crypticle = salt.crypt.Crypticle(
                        self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
                    )
                    load = crypticle.loads(body["load"])
                    load = salt.transport.frame.decode_embedded_strs(load)
                    if not self.aes_funcs.verify_minion(load["id"], load["tok"]):
                        # Token did not validate; ignore the claimed id.
                        continue
                    client.id_ = load["id"]
                    self._add_client_present(client)
            except salt.ext.tornado.iostream.StreamClosedError as e:
                log.debug("tcp stream to %s closed, unable to recv", client.address)
                client.close()
                self._remove_client_present(client)
                self.clients.discard(client)
                break
            except Exception as e:  # pylint: disable=broad-except
                log.error(
                    "Exception parsing response from %s", client.address, exc_info=True
                )
                continue

    def handle_stream(self, stream, address):
        """Tornado TCPServer hook: wrap each new connection in a Subscriber."""
        log.trace("Subscriber at %s connected", address)
        client = Subscriber(stream, address)
        self.clients.add(client)
        self.io_loop.spawn_callback(self._stream_read, client)

    # TODO: ACK the publish through IPC
    @salt.ext.tornado.gen.coroutine
    def publish_payload(self, package, _):
        """
        Frame package['payload'] and write it to the targeted subscribers
        ('topic_lst' of minion ids) or, absent targeting, to every client.
        Clients whose stream closed are pruned afterwards.
        """
        log.debug("TCP PubServer sending payload: %s", package)
        payload = salt.transport.frame.frame_msg(package["payload"])
        to_remove = []
        if "topic_lst" in package:
            topic_lst = package["topic_lst"]
            for topic in topic_lst:
                if topic in self.present:
                    # This will rarely be a list of more than 1 item. It will
                    # be more than 1 item if the minion disconnects from the
                    # master in an unclean manner (eg cable yank), then
                    # restarts and the master is yet to detect the disconnect
                    # via TCP keep-alive.
                    for client in self.present[topic]:
                        try:
                            # Write the packed str
                            f = client.stream.write(payload)
                            self.io_loop.add_future(f, lambda f: True)
                        except salt.ext.tornado.iostream.StreamClosedError:
                            to_remove.append(client)
                else:
                    log.debug("Publish target %s not connected", topic)
        else:
            for client in self.clients:
                try:
                    # Write the packed str
                    f = client.stream.write(payload)
                    self.io_loop.add_future(f, lambda f: True)
                except salt.ext.tornado.iostream.StreamClosedError:
                    to_remove.append(client)
        for client in to_remove:
            log.debug(
                "Subscriber at %s has disconnected from publisher", client.address
            )
            client.close()
            self._remove_client_present(client)
            self.clients.discard(client)
        log.trace("TCP PubServer finished publishing payload")
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
    """
    Master-side publish channel: forks a PubServer daemon process and feeds
    it encrypted publish payloads over a local IPC socket.
    """

    # TODO: opts!
    # Based on default used in salt.ext.tornado.netutil.bind_sockets()
    backlog = 128

    def __init__(self, opts):
        self.opts = opts
        self.ckminions = salt.utils.minions.CkMinions(opts)
        self.io_loop = None

    def __setstate__(self, state):
        # Re-seed the shared AES secrets in the unpickling (child) process,
        # then rebuild the instance from opts.
        salt.master.SMaster.secrets = state["secrets"]
        self.__init__(state["opts"])

    def __getstate__(self):
        # Only opts and the AES secrets are needed to reconstruct after fork.
        return {"opts": self.opts, "secrets": salt.master.SMaster.secrets}

    def _publish_daemon(self, **kwargs):
        """
        Bind to the interface specified in the configuration file
        """
        log_queue = kwargs.get("log_queue")
        if log_queue is not None:
            salt.log.setup.set_multiprocessing_logging_queue(log_queue)
        log_queue_level = kwargs.get("log_queue_level")
        if log_queue_level is not None:
            salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
        salt.log.setup.setup_multiprocessing_logging(log_queue)
        # Check if io_loop was set outside
        if self.io_loop is None:
            self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
        # Spin up the publisher
        pub_server = PubServer(self.opts, io_loop=self.io_loop)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(sock, self.opts)
        sock.setblocking(0)
        sock.bind((self.opts["interface"], int(self.opts["publish_port"])))
        sock.listen(self.backlog)
        # pub_server will take ownership of the socket
        pub_server.add_socket(sock)
        # Set up Salt IPC server
        if self.opts.get("ipc_mode", "") == "tcp":
            pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
        else:
            pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
        pull_sock = salt.transport.ipc.IPCMessageServer(
            pull_uri,
            io_loop=self.io_loop,
            payload_handler=pub_server.publish_payload,
        )
        # Securely create socket
        log.info("Starting the Salt Puller on %s", pull_uri)
        with salt.utils.files.set_umask(0o177):
            pull_sock.start()
        # run forever
        try:
            self.io_loop.start()
        except (KeyboardInterrupt, SystemExit):
            salt.log.setup.shutdown_multiprocessing_logging()
        finally:
            pull_sock.close()

    def pre_fork(self, process_manager, kwargs=None):
        """
        Do anything necessary pre-fork. Since this is on the master side this will
        primarily be used to create IPC channels and create our daemon process to
        do the actual publishing
        """
        process_manager.add_process(
            self._publish_daemon, kwargs=kwargs, name=self.__class__.__name__
        )

    def publish(self, load):
        """
        Publish "load" to minions
        """
        payload = {"enc": "aes"}
        crypticle = salt.crypt.Crypticle(
            self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
        )
        payload["load"] = crypticle.dumps(load)
        if self.opts["sign_pub_messages"]:
            master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
            log.debug("Signing data packet")
            payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
        # Use the Salt IPC server
        if self.opts.get("ipc_mode", "") == "tcp":
            pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
        else:
            pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
        # TODO: switch to the actual asynchronous interface
        # pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
        pub_sock = salt.utils.asynchronous.SyncWrapper(
            salt.transport.ipc.IPCMessageClient,
            (pull_uri,),
            loop_kwarg="io_loop",
        )
        pub_sock.connect()
        int_payload = {"payload": salt.payload.dumps(payload)}
        # add some targeting stuff for lists only (for now)
        if load["tgt_type"] == "list" and not self.opts.get("order_masters", False):
            if isinstance(load["tgt"], str):
                # Fetch a list of minions that match
                _res = self.ckminions.check_minions(
                    load["tgt"], tgt_type=load["tgt_type"]
                )
                match_ids = _res["minions"]
                log.debug("Publish Side Match: %s", match_ids)
                # Send the list of minions through so the server can target them
                int_payload["topic_lst"] = match_ids
            else:
                int_payload["topic_lst"] = load["tgt"]
        # Send it over IPC!
        pub_sock.send(int_payload)
| 38.004154 | 107 | 0.554039 |
b1de66542e990852570d0825e181d49c32975991 | 48 | py | Python | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorWidthAndPrecision_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | f'{1:.5d} {2:3.5d} {3:3d} {"spam":>20} {4:<#d}'
| 24 | 47 | 0.395833 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.