hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
411b979b773b2c1ca489c6724a32d9c196d5baf2 | 2,410 | py | Python | doc/source/conf.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 4 | 2019-04-02T03:49:13.000Z | 2022-01-22T14:57:33.000Z | doc/source/conf.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 451 | 2019-03-25T07:27:52.000Z | 2021-07-26T01:26:43.000Z | doc/source/conf.py | bzhaoopenstack/labkeeper | fecda8a306fd3f1dea3f66606ac8bd962981d2b0 | [
"Apache-2.0"
] | 14 | 2018-09-28T18:45:12.000Z | 2022-01-22T14:57:22.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx configuration for building the labkeeper project documentation.
import os
import sys
# Make the repository root importable so sphinx.ext.autodoc can locate the
# project's modules (this conf.py lives two levels down, in doc/source/).
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'labkeeper'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
| 32.567568 | 79 | 0.697925 |
53ddd4aa686d8ca1d970c22c2e93a153eeabefc5 | 50,943 | py | Python | tests/unit/states/test_mount.py | ifraixedes/saltstack-salt | b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/unit/states/test_mount.py | ifraixedes/saltstack-salt | b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/unit/states/test_mount.py | ifraixedes/saltstack-salt | b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import os
import salt.states.mount as mount
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class MountTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.states.mount
    """
    def setup_loader_modules(self):
        # Register the module under test with an empty loader context;
        # each test patches __salt__/__grains__/__opts__ explicitly.
        return {mount: {}}
# 'mounted' function tests: 1
    def test_mounted(self):
        """
        Test to verify that a device is mounted.

        Exercises mount.mounted across several mocked OS grains (Darwin,
        CentOS, AIX) and test/non-test opts combinations. The shared ``ret``
        dict is mutated between assertions, so assertion order matters.
        """
        # Three mount points: a plain xfs device, a CIFS share, and an AIX jfs2.
        name = os.path.realpath("/mnt/sdb")
        device = os.path.realpath("/dev/sdb5")
        fstype = "xfs"
        name2 = os.path.realpath("/mnt/cifs")
        device2 = "//SERVER/SHARE/"
        fstype2 = "cifs"
        opts2 = ["noowners"]
        superopts2 = ["uid=510", "gid=100", "username=cifsuser", "domain=cifsdomain"]
        name3 = os.path.realpath("/mnt/jfs2")
        device3 = "/dev/hd1"
        fstype3 = "jfs2"
        opts3 = [""]
        superopts3 = ["uid=510", "gid=100", "username=jfs2user", "domain=jfs2sdomain"]
        ret = {"name": name, "result": False, "comment": "", "changes": {}}
        # Scripted persist results for successive set_automaster calls.
        mock = MagicMock(
            side_effect=["new", "present", "new", "change", "bad config", "salt"]
        )
        mock_t = MagicMock(return_value=True)
        mock_f = MagicMock(return_value=False)
        mock_ret = MagicMock(return_value={"retcode": 1})
        # mount.active reports all three mount points as currently mounted.
        mock_mnt = MagicMock(
            return_value={
                name: {"device": device, "opts": [], "superopts": []},
                name2: {"device": device2, "opts": opts2, "superopts": superopts2},
                name3: {"device": device3, "opts": opts3, "superopts": superopts3},
            }
        )
        mock_aixfs_retn = MagicMock(return_value="present")
        mock_emt = MagicMock(return_value={})
        mock_str = MagicMock(return_value="salt")
        mock_user = MagicMock(return_value={"uid": 510})
        mock_group = MagicMock(return_value={"gid": 100})
        mock_read_cache = MagicMock(return_value={})
        mock_write_cache = MagicMock(return_value=True)
        # --- Darwin branch: remount/unmount behavior for an active mount.
        with patch.dict(mount.__grains__, {"os": "Darwin"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.active": mock_mnt,
                    "cmd.run_all": mock_ret,
                    "mount.umount": mock_f,
                },
            ), patch("os.path.exists", MagicMock(return_value=True)):
                comt = "Unable to find device with label /dev/sdb5."
                ret.update({"comment": comt})
                self.assertDictEqual(
                    mount.mounted(name, "LABEL=/dev/sdb5", fstype), ret
                )
                with patch.dict(mount.__opts__, {"test": True}):
                    comt = "Remount would be forced because options (noowners) changed"
                    ret.update({"comment": comt, "result": None})
                    self.assertDictEqual(mount.mounted(name, device, fstype), ret)
                with patch.dict(mount.__opts__, {"test": False}):
                    # mount.umount is mocked to fail, so the forced remount errors.
                    comt = "Unable to unmount {}: False.".format(name)
                    umount = (
                        "Forced unmount and mount because options (noowners) changed"
                    )
                    ret.update(
                        {
                            "comment": comt,
                            "result": False,
                            "changes": {"umount": umount},
                        }
                    )
                    self.assertDictEqual(mount.mounted(name, device, "nfs"), ret)
                    umount1 = (
                        "Forced unmount because devices don't match. "
                        "Wanted: {0}, current: {1}, {1}".format(
                            os.path.realpath("/dev/sdb6"), device
                        )
                    )
                    comt = "Unable to unmount"
                    ret.update(
                        {
                            "comment": comt,
                            "result": None,
                            "changes": {"umount": umount1},
                        }
                    )
                    self.assertDictEqual(
                        mount.mounted(
                            name, os.path.realpath("/dev/sdb6"), fstype, opts=[]
                        ),
                        ret,
                    )
            # --- Darwin branch: nothing mounted, automaster persistence paths.
            with patch.dict(
                mount.__salt__,
                {
                    "mount.active": mock_emt,
                    "mount.mount": mock_str,
                    "mount.set_automaster": mock,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}), patch(
                    "os.path.exists", MagicMock(return_value=False)
                ):
                    comt = "{} does not exist and would not be created".format(name)
                    ret.update({"comment": comt, "changes": {}})
                    self.assertDictEqual(mount.mounted(name, device, fstype), ret)
                with patch.dict(mount.__opts__, {"test": False}):
                    with patch.object(os.path, "exists", mock_f):
                        comt = "Mount directory is not present"
                        ret.update({"comment": comt, "result": False})
                        self.assertDictEqual(
                            mount.mounted(name, device, fstype), ret
                        )
                    with patch.object(os.path, "exists", mock_t):
                        # mount.mount returns the string "salt" (an error message).
                        comt = "Mount directory is not present"
                        ret.update({"comment": "salt", "result": False})
                        self.assertDictEqual(
                            mount.mounted(name, device, fstype), ret
                        )
                with patch.dict(mount.__opts__, {"test": True}), patch(
                    "os.path.exists", MagicMock(return_value=False)
                ):
                    comt = (
                        "{0} does not exist and would neither be created nor"
                        " mounted. {0} needs to be written to the fstab in order to"
                        " be made persistent.".format(name)
                    )
                    ret.update({"comment": comt, "result": None})
                    self.assertDictEqual(
                        mount.mounted(name, device, fstype, mount=False), ret
                    )
                with patch.dict(mount.__opts__, {"test": False}), patch(
                    "os.path.exists", MagicMock(return_value=False)
                ):
                    # Each call below consumes one scripted set_automaster result:
                    # "present", "new", "change", "bad config", then "salt".
                    comt = (
                        "{} not present and not mounted. "
                        "Entry already exists in the fstab.".format(name)
                    )
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(
                        mount.mounted(name, device, fstype, mount=False), ret
                    )
                    comt = (
                        "{} not present and not mounted. "
                        "Added new entry to the fstab.".format(name)
                    )
                    ret.update(
                        {
                            "comment": comt,
                            "result": True,
                            "changes": {"persist": "new"},
                        }
                    )
                    self.assertDictEqual(
                        mount.mounted(name, device, fstype, mount=False), ret
                    )
                    comt = (
                        "{} not present and not mounted. "
                        "Updated the entry in the fstab.".format(name)
                    )
                    ret.update(
                        {
                            "comment": comt,
                            "result": True,
                            "changes": {"persist": "update"},
                        }
                    )
                    self.assertDictEqual(
                        mount.mounted(name, device, fstype, mount=False), ret
                    )
                    comt = (
                        "{} not present and not mounted. "
                        "However, the fstab was not found.".format(name)
                    )
                    ret.update({"comment": comt, "result": False, "changes": {}})
                    self.assertDictEqual(
                        mount.mounted(name, device, fstype, mount=False), ret
                    )
                    comt = "{} not present and not mounted".format(name)
                    ret.update({"comment": comt, "result": True, "changes": {}})
                    self.assertDictEqual(
                        mount.mounted(name, device, fstype, mount=False), ret
                    )
        # Test no change for uid provided as a name #25293
        # --- CentOS branch: CIFS share already mounted; also verifies the
        # sorted option order passed to set_fstab (#57520).
        with patch.dict(mount.__grains__, {"os": "CentOS"}):
            set_fstab_mock = MagicMock(autospec=True, return_value="present")
            with patch.dict(
                mount.__salt__,
                {
                    "mount.active": mock_mnt,
                    "mount.mount": mock_str,
                    "mount.umount": mock_f,
                    "mount.read_mount_cache": mock_read_cache,
                    "mount.write_mount_cache": mock_write_cache,
                    "mount.set_fstab": set_fstab_mock,
                    "user.info": mock_user,
                    "group.info": mock_group,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}), patch.object(
                    os.path, "exists", mock_t
                ):
                    # Starting with Python 3.8 the os.path.realpath function attempts to resolve
                    # symbolic links and junctions on Windows. So, since were using a share
                    # that doesn't exist, we need to mock
                    # https://docs.python.org/3/library/os.path.html?highlight=ntpath%20realpath#os.path.realpath
                    with patch.object(
                        os.path,
                        "realpath",
                        MagicMock(side_effect=[name2, device2, device2]),
                    ):
                        comt = "Target was already mounted. Entry already exists in the fstab."
                        ret.update({"name": name2, "result": True})
                        ret.update({"comment": comt, "changes": {}})
                        self.assertDictEqual(
                            mount.mounted(
                                name2,
                                device2,
                                fstype2,
                                opts=["uid=user1", "gid=group1"],
                            ),
                            ret,
                        )
                        # Test to check the options order #57520
                        set_fstab_mock.assert_called_with(
                            name2,
                            "//SERVER/SHARE/",
                            "cifs",
                            ["gid=group1", "uid=user1"],
                            0,
                            0,
                            "/etc/fstab",
                            test=True,
                            match_on="auto",
                        )
        # --- AIX branch: jfs2 mount already present in /etc/filesystems.
        with patch.dict(mount.__grains__, {"os": "AIX"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.active": mock_mnt,
                    "mount.mount": mock_str,
                    "mount.umount": mock_f,
                    "mount.read_mount_cache": mock_read_cache,
                    "mount.write_mount_cache": mock_write_cache,
                    "mount.set_filesystems": mock_aixfs_retn,
                    "user.info": mock_user,
                    "group.info": mock_group,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}):
                    with patch.object(os.path, "exists", mock_t):
                        comt = (
                            "Target was already mounted. Entry already exists in the"
                            " fstab."
                        )
                        ret.update({"name": name3, "result": True})
                        ret.update({"comment": comt, "changes": {}})
                        self.assertDictEqual(
                            mount.mounted(
                                name3,
                                device3,
                                fstype3,
                                opts=["uid=user1", "gid=group1"],
                            ),
                            ret,
                        )
# 'swap' function tests: 1
    def test_swap(self):
        """
        Test to activates a swap device.

        Covers three scenarios: fstab entry differs from the device (so
        persistence changes are needed), fstab entry already matches, and
        the AIX /etc/filesystems variant. ``ret`` is mutated between
        assertions, and ``mock`` scripts successive set_fstab results.
        """
        name = "/mnt/sdb"
        ret = {"name": name, "result": None, "comment": "", "changes": {}}
        # Scripted persist results: "present", "new", "change", "bad config".
        mock = MagicMock(side_effect=["present", "new", "change", "bad config"])
        mock_f = MagicMock(return_value=False)
        mock_swp = MagicMock(return_value=[name])
        # fstab entry matching the swap device...
        mock_fs = MagicMock(return_value={"none": {"device": name, "fstype": "xfs"}})
        # ...and one pointing at a different device (forces a fstab update).
        mock_fs_diff = MagicMock(
            return_value={"none": {"device": "something_else", "fstype": "xfs"}}
        )
        mock_aixfs = MagicMock(return_value={name: {"dev": name, "fstype": "jfs2"}})
        mock_emt = MagicMock(return_value={})
        # --- Scenario 1: fstab device differs from the active swap.
        with patch.dict(mount.__grains__, {"os": "test"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.swaps": mock_swp,
                    "mount.fstab": mock_fs_diff,
                    "file.is_link": mock_f,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}):
                    comt = (
                        "Swap {} is set to be added to the "
                        "fstab and to be activated".format(name)
                    )
                    ret.update({"comment": comt})
                    self.assertDictEqual(mount.swap(name), ret)
                with patch.dict(mount.__opts__, {"test": False}):
                    comt = "Swap {} already active".format(name)
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(mount.swap(name, persist=False), ret)
                    with patch.dict(
                        mount.__salt__,
                        {"mount.fstab": mock_emt, "mount.set_fstab": mock},
                    ):
                        # Each call consumes one scripted set_fstab result.
                        comt = "Swap {} already active".format(name)
                        ret.update({"comment": comt, "result": True})
                        self.assertDictEqual(mount.swap(name), ret)
                        comt = (
                            "Swap /mnt/sdb already active. "
                            "Added new entry to the fstab."
                        )
                        ret.update(
                            {
                                "comment": comt,
                                "result": True,
                                "changes": {"persist": "new"},
                            }
                        )
                        self.assertDictEqual(mount.swap(name), ret)
                        comt = (
                            "Swap /mnt/sdb already active. "
                            "Updated the entry in the fstab."
                        )
                        ret.update(
                            {
                                "comment": comt,
                                "result": True,
                                "changes": {"persist": "update"},
                            }
                        )
                        self.assertDictEqual(mount.swap(name), ret)
                        comt = (
                            "Swap /mnt/sdb already active. "
                            "However, the fstab was not found."
                        )
                        ret.update({"comment": comt, "result": False, "changes": {}})
                        self.assertDictEqual(mount.swap(name), ret)
        # --- Scenario 2: fstab already matches; reset state and mocks.
        ret = {"name": name, "result": None, "comment": "", "changes": {}}
        mock = MagicMock(side_effect=["present", "new", "change", "bad config"])
        mock_emt = MagicMock(return_value={})
        with patch.dict(mount.__grains__, {"os": "test"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.swaps": mock_swp,
                    "mount.fstab": mock_fs,
                    "file.is_link": mock_f,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}):
                    comt = "Swap {} already active".format(name)
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(mount.swap(name), ret)
                with patch.dict(mount.__opts__, {"test": False}):
                    comt = "Swap {} already active".format(name)
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(mount.swap(name), ret)
                    with patch.dict(
                        mount.__salt__,
                        {"mount.fstab": mock_emt, "mount.set_fstab": mock},
                    ):
                        comt = "Swap {} already active".format(name)
                        ret.update({"comment": comt, "result": True})
                        self.assertDictEqual(mount.swap(name), ret)
                        comt = (
                            "Swap /mnt/sdb already active. "
                            "Added new entry to the fstab."
                        )
                        ret.update(
                            {
                                "comment": comt,
                                "result": True,
                                "changes": {"persist": "new"},
                            }
                        )
                        self.assertDictEqual(mount.swap(name), ret)
                        comt = (
                            "Swap /mnt/sdb already active. "
                            "Updated the entry in the fstab."
                        )
                        ret.update(
                            {
                                "comment": comt,
                                "result": True,
                                "changes": {"persist": "update"},
                            }
                        )
                        self.assertDictEqual(mount.swap(name), ret)
                        comt = (
                            "Swap /mnt/sdb already active. "
                            "However, the fstab was not found."
                        )
                        ret.update({"comment": comt, "result": False, "changes": {}})
                        self.assertDictEqual(mount.swap(name), ret)
        # --- Scenario 3: AIX uses /etc/filesystems instead of fstab.
        with patch.dict(mount.__grains__, {"os": "AIX"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.swaps": mock_swp,
                    "mount.filesystems": mock_aixfs,
                    "file.is_link": mock_f,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}):
                    comt = "Swap {} already active".format(name)
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(mount.swap(name), ret)
                with patch.dict(mount.__opts__, {"test": False}):
                    comt = (
                        "Swap {} already active. swap not present"
                        " in /etc/filesystems on AIX.".format(name)
                    )
                    ret.update({"comment": comt, "result": False})
                    self.assertDictEqual(mount.swap(name), ret)
                    with patch.dict(
                        mount.__salt__,
                        {"mount.filesystems": mock_emt, "mount.set_filesystems": mock},
                    ):
                        comt = (
                            "Swap {} already active. swap not present"
                            " in /etc/filesystems on AIX.".format(name)
                        )
                        ret.update({"comment": comt, "result": False})
                        self.assertDictEqual(mount.swap(name), ret)
# 'unmounted' function tests: 1
    def test_unmounted(self):
        """
        Test to verify that a device is not mounted

        ``mock_mnt``'s side_effect makes the target appear mounted on the
        first mount.active call and unmounted afterwards, so assertion
        order matters. Covers the Darwin automaster and AIX filesystems
        persistence paths.
        """
        name = "/mnt/sdb"
        device = "/dev/sdb5"
        ret = {"name": name, "result": None, "comment": "", "changes": {}}
        mock_f = MagicMock(return_value=False)
        mock_t = MagicMock(return_value=True)
        mock_dev = MagicMock(return_value={name: {"device": device}})
        mock_fs = MagicMock(return_value={name: {"device": name}})
        # First call: mounted; subsequent calls: not mounted.
        mock_mnt = MagicMock(side_effect=[{name: {}}, {}, {}, {}])
        name3 = os.path.realpath("/mnt/jfs2")
        device3 = "/dev/hd1"
        fstype3 = "jfs2"
        opts3 = [""]
        mock_mnta = MagicMock(return_value={name3: {"device": device3, "opts": opts3}})
        mock_aixfs = MagicMock(return_value={name: {"dev": name3, "fstype": fstype3}})
        mock_delete_cache = MagicMock(return_value=True)
        comt3 = (
            "Mount point /mnt/sdb is unmounted but needs to be purged "
            "from /etc/auto_salt to be made persistent"
        )
        # --- Darwin branch: automaster-based persistence.
        with patch.dict(mount.__grains__, {"os": "Darwin"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.active": mock_mnt,
                    "mount.automaster": mock_fs,
                    "file.is_link": mock_f,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}):
                    comt = "Mount point {} is mounted but should not be".format(name)
                    ret.update({"comment": comt})
                    self.assertDictEqual(mount.unmounted(name, device), ret)
                    comt = (
                        "Target was already unmounted. "
                        "fstab entry for device {} not found".format(device)
                    )
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(
                        mount.unmounted(name, device, persist=True), ret
                    )
                    with patch.dict(mount.__salt__, {"mount.automaster": mock_dev}):
                        # Entry exists in the automaster config: purge needed.
                        ret.update({"comment": comt3, "result": None})
                        self.assertDictEqual(
                            mount.unmounted(name, device, persist=True), ret
                        )
                    comt = "Target was already unmounted"
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(mount.unmounted(name, device), ret)
        # --- AIX branch: /etc/filesystems-based persistence.
        with patch.dict(mount.__grains__, {"os": "AIX"}):
            with patch.dict(
                mount.__salt__,
                {
                    "mount.active": mock_mnta,
                    "mount.filesystems": mock_aixfs,
                    "file.is_link": mock_f,
                },
            ):
                with patch.dict(mount.__opts__, {"test": True}):
                    comt = "Target was already unmounted"
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(mount.unmounted(name, device), ret)
                    comt = (
                        "Target was already unmounted. "
                        "fstab entry for device /dev/sdb5 not found"
                    )
                    ret.update({"comment": comt, "result": True})
                    self.assertDictEqual(
                        mount.unmounted(name, device, persist=True), ret
                    )
                    with patch.dict(mount.__salt__, {"mount.filesystems": mock_dev}):
                        comt = "Mount point {} is mounted but should not be".format(
                            name3
                        )
                        ret.update({"comment": comt, "result": None, "name": name3})
                        self.assertDictEqual(
                            mount.unmounted(name3, device3, persist=True), ret
                        )
                        with patch.dict(mount.__opts__, {"test": False}), patch.dict(
                            mount.__salt__,
                            {
                                "mount.umount": mock_t,
                                "mount.delete_mount_cache": mock_delete_cache,
                            },
                        ):
                            comt = "Target was successfully unmounted"
                            ret.update(
                                {
                                    "comment": comt,
                                    "result": True,
                                    "name": name3,
                                    "changes": {"umount": True},
                                }
                            )
                            self.assertDictEqual(mount.unmounted(name3, device3), ret)
# 'mod_watch' function tests: 1
def test_mod_watch(self):
"""
Test the mounted watcher, called to invoke the watch command.
"""
name = "/mnt/sdb"
ret = {
"name": name,
"result": True,
"comment": "Watch not supported in unmount at this time",
"changes": {},
}
self.assertDictEqual(mount.mod_watch(name, sfun="unmount"), ret)
    def test_mounted_multiple_mounts(self):
        """
        Test to verify that a device is mounted.

        Two mount points share one NFS device; only the first is active.
        In test mode, mounting the second must be reported as a pending
        mount rather than "already mounted".
        """
        name = "/mnt/nfs1"
        device = "localhost:/mnt/nfsshare"
        fstype = "nfs4"
        name2 = "/mnt/nfs2"
        device2 = "localhost:/mnt/nfsshare"
        fstype2 = "nfs4"
        ret = {"name": name, "result": False, "comment": "", "changes": {}}
        mock = MagicMock(
            side_effect=[
                "new",
                "present",
                "new",
                "change",
                "bad config",
                "salt",
                "present",
            ]
        )
        mock_t = MagicMock(return_value=True)
        mock_f = MagicMock(return_value=False)
        mock_ret = MagicMock(return_value={"retcode": 1})
        # Only the first mount point shows up as active.
        mock_mnt = MagicMock(
            return_value={name: {"device": device, "opts": [], "superopts": []}}
        )
        mock_read_cache = MagicMock(return_value={})
        mock_write_cache = MagicMock(return_value=True)
        mock_user = MagicMock(return_value={"uid": 510})
        mock_group = MagicMock(return_value={"gid": 100})
        mock_str = MagicMock(return_value="salt")
        mock_fstab_config = ["localhost:/mnt/nfsshare /mnt/nfs1 nfs defaults 0 0"]
        # Test no change for uid provided as a name #25293
        with patch.dict(mount.__grains__, {"os": "CentOS"}):
            with patch.dict(mount.__opts__, {"test": True}):
                with patch.dict(
                    mount.__salt__,
                    {
                        "mount.active": mock_mnt,
                        "mount.mount": mock_str,
                        "mount.umount": mock_f,
                        "mount.read_mount_cache": mock_read_cache,
                        "mount.write_mount_cache": mock_write_cache,
                        "user.info": mock_user,
                        "group.info": mock_group,
                    },
                ):
                    with patch.object(os.path, "exists", mock_t):
                        comt = "/mnt/nfs2 would be mounted"
                        ret.update({"name": name2, "result": None})
                        ret.update({"comment": comt, "changes": {}})
                        self.assertDictEqual(
                            mount.mounted(name2, device2, fstype2, opts=[]), ret
                        )
def test__convert_to_fast_none(self):
"""
Test the device name conversor
"""
assert mount._convert_to("/dev/sda1", None) == "/dev/sda1"
def test__convert_to_fast_device(self):
"""
Test the device name conversor
"""
assert mount._convert_to("/dev/sda1", "device") == "/dev/sda1"
def test__convert_to_fast_token(self):
"""
Test the device name conversor
"""
assert mount._convert_to("LABEL=home", "label") == "LABEL=home"
def test__convert_to_device_none(self):
"""
Test the device name conversor
"""
salt_mock = {
"disk.blkid": MagicMock(return_value={}),
}
with patch.dict(mount.__salt__, salt_mock):
assert mount._convert_to("/dev/sda1", "uuid") is None
salt_mock["disk.blkid"].assert_called_with("/dev/sda1")
def test__convert_to_device_token(self):
"""
Test the device name conversor
"""
uuid = "988c663d-74a2-432b-ba52-3eea34015f22"
salt_mock = {
"disk.blkid": MagicMock(return_value={"/dev/sda1": {"UUID": uuid}}),
}
with patch.dict(mount.__salt__, salt_mock):
uuid = "UUID={}".format(uuid)
assert mount._convert_to("/dev/sda1", "uuid") == uuid
salt_mock["disk.blkid"].assert_called_with("/dev/sda1")
def test__convert_to_token_device(self):
"""
Test the device name conversor
"""
uuid = "988c663d-74a2-432b-ba52-3eea34015f22"
salt_mock = {
"disk.blkid": MagicMock(return_value={"/dev/sda1": {"UUID": uuid}}),
}
with patch.dict(mount.__salt__, salt_mock):
uuid = "UUID={}".format(uuid)
assert mount._convert_to(uuid, "device") == "/dev/sda1"
salt_mock["disk.blkid"].assert_called_with(token=uuid)
def test_fstab_present_macos_test_present(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": None,
"changes": {},
"comment": ["/home entry is already in /etc/auto_salt."],
}
grains_mock = {"os": "MacOS"}
opts_mock = {"test": True}
salt_mock = {"mount.set_automaster": MagicMock(return_value="present")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_automaster"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="noowners",
config="/etc/auto_salt",
test=True,
not_change=False,
)
def test_fstab_present_aix_test_present(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": None,
"changes": {},
"comment": ["/home entry is already in /etc/filesystems."],
}
grains_mock = {"os": "AIX"}
opts_mock = {"test": True}
salt_mock = {"mount.set_filesystems": MagicMock(return_value="present")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_filesystems"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
mount=True,
opts="",
config="/etc/filesystems",
test=True,
match_on="auto",
not_change=False,
)
def test_fstab_present_test_present(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": None,
"changes": {},
"comment": ["/home entry is already in /etc/fstab."],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": True}
salt_mock = {"mount.set_fstab": MagicMock(return_value="present")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
test=True,
match_on="auto",
not_change=False,
)
def test_fstab_present_test_new(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": None,
"changes": {},
"comment": [
"/home entry will be written in /etc/fstab.",
"Will mount /dev/sda1 on /home",
],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": True}
salt_mock = {"mount.set_fstab": MagicMock(return_value="new")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
test=True,
match_on="auto",
not_change=False,
)
def test_fstab_present_test_change(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": None,
"changes": {},
"comment": ["/home entry will be updated in /etc/fstab."],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": True}
salt_mock = {"mount.set_fstab": MagicMock(return_value="change")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
test=True,
match_on="auto",
not_change=False,
)
def test_fstab_present_test_error(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": False,
"changes": {},
"comment": ["/home entry cannot be created in /etc/fstab: error."],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": True}
salt_mock = {"mount.set_fstab": MagicMock(return_value="error")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
test=True,
match_on="auto",
not_change=False,
)
def test_fstab_present_macos_present(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": True,
"changes": {},
"comment": ["/home entry was already in /etc/auto_salt."],
}
grains_mock = {"os": "MacOS"}
opts_mock = {"test": False}
salt_mock = {"mount.set_automaster": MagicMock(return_value="present")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_automaster"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="noowners",
config="/etc/auto_salt",
not_change=False,
)
def test_fstab_present_aix_present(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": True,
"changes": {},
"comment": ["/home entry was already in /etc/filesystems."],
}
grains_mock = {"os": "AIX"}
opts_mock = {"test": False}
salt_mock = {"mount.set_filesystems": MagicMock(return_value="present")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_filesystems"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
mount=True,
opts="",
config="/etc/filesystems",
match_on="auto",
not_change=False,
)
def test_fstab_present_present(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": True,
"changes": {},
"comment": ["/home entry was already in /etc/fstab."],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": False}
salt_mock = {"mount.set_fstab": MagicMock(return_value="present")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
match_on="auto",
not_change=False,
)
def test_fstab_present_new(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": True,
"changes": {"persist": "new"},
"comment": [
"/home entry added in /etc/fstab.",
"Mounted /dev/sda1 on /home",
],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": False}
set_fstab_mock = {"mount.set_fstab": MagicMock(return_value="new")}
mount_mock = {"mount.mount": MagicMock(return_value=True)}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, mount_mock), patch.dict(
mount.__salt__, set_fstab_mock
):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
set_fstab_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
match_on="auto",
not_change=False,
)
def test_fstab_present_new_no_mount(self):
"""
Test fstab_present with mount=false option
"""
ret = {
"name": "/dev/sda1",
"result": True,
"changes": {"persist": "new"},
"comment": ["/home entry added in /etc/fstab."],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": False}
salt_mock = {"mount.set_fstab": MagicMock(return_value="new")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2", mount=False) == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
match_on="auto",
not_change=False,
)
def test_fstab_present_change(self):
"""
Test fstab_present
"""
ret = {
"name": "/dev/sda1",
"result": True,
"changes": {"persist": "change"},
"comment": ["/home entry updated in /etc/fstab."],
}
grains_mock = {"os": "Linux"}
opts_mock = {"test": False}
salt_mock = {"mount.set_fstab": MagicMock(return_value="change")}
with patch.dict(mount.__grains__, grains_mock), patch.dict(
mount.__opts__, opts_mock
), patch.dict(mount.__salt__, salt_mock):
assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
salt_mock["mount.set_fstab"].assert_called_with(
name="/home",
device="/dev/sda1",
fstype="ext2",
opts="defaults",
dump=0,
pass_num=0,
config="/etc/fstab",
match_on="auto",
not_change=False,
)
def test_fstab_present_fail(self):
    """
    Test fstab_present when mount.set_fstab returns an unexpected
    status ('error'): the state fails with result False and no changes.
    """
    ret = {
        "name": "/dev/sda1",
        "result": False,
        "changes": {},
        "comment": ["/home entry cannot be changed in /etc/fstab: error."],
    }
    grains_mock = {"os": "Linux"}
    opts_mock = {"test": False}  # real run, not a dry run
    salt_mock = {"mount.set_fstab": MagicMock(return_value="error")}
    with patch.dict(mount.__grains__, grains_mock), patch.dict(
        mount.__opts__, opts_mock
    ), patch.dict(mount.__salt__, salt_mock):
        assert mount.fstab_present("/dev/sda1", "/home", "ext2") == ret
        # set_fstab is still attempted with the default options.
        salt_mock["mount.set_fstab"].assert_called_with(
            name="/home",
            device="/dev/sda1",
            fstype="ext2",
            opts="defaults",
            dump=0,
            pass_num=0,
            config="/etc/fstab",
            match_on="auto",
            not_change=False,
        )
def test_fstab_absent_macos_test_absent(self):
    """
    fstab_absent on MacOS in test mode when the /etc/auto_salt entry is
    already missing: result is None and no changes are reported.
    """
    automaster = MagicMock(return_value={})
    expected = {
        "name": "/dev/sda1",
        "result": None,
        "changes": {},
        "comment": ["/home entry is already missing in /etc/auto_salt."],
    }
    with patch.dict(mount.__grains__, {"os": "MacOS"}):
        with patch.dict(mount.__opts__, {"test": True}):
            with patch.dict(mount.__salt__, {"mount.automaster": automaster}):
                assert mount.fstab_absent("/dev/sda1", "/home") == expected
                # MacOS persistence lives in the automaster config file.
                automaster.assert_called_with("/etc/auto_salt")
def test_fstab_absent_aix_test_absent(self):
    """
    fstab_absent on AIX in test mode when the /etc/filesystems entry is
    already missing: result is None and no changes are reported.
    """
    ret = {
        "name": "/dev/sda1",
        "result": None,
        "changes": {},
        "comment": ["/home entry is already missing in /etc/filesystems."],
    }
    grains_mock = {"os": "AIX"}
    opts_mock = {"test": True}  # dry run
    salt_mock = {"mount.filesystems": MagicMock(return_value={})}
    with patch.dict(mount.__grains__, grains_mock), patch.dict(
        mount.__opts__, opts_mock
    ), patch.dict(mount.__salt__, salt_mock):
        assert mount.fstab_absent("/dev/sda1", "/home") == ret
        # AIX persistence lives in /etc/filesystems, not /etc/fstab.
        salt_mock["mount.filesystems"].assert_called_with("/etc/filesystems")
def test_fstab_absent_test_absent(self):
    """
    fstab_absent on Linux in test mode when the /etc/fstab entry is
    already missing: result is None and no changes are reported.
    """
    fstab = MagicMock(return_value={})
    expected = {
        "name": "/dev/sda1",
        "result": None,
        "changes": {},
        "comment": ["/home entry is already missing in /etc/fstab."],
    }
    with patch.dict(mount.__grains__, {"os": "Linux"}):
        with patch.dict(mount.__opts__, {"test": True}):
            with patch.dict(mount.__salt__, {"mount.fstab": fstab}):
                assert mount.fstab_absent("/dev/sda1", "/home") == expected
                fstab.assert_called_with("/etc/fstab")
def test_fstab_absent_test_present(self):
    """
    fstab_absent on Linux in test mode when the /etc/fstab entry exists:
    reports that the entry *would* be removed (result None, no changes).
    """
    ret = {
        "name": "/dev/sda1",
        "result": None,
        "changes": {},
        "comment": ["/home entry will be removed from /etc/fstab."],
    }
    grains_mock = {"os": "Linux"}
    opts_mock = {"test": True}  # dry run
    # Non-empty mapping means the /home entry is currently present.
    salt_mock = {"mount.fstab": MagicMock(return_value={"/home": {}})}
    with patch.dict(mount.__grains__, grains_mock), patch.dict(
        mount.__opts__, opts_mock
    ), patch.dict(mount.__salt__, salt_mock):
        assert mount.fstab_absent("/dev/sda1", "/home") == ret
        salt_mock["mount.fstab"].assert_called_with("/etc/fstab")
def test_fstab_absent_macos_present(self):
    """
    fstab_absent on MacOS with an existing /etc/auto_salt entry: the
    entry is removed via mount.rm_automaster and reported in changes.
    """
    ret = {
        "name": "/dev/sda1",
        "result": True,
        "changes": {"persist": "removed"},
        "comment": ["/home entry removed from /etc/auto_salt."],
    }
    grains_mock = {"os": "MacOS"}
    opts_mock = {"test": False}  # real run
    salt_mock = {
        # Entry currently present in the automaster config ...
        "mount.automaster": MagicMock(return_value={"/home": {}}),
        # ... and its removal succeeds.
        "mount.rm_automaster": MagicMock(return_value=True),
    }
    with patch.dict(mount.__grains__, grains_mock), patch.dict(
        mount.__opts__, opts_mock
    ), patch.dict(mount.__salt__, salt_mock):
        assert mount.fstab_absent("/dev/sda1", "/home") == ret
        salt_mock["mount.automaster"].assert_called_with("/etc/auto_salt")
        salt_mock["mount.rm_automaster"].assert_called_with(
            name="/home", device="/dev/sda1", config="/etc/auto_salt"
        )
def test_fstab_absent_aix_present(self):
    """
    fstab_absent on AIX with an existing /etc/filesystems entry: the
    entry is removed via mount.rm_filesystems and reported in changes.
    """
    ret = {
        "name": "/dev/sda1",
        "result": True,
        "changes": {"persist": "removed"},
        "comment": ["/home entry removed from /etc/filesystems."],
    }
    grains_mock = {"os": "AIX"}
    opts_mock = {"test": False}  # real run
    salt_mock = {
        # Entry currently present in /etc/filesystems ...
        "mount.filesystems": MagicMock(return_value={"/home": {}}),
        # ... and its removal succeeds.
        "mount.rm_filesystems": MagicMock(return_value=True),
    }
    with patch.dict(mount.__grains__, grains_mock), patch.dict(
        mount.__opts__, opts_mock
    ), patch.dict(mount.__salt__, salt_mock):
        assert mount.fstab_absent("/dev/sda1", "/home") == ret
        salt_mock["mount.filesystems"].assert_called_with("/etc/filesystems")
        salt_mock["mount.rm_filesystems"].assert_called_with(
            name="/home", device="/dev/sda1", config="/etc/filesystems"
        )
def test_fstab_absent_present(self):
    """
    fstab_absent on Linux with an existing /etc/fstab entry: the entry
    is removed via mount.rm_fstab and reported under changes['persist'].
    """
    fstab = MagicMock(return_value={"/home": {}})
    rm_fstab = MagicMock(return_value=True)
    expected = {
        "name": "/dev/sda1",
        "result": True,
        "changes": {"persist": "removed"},
        "comment": ["/home entry removed from /etc/fstab."],
    }
    mocks = {"mount.fstab": fstab, "mount.rm_fstab": rm_fstab}
    with patch.dict(mount.__grains__, {"os": "Linux"}):
        with patch.dict(mount.__opts__, {"test": False}):
            with patch.dict(mount.__salt__, mocks):
                assert mount.fstab_absent("/dev/sda1", "/home") == expected
                fstab.assert_called_with("/etc/fstab")
                rm_fstab.assert_called_with(
                    name="/home", device="/dev/sda1", config="/etc/fstab"
                )
def test_fstab_absent_absent(self):
    """
    fstab_absent on Linux (real run) when the /etc/fstab entry is
    already missing: succeeds with no changes.
    """
    ret = {
        "name": "/dev/sda1",
        "result": True,
        "changes": {},
        "comment": ["/home entry is already missing in /etc/fstab."],
    }
    grains_mock = {"os": "Linux"}
    opts_mock = {"test": False}  # real run
    salt_mock = {"mount.fstab": MagicMock(return_value={})}
    with patch.dict(mount.__grains__, grains_mock), patch.dict(
        mount.__opts__, opts_mock
    ), patch.dict(mount.__salt__, salt_mock):
        assert mount.fstab_absent("/dev/sda1", "/home") == ret
        salt_mock["mount.fstab"].assert_called_with("/etc/fstab")
| 39.036782 | 113 | 0.455804 |
36ebacede0e300cbef2b0e5ecbb0b1ec826f81dd | 616 | py | Python | osu/local/hitobject/taiko/taiko_spinner_hitobject.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 23 | 2019-02-27T06:20:15.000Z | 2022-03-31T22:54:11.000Z | osu/local/hitobject/taiko/taiko_spinner_hitobject.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 38 | 2019-03-03T17:35:39.000Z | 2021-08-23T20:43:34.000Z | osu/local/hitobject/taiko/taiko_spinner_hitobject.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 4 | 2020-03-30T20:43:14.000Z | 2022-03-06T19:40:15.000Z | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from osu.local.hitobject.hitobject import Hitobject
from osu.local.hitobject.taiko.taiko import Taiko
from misc.frozen_cls import FrozenCls
@FrozenCls
class TaikoSpinnerHitobject(QGraphicsItem, Hitobject):
    """Qt graphics item representing a taiko spinner hitobject.

    Combines the QGraphicsItem rendering interface with the game-side
    Hitobject data parsed from the beatmap.
    """

    def __init__(self, data):
        # Initialize both bases explicitly; QGraphicsItem takes no data.
        QGraphicsItem.__init__(self)
        Hitobject.__init__(self, data)

    def paint(self, painter, option, widget):
        # TODO: spinner rendering is not implemented yet.
        pass

    def resizeEvent(self, event):
        # NOTE(review): debug leftover -- QGraphicsItem does not define a
        # resizeEvent hook, so this likely never fires; confirm and remove.
        print('owh')

    def boundingRect(self):
        # Bounding box anchored at the item origin, sized by the spinner
        # radius (self.radius presumably set by Hitobject -- TODO confirm).
        return QRectF(0, 0, self.radius, self.radius)
1f181b65028d4e2169aeae14fd2810393d6e1ae1 | 1,758 | py | Python | AWS/stylometry_tokenPL/tokens.py | wcex1994/EssAI | 968f07d06f4d49a1538fb56fe505f13ff5f6fa8e | [
"MIT"
] | null | null | null | AWS/stylometry_tokenPL/tokens.py | wcex1994/EssAI | 968f07d06f4d49a1538fb56fe505f13ff5f6fa8e | [
"MIT"
] | null | null | null | AWS/stylometry_tokenPL/tokens.py | wcex1994/EssAI | 968f07d06f4d49a1538fb56fe505f13ff5f6fa8e | [
"MIT"
] | 1 | 2021-05-06T21:55:33.000Z | 2021-05-06T21:55:33.000Z | #=================================================================
# Libraries
#=================================================================
from utils import *
# from scipy import stats as s
import pandas as pd
import numpy as np
from math import log, e
# This language model will be used for tokenizing and pos tagging
# import spacy
# nlp = spacy.load("en_core_web_sm")
import spacy

# Load the pinned spaCy English model baked into the deployment image at
# /opt (AWS Lambda layer path -- presumably; confirm against packaging).
nlp = spacy.load('/opt/en_core_web_sm-2.1.0')
#=================================================================
# Tokens
#=================================================================
def textToTokens(text):
    """Run the spaCy pipeline on a single text and return the resulting
    Doc (an iterable of tokens)."""
    return nlp(text)
def partOfSpeech(row):
    """Return the part-of-speech tag (token.pos_) of every token in the
    given tokenized row, in order."""
    tags = []
    for token in row:
        tags.append(token.pos_)
    return tags
def posEntropy(pos):
    """Return the Shannon entropy (natural log base) of the part-of-speech
    distribution in *pos*.

    A uniform distribution over k tags yields log(k); a single repeated
    tag yields 0.0.
    """
    # Normalised tag frequencies; sort=False keeps value_counts from
    # doing unnecessary ordering work.
    proportions = pd.Series(pos).value_counts(normalize=True, sort=False)
    # Shannon entropy: -sum(p * ln(p)).  The original divided by
    # np.log(e), which is exactly 1.0, so this is numerically identical.
    return -(proportions * np.log(proportions)).sum()
def posCounts(pos):
    """Return the frequency of each part-of-speech tag as a pandas
    Series (tag -> count, descending by count)."""
    tags = pd.Series(pos)
    return tags.value_counts()
#=================================================================
# Run Transformation Pipeline
#=================================================================
def tokenPipeline(df, textCol = "Text"):
    """Apply the token-level feature pipeline to *df*.

    Tokenizes the text column, tags parts of speech, then derives the
    POS entropy and appends the per-tag counts as columns.
    """
    steps = [
        (textToTokens, "token", textCol),           # text -> spaCy Doc
        (partOfSpeech, "partOfSpeech", "token"),    # Doc -> POS tags
        (posEntropy, "posEntropy", "partOfSpeech"), # tags -> entropy
        (posCounts, "AppendCounts", "partOfSpeech"),# tags -> count columns
    ]
    return applyTransforms(df, steps)
| 27.904762 | 69 | 0.489761 |
689d38ffea1f22e6dbfd84ad003cc47c08c6b7f5 | 747 | py | Python | examples/synthesis/monolithic/synthetize.py | pierg/crome-synthesis | c4392e69176e67e99c4bbacf8affbd949acebd2a | [
"MIT"
] | null | null | null | examples/synthesis/monolithic/synthetize.py | pierg/crome-synthesis | c4392e69176e67e99c4bbacf8affbd949acebd2a | [
"MIT"
] | null | null | null | examples/synthesis/monolithic/synthetize.py | pierg/crome-synthesis | c4392e69176e67e99c4bbacf8affbd949acebd2a | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from crome_synthesis.controller import Controller
from crome_synthesis.tools.persistence import dump_mono_controller
controller_name = "arbiter"
spec_path = Path(os.path.abspath(os.path.dirname(__file__))).parent / f"controller_specs/{controller_name}"
controller_spec = spec_path / f"spec.txt"
print(f"controller selected: {controller_spec}")
# METHOD 1: MONOLITHIC SYNTHESIS FROM STRIX
controller = Controller.from_file(file_path=controller_spec, name=controller_name)
print(f"Monolithic synthesis realized in {controller.synth_time} s")
print(controller.mealy)
controller.save(format="png", absolute_folder_path=spec_path)
c = dump_mono_controller(absolute_folder_path=spec_path, controller=controller)
| 35.571429 | 107 | 0.827309 |
f5e9c9dedc2bb1ad3af5842408f1d6888f6da623 | 2,069 | py | Python | cache/.mako.tmp/annotation_helper.tmpl.py | necromuralist/Bake-A-Pig-Like-A-Goat | 22b513fe0a8cbdcf813d6c663e7b8b58fb0e6fc9 | [
"MIT"
] | null | null | null | cache/.mako.tmp/annotation_helper.tmpl.py | necromuralist/Bake-A-Pig-Like-A-Goat | 22b513fe0a8cbdcf813d6c663e7b8b58fb0e6fc9 | [
"MIT"
] | null | null | null | cache/.mako.tmp/annotation_helper.tmpl.py | necromuralist/Bake-A-Pig-Like-A-Goat | 22b513fe0a8cbdcf813d6c663e7b8b58fb0e6fc9 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1591844202.4011192
_enable_loop = True
_template_filename = '/home/athena/.virtualenvs/WiCCA/lib/python3.8/site-packages/nikola/data/themes/base/templates/annotation_helper.tmpl'
_template_uri = 'annotation_helper.tmpl'
_source_encoding = 'utf-8'
_exports = ['css', 'code']
def render_body(context,**pageargs):
    # Mako-generated: renders the template's top-level body (only the
    # whitespace between the <%def> blocks).  Do not edit by hand.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        __M_writer('\n\n')
        __M_writer('\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_css(context):
    # Mako-generated: emits the annotator stylesheet <link>.  Do not edit.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        __M_writer('\n <link rel="stylesheet" href="http://assets.annotateit.org/annotator/v1.2.5/annotator.min.css">\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_code(context):
    # Mako-generated: emits the annotator bootstrap <script> block.
    # Do not edit by hand -- regenerate from the .tmpl source.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        __M_writer('\n <script src="http://code.jquery.com/jquery-migrate-1.2.1.js"></script>\n <script src="http://assets.annotateit.org/annotator/v1.2.7/annotator-full.js"></script>\n <script>\n jQuery(function ($) {\n $(\'body\').annotator().annotator(\'setupPlugins\', {}, {\n // Disable filter bar\n Filter: false\n });\n });\n </script>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "/home/athena/.virtualenvs/WiCCA/lib/python3.8/site-packages/nikola/data/themes/base/templates/annotation_helper.tmpl", "uri": "annotation_helper.tmpl", "source_encoding": "utf-8", "line_map": {"16": 0, "21": 3, "22": 16, "28": 1, "32": 1, "38": 5, "42": 5, "48": 42}}
__M_END_METADATA
"""
| 39.037736 | 400 | 0.666989 |
a8b61215bc3df78d2c2e83cc69f18db21b189c92 | 1,048 | py | Python | foliage/base_tab.py | caltechlibrary/foliage | 6fa7c6e5177cb3b8814cdbbdbb9b6d0e4b5718da | [
"BSD-3-Clause"
] | 8 | 2022-01-11T22:30:50.000Z | 2022-01-28T02:06:03.000Z | foliage/base_tab.py | caltechlibrary/foliage | 6fa7c6e5177cb3b8814cdbbdbb9b6d0e4b5718da | [
"BSD-3-Clause"
] | null | null | null | foliage/base_tab.py | caltechlibrary/foliage | 6fa7c6e5177cb3b8814cdbbdbb9b6d0e4b5718da | [
"BSD-3-Clause"
] | null | null | null | '''
base_tab.py: base class for tabs
The tabs in Foliage are conceptually pretty simple: there's a function to
create the tab contents, and another function to set up optional watchers
for detecting and acting on designated PyWebIO "pin" objects. The class
FoliageTab is a base class used by all the Foliage tab classes. This common
base class makes it possible to implement tab creation and pin watching in
__main__.py's foliage_page() as a loop over a list of objects, rather than
by hardcoding calls to every tab directly.
Copyright
---------
Copyright (c) 2021-2022 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
class FoliageTab():
    '''Base class for Foliage tabs.

    Each tab supplies its page contents and its PyWebIO pin watchers via
    the two methods below, which lets the application build the page by
    looping over tab objects instead of hard-coding calls to every tab.
    Subclasses must override both methods; the base versions raise
    NotImplementedError naming the missing override for easier debugging.
    '''

    def contents(self):
        '''Return a dict of elements {'title': '...', 'content': [objects]}.'''
        raise NotImplementedError('subclasses must implement contents()')

    def pin_watchers(self):
        '''Return a dict of elements {'pin_name': callback_function}.'''
        raise NotImplementedError('subclasses must implement pin_watchers()')
| 37.428571 | 79 | 0.73187 |
958738e72de62e8ba76dfeb9ad0ef72679654002 | 5,600 | py | Python | eggs/transaction-1.4.3-py2.7.egg/transaction/_manager.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | null | null | null | eggs/transaction-1.4.3-py2.7.egg/transaction/_manager.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | null | null | null | eggs/transaction-1.4.3-py2.7.egg/transaction/_manager.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | null | null | null | ############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
"""A TransactionManager controls transaction boundaries.
It coordinates application code and resource managers, so that they
are associated with the right transaction.
"""
import sys
import threading
from zope.interface import implementer
from transaction.interfaces import ITransactionManager
from transaction.interfaces import TransientError
from transaction.weakset import WeakSet
from transaction._compat import reraise
from transaction._transaction import Transaction
# We have to remember sets of synch objects, especially Connections.
# But we don't want mere registration with a transaction manager to
# keep a synch object alive forever; in particular, it's common
# practice not to explicitly close Connection objects, and keeping
# a Connection alive keeps a potentially huge number of other objects
# alive (e.g., the cache, and everything reachable from it too).
# Therefore we use "weak sets" internally.
# Call the ISynchronizer newTransaction() method on every element of
# WeakSet synchs.
# A transaction manager needs to do this whenever begin() is called.
# Since it would be good if tm.get() returned the new transaction while
# newTransaction() is running, calling this has to be delayed until after
# the transaction manager has done whatever it needs to do to make its
# get() return the new txn.
def _new_transaction(txn, synchs):
if synchs:
synchs.map(lambda s: s.newTransaction(txn))
# Important: we must always pass a WeakSet (even if empty) to the Transaction
# constructor: synchronizers are registered with the TM, but the
# ISynchronizer xyzCompletion() methods are called by Transactions without
# consulting the TM, so we need to pass a mutable collection of synchronizers
# so that Transactions "see" synchronizers that get registered after the
# Transaction object is constructed.
@implementer(ITransactionManager)
class TransactionManager(object):
    """Single-thread transaction manager.

    Tracks the one current transaction and a weak set of ISynchronizer
    objects to notify whenever a new transaction begins.
    """

    def __init__(self):
        self._txn = None          # the currently active Transaction, if any
        self._synchs = WeakSet()  # registered synchronizers (weakly held)

    def begin(self):
        """ See ITransactionManager.
        """
        # Abort any transaction already in progress before starting anew.
        if self._txn is not None:
            self._txn.abort()
        txn = self._txn = Transaction(self._synchs, self)
        # Notify synchronizers only after self._txn is assigned, so that
        # get() already returns the new transaction inside the callbacks.
        _new_transaction(txn, self._synchs)
        return txn

    # `with manager:` begins a fresh transaction.
    __enter__ = lambda self: self.begin()

    def get(self):
        """ See ITransactionManager.
        """
        # Lazily create a transaction so get() never returns None.
        if self._txn is None:
            self._txn = Transaction(self._synchs, self)
        return self._txn

    def free(self, txn):
        # Called when a transaction completes; only the manager's own
        # current transaction may be released.
        if txn is not self._txn:
            raise ValueError("Foreign transaction")
        self._txn = None

    def registerSynch(self, synch):
        """ See ITransactionManager.
        """
        # The WeakSet is shared with live Transaction objects, so a synch
        # registered now is visible to the current transaction too.
        self._synchs.add(synch)

    def unregisterSynch(self, synch):
        """ See ITransactionManager.
        """
        self._synchs.remove(synch)

    def isDoomed(self):
        """ See ITransactionManager.
        """
        return self.get().isDoomed()

    def doom(self):
        """ See ITransactionManager.
        """
        return self.get().doom()

    def commit(self):
        """ See ITransactionManager.
        """
        return self.get().commit()

    def abort(self):
        """ See ITransactionManager.
        """
        return self.get().abort()

    def __exit__(self, t, v, tb):
        # Commit on clean exit from the `with` block, abort on exception.
        if v is None:
            self.commit()
        else:
            self.abort()

    def savepoint(self, optimistic=False):
        """ See ITransactionManager.
        """
        return self.get().savepoint(optimistic)

    def attempts(self, number=3):
        # Yield `number - 1` retrying Attempt wrappers followed by the
        # manager itself, so the final attempt propagates errors normally.
        if number <= 0:
            raise ValueError("number must be positive")
        while number:
            number -= 1
            if number:
                yield Attempt(self)
            else:
                yield self

    def _retryable(self, error_type, error):
        # An error is retryable if it is transient, or if any joined data
        # manager's should_retry() says so.  Implicitly returns None
        # (falsey) otherwise.
        if issubclass(error_type, TransientError):
            return True

        for dm in self.get()._resources:
            should_retry = getattr(dm, 'should_retry', None)
            if (should_retry is not None) and should_retry(error):
                return True
# Subclassing threading.local gives each thread its own _txn/_synchs
# instance state, so transactions never leak across threads.
class ThreadTransactionManager(TransactionManager, threading.local):
    """Thread-aware transaction manager.

    Each thread is associated with a unique transaction.
    """
class Attempt(object):
    """Context manager for one attempt of a retryable transaction.

    Produced by TransactionManager.attempts(): commits on clean exit and
    suppresses the exception when the manager deems it retryable, so the
    caller's loop can simply try again.
    """

    def __init__(self, manager):
        self.manager = manager

    def _retry_or_raise(self, t, v, tb):
        # Check retryability *before* aborting, while the transaction's
        # resources are still joined and can be consulted.
        retry = self.manager._retryable(t, v)
        self.manager.abort()
        if retry:
            return retry # suppress the exception if necessary
        reraise(t, v, tb) # otherwise reraise the exception

    def __enter__(self):
        return self.manager.__enter__()

    def __exit__(self, t, v, tb):
        # On clean exit try to commit; a failing commit is itself put
        # through the retry check.  On exception, retry-check directly.
        if v is None:
            try:
                self.manager.commit()
            except:
                return self._retry_or_raise(*sys.exc_info())
        else:
            return self._retry_or_raise(t, v, tb)
| 31.284916 | 78 | 0.641071 |
5b60c52155e7d21318d7baca04b5bda5c8483fe3 | 869 | py | Python | botx/clients/methods/v3/chats/add_user.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 13 | 2021-01-21T12:43:10.000Z | 2022-03-23T11:11:59.000Z | botx/clients/methods/v3/chats/add_user.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 259 | 2020-02-26T08:51:03.000Z | 2022-03-23T11:08:36.000Z | botx/clients/methods/v3/chats/add_user.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 5 | 2019-12-02T16:19:22.000Z | 2021-11-22T20:33:34.000Z | """Method for adding new users into chat."""
from http import HTTPStatus
from typing import List
from uuid import UUID
from botx.clients.methods.base import AuthorizedBotXMethod
from botx.clients.methods.errors import (
bot_is_not_admin,
chat_is_not_modifiable,
chat_not_found,
)
class AddUser(AuthorizedBotXMethod[bool]):
    """Method for adding new users into chat.

    Returns True when the BotX API accepts the request.
    """

    # BotX API endpoint and HTTP verb for this call.
    __url__ = "/api/v3/botx/chats/add_user"
    __method__ = "POST"
    __returning__ = bool
    # Map HTTP error statuses to their dedicated error handlers.
    __errors_handlers__ = {
        HTTPStatus.FORBIDDEN: (
            bot_is_not_admin.handle_error,
            chat_is_not_modifiable.handle_error,
        ),
        HTTPStatus.NOT_FOUND: (chat_not_found.handle_error,),
    }

    #: ID of chat into which users should be added.
    group_chat_id: UUID

    #: IDs of users that should be added into chat.
    user_huids: List[UUID]
31b787e868fb672523cd1b38444f2f25d7d60b7d | 13,060 | py | Python | improver_tests/generate_ancillaries/test_GenerateTimezoneMask.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/generate_ancillaries/test_GenerateTimezoneMask.py | nivnac/improver | c16c794f62598017cebc6ae4f99af8f317219a77 | [
"BSD-3-Clause"
] | 3 | 2020-04-25T12:55:42.000Z | 2020-07-23T11:50:46.000Z | improver_tests/generate_ancillaries/test_GenerateTimezoneMask.py | Kat-90/improver | a5c31be3430df429ae38e7c16e267fcbc2af1858 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the GenerateTimezoneMask plugin."""
from datetime import datetime
import iris
import numpy as np
import pytest
import pytz
from iris.cube import Cube, CubeList
from numpy.testing import assert_array_almost_equal, assert_array_equal
from improver.generate_ancillaries.generate_timezone_mask import GenerateTimezoneMask
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
# Skip the whole module when the optional timezone dependencies are absent.
pytest.importorskip("timezonefinder")
pytest.importorskip("numba")

# Grid attributes attached to the test cubes; the plugin is expected to
# carry these through to its output (checked in test__create_template_cube).
GLOBAL_ATTRIBUTES = {
    "title": "MOGREPS-G Model Forecast on Global 20 km Standard Grid",
    "source": "Met Office Unified Model",
    "institution": "Met Office",
}

UK_ATTRIBUTES = {
    "title": "MOGREPS-UK Model Forecast on UK 2 km Standard Grid",
    "source": "Met Office Unified Model",
    "institution": "Met Office",
}
@pytest.fixture(name="global_grid")
def global_grid_fixture() -> Cube:
    """Global grid template: 19 x 37 points at 10 degree spacing,
    covering the full globe from (-90, -180)."""
    data = np.zeros((19, 37), dtype=np.float32)
    cube = set_up_variable_cube(
        data,
        name="template",
        grid_spacing=10,
        domain_corner=(-90, -180),
        attributes=GLOBAL_ATTRIBUTES,
    )
    return cube
@pytest.fixture(name="uk_grid")
def uk_grid_fixture() -> Cube:
    """UK grid template: 21 x 22 equal-area points at ~97 km spacing
    (coarsened from the 2 km standard grid for test speed)."""
    data = np.zeros((21, 22), dtype=np.float32)
    cube = set_up_variable_cube(
        data,
        name="template",
        spatial_grid="equalarea",
        grid_spacing=96900.0,
        domain_corner=(-1036000.0, -1158000.0),
        attributes=UK_ATTRIBUTES,
    )
    return cube
@pytest.fixture(name="timezone_mask")
def timezone_mask_fixture() -> CubeList:
    """A timezone mask cubelist: one global cube per UTC offset 0..3,
    each carrying a length-1 leading UTC_offset dimension."""
    data = np.zeros((19, 37), dtype=np.float32)
    cube = set_up_variable_cube(
        data, name="template", grid_spacing=10, domain_corner=(-90, -180)
    )
    cubelist = CubeList()
    for offset in range(0, 4):
        mask = cube.copy()
        # Scalar UTC_offset coordinate, then promoted to a dimension so
        # the cubes can later be merged/grouped along it.
        utc_offset_coord = iris.coords.AuxCoord([offset], long_name="UTC_offset")
        mask.add_aux_coord(utc_offset_coord)
        mask = iris.util.new_axis(mask, "UTC_offset")
        cubelist.append(mask)
    return cubelist
def test__set_time(uk_grid):
    """Test time is set correctly from either the cube or the user, and
    that an error is raised when neither source is available."""
    # Set by the cube time coordinate (the default validity time given by
    # set_up_test_cubes -- presumably 2017-11-10 04:00; confirm there).
    expected = datetime(2017, 11, 10, 4, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask()
    plugin._set_time(uk_grid)
    assert plugin.time == expected

    # Set by the user provided argument, which takes precedence.
    expected = datetime(2020, 7, 16, 15, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(time="20200716T1500Z")
    plugin._set_time(uk_grid)
    assert plugin.time == expected

    # Check an exception is raised if no time information is provided.
    uk_grid.remove_coord("time")
    plugin = GenerateTimezoneMask()
    msg = (
        "The input cube does not contain a 'time' coordinate. "
        "As such a time must be provided by the user."
    )
    with pytest.raises(ValueError, match=msg):
        plugin._set_time(uk_grid)
@pytest.mark.parametrize("grid_fixture", ["global_grid", "uk_grid"])
def test__get_coordinate_pairs(request, grid_fixture):
    """Test that a selection of the points returned by _get_coordinate_pairs
    have the expected values. Tests are for both a native lat-long grid and for
    an equal areas grid that must be transformed."""
    # Flattened-grid indices to sample: first, tenth and last points.
    sample_points = [0, 10, -1]
    # Expected (latitude, longitude) pairs at those indices; the uk_grid
    # values are the equal-area points reprojected to lat-long.
    expected_data = {
        "global_grid": [[-90.0, -180.0], [-90.0, -80.0], [90.0, 180.0]],
        "uk_grid": [[44.517, -17.117], [45.548, -4.913], [62.026, 14.410]],
    }

    grid = request.getfixturevalue(grid_fixture)
    result = GenerateTimezoneMask()._get_coordinate_pairs(grid)

    # One (lat, lon) row per grid point.
    assert isinstance(result, np.ndarray)
    assert result.shape == (np.product(grid.shape), 2)
    for i, ii in enumerate(sample_points):
        assert_array_almost_equal(
            result[ii, :], expected_data[grid_fixture][i], decimal=3
        )
def test__get_coordinate_pairs_exception(global_grid):
    """Test that an exception is raised if longitudes are found outside the
    range -180 to 180."""
    # Shift all longitudes out of the valid [-180, 180] range.
    lon_coord = global_grid.coord("longitude")
    lon_coord.points = lon_coord.points + 360
    with pytest.raises(ValueError, match=r"TimezoneFinder requires .*"):
        GenerateTimezoneMask()._get_coordinate_pairs(global_grid)
def test__calculate_tz_offsets():
    """
    Test that the expected offsets (in seconds from UTC) are returned for
    several timezones, with and without daylight savings.

    These tests also cover the functionality of _calculate_offset.
    """
    # New York, London, and Melbourne
    coordinate_pairs = np.array([[41, -74], [51.5, 0], [-37.9, 145]])

    # Test ignoring daylight savings, so the result should be consistent
    # regardless of the date.
    expected = [-5 * 3600, 0, 10 * 3600]
    # Northern hemisphere winter
    time = datetime(2020, 1, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)
    # Check return type information as well
    assert result.ndim == 1
    assert isinstance(result, np.ndarray)
    assert result.dtype == np.int32
    # Southern hemisphere winter
    time = datetime(2020, 7, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)

    # Test including daylight savings, so the result should change as the
    # date is changed.
    # Northern hemisphere winter (Melbourne on AEDT, UTC+11)
    expected = [-5 * 3600, 0, 11 * 3600]
    time = datetime(2020, 1, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(include_dst=True, time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)
    # Southern hemisphere winter (New York EDT, London BST)
    expected = [-4 * 3600, 1 * 3600, 10 * 3600]
    time = datetime(2020, 7, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(include_dst=True, time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)
@pytest.mark.parametrize("grid_fixture", ["global_grid", "uk_grid"])
@pytest.mark.parametrize("include_dst", [False, True])
def test__create_template_cube(request, grid_fixture, include_dst):
    """Test the construction of a template cube slice, checking the shape,
    data types, and attributes."""
    grid = request.getfixturevalue(grid_fixture)
    time = datetime(2020, 1, 1, 12, tzinfo=pytz.utc)

    # Copy the attribute dicts: the original code aliased the module-level
    # GLOBAL_ATTRIBUTES/UK_ATTRIBUTES, so adding the dst key below mutated
    # them in place and leaked "includes_daylight_savings" into every test
    # (and fixture) that ran afterwards.
    expected = {
        "global_grid": {"shape": (19, 37), "attributes": dict(GLOBAL_ATTRIBUTES)},
        "uk_grid": {"shape": (21, 22), "attributes": dict(UK_ATTRIBUTES)},
    }
    # Set expected includes_daylight_savings attribute
    expected[grid_fixture]["attributes"]["includes_daylight_savings"] = str(include_dst)

    plugin = GenerateTimezoneMask(include_dst=include_dst, time=time)
    result = plugin._create_template_cube(grid)

    assert result.name() == "timezone_mask"
    assert result.units == 1
    assert result.coord("time").points[0] == time.timestamp()
    assert result.coord("time").dtype == np.int64
    assert result.shape == expected[grid_fixture]["shape"]
    assert result.dtype == np.int32
    assert result.attributes == expected[grid_fixture]["attributes"]
@pytest.mark.parametrize("groups", ({0: [0, 1], 3: [2, 3]}, {0: [0, 2], 3: [3]}))
def test__group_timezones(timezone_mask, groups):
    """Test the grouping of different UTC offsets into larger groups using a
    user provided specification. The input cube list contains cubes corresponding
    to 4 UTC offsets. Two tests are run, grouping these first into equal sized
    groups, and then into unequally sized groups."""
    plugin = GenerateTimezoneMask(groupings=groups)
    result = plugin._group_timezones(timezone_mask)

    assert len(result) == len(groups)
    for (offset, group), cube in zip(groups.items(), result):
        # Each group cube takes the group key as its point and the member
        # offsets as its bounds.
        assert cube.coord("UTC_offset").points[0] == offset
        assert cube.coord("UTC_offset").bounds is not None
        if len(group) > 1:
            assert_array_equal(cube.coord("UTC_offset").bounds[0], group)
        else:
            # Single-member group: both bound ends collapse to the member.
            assert cube.coord("UTC_offset").bounds[0][0] == group[0]
            assert cube.coord("UTC_offset").bounds[0][-1] == group[0]
def test__group_timezones_empty_group(timezone_mask):
    """Test the grouping of different UTC offsets into larger groups in a case
    for which a specified group contains no data."""
    # The fixture only covers offsets 0..3, so the 6: [4, 10] group is
    # empty and should simply be dropped from the output.
    groups = {0: [0, 1], 3: [2, 3], 6: [4, 10]}
    plugin = GenerateTimezoneMask(groupings=groups)
    result = plugin._group_timezones(timezone_mask)

    assert len(result) == 2
    for (offset, group), cube in zip(list(groups.items())[:-1], result):
        assert cube.coord("UTC_offset").points[0] == offset
        assert_array_equal(cube.coord("UTC_offset").bounds[0], group)
@pytest.fixture(name="process_expected")
def process_expected_fixture() -> callable:
    """Returns expected results for parameterized process tests.

    The returned callable maps (time, grid) to a tuple of
    (per-grid expectations, expected time-coordinate point, and the
    grid-specific index at which sample data is checked).
    """

    def _make_expected(time, grid) -> dict:
        # Row/column at which test_process samples the output data.
        data_indices = {"global_grid": (12, 2), "uk_grid": (2, 10)}
        # Keyed first by the user-supplied time (None = use the cube's
        # time), then by grid fixture name.
        expected = {
            None: {
                "global_grid": {
                    "shape": (27, 19, 37),
                    "min": -12,
                    "max": 14,
                    "data": np.array([1, 1, 1, 1, 1, 0, 1, 1, 1, 1]),
                },
                "uk_grid": {
                    "shape": (4, 21, 22),
                    "min": -2,
                    "max": 1,
                    "data": np.array([1, 1, 0, 0, 0, 1]),
                },
                # 2017-11-10 04:00 UTC as seconds since epoch.
                "expected_time": 1510286400,
            },
            "20200716T1500Z": {
                "global_grid": {
                    "shape": (27, 19, 37),
                    "min": -12,
                    "max": 14,
                    "data": np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
                },
                "uk_grid": {
                    "shape": (5, 21, 22),
                    "min": -2,
                    "max": 2,
                    "data": np.array([1, 1, 1, 1, 0, 1]),
                },
                # 2020-07-16 15:00 UTC as seconds since epoch.
                "expected_time": 1594911600,
            },
        }
        return (
            expected[time][grid],
            expected[time]["expected_time"],
            data_indices[grid],
        )

    return _make_expected
@pytest.mark.parametrize("time", [None, "20200716T1500Z"])
@pytest.mark.parametrize("grid_fixture", ["global_grid", "uk_grid"])
def test_process(request, grid_fixture, time, process_expected):
    """Test that the process method returns cubes that take the expected form
    for different grids and different dates.

    The output data is primarily checked in the acceptance tests as a reasonably
    large number of data points are required to reliably check it. Here we check
    only a small sample."""
    expected, expected_time, index = process_expected(time, grid_fixture)
    target_grid = request.getfixturevalue(grid_fixture)
    result = GenerateTimezoneMask(time=time, include_dst=True)(target_grid)
    offsets = result.coord("UTC_offset").points
    assert result.coord("time").points[0] == expected_time
    assert result.shape == expected["shape"]
    assert offsets.min() == expected["min"]
    assert offsets.max() == expected["max"]
    # Compare a strided sample of the mask against the known-good values.
    assert_array_equal(result.data[index][::4], expected["data"])
| 36.788732 | 88 | 0.659724 |
b0eb29dae81edfb66ebc6d105ddf4faad7c5025f | 5,239 | py | Python | docs/conf.py | zoldaten/wizcoin | 80a8dc5f550d5ce6016c5baa8b17dbc68f07d2f2 | [
"MIT"
] | null | null | null | docs/conf.py | zoldaten/wizcoin | 80a8dc5f550d5ce6016c5baa8b17dbc68f07d2f2 | [
"MIT"
] | null | null | null | docs/conf.py | zoldaten/wizcoin | 80a8dc5f550d5ce6016c5baa8b17dbc68f07d2f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Sphinx configuration for the ``wizcoin`` package documentation.
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'wizcoin'
copyright = '2018, AL'
author = 'AL'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.coverage',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'wizcoindoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'wizcoin.tex', 'wizcoin Documentation',
     'AL', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'wizcoin', 'wizcoin Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'wizcoin', 'wizcoin Documentation',
     author, 'wizcoin', 'a test module',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------
73e43ad8c14379a243f30234f0a8ce236b048c1c | 327 | py | Python | amaze/player/player.py | StingrayMarineSolutions/amazeing-race | 0cb1653a3f3aeb22a81febfd4b32a2f50e5ae9ea | [
"MIT"
] | null | null | null | amaze/player/player.py | StingrayMarineSolutions/amazeing-race | 0cb1653a3f3aeb22a81febfd4b32a2f50e5ae9ea | [
"MIT"
] | null | null | null | amaze/player/player.py | StingrayMarineSolutions/amazeing-race | 0cb1653a3f3aeb22a81febfd4b32a2f50e5ae9ea | [
"MIT"
] | null | null | null | '''
Here you can implement your Amaze-ing Player.
You don't have to add all your implementation to this file; you can add additional files to this subpackage.
'''
class Player(object):
    """Base class for an Amaze-ing race player.

    Subclasses must override :meth:`forward` to decide the next move.
    """

    def __init__(self):
        # The base player keeps no state of its own.
        pass

    def forward(self, img, pos):
        """Compute the next action from the camera image and current position.

        Must be overridden by concrete players; the base implementation
        always raises ``NotImplementedError``.
        """
        raise NotImplementedError("You're supposed to implement this")
9c374a650c28add1cf3582c6ffa93a964371daa5 | 657 | py | Python | scripts/docker/fill.py | Dans-labs/shebanq | e7efffc84bcedcc8c8a21638991fb765b95713df | [
"MIT"
] | null | null | null | scripts/docker/fill.py | Dans-labs/shebanq | e7efffc84bcedcc8c8a21638991fb765b95713df | [
"MIT"
] | null | null | null | scripts/docker/fill.py | Dans-labs/shebanq | e7efffc84bcedcc8c8a21638991fb765b95713df | [
"MIT"
] | null | null | null | import sys
import re
# Placeholder pattern: «key» (guillemet-delimited parameter names).
varRe = re.compile(r"«([^»]*)»")


def varRepl(match):
    """Return the substitution text for one «key» placeholder match.

    Looks the captured key up in the module-level ``keyValues`` mapping.
    Unknown keys are reported on stdout and replaced with the empty string.
    """
    k = match.group(1)
    # EAFP: a single dict lookup instead of the previous `in` test + index.
    try:
        return keyValues[k]
    except KeyError:
        print(f"Undefined parameter: {k}")
        return ""
# Command line: fill.py <template-file> <output-file> <key=value data>
inFile = sys.argv[1]
outFile = sys.argv[2]
data = sys.argv[3]
# Parse the newline-separated "key=value" assignments into a mapping
# consulted by varRepl(); malformed lines are reported and skipped.
keyValues = {}
for line in data.strip().split("\n"):
    parts = line.strip().split("=", 1)
    if len(parts) < 2:
        print(f"Missing value for {parts[0]}")
        continue
    (k, v) = parts
    keyValues[k] = v
# Substitute every «key» placeholder in the template and write the result.
with open(inFile) as f:
    text = f.read()
text = varRe.sub(varRepl, text)
with open(outFile, "w") as f:
    f.write(text)
| 17.289474 | 46 | 0.561644 |
d0a902212b0b25af61981e56f166915ba7e0119a | 286 | py | Python | jd/api/rest/KplOpenBatchConvertCpslinkRequest.py | fengjinqi/linjuanbang | 8cdc4e81df73ccd737ac547da7f2c7dca545862a | [
"MIT"
] | 5 | 2019-10-30T01:16:30.000Z | 2020-06-14T03:32:19.000Z | jd/api/rest/KplOpenBatchConvertCpslinkRequest.py | fengjinqi/linjuanbang | 8cdc4e81df73ccd737ac547da7f2c7dca545862a | [
"MIT"
] | 2 | 2020-10-12T07:12:48.000Z | 2021-06-02T03:15:47.000Z | jd/api/rest/KplOpenBatchConvertCpslinkRequest.py | fengjinqi/linjuanbang | 8cdc4e81df73ccd737ac547da7f2c7dca545862a | [
"MIT"
] | 3 | 2019-12-06T17:33:49.000Z | 2021-03-01T13:24:22.000Z | from jd.api.base import RestApi
class KplOpenBatchConvertCpslinkRequest(RestApi):
    """Request object for the ``jd.kpl.open.batch.convert.cpslink`` JD API."""

    def __init__(self, domain='gw.api.360buy.com', port=80):
        """Set up the RestApi transport and declare the request field."""
        super(KplOpenBatchConvertCpslinkRequest, self).__init__(domain, port)
        # Batch of Kepler URLs to be converted into CPS links.
        self.KeplerUrlparam = None

    def getapiname(self):
        """Return the JD gateway method name for this request."""
        return 'jd.kpl.open.batch.convert.cpslink'
| 17.875 | 56 | 0.748252 |
1533ce0303f28488f58b46b6b1e5a765d0bda8d2 | 5,993 | py | Python | oscar/lib/python2.7/site-packages/pygments/formatters/svg.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/pygments/formatters/svg.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/pygments/formatters/svg.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    # Single-pass character translation; equivalent to the chained
    # str.replace() approach since no replacement is re-processed.
    return text.translate({
        ord('&'): '&amp;',
        ord('<'): '&lt;',
        ord('>'): '&gt;',
        ord('"'): '&quot;',
        ord("'"): '&#39;',
    })
class2style = {}


class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype. If true, the `fontfamily`
        and `fontsize` options are ignored. Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else. (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line. This should
        roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces. SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is. However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        # Derive the default yoffset/ystep from the font size in pixels.
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            # Previously a bare ``except:``; only a non-numeric font size
            # (e.g. "medium") should trigger the fallback default.
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        # Memoizes the tspan attribute string per token type.
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))
        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            # Modern conditional expressions instead of the old and/or idiom.
            tspan = '<tspan%s>' % style if style else ''
            tspanend = '</tspan>' if tspan else ''
            value = escape_html(value)
            if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            # Each newline ends the current <text> line and opens a new one
            # one ystep further down.
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n<text x="%s" y="%s" '
                              'xml:space="preserve">' % (x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')
        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        # Return the cached tspan attribute string for this token type,
        # walking up the token hierarchy until a styled ancestor is found.
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
| 38.915584 | 82 | 0.548807 |
b131329cae62795a0b29c60602537e86483ebc9a | 3,762 | py | Python | huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/list_instances_by_tags_response.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/list_instances_by_tags_response.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/list_instances_by_tags_response.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListInstancesByTagsResponse(SdkResponse):
    """Response model for the GaussDB(for NoSQL) "list instances by tags" API.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attributes listed here are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'instances': 'list[InstanceResult]',
        'total_count': 'int'
    }

    attribute_map = {
        'instances': 'instances',
        'total_count': 'total_count'
    }

    def __init__(self, instances=None, total_count=None):
        """ListInstancesByTagsResponse - a model defined in huaweicloud sdk"""
        super(ListInstancesByTagsResponse, self).__init__()

        self._instances = None
        self._total_count = None
        self.discriminator = None

        # Only assign through the property setters when a value was given,
        # so unset fields stay None.
        if instances is not None:
            self.instances = instances
        if total_count is not None:
            self.total_count = total_count

    @property
    def instances(self):
        """Gets the instances of this ListInstancesByTagsResponse.

        Instance list.

        :return: The instances of this ListInstancesByTagsResponse.
        :rtype: list[InstanceResult]
        """
        return self._instances

    @instances.setter
    def instances(self, instances):
        """Sets the instances of this ListInstancesByTagsResponse.

        Instance list.

        :param instances: The instances of this ListInstancesByTagsResponse.
        :type: list[InstanceResult]
        """
        self._instances = instances

    @property
    def total_count(self):
        """Gets the total_count of this ListInstancesByTagsResponse.

        Total record count.

        :return: The total_count of this ListInstancesByTagsResponse.
        :rtype: int
        """
        return self._total_count

    @total_count.setter
    def total_count(self, total_count):
        """Sets the total_count of this ListInstancesByTagsResponse.

        Total record count.

        :param total_count: The total_count of this ListInstancesByTagsResponse.
        :type: int
        """
        self._total_count = total_count

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListInstancesByTagsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.26087 | 80 | 0.577884 |
b6dcb873845d57a591322c08521d5e499b020d45 | 3,413 | py | Python | tests/alltests.py | VargaIonut23/restaurant | 3f991f30b03921481142187ef33f81d1dc4fe2ad | [
"MIT"
] | null | null | null | tests/alltests.py | VargaIonut23/restaurant | 3f991f30b03921481142187ef33f81d1dc4fe2ad | [
"MIT"
] | null | null | null | tests/alltests.py | VargaIonut23/restaurant | 3f991f30b03921481142187ef33f81d1dc4fe2ad | [
"MIT"
] | null | null | null | import os
from Domain.restaurantvalidator import restaurantvalidator
from Domain.reviewvalidator import reviewvalidator
from Repository.json_repository import JsonRepository
from Service.restaurantservice import restaurantservice
from Service.reviewservice import reviewservice
def alltests():
    """Run every regression test in this module, in order."""
    for test in (testcerinta1, testcerinta2, testcerinta3, testJson):
        test()
def clearFile(filename):
    """Truncate *filename* to zero length, creating it if necessary."""
    open(filename, "w").close()
def testcerinta1():
    """Adding two restaurants stores both, in insertion order."""
    clearFile('restauranttest.json')
    service = restaurantservice(
        JsonRepository('restauranttest.json'),
        restaurantvalidator(),
    )
    service.adauga('1', '1', '1', 'da')
    service.adauga('2', '2', '2', 'da')
    stored = service.repository.read()
    assert len(stored) == 2
    assert stored[0].id_entity == '1'
    assert stored[1].id_entity == '2'
def testcerinta2():
    """Adding reviews for existing restaurants stores both reviews."""
    clearFile('restauranttest.json')
    clearFile('reviewtest.json')
    restaurant_srv = restaurantservice(
        JsonRepository('restauranttest.json'),
        restaurantvalidator(),
    )
    review_srv = reviewservice(
        JsonRepository('reviewtest.json'),
        reviewvalidator(),
        restaurant_srv,
    )
    restaurant_srv.adauga('1', '1', '1', 'da')
    restaurant_srv.adauga('2', '2', '2', 'da')
    review_srv.adauga('1', '1', '1', '1', 10)
    review_srv.adauga('2', '2', '2', '2', 10)
    stored = review_srv.repository.read()
    assert len(stored) == 2
    assert stored[0].id_entity == '1'
    assert stored[1].id_entity == '2'
def testcerinta3():
    """cerinta3 returns the restaurants ordered by name (ascending)."""
    clearFile('restauranttest.json')
    service = restaurantservice(
        JsonRepository('restauranttest.json'),
        restaurantvalidator(),
    )
    # Names deliberately out of order: 'c' before 'a'.
    service.adauga('1', 'c', '1', 'da')
    service.adauga('2', 'a', '2', 'da')
    ordered = service.cerinta3()
    assert len(ordered) == 2
    assert ordered[0].id_entity == '2'
    assert ordered[1].id_entity == '1'
def testJson():
    """Json() exports the stored reviews to the named file."""
    clearFile('restauranttest.json')
    clearFile('reviewtest.json')
    restaurant_srv = restaurantservice(
        JsonRepository('restauranttest.json'),
        restaurantvalidator(),
    )
    review_srv = reviewservice(
        JsonRepository('reviewtest.json'),
        reviewvalidator(),
        restaurant_srv,
    )
    restaurant_srv.adauga('1', '1', '1', 'da')
    restaurant_srv.adauga('2', '2', '2', 'da')
    review_srv.adauga('1', '1', '1', '1', 10)
    review_srv.adauga('2', '2', '2', '2', 10)
    # NOTE: the previous version also read the repository into an unused
    # local variable; that dead code has been removed.
    review_srv.Json("test_export.json")
    assert os.path.isfile("test_export.json")
    clearFile("test_export.json")
790cade744279e033d1a42616d9659dc6e2a347f | 421 | py | Python | project/partners/migrations/0009_partner_is_published.py | TEDxNTUA/tedxntua2019 | 6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff | [
"MIT"
] | 7 | 2018-10-09T19:14:37.000Z | 2019-11-25T13:43:38.000Z | project/partners/migrations/0009_partner_is_published.py | TEDxNTUA/tedxntua2019 | 6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff | [
"MIT"
] | 16 | 2018-11-01T21:42:17.000Z | 2019-03-10T16:59:25.000Z | project/partners/migrations/0009_partner_is_published.py | TEDxNTUA/tedxntua2019 | 6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff | [
"MIT"
] | 5 | 2018-10-28T17:33:06.000Z | 2018-11-22T00:12:55.000Z | # Generated by Django 2.1.2 on 2019-03-19 22:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partners', '0008_merge_20190307_1527'),
]
operations = [
migrations.AddField(
model_name='partner',
name='is_published',
field=models.BooleanField(default=True, verbose_name='Published'),
),
]
| 22.157895 | 78 | 0.619952 |
5795b59b9591a38bf0910f2d077e237a8a35c9d1 | 789 | py | Python | jp.atcoder/abc075/abc075_b/11985583.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc075/abc075_b/11985583.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc075/abc075_b/11985583.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
import numpy as np
# Read the grid dimensions (h rows, w columns) from stdin.
h, w = map(int, sys.stdin.readline().split())
# Read the grid as single characters ('.' free cell, '#' occupied).
grid = np.array(
    [list(sys.stdin.readline().rstrip()) for _ in range(h)], dtype="U1"
)
# Pad with a one-cell sentinel border so neighbour indexing in main()
# never goes out of bounds; the pad value ("") never equals "#".
grid = np.pad(grid, 1, "constant")
def main():
    """Print the grid with each free cell replaced by its count of
    adjacent '#' cells (8-neighbourhood), Minesweeper-style.

    Reads the module-level ``grid``, ``h`` and ``w`` prepared from stdin.
    """
    # NOTE: the previous version defined an unused offset list ``d``;
    # it has been removed.
    cnt = np.zeros_like(grid, dtype=np.int8)
    for i in range(1, h + 1):
        for j in range(1, w + 1):
            if grid[i][j] == ".":
                continue
            # Increment all 9 cells of the 3x3 block centred on this '#'.
            # The spurious self-increment is harmless because '#' cells
            # are overwritten with '#' below.
            for dy in range(-1, 2):
                for dx in range(-1, 2):
                    cnt[i + dy][j + dx] += 1
    cnt = cnt.astype("U1")
    cnt[grid == "#"] = "#"
    # Strip the sentinel border before printing.
    cnt = cnt[1:-1, 1:-1]
    for i in range(h):
        print("".join(cnt[i]))


if __name__ == "__main__":
    main()
| 24.65625 | 72 | 0.443599 |
a491943c4fd19e80526aff5f142a7c3426e8eb11 | 23,691 | py | Python | tests/generic_inline_admin/tests.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2019-08-25T07:54:26.000Z | 2020-08-27T09:29:05.000Z | tests/generic_inline_admin/tests.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/generic_inline_admin/tests.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-06-19T06:09:41.000Z | 2020-06-19T06:09:41.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber
class TestDataMixin(object):
    # Creates the superuser that the test classes below log in with.

    @classmethod
    def setUpTestData(cls):
        # password = "secret"
        User.objects.create(
            pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
            is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(
    DEBUG=True,
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF="generic_inline_admin.urls",
)
class GenericAdminViewTest(TestDataMixin, TestCase):
    """Smoke tests for the admin add/change views of a model (Episode)
    that has generically-related inline Media objects."""

    def setUp(self):
        self.client.login(username='super', password='secret')

        # Can't load content via a fixture (since the GenericForeignKey
        # relies on content type IDs, which will vary depending on what
        # other tests have been run), thus we do it here.
        e = Episode.objects.create(name='This Week in Django')
        self.episode_pk = e.pk
        m = Media(content_object=e, url='http://example.com/podcast.mp3')
        m.save()
        self.mp3_media_pk = m.pk
        m = Media(content_object=e, url='http://example.com/logo.png')
        m.save()
        self.png_media_pk = m.pk

    def test_basic_add_GET(self):
        """
        A smoke test to ensure GET on the add_view works.
        """
        response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
        self.assertEqual(response.status_code, 200)

    def test_basic_edit_GET(self):
        """
        A smoke test to ensure GET on the change_view works.
        """
        response = self.client.get(
            reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
        )
        self.assertEqual(response.status_code, 200)

    def test_basic_add_POST(self):
        """
        A smoke test to ensure POST on add_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
        }
        response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere

    def test_basic_edit_POST(self):
        """
        A smoke test to ensure POST on edit_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
            "generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
            "generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
            "generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
            "generic_inline_admin-media-content_type-object_id-2-id": "",
            "generic_inline_admin-media-content_type-object_id-2-url": "",
        }
        url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere

    def test_generic_inline_formset(self):
        # 2 initial Media objects + extra=3 blank forms = 5 forms.
        EpisodeMediaFormSet = generic_inlineformset_factory(
            Media,
            can_delete=False,
            exclude=['description', 'keywords'],
            extra=3,
        )
        e = Episode.objects.get(name='This Week in Django')

        # Works with no queryset
        formset = EpisodeMediaFormSet(instance=e)
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">'
            'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" '
            'type="url" name="generic_inline_admin-media-content_type-object_id-0-url" '
            'value="http://example.com/podcast.mp3" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" '
            'value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>'
            % self.mp3_media_pk
        )
        self.assertHTMLEqual(
            formset.forms[1].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">'
            'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" '
            'type="url" name="generic_inline_admin-media-content_type-object_id-1-url" '
            'value="http://example.com/logo.png" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" '
            'value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>'
            % self.png_media_pk
        )
        self.assertHTMLEqual(
            formset.forms[2].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label>'
            '<input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" '
            'name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" '
            'id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>'
        )

        # A queryset can be used to alter display ordering
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label>'
            '<input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" '
            'name="generic_inline_admin-media-content_type-object_id-0-url"'
            'value="http://example.com/logo.png" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" '
            'value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>'
            % self.png_media_pk
        )
        self.assertHTMLEqual(
            formset.forms[1].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label>'
            '<input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" '
            'name="generic_inline_admin-media-content_type-object_id-1-url" '
            'value="http://example.com/podcast.mp3" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" '
            'value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>'
            % self.mp3_media_pk
        )
        self.assertHTMLEqual(
            formset.forms[2].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">'
            'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" '
            'type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" '
            'id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>'
        )

        # Works with a queryset that omits items
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
        self.assertEqual(len(formset.forms), 4)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label>'
            ' <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" '
            'name="generic_inline_admin-media-content_type-object_id-0-url" '
            'value="http://example.com/logo.png" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" '
            'value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>'
            % self.png_media_pk
        )
        self.assertHTMLEqual(
            formset.forms[1].as_p(),
            '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">'
            'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" '
            'type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" />'
            '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" '
            'id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>'
        )

    def test_generic_inline_formset_factory(self):
        # Regression test for #10522.
        inline_formset = generic_inlineformset_factory(Media,
            exclude=('url',))

        # Regression test for #12340.
        e = Episode.objects.get(name='This Week in Django')
        formset = inline_formset(instance=e)
        self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
    """
    Tests that extra/min_num/max_num (and their get_* hooks) control the
    number of forms a generic inline renders on the admin change view.
    """

    def setUp(self):
        self.client.login(username='super', password='secret')
        self.factory = RequestFactory()

    def _create_object(self, model):
        """
        Create a model with an attached Media object via GFK. We can't
        load content via a fixture (since the GenericForeignKey relies on
        content type IDs, which will vary depending on what other tests
        have been run), thus we do it here.
        """
        e = model.objects.create(name='This Week in Django')
        Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
        return e

    def _get_inline_formset(self, inline_class):
        """
        Render the Episode change view with ``inline_class`` as the only
        inline and return the resulting inline formset.

        Extracted because six tests repeated this modeladmin/request/response
        boilerplate verbatim.
        """
        modeladmin = admin.ModelAdmin(Episode, admin_site)
        modeladmin.inlines = [inline_class]
        e = self._create_object(Episode)
        request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(e.pk))
        return response.context_data['inline_admin_formsets'][0].formset

    def test_no_param(self):
        """
        With one initial form, extra (default) at 3, there should be 4 forms.
        """
        e = self._create_object(Episode)
        response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 4)
        self.assertEqual(formset.initial_form_count(), 1)

    def test_extra_param(self):
        """
        With extra=0, there should be one form.
        """
        class ExtraInline(GenericTabularInline):
            model = Media
            extra = 0

        formset = self._get_inline_formset(ExtraInline)
        self.assertEqual(formset.total_form_count(), 1)
        self.assertEqual(formset.initial_form_count(), 1)

    def test_max_num_param(self):
        """
        With extra=5 and max_num=2, there should be only 2 forms.
        """
        # Renamed from testMaxNumParam for PEP 8 consistency with the sibling
        # tests; unittest still discovers it via the default ``test`` prefix.
        class MaxNumInline(GenericTabularInline):
            model = Media
            extra = 5
            max_num = 2

        formset = self._get_inline_formset(MaxNumInline)
        self.assertEqual(formset.total_form_count(), 2)
        self.assertEqual(formset.initial_form_count(), 1)

    def test_min_num_param(self):
        """
        With extra=3 and min_num=2, there should be five forms.
        """
        class MinNumInline(GenericTabularInline):
            model = Media
            extra = 3
            min_num = 2

        formset = self._get_inline_formset(MinNumInline)
        self.assertEqual(formset.total_form_count(), 5)
        self.assertEqual(formset.initial_form_count(), 1)

    def test_get_extra(self):
        """get_extra() overrides the class-level ``extra`` attribute."""
        class GetExtraInline(GenericTabularInline):
            model = Media
            extra = 4

            def get_extra(self, request, obj):
                return 2

        formset = self._get_inline_formset(GetExtraInline)
        self.assertEqual(formset.extra, 2)

    def test_get_min_num(self):
        """get_min_num() overrides the class-level ``min_num`` attribute."""
        class GetMinNumInline(GenericTabularInline):
            model = Media
            min_num = 5

            def get_min_num(self, request, obj):
                return 2

        formset = self._get_inline_formset(GetMinNumInline)
        self.assertEqual(formset.min_num, 2)

    def test_get_max_num(self):
        """get_max_num() overrides the class-level ``extra`` attribute."""
        class GetMaxNumInline(GenericTabularInline):
            model = Media
            extra = 5

            def get_max_num(self, request, obj):
                return 2

        formset = self._get_inline_formset(GetMaxNumInline)
        self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
    """Smoke tests for the admin add/delete views of a model with a generic
    PhoneNumber inline (per the class name, the inline model presumably has a
    unique_together constraint — verify against the test models)."""

    def setUp(self):
        self.client.login(username='super', password='secret')

    def test_add(self):
        """Posting valid inline data to the add view saves and redirects."""
        category_id = Category.objects.create(name='male').pk
        # Generic inlines post under the
        # "<app>-<model>-content_type-object_id" formset prefix.
        post_data = {
            "name": "John Doe",
            # inline data
            "generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
            "generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
            "generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
        }
        response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere

    def test_delete(self):
        """The delete-confirmation page renders for an object with a generic
        inline attached via an explicit content_type/object_id pair."""
        from .models import Contact
        c = Contact.objects.create(name='foo')
        PhoneNumber.objects.create(
            object_id=c.id,
            content_type=ContentType.objects.get_for_model(Contact),
            phone_number="555-555-5555",
        )
        response = self.client.post(reverse('admin:generic_inline_admin_contact_delete', args=[c.pk]))
        self.assertContains(response, 'Are you sure you want to delete')
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(SimpleTestCase):
    """A permanent (non-deletable) inline must produce a formset whose
    ``can_delete`` flag is off."""

    def test_no_deletion(self):
        inline = MediaPermanentInline(EpisodePermanent, admin_site)
        dummy_request = object()
        formset_class = inline.get_formset(dummy_request)
        self.assertFalse(formset_class.can_delete)
class MockRequest(object):
    # Minimal stand-in for an HttpRequest: the code under test only needs an
    # object with a ``user`` attribute (set below).
    pass


class MockSuperUser(object):
    # Fake user that passes every permission check.
    def has_perm(self, perm):
        return True

# Shared request fixture used by the GenericInlineModelAdminTest methods.
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(SimpleTestCase):
    """Unit tests for GenericInlineModelAdmin.get_formset() and for how
    ModelAdmin.get_formsets_with_inlines() treats generic inlines."""

    def setUp(self):
        self.site = AdminSite()

    def test_get_formset_kwargs(self):
        media_inline = MediaInline(Media, AdminSite())

        # Create a formset with default arguments
        formset = media_inline.get_formset(request)
        self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
        self.assertEqual(formset.can_order, False)

        # Create a formset with custom keyword arguments
        formset = media_inline.get_formset(request, max_num=100, can_order=True)
        self.assertEqual(formset.max_num, 100)
        self.assertEqual(formset.can_order, True)

    def test_custom_form_meta_exclude_with_readonly(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected when
        used in conjunction with `GenericInlineModelAdmin.readonly_fields`
        and when no `ModelAdmin.exclude` is defined.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']

        class MediaInline(GenericTabularInline):
            readonly_fields = ['description']
            form = MediaForm
            model = Media

        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]

        ma = EpisodeAdmin(Episode, self.site)
        # 'url' is excluded by the form, 'description' is readonly; what
        # remains in the generated inline form is checked exactly.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['keywords', 'id', 'DELETE'])

    def test_custom_form_meta_exclude(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected by
        `GenericInlineModelAdmin.get_formset`, and overridden if
        `ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
        Refs #15907.
        """
        # First with `GenericInlineModelAdmin` -----------------
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']

        class MediaInline(GenericTabularInline):
            exclude = ['description']
            form = MediaForm
            model = Media

        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]

        ma = EpisodeAdmin(Episode, self.site)
        # The inline's own `exclude` wins: 'description' is gone, 'url' stays.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['url', 'keywords', 'id', 'DELETE'])

        # Then, only with `ModelForm` -----------------
        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media

        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                MediaInline
            ]

        ma = EpisodeAdmin(Episode, self.site)
        # With no admin-level exclude, the form's Meta.exclude applies.
        self.assertEqual(
            list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
            ['description', 'keywords', 'id', 'DELETE'])

    def test_get_fieldsets(self):
        # Test that get_fieldsets is called when figuring out form fields.
        # Refs #18681.
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                fields = '__all__'

        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media
            can_delete = False

            def get_fieldsets(self, request, obj=None):
                return [(None, {'fields': ['url', 'description']})]

        ma = MediaInline(Media, self.site)
        form = ma.get_formset(None).form
        self.assertEqual(form._meta.fields, ['url', 'description'])

    def test_get_formsets_with_inlines_returns_tuples(self):
        """
        Ensure that get_formsets_with_inlines() returns the correct tuples.
        """
        class MediaForm(ModelForm):
            class Meta:
                model = Media
                exclude = ['url']

        class MediaInline(GenericTabularInline):
            form = MediaForm
            model = Media

        class AlternateInline(GenericTabularInline):
            form = MediaForm
            model = Media

        class EpisodeAdmin(admin.ModelAdmin):
            inlines = [
                AlternateInline, MediaInline
            ]

        ma = EpisodeAdmin(Episode, self.site)
        inlines = ma.get_inline_instances(request)
        # Each yielded formset class must match the one its paired inline
        # would build itself.
        for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
            self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
| 42.381038 | 110 | 0.649572 |
6ffe607845beac873afed7e8fbba7e4dfa82a889 | 153 | py | Python | profiles/apps.py | biligunb/portal-web-app | cd10cda3d8bfc8f055a268e14f9665f142051c90 | [
"MIT"
] | null | null | null | profiles/apps.py | biligunb/portal-web-app | cd10cda3d8bfc8f055a268e14f9665f142051c90 | [
"MIT"
] | null | null | null | profiles/apps.py | biligunb/portal-web-app | cd10cda3d8bfc8f055a268e14f9665f142051c90 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ProfilesConfig(AppConfig):
    """Django application configuration for the ``profiles`` app."""

    name = "profiles"

    def ready(self):
        # Import the signal handlers so they register when the app loads;
        # the import is needed only for its side effects.
        import profiles.signals  # noqa
| 17 | 39 | 0.69281 |
f3d7f4c563a98794cb71cc3ab175a63605e61ee0 | 2,024 | py | Python | examples/image.py | ahnlak/st7789-python | 66af35f4fb31a31ce33793d0e7906e7bea7b2fb3 | [
"MIT"
] | 15 | 2021-04-21T05:23:54.000Z | 2022-01-04T06:14:36.000Z | examples/image.py | KeyChainGame/st7789-python | f9e3e4a32179ba05e1d89b06ecbbc7ce84e93df3 | [
"MIT"
] | null | null | null | examples/image.py | KeyChainGame/st7789-python | f9e3e4a32179ba05e1d89b06ecbbc7ce84e93df3 | [
"MIT"
] | 2 | 2021-04-20T23:54:07.000Z | 2021-12-31T02:36:20.000Z | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from PIL import Image
import ST7789 as ST7789
print("""
image.py - Display an image on the LCD.

If you're using Breakout Garden, plug the 1.3" LCD (SPI)
breakout into the front slot.
""")

# Require the image path as the single command-line argument.
if len(sys.argv) < 2:
    print("Usage: {} <image_file>".format(sys.argv[0]))
    sys.exit(1)

image_file = sys.argv[1]

# Create ST7789 LCD display class.
disp = ST7789.ST7789(
    port=0,
    cs=ST7789.BG_SPI_CS_FRONT,  # BG_SPI_CS_BACK or BG_SPI_CS_FRONT
    dc=9,
    backlight=19,  # 18 for back BG slot, 19 for front BG slot.
    spi_speed_hz=80 * 1000 * 1000
)

WIDTH = disp.width
HEIGHT = disp.height

# Initialize display.
disp.begin()

# Load an image.
print('Loading image: {}...'.format(image_file))
image = Image.open(image_file)

# Resize the image to the panel resolution (aspect ratio is not preserved).
image = image.resize((WIDTH, HEIGHT))

# Draw the image on the display hardware.
print('Drawing image')
disp.display(image)
| 31.138462 | 79 | 0.737154 |
e3f805f605262e439a3b473f1ca579ab21840662 | 913 | py | Python | docs_src/request_files/tutorial003.py | mbhavya/fastapi | 1876ebc77949a9a254909ec61ea0c09365169ec2 | [
"MIT"
] | 1 | 2022-01-08T16:39:28.000Z | 2022-01-08T16:39:28.000Z | docs_src/request_files/tutorial003.py | mbhavya/fastapi | 1876ebc77949a9a254909ec61ea0c09365169ec2 | [
"MIT"
] | 1 | 2022-01-07T21:04:04.000Z | 2022-01-07T21:04:04.000Z | docs_src/request_files/tutorial003.py | mbhavya/fastapi | 1876ebc77949a9a254909ec61ea0c09365169ec2 | [
"MIT"
] | null | null | null | from typing import List
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse
app = FastAPI()


@app.post("/files/")
async def create_files(
    files: List[bytes] = File(description="Multiple files as bytes"),
):
    """Accept multiple uploads as raw bytes and report each file's size."""
    sizes = [len(content) for content in files]
    return {"file_sizes": sizes}
@app.post("/uploadfiles/")
async def create_upload_files(
    files: List[UploadFile] = File(description="Multiple files as UploadFile"),
):
    """Accept multiple uploads as UploadFile objects and echo their names."""
    names = [upload.filename for upload in files]
    return {"filenames": names}
@app.get("/")
async def main():
    # Serve a small inline HTML page with one form per upload endpoint; kept
    # inline since this is a docs example with no template engine.
    content = """
<body>
<form action="/files/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
<form action="/uploadfiles/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
</body>
    """
    return HTMLResponse(content=content)
| 24.026316 | 79 | 0.696605 |
871c316839bf845f14ac8b3d61bd58306748d68b | 4,124 | py | Python | build.vs/build_tests/build_tests.py | gajgeospatial/pthreads-3.0.0 | 2ad01868c959502c71f26ebcc75c7b9b234265fb | [
"Apache-2.0"
] | 34 | 2015-10-01T14:28:25.000Z | 2022-01-21T03:46:07.000Z | build.vs/build_tests/build_tests.py | gajgeospatial/pthreads-3.0.0 | 2ad01868c959502c71f26ebcc75c7b9b234265fb | [
"Apache-2.0"
] | 1 | 2021-09-03T13:46:14.000Z | 2021-09-04T07:37:29.000Z | build.vs/build_tests/build_tests.py | gajgeospatial/pthreads-3.0.0 | 2ad01868c959502c71f26ebcc75c7b9b234265fb | [
"Apache-2.0"
] | 27 | 2016-05-10T09:37:28.000Z | 2022-01-21T03:46:10.000Z | '''
Set up Visual Studio to build a specified MPIR configuration
Copyright (C) 2011, 2012, 2013, 2014, 2015 Brian Gladman
'''
from operator import itemgetter
from os import listdir, walk, unlink, makedirs
from os.path import split, splitext, isdir, relpath, join, exists
from os.path import dirname, normpath
from copy import deepcopy
from sys import argv, exit
from filecmp import cmp
from shutil import copy
from re import compile, search
from collections import defaultdict
from uuid import uuid4
from time import sleep
from _msvccompiler import MSVCCompiler
# for script debugging
debug = False

# add user choice
flib_type = 'single'  # ('gc', 'reentrant', 'single')

# The path to flint, solution and project directories
# (paths are resolved relative to this script's location).
script_dir = dirname(__file__)
project_name = 'pthreads'
build_vc = 'build.vs19'
pthreads_dir = normpath(join(script_dir, '../../tests'))
solution_dir = normpath(join(pthreads_dir, 'build.vs'))

# Build-product categories and their matching MSBuild type names/extensions;
# the three tuples are indexed by the *_type constants.
app_type, lib_type, dll_type = 0, 1, 2
app_str = ('Application', 'StaticLibrary', 'DynamicLibrary')
app_ext = ('.exe', '.lib', '.dll')
def write_f(ipath, opath):
    """Copy the file at *ipath* to *opath*.

    The copy is skipped when the source is missing (or is a directory), and
    also when the destination already exists and is a directory or has
    identical contents — the latter avoids touching the file and triggering
    an unnecessary rebuild.
    """
    if not exists(ipath) or isdir(ipath):
        return
    if exists(opath) and (isdir(opath) or cmp(ipath, opath)):
        return
    copy(ipath, opath)
ignore_dirs = ( '.git', 'doc', 'examples', 'lib', 'exe', 'dll', 'win_hdrs')
req_extns = ( '.h', '.c', '.cc', '.cpp' )
def find_src(path):
    """Walk *path* and partition the source files it contains.

    Returns a 7-tuple (c, h, cx, hx, t, tx, p):
      c  - C sources as (parent-dir-name, relative-path) pairs
      h  - C header paths
      cx - C++ sources found under 'flintxx' directories
      hx - headers whose stem ends in 'xx' (C++ headers)
      t  - C tests as (component, relative-path) pairs
      tx - C++ tests as (component, relative-path) pairs
      p  - profile sources as (component, relative-path) pairs
    Every list is returned sorted.  Directories in ``ignore_dirs``, anything
    containing 'template', and whole 'build.vc' subtrees are skipped.
    """
    c, h, cx, hx, t, tx, p = [], [], [], [], [], [], []
    for root, dirs, files in walk(path):
        if 'template' in root:
            continue
        _, _t = split(root)
        if _t in ignore_dirs:
            continue
        if 'build.vc' in root:
            # Prune the whole subtree: emptying ``dirs`` in place stops
            # os.walk from descending further.
            for di in list(dirs):
                dirs.remove(di)
        for di in list(dirs):
            # Iterate over a copy so removal during iteration is safe.
            if di in ignore_dirs:
                dirs.remove(di)
            if 'template' in di:
                dirs.remove(di)
        relp = relpath(root, pthreads_dir)
        if relp == '.':
            relp = ''
        for f in files:
            if 'template' in f:
                continue
            n, x = splitext(f)
            if x not in req_extns:
                continue
            pth, leaf = split(root)
            fp = join(relp, f)
            if leaf == 'tune':
                continue
            if leaf == 'test':
                # Classify tests by their parent component directory.
                p2, l2 = split(pth)
                l2 = '' if l2 == 'flint2' else l2
                if 'flintxx' in pth:
                    tx += [(l2, fp)]
                else:
                    t += [(l2, fp)]
            elif leaf == 'profile':
                p2, l2 = split(pth)
                l2 = '' if l2 == 'flint2' else l2
                p += [(l2, fp)]
            elif leaf == 'flintxx':
                cx += [fp]
            elif x == '.c':
                c += [(leaf, fp)]
            elif x == '.h':
                if n.endswith('xx'):
                    hx += [fp]
                else:
                    h += [fp]
    for x in (c, h, cx, hx, t, tx, p):
        x.sort()
    return (c, h, cx, hx, t, tx, p)
# Collect all source lists for the test tree once, up front.
c, h, cx, hx, t, tx, p = find_src(pthreads_dir)

# Reference signatures of the _msvccompiler entry points used below:
# def compile(self, sources,
#    output_dir=None, macros=None, include_dirs=None, debug=0,
#    extra_preargs=None, extra_postargs=None, depends=None):
# def link(self, target_desc, objects, output_filename, output_dir=None,
#      libraries=None, library_dirs=None, runtime_library_dirs=None,
#      export_symbols=None, debug=0, extra_preargs=None,
#      extra_postargs=None, build_temp=None, target_lang=None):
# def create_static_lib(self, objects, output_libname, output_dir=None,
#            debug=0, target_lang=None):

# Intermediate output directory fragment (x64 Release configuration).
intd = '\\x64\\Release\\'

cc = MSVCCompiler()
error_list = []
# Compile and link every C test source against the static pthreads library,
# collecting failures so the whole run isn't aborted by one bad source.
for l2, fp in c:
    fdn, _ = splitext(fp)
    _, fn = split(fdn)
    source = [join('..\\..\\tests', fp)]
    inc_dirs = [
        '..\\..\\'
    ]
    defines = [
        '/D PTW32_STATIC_LIB',
        '/D HAVE_PTW32_CONFIG_H',
        '/FI ..\\..\\config.h'  # doubled backslashes: avoids invalid-escape warnings
    ]
    libs = [
        '..\\..\\lib' + intd + 'pthreads'
    ]
    tmp_dir = '.\\tests'
    outd = '.\\tests' + intd
    try:
        obj = cc.compile(source, output_dir=tmp_dir, defines=defines, include_dirs=inc_dirs)
        cc.link("executable", obj, fn + '.exe', output_dir=outd, libraries=libs)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still abort the run instead of being logged as build failures.
        error_list += [(l2, fp)]

print('Build Errors:')
for l2, fp in error_list:
    print(' ', l2, fp)
| 27.311258 | 88 | 0.601843 |
bf922df5028088a9e68317e47bd568d5ca59cb18 | 8,032 | py | Python | tensor2tensor/models/vanilla_gan.py | kpe/tensor2tensor | 453c473030c354a3d9a4c27b12bcec8942334bf4 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | tensor2tensor/models/vanilla_gan.py | kpe/tensor2tensor | 453c473030c354a3d9a4c27b12bcec8942334bf4 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | tensor2tensor/models/vanilla_gan.py | kpe/tensor2tensor | 453c473030c354a3d9a4c27b12bcec8942334bf4 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Generative Adversarial Model with two linear layers.
Example of how to create a GAN in T2T.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def lrelu(input_, leak=0.2, name="lrelu"):
  """Leaky ReLU: identity for positive inputs, slope ``leak`` otherwise."""
  leaked = leak * input_
  return tf.maximum(input_, leaked, name=name)
def deconv2d(
    input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"):
  """Deconvolution (transposed convolution) layer with bias.

  Args:
    input_: input tensor of shape [bs, h, w, in_channels].
    output_shape: target shape [bs, out_h, out_w, out_channels].
    k_h: kernel height.
    k_w: kernel width.
    d_h: vertical stride.
    d_w: horizontal stride.
    stddev: stddev of the random-normal kernel initializer.
    name: variable scope name.

  Returns:
    The biased transposed-convolution output, reshaped so the static shape
    inferred by conv2d_transpose is preserved.
  """
  with tf.variable_scope(name):
    # conv2d_transpose kernels are laid out [h, w, out_channels, in_channels].
    w = tf.get_variable(
        "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
        initializer=tf.random_normal_initializer(stddev=stddev))
    deconv = tf.nn.conv2d_transpose(
        input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
    biases = tf.get_variable(
        "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0))
    return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
def reverse_gradient(x):
  # Gradient-reversal trick: the forward value is x (-x + 2x), but because
  # the 2x term is inside stop_gradient, the gradient w.r.t. x is -1.
  return -x + tf.stop_gradient(2 * x)
class AbstractGAN(t2t_model.T2TModel):
  """Base class for all GANs."""

  def discriminator(self, x, is_training, reuse=False):
    """Discriminator architecture based on InfoGAN.

    Args:
      x: input images, shape [bs, h, w, channels]
      is_training: boolean, are we in train or eval model.
      reuse: boolean, should params be re-used.

    Returns:
      out_logit: the output logits (before sigmoid).
    """
    hparams = self.hparams
    with tf.variable_scope(
        "discriminator", reuse=reuse,
        initializer=tf.random_normal_initializer(stddev=0.02)):
      batch_size, height, width = common_layers.shape_list(x)[:3]
      # Mapping x from [bs, h, w, c] to [bs, 1]
      net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2),
                             padding="SAME", name="d_conv1")
      # [bs, h/2, w/2, 64]
      net = lrelu(net)
      net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2),
                             padding="SAME", name="d_conv2")
      # [bs, h/4, w/4, 128]
      if hparams.discriminator_batchnorm:
        net = tf.layers.batch_normalization(net, training=is_training,
                                            momentum=0.999, name="d_bn2")
      net = lrelu(net)
      size = height * width
      # Flatten: (h/4)*(w/4)*128 == h*w*8, so the two stride-2 convs make
      # this reshape exact for inputs whose h and w are divisible by 4.
      net = tf.reshape(net, [batch_size, size * 8])  # [bs, h * w * 8]
      net = tf.layers.dense(net, 1024, name="d_fc3")  # [bs, 1024]
      if hparams.discriminator_batchnorm:
        net = tf.layers.batch_normalization(net, training=is_training,
                                            momentum=0.999, name="d_bn3")
      net = lrelu(net)
      return net

  def generator(self, z, is_training, out_shape):
    """Generator outputting image in [0, 1]."""
    hparams = self.hparams
    height, width, c_dim = out_shape
    batch_size = hparams.batch_size
    with tf.variable_scope(
        "generator",
        initializer=tf.random_normal_initializer(stddev=0.02)):
      # Project the noise vector up, then reshape to a low-res feature map.
      net = tf.layers.dense(z, 1024, name="g_fc1")
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="g_bn1")
      net = lrelu(net)
      net = tf.layers.dense(net, 128 * (height // 4) * (width // 4),
                            name="g_fc2")
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="g_bn2")
      net = lrelu(net)
      net = tf.reshape(net, [batch_size, height // 4, width // 4, 128])
      # Two 2x transposed convolutions bring the map back to (height, width).
      net = deconv2d(net, [batch_size, height // 2, width // 2, 64],
                     4, 4, 2, 2, name="g_dc3")
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="g_bn3")
      net = lrelu(net)
      net = deconv2d(net, [batch_size, height, width, c_dim],
                     4, 4, 2, 2, name="g_dc4")
      # Sigmoid keeps pixel values in [0, 1] before conversion to RGB range.
      out = tf.nn.sigmoid(net)
      return common_layers.convert_real_to_rgb(out)

  def losses(self, inputs, generated):
    """Return the losses dictionary."""
    raise NotImplementedError

  def body(self, features):
    """Body of the model.

    Args:
      features: a dictionary with the tensors.

    Returns:
      A pair (predictions, losses) where predictions is the generated image
      and losses is a dictionary of losses (that get added for the final loss).
    """
    features["targets"] = features["inputs"]
    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

    # Input images.
    inputs = tf.to_float(features["targets_raw"])

    # Noise vector.
    z = tf.random_uniform([self.hparams.batch_size,
                           self.hparams.bottleneck_bits],
                          minval=-1, maxval=1, name="z")

    # Generator output: fake images.
    out_shape = common_layers.shape_list(inputs)[1:4]
    g = self.generator(z, is_training, out_shape)

    losses = self.losses(inputs, g)  # pylint: disable=not-callable

    # Log one generated sample per step for visual inspection.
    summary_g_image = tf.reshape(
        g[0, :], [1] + common_layers.shape_list(inputs)[1:])
    tf.summary.image("generated", summary_g_image, max_outputs=1)

    if is_training:  # Returns an dummy output and the losses dictionary.
      return tf.zeros_like(inputs), losses
    return tf.reshape(g, tf.shape(inputs)), losses

  def top(self, body_output, features):
    """Override the top function to not do anything."""
    return body_output
@registry.register_model
class SlicedGan(AbstractGAN):
  """Sliced GAN for demonstration."""

  def losses(self, inputs, generated):
    """Losses in the sliced case.

    The generated batch is passed through reverse_gradient so the single
    sliced-GAN loss trains the generator while its reversed gradient trains
    the discriminator.
    """
    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

    def discriminate(x):
      return self.discriminator(x, is_training=is_training, reuse=False)

    generator_loss = common_layers.sliced_gan_loss(
        inputs, reverse_gradient(generated), discriminate,
        self.hparams.num_sliced_vecs)
    # Training maximizes the sliced loss, hence the negation.
    return {"training": - generator_loss}

  def infer(self, *args, **kwargs):  # pylint: disable=arguments-differ
    """Sample images from random noise; ignores the usual infer arguments."""
    del args, kwargs
    try:
      num_channels = self.hparams.problem.num_channels
    except AttributeError:
      # Problems without a channel count default to single-channel output.
      num_channels = 1
    with tf.variable_scope("body/vanilla_gan", reuse=tf.AUTO_REUSE):
      hparams = self.hparams
      z = tf.random_uniform([hparams.batch_size, hparams.bottleneck_bits],
                            minval=-1, maxval=1, name="z")
      out_shape = (hparams.sample_height, hparams.sample_width, num_channels)
      # is_training=False: batch norm runs in inference mode while sampling.
      g_sample = self.generator(z, False, out_shape)
      return g_sample
@registry.register_hparams
def sliced_gan():
  """Basic parameters for a vanilla_gan."""
  hparams = common_hparams.basic_params1()
  hparams.optimizer = "adam"
  hparams.learning_rate_constant = 0.0002
  # Short linear warmup, then a constant learning rate.
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup"
  hparams.label_smoothing = 0.0
  hparams.batch_size = 128
  hparams.hidden_size = 128
  hparams.initializer = "uniform_unit_scaling"
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 1e-6
  hparams.kernel_height = 4
  hparams.kernel_width = 4
  # Size of the noise vector fed to the generator.
  hparams.bottleneck_bits = 128
  hparams.add_hparam("discriminator_batchnorm", True)
  # Number of random projections used by the sliced GAN loss.
  hparams.add_hparam("num_sliced_vecs", 4096)
  return hparams
| 36.844037 | 79 | 0.656375 |
5ac0db356de3bd75a7b60e6aa9900dd2df7f1fa9 | 8,237 | py | Python | gitea/mod/operator/ops/main.py | timClicks/gitea-charm | a2694c8e38d9ba55376e7d0781019f962555897b | [
"Apache-2.0"
] | null | null | null | gitea/mod/operator/ops/main.py | timClicks/gitea-charm | a2694c8e38d9ba55376e7d0781019f962555897b | [
"Apache-2.0"
] | null | null | null | gitea/mod/operator/ops/main.py | timClicks/gitea-charm | a2694c8e38d9ba55376e7d0781019f962555897b | [
"Apache-2.0"
] | 1 | 2020-04-14T03:06:36.000Z | 2020-04-14T03:06:36.000Z | #!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from pathlib import Path
import yaml
import ops.charm
import ops.framework
import ops.model
import logging
from ops.log import setup_root_logging
# File name for the per-unit charm state database (relative to the charm dir).
CHARM_STATE_FILE = '.unit-state.db'

# Root logger; handlers are attached later by setup_root_logging() in main().
logger = logging.getLogger()
def _get_charm_dir():
charm_dir = os.environ.get("JUJU_CHARM_DIR")
if charm_dir is None:
# Assume $JUJU_CHARM_DIR/lib/op/main.py structure.
charm_dir = Path('{}/../../..'.format(__file__)).resolve()
else:
charm_dir = Path(charm_dir).resolve()
return charm_dir
def _load_metadata(charm_dir):
    """Parse metadata.yaml (required) and actions.yaml (optional).

    Returns a (metadata, actions_metadata) pair; actions_metadata is an
    empty dict when the charm declares no actions.
    """
    metadata = yaml.safe_load((charm_dir / 'metadata.yaml').read_text())
    actions_file = charm_dir / 'actions.yaml'
    if actions_file.exists():
        return metadata, yaml.safe_load(actions_file.read_text())
    return metadata, {}
def _create_event_link(charm, bound_event):
    """Create a symlink for a particular event.

    charm -- A charm object.
    bound_event -- An event for which to create a symlink.

    Hook events get a hooks/<name> link, action events an actions/<name>
    link; both point back at the running charm entry point so Juju invokes
    the same code for every event.
    """
    if issubclass(bound_event.event_type, ops.charm.HookEvent):
        event_dir = charm.framework.charm_dir / 'hooks'
        # Event kinds use underscores; on-disk hook names use dashes.
        event_path = event_dir / bound_event.event_kind.replace('_', '-')
    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
        if not bound_event.event_kind.endswith("_action"):
            raise RuntimeError(
                'action event name {} needs _action suffix'.format(bound_event.event_kind))
        event_dir = charm.framework.charm_dir / 'actions'
        # The event_kind is suffixed with "_action" while the executable is not.
        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
    else:
        raise RuntimeError(
            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))

    event_dir.mkdir(exist_ok=True)
    if not event_path.exists():
        # CPython has different implementations for populating sys.argv[0] for Linux and Windows.
        # For Windows it is always an absolute path (any symlinks are resolved)
        # while for Linux it can be a relative path.
        target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir))

        # Ignore the non-symlink files or directories
        # assuming the charm author knows what they are doing.
        logger.debug(
            'Creating a new relative symlink at %s pointing to %s',
            event_path, target_path)
        event_path.symlink_to(target_path)
def _setup_event_links(charm_dir, charm):
    """Set up links for supported events that originate from Juju.

    Whether a charm can handle an event or not can be determined by
    introspecting which events are defined on it.

    Hooks or actions are created as symlinks to the charm code file
    which is determined by inspecting symlinks provided by the charm
    author at hooks/install or hooks/start.

    charm_dir -- A root directory of the charm (currently unused here;
        kept for signature compatibility with callers).
    charm -- An instance of the Charm class.
    """
    for bound_event in charm.on.events().values():
        # Only events that originate from Juju need symlinks.
        if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
            _create_event_link(charm, bound_event)
def _emit_charm_event(charm, event_name):
    """Emit the charm event named *event_name* on *charm*, if defined.

    Unknown events are only logged, not raised — this keeps older charm
    code working when Juju delivers an event it does not handle (rollbacks).
    """
    try:
        bound_event = getattr(charm.on, event_name)
    except AttributeError:
        bound_event = None
        logger.debug("event %s not defined for %s", event_name, charm)

    if bound_event is not None:
        args, kwargs = _get_event_args(charm, bound_event)
        logger.debug('Emitting Juju event %s', event_name)
        bound_event.emit(*args, **kwargs)
def _get_event_args(charm, bound_event):
    """Build the (args, kwargs) pair for emitting *bound_event*.

    Relation and remote app/unit context is recovered from the JUJU_*
    environment variables set by the Juju agent for this hook invocation.
    """
    model = charm.framework.model
    relation = None
    if issubclass(bound_event.event_type, ops.charm.RelationEvent):
        relation_name = os.environ['JUJU_RELATION']
        # JUJU_RELATION_ID has the form "<name>:<id>"; keep the numeric id.
        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
        relation = model.get_relation(relation_name, relation_id)

    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
    if not (remote_app_name or remote_unit_name):
        # No remote context: pass just the relation through, if any.
        return ([relation], {}) if relation else ([], {})
    if not remote_app_name:
        # Derive the application name from "<app>/<unit-number>".
        if '/' not in remote_unit_name:
            raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
        remote_app_name = remote_unit_name.split('/')[0]
    args = [relation, model.get_app(remote_app_name)]
    if remote_unit_name:
        args.append(model.get_unit(remote_unit_name))
    return args, {}
def main(charm_class):
    """Setup the charm and dispatch the observed event.

    The event name is based on the way this executable was called (argv[0]).

    charm_class -- The Charm subclass to instantiate and dispatch the event on.
    """
    charm_dir = _get_charm_dir()
    # Process the Juju event relevant to the current hook execution
    # JUJU_HOOK_NAME, JUJU_FUNCTION_NAME, and JUJU_ACTION_NAME are not used
    # in order to support simulation of events from debugging sessions.
    #
    # TODO: For Windows, when symlinks are used, this is not a valid
    # method of getting an event name (see LP: #1854505).
    juju_exec_path = Path(sys.argv[0])
    # Hook files use dashes (e.g. "config-changed"); Python event names use
    # underscores (e.g. "config_changed").
    juju_event_name = juju_exec_path.name.replace('-', '_')
    if juju_exec_path.parent.name == 'actions':
        # Actions live under an "actions/" directory; the "_action" suffix
        # distinguishes them from identically named hooks.
        juju_event_name = '{}_action'.format(juju_event_name)
    model_backend = ops.model.ModelBackend()
    debug = ('JUJU_DEBUG' in os.environ)
    setup_root_logging(model_backend, debug=debug)
    metadata, actions_metadata = _load_metadata(charm_dir)
    meta = ops.charm.CharmMeta(metadata, actions_metadata)
    unit_name = os.environ['JUJU_UNIT_NAME']
    model = ops.model.Model(unit_name, meta, model_backend)
    # TODO: If Juju unit agent crashes after exit(0) from the charm code
    # the framework will commit the snapshot but Juju will not commit its
    # operation.
    charm_state_path = charm_dir / CHARM_STATE_FILE
    framework = ops.framework.Framework(charm_state_path, charm_dir, meta, model)
    try:
        charm = charm_class(framework, None)
        # When a charm is force-upgraded and a unit is in an error state Juju
        # does not run upgrade-charm and instead runs the failed hook followed
        # by config-changed. Given the nature of force-upgrading the hook setup
        # code is not triggered on config-changed.
        #
        # 'start' event is included as Juju does not fire the install event for
        # K8s charms (see LP: #1854635).
        if (juju_event_name in ('install', 'start', 'upgrade_charm')
                or juju_event_name.endswith('_storage_attached')):
            _setup_event_links(charm_dir, charm)
        # TODO: Remove the collect_metrics check below as soon as the relevant
        # Juju changes are made.
        #
        # Skip reemission of deferred events for collect-metrics events because
        # they do not have the full access to all hook tools.
        if juju_event_name != 'collect_metrics':
            framework.reemit()
        _emit_charm_event(charm, juju_event_name)
        framework.commit()
    finally:
        # Always close the framework so state is persisted even if the charm
        # code raised.
        framework.close()
| 37.958525 | 97 | 0.6886 |
12f1c0c412c03a26287959612c6c530e99d57251 | 1,697 | py | Python | tests/unit/_helper_cheesecake.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 12 | 2015-01-15T01:13:42.000Z | 2022-03-04T21:14:27.000Z | tests/unit/_helper_cheesecake.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 6 | 2015-01-19T19:46:51.000Z | 2019-02-05T20:20:26.000Z | tests/unit/_helper_cheesecake.py | griggheo/cheesecake | 2b7aa5c980e8becb163cbcb801b202b17f715054 | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2015-10-24T20:08:09.000Z | 2019-02-04T20:53:21.000Z |
import os
import shutil
from mock import Mock
# Python 2.3 compatibility: fall back to the sets module when the built-in
# set type is unavailable.
if 'set' not in dir(__builtins__):
    from sets import Set as set
# Locations of the bundled test fixtures used by the mocked downloads below.
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/'))
SAMPLE_PACKAGE_PATH = os.path.join(DATA_PATH, "nose-0.8.3.tar.gz")
SAMPLE_PACKAGE_URL = "http://www.agilistas.org/cheesecake/nose-0.8.3.tar.gz"
# URLs that mocked_urlretrieve treats as successfully downloadable.
VALID_URLS = [ SAMPLE_PACKAGE_URL, 'file://%s' % SAMPLE_PACKAGE_PATH ]
class Glutton(object):
    """Null-object that silently absorbs attribute access, writes and calls.

    Attribute lookups yield a fresh Glutton, attribute assignments are
    discarded, and calling an instance does nothing (returns None).
    """

    def __getattr__(self, attribute):
        # Any unknown attribute produces another all-consuming instance,
        # so arbitrarily deep chained access keeps working.
        return Glutton()

    def __setattr__(self, attribute, new_value):
        # Swallow the write; no state is ever stored on the instance.
        pass

    def __call__(self, *args, **kwargs):
        # Accept and ignore any invocation.
        pass
def create_empty_file(file_path):
    """Create (or truncate to) an empty file at *file_path*.

    file_path -- Path of the file to create; existing content is erased.
    """
    # open() replaces the Python-2-only file() builtin, and the context
    # manager guarantees the handle is closed even if creation fails.
    with open(file_path, "w"):
        pass
def create_empty_files_in_directory(files, directory):
    """Create an empty file inside *directory* for every name in *files*."""
    join = os.path.join
    for name in files:
        create_empty_file(join(directory, name))
def dump_str_to_file(string, filename):
    """Write *string* to *filename*, replacing any previous content.

    string -- Text to write.
    filename -- Destination path.
    """
    # open() replaces the Python-2-only file() builtin; the with-statement
    # closes the handle even if the write raises.
    with open(filename, 'w') as fd:
        fd.write(string)
def mocked_urlretrieve(url, filename):
    """Test double for urllib's urlretrieve.

    Known-good URLs (VALID_URLS) are served from the local fixture tarball,
    'connection_refused' simulates a socket failure, and anything else yields
    a canned HTML 404 page. Returns (filename, headers) like the real call,
    with headers mocked to report the matching content type.
    """
    if url in VALID_URLS:
        # Pretend the download succeeded by copying the bundled tarball.
        shutil.copy(os.path.join(DATA_PATH, "nose-0.8.3.tar.gz"), filename)
        headers = Mock({'gettype': 'application/x-gzip'})
    elif url == 'connection_refused':
        # Reproduce the IOError urllib raises when the server is unreachable.
        raise IOError("[Errno socket error] (111, 'Connection refused')")
    else:
        # Unknown URL: write a minimal 404 response body to the target file.
        response_content = '''<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>404 Not Found</title>
</head><body>
<h1>Not Found</h1>
<p>The requested URL was not found on this server.</p>
</body></html>
'''
        dump_str_to_file(response_content, filename)
        headers = Mock({'gettype': 'text/html'})
    return filename, headers
| 27.819672 | 80 | 0.647024 |
88757dd1676d88f173bd36a8688ba2fd014e1315 | 8,103 | py | Python | frappe/tests/test_permissions.py | ashokrajbathu/secondrep | 6e6a469a0956db01b5640c8bb16c5752556a219e | [
"MIT"
] | null | null | null | frappe/tests/test_permissions.py | ashokrajbathu/secondrep | 6e6a469a0956db01b5640c8bb16c5752556a219e | [
"MIT"
] | null | null | null | frappe/tests/test_permissions.py | ashokrajbathu/secondrep | 6e6a469a0956db01b5640c8bb16c5752556a219e | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import json
import frappe.model.meta
from frappe.core.page.user_permissions.user_permissions import add, remove, get_permissions
from frappe.permissions import clear_user_permissions_for_doctype, get_doc_permissions
# Fixture records and doctypes that the frappe test runner loads before
# executing this module's tests.
test_records = frappe.get_test_records('Blog Post')
test_dependencies = ["User"]
class TestPermissions(unittest.TestCase):
	"""Exercise user-permission and role-permission logic via Blog Post fixtures."""

	def setUp(self):
		frappe.clear_cache(doctype="Blog Post")
		# test1 administers the website; test2 is a plain blogger.
		frappe.get_doc("User", "test1@example.com").add_roles("Website Manager")
		frappe.get_doc("User", "test2@example.com").add_roles("Blogger")
		frappe.set_user("test1@example.com")

	def tearDown(self):
		frappe.set_user("Administrator")
		frappe.db.set_value("Blogger", "_Test Blogger 1", "user", None)
		# Remove any user permissions the individual tests created.
		for restricted_doctype in ("Blog Category", "Blog Post", "Blogger"):
			clear_user_permissions_for_doctype(restricted_doctype)
		# Undo the DocPerm tweaks some tests make directly via SQL.
		frappe.db.sql("""update `tabDocPerm` set user_permission_doctypes=null
			where parent='Blog Post' and permlevel=0 and apply_user_permissions=1
			and `read`=1""")
		frappe.db.sql("""update `tabDocPerm` set if_owner=0
			where parent='Blog Post' and permlevel=0 and permlevel=0 and role='Blogger'""")

	def _permit_for_test2(self, doctype, name):
		# Restrict test2@example.com to the given record of the doctype.
		frappe.permissions.add_user_permission(doctype, name, "test2@example.com")

	def test_basic_permission(self):
		post = frappe.get_doc("Blog Post", "_test-blog-post")
		self.assertTrue(post.has_permission("read"))

	def test_user_permissions_in_doc(self):
		self._permit_for_test2("Blog Category", "_Test Blog Category 1")
		frappe.set_user("test2@example.com")
		# Only posts in the permitted category are readable.
		forbidden = frappe.get_doc("Blog Post", "_test-blog-post")
		self.assertFalse(forbidden.has_permission("read"))
		self.assertFalse(get_doc_permissions(forbidden).get("read"))
		allowed = frappe.get_doc("Blog Post", "_test-blog-post-1")
		self.assertTrue(allowed.has_permission("read"))
		self.assertTrue(get_doc_permissions(allowed).get("read"))

	def test_user_permissions_in_report(self):
		self._permit_for_test2("Blog Category", "_Test Blog Category 1")
		frappe.set_user("test2@example.com")
		visible = [d.name for d in frappe.get_list(
			"Blog Post", fields=["name", "blog_category"])]
		self.assertTrue("_test-blog-post-1" in visible)
		self.assertFalse("_test-blog-post" in visible)

	def test_default_values(self):
		self._permit_for_test2("Blog Category", "_Test Blog Category 1")
		frappe.set_user("test2@example.com")
		# The single permitted category becomes the default for new documents.
		new_post = frappe.new_doc("Blog Post")
		self.assertEquals(new_post.get("blog_category"), "_Test Blog Category 1")

	def test_user_link_match_doc(self):
		blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
		blogger.user = "test2@example.com"
		blogger.save()
		frappe.set_user("test2@example.com")
		# Posts by the linked blogger are readable; others are not.
		self.assertTrue(
			frappe.get_doc("Blog Post", "_test-blog-post-2").has_permission("read"))
		self.assertFalse(
			frappe.get_doc("Blog Post", "_test-blog-post-1").has_permission("read"))

	def test_user_link_match_report(self):
		blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
		blogger.user = "test2@example.com"
		blogger.save()
		frappe.set_user("test2@example.com")
		visible = [d.name for d in frappe.get_list(
			"Blog Post", fields=["name", "owner"])]
		self.assertTrue("_test-blog-post-2" in visible)
		self.assertFalse("_test-blog-post-1" in visible)

	def test_set_user_permissions(self):
		frappe.set_user("test1@example.com")
		add("test2@example.com", "Blog Post", "_test-blog-post")

	def test_not_allowed_to_set_user_permissions(self):
		frappe.set_user("test2@example.com")
		# A Blogger has no rights to create user permissions.
		self.assertRaises(frappe.PermissionError, add,
			"test2@example.com", "Blog Post", "_test-blog-post")

	def test_read_if_explicit_user_permissions_are_set(self):
		self.test_set_user_permissions()
		frappe.set_user("test2@example.com")
		# Only the explicitly permitted blog post is accessible.
		self.assertTrue(
			frappe.get_doc("Blog Post", "_test-blog-post").has_permission("read"))
		self.assertFalse(
			frappe.get_doc("Blog Post", "_test-blog-post-1").has_permission("read"))

	def test_not_allowed_to_remove_user_permissions(self):
		self.test_set_user_permissions()
		defname = get_permissions("test2@example.com", "Blog Post", "_test-blog-post")[0].name
		frappe.set_user("test2@example.com")
		# Users may not delete their own user-permission records.
		self.assertRaises(frappe.PermissionError, remove,
			"test2@example.com", defname, "Blog Post", "_test-blog-post")

	def test_user_permissions_based_on_blogger(self):
		frappe.set_user("test2@example.com")
		self.assertTrue(
			frappe.get_doc("Blog Post", "_test-blog-post-1").has_permission("read"))
		# Once a user permission exists, only the permitted post stays readable.
		frappe.set_user("test1@example.com")
		add("test2@example.com", "Blog Post", "_test-blog-post")
		frappe.set_user("test2@example.com")
		self.assertFalse(
			frappe.get_doc("Blog Post", "_test-blog-post-1").has_permission("read"))
		self.assertTrue(
			frappe.get_doc("Blog Post", "_test-blog-post").has_permission("read"))

	def test_set_only_once(self):
		meta = frappe.get_meta("Blog Post")
		meta.get_field("title").set_only_once = 1
		doc = frappe.get_doc("Blog Post", "_test-blog-post-1")
		doc.title = "New"
		# Changing a set-only-once field after creation must fail.
		self.assertRaises(frappe.CannotChangeConstantError, doc.save)
		meta.get_field("title").set_only_once = 0

	def test_user_permission_doctypes(self):
		self._permit_for_test2("Blog Category", "_Test Blog Category 1")
		self._permit_for_test2("Blogger", "_Test Blogger 1")
		frappe.set_user("test2@example.com")
		# Apply user permissions for the Blogger link field only.
		frappe.db.sql("""update `tabDocPerm` set user_permission_doctypes=%s
			where parent='Blog Post' and permlevel=0 and apply_user_permissions=1
			and `read`=1""", json.dumps(["Blogger"]))
		frappe.model.meta.clear_cache("Blog Post")
		self.assertFalse(
			frappe.get_doc("Blog Post", "_test-blog-post").has_permission("read"))
		self.assertTrue(
			frappe.get_doc("Blog Post", "_test-blog-post-2").has_permission("read"))
		frappe.model.meta.clear_cache("Blog Post")

	def if_owner_setup(self):
		frappe.db.sql("""update `tabDocPerm` set if_owner=1
			where parent='Blog Post' and permlevel=0 and permlevel=0 and role='Blogger'""")
		self._permit_for_test2("Blog Category", "_Test Blog Category 1")
		self._permit_for_test2("Blogger", "_Test Blogger 1")
		frappe.db.sql("""update `tabDocPerm` set user_permission_doctypes=%s
			where parent='Blog Post' and permlevel=0 and apply_user_permissions=1
			and `read`=1""", json.dumps(["Blog Category"]))
		frappe.model.meta.clear_cache("Blog Post")

	def test_insert_if_owner_with_user_permissions(self):
		"""With 'If Owner' checked, the owner may read/update/submit their own
		document even when user permissions restrict it, but may not create
		documents outside those permissions."""
		self.if_owner_setup()
		frappe.set_user("test2@example.com")
		doc = frappe.get_doc({
			"doctype": "Blog Post",
			"blog_category": "_Test Blog Category",
			"blogger": "_Test Blogger 1",
			"title": "_Test Blog Post Title",
			"content": "_Test Blog Post Content"
		})
		# Creation is blocked until the category is permitted.
		self.assertRaises(frappe.PermissionError, doc.insert)
		frappe.set_user("Administrator")
		frappe.permissions.add_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")
		frappe.set_user("test2@example.com")
		doc.insert()
		frappe.set_user("Administrator")
		frappe.permissions.remove_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")
		frappe.set_user("test2@example.com")
		doc = frappe.get_doc(doc.doctype, doc.name)
		# The owner keeps read/write but not create once the permission is gone.
		self.assertTrue(doc.has_permission("read"))
		self.assertTrue(doc.has_permission("write"))
		self.assertFalse(doc.has_permission("create"))
| 35.230435 | 196 | 0.742935 |
80c44dd77afc333844e3e87dc2f6b42cb4a81e06 | 538 | py | Python | Task2C.py | Tim-Bonner-1825/Flood-Warning-System | d6d2c232216de521a8ba196e175ee59e820dd1c2 | [
"MIT"
] | null | null | null | Task2C.py | Tim-Bonner-1825/Flood-Warning-System | d6d2c232216de521a8ba196e175ee59e820dd1c2 | [
"MIT"
] | null | null | null | Task2C.py | Tim-Bonner-1825/Flood-Warning-System | d6d2c232216de521a8ba196e175ee59e820dd1c2 | [
"MIT"
] | null | null | null | from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
def run():
# Build list of stations
stations = build_station_list()
# Update latest level data for all stations
update_water_levels(stations)
highestLevels = stations_highest_rel_level(stations, 10)
for st in highestLevels:
print(st.name, st.relative_water_level())
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run()
| 26.9 | 75 | 0.736059 |
ded37daffe4fdf43031973fa70ebb77ea1ef7c8d | 41,199 | py | Python | tensorflow_data_validation/statistics/stats_impl.py | dhruvesh09/data-validation | c1d38bef69a0cabf1bab09357a9182bf5b446658 | [
"Apache-2.0"
] | 2 | 2019-10-02T18:04:01.000Z | 2020-03-23T12:54:28.000Z | tensorflow_data_validation/statistics/stats_impl.py | dhruvesh09/data-validation | c1d38bef69a0cabf1bab09357a9182bf5b446658 | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/stats_impl.py | dhruvesh09/data-validation | c1d38bef69a0cabf1bab09357a9182bf5b446658 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of statistics generators."""
import itertools
import math
import random
from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Text, Tuple
import apache_beam as beam
import numpy as np
import pyarrow as pa
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.arrow import arrow_util
from tensorflow_data_validation.statistics import stats_options
from tensorflow_data_validation.statistics.generators import basic_stats_generator
from tensorflow_data_validation.statistics.generators import image_stats_generator
from tensorflow_data_validation.statistics.generators import lift_stats_generator
from tensorflow_data_validation.statistics.generators import natural_language_domain_inferring_stats_generator
from tensorflow_data_validation.statistics.generators import natural_language_stats_generator
from tensorflow_data_validation.statistics.generators import sparse_feature_stats_generator
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.statistics.generators import time_stats_generator
from tensorflow_data_validation.statistics.generators import top_k_uniques_combiner_stats_generator
from tensorflow_data_validation.statistics.generators import top_k_uniques_sketch_stats_generator
from tensorflow_data_validation.statistics.generators import top_k_uniques_stats_generator
from tensorflow_data_validation.statistics.generators import weighted_feature_stats_generator
from tensorflow_data_validation.utils import slicing_util
from tensorflow_data_validation.utils import stats_util
from tfx_bsl.arrow import table_util
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# Default sketch sizes passed to TopKUniquesSketchStatsGenerator by
# _get_default_generators: Misra-Gries buckets (num_misragries_buckets) and
# KMV buckets (num_kmv_buckets).
_DEFAULT_MG_SKETCH_SIZE = 1024
_DEFAULT_KMV_SKETCH_SIZE = 16384
@beam.typehints.with_input_types(pa.RecordBatch)
@beam.typehints.with_output_types(statistics_pb2.DatasetFeatureStatisticsList)
class GenerateStatisticsImpl(beam.PTransform):
  """PTransform that applies a set of generators over input examples."""

  def __init__(
      self,
      options: stats_options.StatsOptions = stats_options.StatsOptions()
  ) -> None:
    self._options = options

  def expand(self, dataset: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
    """Keys (and optionally slices) the input, then computes sliced stats."""
    # Restrict the input to the allowed features, when configured.
    if self._options.feature_allowlist:
      dataset |= ('FilterFeaturesByAllowList' >> beam.Map(
          _filter_features, feature_allowlist=self._options.feature_allowlist))

    user_slicers = self._options.experimental_slice_functions
    if user_slicers:
      # Slice the data, always including the default whole-dataset slice.
      all_slicers = [slicing_util.default_slicer] + list(user_slicers)
      dataset = (
          dataset
          | 'GenerateSliceKeys' >> beam.FlatMap(
              slicing_util.generate_slices, slice_functions=all_slicers))
    else:
      # TODO(pachristopher): Remove this special case if this doesn't give any
      # performance improvement.
      dataset = (dataset
                 | 'KeyWithVoid' >> beam.Map(lambda v: (None, v)))
    return dataset | GenerateSlicedStatisticsImpl(self._options)
# This transform will be used by the example validation API to compute
# statistics over anomalous examples. Specifically, it is used to compute
# statistics over examples found for each anomaly (i.e., the anomaly type
# will be the slice key).
@beam.typehints.with_input_types(types.BeamSlicedRecordBatch)
@beam.typehints.with_output_types(statistics_pb2.DatasetFeatureStatisticsList)
class GenerateSlicedStatisticsImpl(beam.PTransform):
  """PTransform that applies a set of generators to sliced input examples."""

  def __init__(
      self,
      options: stats_options.StatsOptions = stats_options.StatsOptions(),
      is_slicing_enabled: bool = False,
  ) -> None:
    """Initializes GenerateSlicedStatisticsImpl.

    Args:
      options: `tfdv.StatsOptions` for generating data statistics.
      is_slicing_enabled: Whether to include slice keys in the resulting proto,
        even if slice functions are not provided in `options`. If slice
        functions are provided in `options`, slice keys are included regardless
        of this value.
    """
    self._options = options
    self._is_slicing_enabled = (
        is_slicing_enabled or bool(self._options.experimental_slice_functions))

  def expand(self, dataset: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
    """Builds the Beam graph that computes and merges per-slice statistics."""
    # Handles generators by their type:
    # - CombinerStatsGenerators will be wrapped in a single CombinePerKey by
    #   _CombinerStatsGeneratorsCombineFn.
    # - TransformStatsGenerator will be invoked separately with `dataset`.
    combiner_stats_generators = []
    result_protos = []
    for generator in get_generators(self._options):
      if isinstance(generator, stats_generator.CombinerStatsGenerator):
        combiner_stats_generators.append(generator)
      elif isinstance(generator, stats_generator.TransformStatsGenerator):
        # Each transform generator contributes its own PCollection of
        # (slice key, DatasetFeatureStatistics) pairs, labeled by its name.
        result_protos.append(
            dataset
            | generator.name >> generator.ptransform)
      else:
        raise TypeError('Statistics generator must extend one of '
                        'CombinerStatsGenerator or TransformStatsGenerator, '
                        'found object of type %s' %
                        generator.__class__.__name__)
    if combiner_stats_generators:
      # TODO(b/162543416): Obviate the need for explicit fanout.
      # Fanout mitigates hot keys (e.g. the default slice covering the whole
      # dataset); it grows with the number of combiner generators.
      fanout = max(
          32, 5 * int(math.ceil(math.sqrt(len(combiner_stats_generators)))))
      result_protos.append(dataset
                           | 'RunCombinerStatsGenerators'
                           >> beam.CombinePerKey(
                               _CombinerStatsGeneratorsCombineFn(
                                   combiner_stats_generators,
                                   self._options.desired_batch_size
                               )).with_hot_key_fanout(fanout))
    # result_protos is a list of PCollections of (slice key,
    # DatasetFeatureStatistics proto) pairs. We now flatten the list into a
    # single PCollection, combine the DatasetFeatureStatistics protos by key,
    # and then merge the DatasetFeatureStatistics protos in the PCollection into
    # a single DatasetFeatureStatisticsList proto.
    return (result_protos
            | 'FlattenFeatureStatistics' >> beam.Flatten()
            | 'MergeDatasetFeatureStatisticsProtos' >>
            beam.CombinePerKey(_merge_dataset_feature_stats_protos)
            | 'AddSliceKeyToStatsProto' >> beam.Map(
                _add_slice_key,
                self._is_slicing_enabled)
            | 'ToList' >> beam.combiners.ToList()
            | 'MakeDatasetFeatureStatisticsListProto' >>
            beam.Map(_make_dataset_feature_statistics_list_proto))
def get_generators(options: stats_options.StatsOptions,
                   in_memory: bool = False
                  ) -> List[stats_generator.StatsGenerator]:
  """Initializes the list of stats generators, including custom generators.

  Args:
    options: A StatsOptions object.
    in_memory: Whether the generators will be used to generate statistics in
      memory (True) or using Beam (False).

  Returns:
    A list of stats generator objects.
  """
  generators = [NumExamplesStatsGenerator(options.weight_feature)]
  if options.add_default_generators:
    generators.extend(_get_default_generators(options, in_memory))
  if options.generators:
    # Custom generators supplied by the caller.
    generators.extend(options.generators)
  if options.enable_semantic_domain_stats:
    semantic_domain_feature_stats_generators = [
        image_stats_generator.ImageStatsGenerator(),
        natural_language_domain_inferring_stats_generator
        .NLDomainInferringStatsGenerator(),
        time_stats_generator.TimeStatsGenerator(),
    ]
    # Wrap the semantic-domain generators in their own wrapper so sampling
    # applies to them alone and other feature stats generators are unaffected.
    generators.append(
        CombinerFeatureStatsWrapperGenerator(
            semantic_domain_feature_stats_generators,
            sample_rate=options.semantic_domain_stats_sample_rate))
  schema = options.schema
  if schema is not None:
    if _schema_has_sparse_features(schema):
      generators.append(
          sparse_feature_stats_generator.SparseFeatureStatsGenerator(schema))
    if _schema_has_natural_language_domains(schema):
      generators.append(
          natural_language_stats_generator.NLStatsGenerator(
              schema, options.vocab_paths,
              options.num_histogram_buckets,
              options.num_quantiles_histogram_buckets,
              options.num_rank_histogram_buckets))
    if schema.weighted_feature:
      generators.append(
          weighted_feature_stats_generator.WeightedFeatureStatsGenerator(
              schema))
  if options.label_feature and not in_memory:
    # The LiftStatsGenerator is not a CombinerStatsGenerator and therefore
    # cannot currently be used for in_memory executions.
    generators.append(
        lift_stats_generator.LiftStatsGenerator(
            y_path=types.FeaturePath([options.label_feature]),
            schema=schema,
            example_weight_map=options.example_weight_map,
            output_custom_stats=True))
  # Collapse every CombinerFeatureStatsGenerator into a single
  # CombinerFeatureStatsWrapperGenerator.
  feature_generators = [
      g for g in generators
      if isinstance(g, stats_generator.CombinerFeatureStatsGenerator)
  ]
  if feature_generators:
    generators = [
        g for g in generators
        if not isinstance(g, stats_generator.CombinerFeatureStatsGenerator)
    ] + [CombinerFeatureStatsWrapperGenerator(feature_generators)]
  if in_memory:
    # In-memory generation only supports combiner generators.
    for generator in generators:
      if not isinstance(generator, stats_generator.CombinerStatsGenerator):
        raise TypeError('Statistics generator used in '
                        'generate_statistics_in_memory must '
                        'extend CombinerStatsGenerator, found object of '
                        'type %s.' % generator.__class__.__name__)
  return generators
def _get_default_generators(
    options: stats_options.StatsOptions, in_memory: bool = False
) -> List[stats_generator.StatsGenerator]:
  """Initializes default list of stats generators.

  Args:
    options: A StatsOptions object.
    in_memory: Whether the generators will be used to generate statistics in
      memory (True) or using Beam (False).

  Returns:
    A list of stats generator objects.
  """
  default_generators = [
      basic_stats_generator.BasicStatsGenerator(
          schema=options.schema,
          example_weight_map=options.example_weight_map,
          num_values_histogram_buckets=options.num_values_histogram_buckets,
          num_histogram_buckets=options.num_histogram_buckets,
          num_quantiles_histogram_buckets=options
          .num_quantiles_histogram_buckets,
          epsilon=options.epsilon),
  ]
  # Choose a top-k/uniques implementation: sketch-based when requested,
  # otherwise the combiner flavor for in-memory runs and the Beam flavor
  # for pipeline runs.
  if options.experimental_use_sketch_based_topk_uniques:
    topk_uniques = (
        top_k_uniques_sketch_stats_generator.TopKUniquesSketchStatsGenerator(
            schema=options.schema,
            example_weight_map=options.example_weight_map,
            num_top_values=options.num_top_values,
            num_rank_histogram_buckets=options.num_rank_histogram_buckets,
            frequency_threshold=options.frequency_threshold,
            weighted_frequency_threshold=options.weighted_frequency_threshold,
            num_misragries_buckets=_DEFAULT_MG_SKETCH_SIZE,
            num_kmv_buckets=_DEFAULT_KMV_SKETCH_SIZE))
  elif in_memory:
    topk_uniques = (
        top_k_uniques_combiner_stats_generator
        .TopKUniquesCombinerStatsGenerator(
            schema=options.schema,
            example_weight_map=options.example_weight_map,
            num_top_values=options.num_top_values,
            frequency_threshold=options.frequency_threshold,
            weighted_frequency_threshold=options.weighted_frequency_threshold,
            num_rank_histogram_buckets=options.num_rank_histogram_buckets))
  else:
    topk_uniques = (
        top_k_uniques_stats_generator.TopKUniquesStatsGenerator(
            schema=options.schema,
            example_weight_map=options.example_weight_map,
            num_top_values=options.num_top_values,
            frequency_threshold=options.frequency_threshold,
            weighted_frequency_threshold=options.weighted_frequency_threshold,
            num_rank_histogram_buckets=options.num_rank_histogram_buckets))
  default_generators.append(topk_uniques)
  return default_generators
def _schema_has_sparse_features(schema: schema_pb2.Schema) -> bool:
  """Returns whether there are any sparse features in the specified schema."""

  def _has_sparse_features(
      feature_container: Iterable[schema_pb2.Feature]
  ) -> bool:
    """Helper function used to determine whether there are sparse features."""
    for f in feature_container:
      if isinstance(f, schema_pb2.SparseFeature):
        return True
      if f.type == schema_pb2.STRUCT:
        if f.struct_domain.sparse_feature:
          return True
        # Bug fix: previously this `return`ed the recursion result directly,
        # which ended the scan at the first STRUCT feature and missed sparse
        # features declared in later sibling features. Keep scanning unless a
        # sparse feature was actually found.
        if _has_sparse_features(f.struct_domain.feature):
          return True
    return False

  if schema.sparse_feature:
    return True
  return _has_sparse_features(schema.feature)
def _schema_has_natural_language_domains(schema: schema_pb2.Schema) -> bool:
  """Returns whether there are features in the schema with a nl domain."""
  # A feature has an NL domain iff its domain_info oneof is set to it.
  return any(
      feature.WhichOneof('domain_info') == 'natural_language_domain'
      for feature in schema.feature)
def _filter_features(
    record_batch: pa.RecordBatch,
    feature_allowlist: List[types.FeatureName]) -> pa.RecordBatch:
  """Removes features that are not on the allowlist.

  Args:
    record_batch: Input Arrow RecordBatch.
    feature_allowlist: A set of feature names to keep.

  Returns:
    An Arrow RecordBatch containing only features on the allowlist.
  """
  kept_columns = []
  kept_names = []
  for feature_name in feature_allowlist:
    # Allowlisted features absent from this batch are silently skipped.
    column = arrow_util.get_column(record_batch, feature_name, missing_ok=True)
    if column is not None:
      kept_columns.append(column)
      kept_names.append(feature_name)
  return pa.RecordBatch.from_arrays(kept_columns, kept_names)
def _add_slice_key(
    stats_proto_per_slice: Tuple[types.SliceKey,
                                 statistics_pb2.DatasetFeatureStatistics],
    is_slicing_enabled: bool
) -> statistics_pb2.DatasetFeatureStatistics:
  """Add slice key to stats proto."""
  slice_key, slice_stats = stats_proto_per_slice
  result = statistics_pb2.DatasetFeatureStatistics()
  result.CopyFrom(slice_stats)
  if is_slicing_enabled:
    # The proto's name field carries the slice key.
    result.name = slice_key
  return result
def _merge_dataset_feature_stats_protos(
    stats_protos: Iterable[statistics_pb2.DatasetFeatureStatistics]
) -> statistics_pb2.DatasetFeatureStatistics:
  """Merges together a list of DatasetFeatureStatistics protos.

  Per-feature statistics (keyed by feature path) are merged across the input
  protos, cross-feature stats are concatenated, and num_examples is recovered
  from any feature that carries common stats.

  Args:
    stats_protos: A list of DatasetFeatureStatistics protos to merge.

  Returns:
    The merged DatasetFeatureStatistics proto.
  """
  stats_per_feature = {}
  # Create a new DatasetFeatureStatistics proto.
  result = statistics_pb2.DatasetFeatureStatistics()
  # Iterate over each DatasetFeatureStatistics proto and merge the
  # FeatureNameStatistics protos per feature and add the cross feature stats.
  for stats_proto in stats_protos:
    if stats_proto.cross_features:
      result.cross_features.extend(stats_proto.cross_features)
    for feature_stats_proto in stats_proto.features:
      feature_path = types.FeaturePath.from_proto(feature_stats_proto.path)
      if feature_path not in stats_per_feature:
        # Make a copy for the "cache" since we are modifying it in 'else' below.
        new_feature_stats_proto = statistics_pb2.FeatureNameStatistics()
        new_feature_stats_proto.CopyFrom(feature_stats_proto)
        stats_per_feature[feature_path] = new_feature_stats_proto
      else:
        stats_for_feature = stats_per_feature[feature_path]
        # MergeFrom would concatenate repeated fields which is not what we want
        # for path.step.
        del stats_for_feature.path.step[:]
        stats_for_feature.MergeFrom(feature_stats_proto)
  num_examples = None
  for feature_stats_proto in stats_per_feature.values():
    # Add the merged FeatureNameStatistics proto for the feature
    # into the DatasetFeatureStatistics proto.
    new_feature_stats_proto = result.features.add()
    new_feature_stats_proto.CopyFrom(feature_stats_proto)
    # Get the number of examples from one of the features that
    # has common stats.
    if num_examples is None:
      stats_type = feature_stats_proto.WhichOneof('stats')
      stats_proto = None
      if stats_type == 'num_stats':
        stats_proto = feature_stats_proto.num_stats
      else:
        # Covers string stats (and features with no stats set fall through
        # the HasField check below).
        stats_proto = feature_stats_proto.string_stats
      if stats_proto.HasField('common_stats'):
        # num_examples = non-missing + missing for this feature.
        num_examples = (stats_proto.common_stats.num_non_missing +
                        stats_proto.common_stats.num_missing)
  # Set the num_examples field.
  if num_examples is not None:
    result.num_examples = num_examples
  return result
def _update_example_and_missing_count(
    stats: statistics_pb2.DatasetFeatureStatistics) -> None:
  """Updates example count of the dataset and missing count for all features.

  The (weighted) example counts are read from the custom stats that
  NumExamplesStatsGenerator attached to an internal dummy feature, which is
  removed from the proto here. Mutates *stats* in place.
  """
  if not stats.features:
    return
  # Extract the counts carried by the internal dummy feature, then drop it.
  dummy_feature = stats_util.get_feature_stats(stats, _DUMMY_FEATURE_PATH)
  num_examples = stats_util.get_custom_stats(dummy_feature, _NUM_EXAMPLES_KEY)
  weighted_num_examples = stats_util.get_custom_stats(
      dummy_feature, _WEIGHTED_NUM_EXAMPLES_KEY)
  stats.features.remove(dummy_feature)
  for feature_stats in stats.features:
    # For features nested under a STRUCT feature, their num_missing is computed
    # in the basic stats generator (because their num_missing is relative to
    # their parent's value count).
    if len(feature_stats.path.step) > 1:
      continue
    common_stats = None
    which_oneof_stats = feature_stats.WhichOneof('stats')
    if which_oneof_stats is None:
      # There are not common_stats for this feature (which can be the case when
      # generating only custom_stats for a sparse or weighted feature). In that
      # case, simply continue without modifying the common stats.
      continue
    common_stats = getattr(feature_stats, which_oneof_stats).common_stats
    # A feature can never be non-missing in more examples than exist.
    assert num_examples >= common_stats.num_non_missing, (
        'Total number of examples: {} is less than number of non missing '
        'examples: {} for feature {}.'.format(
            num_examples, common_stats.num_non_missing,
            '.'.join(feature_stats.path.step)))
    # num_missing = total examples - examples where the feature was present.
    num_missing = int(num_examples - common_stats.num_non_missing)
    common_stats.num_missing = num_missing
    if common_stats.presence_and_valency_stats:
      # Keep the first (top-level) presence/valency entry in sync.
      common_stats.presence_and_valency_stats[0].num_missing = num_missing
    if weighted_num_examples != 0:
      weighted_num_missing = (
          weighted_num_examples -
          common_stats.weighted_common_stats.num_non_missing)
      common_stats.weighted_common_stats.num_missing = weighted_num_missing
      if common_stats.weighted_presence_and_valency_stats:
        common_stats.weighted_presence_and_valency_stats[0].num_missing = (
            weighted_num_missing)
  stats.num_examples = int(num_examples)
  stats.weighted_num_examples = weighted_num_examples
def _make_dataset_feature_statistics_list_proto(
    stats_protos: List[statistics_pb2.DatasetFeatureStatistics]
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Constructs a DatasetFeatureStatisticsList proto.

  Each input proto is copied into the result and then post-processed to fill
  in the dataset example count and the per-feature missing counts.

  Args:
    stats_protos: List of DatasetFeatureStatistics protos.

  Returns:
    The DatasetFeatureStatisticsList proto containing the input stats protos.
  """
  result = statistics_pb2.DatasetFeatureStatisticsList()
  if not stats_protos:
    # No examples at all: emit a dataset with num_examples == 0 rather than
    # an empty DatasetFeatureStatisticsList proto.
    result.datasets.add(num_examples=0)
    return result
  for stats_proto in stats_protos:
    copied = result.datasets.add()
    copied.CopyFrom(stats_proto)
    # Fill in the example count for the dataset and the missing count for all
    # the features, using the number of examples computed separately by
    # NumExamplesStatsGenerator. Computing the example count separately avoids
    # ignoring counts for features that are completely missing in a shard.
    # A feature's missing count is num_examples - non_missing_count.
    _update_example_and_missing_count(copied)
  return result
# Path of the synthetic feature used internally by NumExamplesStatsGenerator
# to carry dataset-level example counts; it is stripped from the final output
# by _update_example_and_missing_count.
_DUMMY_FEATURE_PATH = types.FeaturePath(['__TFDV_INTERNAL_FEATURE__'])
# Custom-stat keys under which the (weighted) example counts are stored on
# the dummy feature above.
_NUM_EXAMPLES_KEY = '__NUM_EXAMPLES__'
_WEIGHTED_NUM_EXAMPLES_KEY = '__WEIGHTED_NUM_EXAMPLES__'
class NumExamplesStatsGenerator(stats_generator.CombinerStatsGenerator):
  """Computes total number of examples."""

  def __init__(self,
               weight_feature: Optional[types.FeatureName] = None) -> None:
    """Initializes the generator.

    Args:
      weight_feature: Optional name of the example-weight column; when set,
        a weighted example count is accumulated as well.
    """
    self._weight_feature = weight_feature

  def create_accumulator(self) -> List[float]:
    # Accumulator layout: [num_examples, weighted_num_examples].
    return [0, 0]

  def add_input(self, accumulator: List[float],
                examples: pa.RecordBatch) -> List[float]:
    accumulator[0] += examples.num_rows
    if self._weight_feature:
      weights = arrow_util.get_column(examples, self._weight_feature)
      accumulator[1] += np.sum(np.asarray(weights.flatten()))
    return accumulator

  def merge_accumulators(self, accumulators: Iterable[List[float]]
                        ) -> List[float]:
    totals = self.create_accumulator()
    for num_examples, weighted_num_examples in accumulators:
      totals[0] += num_examples
      totals[1] += weighted_num_examples
    return totals

  def extract_output(self, accumulator: List[float]
                    ) -> statistics_pb2.DatasetFeatureStatistics:
    # Emit the counts as custom stats on a dummy feature; downstream code
    # reads them back and removes the dummy feature from the final output.
    result = statistics_pb2.DatasetFeatureStatistics()
    carrier = result.features.add()
    carrier.path.CopyFrom(_DUMMY_FEATURE_PATH.to_proto())
    carrier.custom_stats.add(name=_NUM_EXAMPLES_KEY, num=accumulator[0])
    carrier.custom_stats.add(name=_WEIGHTED_NUM_EXAMPLES_KEY,
                             num=accumulator[1])
    return result
class _CombinerStatsGeneratorsCombineFnAcc(object):
"""accumulator for _CombinerStatsGeneratorsCombineFn."""
__slots__ = [
'partial_accumulators', 'input_record_batches', 'curr_batch_size',
'curr_byte_size'
]
def __init__(self, partial_accumulators: List[Any]):
# Partial accumulator states of the underlying CombinerStatsGenerators.
self.partial_accumulators = partial_accumulators
# Input record batches to be processed.
self.input_record_batches = []
# Current batch size.
self.curr_batch_size = 0
# Current total byte size of all the pa.RecordBatches accumulated.
self.curr_byte_size = 0
@beam.typehints.with_input_types(pa.RecordBatch)
@beam.typehints.with_output_types(statistics_pb2.DatasetFeatureStatistics)
class _CombinerStatsGeneratorsCombineFn(beam.CombineFn):
  """A beam.CombineFn wrapping a list of CombinerStatsGenerators with batching.

  This wrapper does two things:
  1. Wraps a list of combiner stats generators. Its accumulator is a list
  of accumulators for each wrapped stats generators.
  2. Batches input examples before passing it to the underlying
  stats generators.

  We do this by accumulating examples in the combiner state until we
  accumulate a large enough batch, at which point we send them through the
  add_input step of each of the underlying combiner stats generators. When
  merging, we merge the accumulators of the stats generators and accumulate
  examples accordingly. We finally process any remaining examples
  before producing the final output value.

  This wrapper is needed to support slicing as we need the ability to
  perform slice-aware batching. But currently there is no way to do key-aware
  batching in Beam. Hence, this wrapper does batching and combining together.

  See also:
  BEAM-3737: Key-aware batching function
  (https://issues.apache.org/jira/browse/BEAM-3737).
  """

  # This needs to be large enough to allow for efficient merging of
  # accumulators in the individual stats generators, but shouldn't be too large
  # as it also acts as cap on the maximum memory usage of the computation.
  # TODO(b/73789023): Ideally we should automatically infer the batch size.
  _DESIRED_MERGE_ACCUMULATOR_BATCH_SIZE = 100

  # The combiner accumulates record batches from the upstream and merges them
  # when certain conditions are met. A merged record batch would allow better
  # vectorized processing, but we have to pay for copying and the RAM to
  # contain the merged record batch. If the total byte size of accumulated
  # record batches exceeds this threshold a merge will be forced to avoid
  # consuming too much memory.
  _MERGE_RECORD_BATCH_BYTE_SIZE_THRESHOLD = 20 << 20  # 20MiB

  def __init__(
      self,
      generators: List[stats_generator.CombinerStatsGenerator],
      desired_batch_size: Optional[int] = None) -> None:
    """Initializes the CombineFn.

    Args:
      generators: The combiner stats generators to wrap.
      desired_batch_size: Optional number of examples to buffer before feeding
        a batch to the wrapped generators; falls back to
        constants.DEFAULT_DESIRED_INPUT_BATCH_SIZE when unset or non-positive.
    """
    self._generators = generators

    # We really want the batch size to be adaptive like it is in
    # beam.BatchElements(), but there isn't an easy way to make it so.
    # TODO(b/73789023): Figure out how to make this batch size dynamic.
    if desired_batch_size and desired_batch_size > 0:
      self._desired_batch_size = desired_batch_size
    else:
      self._desired_batch_size = constants.DEFAULT_DESIRED_INPUT_BATCH_SIZE

    # Metrics
    self._combine_batch_size = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE, 'combine_batch_size')
    self._combine_byte_size = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE, 'combine_byte_size')
    self._num_compacts = beam.metrics.Metrics.counter(
        constants.METRICS_NAMESPACE, 'num_compacts')
    self._num_instances = beam.metrics.Metrics.counter(
        constants.METRICS_NAMESPACE, 'num_instances')

  def _for_each_generator(self,
                          func: Callable[..., Any],
                          *args: Iterable[Any]) -> List[Any]:
    """Apply `func` for each wrapped generators.

    Args:
      func: a function that takes N + 1 arguments where N is the size of `args`.
        the first argument is the stats generator.
      *args: Iterables parallel to wrapped stats generators (i.e. the i-th item
        corresponds to the self._generators[i]).

    Returns:
      A list whose i-th element is the result of
      func(self._generators[i], args[0][i], args[1][i], ...).
    """
    return [func(gen, *args_for_func) for gen, args_for_func in zip(
        self._generators, zip(*args))]

  def _should_do_batch(self, accumulator: _CombinerStatsGeneratorsCombineFnAcc,
                       force: bool) -> bool:
    """Returns True if the buffered record batches should be flushed now.

    Flushing happens when forced (and anything is buffered), when enough rows
    have accumulated, or when the buffered bytes exceed the memory threshold.
    """
    curr_batch_size = accumulator.curr_batch_size
    if force and curr_batch_size > 0:
      return True

    if curr_batch_size >= self._desired_batch_size:
      return True

    if (accumulator.curr_byte_size >=
        self._MERGE_RECORD_BATCH_BYTE_SIZE_THRESHOLD):
      return True

    return False

  def _maybe_do_batch(
      self,
      accumulator: _CombinerStatsGeneratorsCombineFnAcc,
      force: bool = False) -> None:
    """Maybe updates accumulator in place.

    Checks if accumulator has enough examples for a batch, and if so, does the
    stats computation for the batch and updates accumulator in place.

    Args:
      accumulator: Accumulator. Will be updated in place.
      force: Force computation of stats even if accumulator has less examples
        than the batch size.
    """
    if self._should_do_batch(accumulator, force):
      self._combine_batch_size.update(accumulator.curr_batch_size)
      self._combine_byte_size.update(accumulator.curr_byte_size)
      # Avoid the merge copy when only a single record batch is buffered.
      if len(accumulator.input_record_batches) == 1:
        record_batch = accumulator.input_record_batches[0]
      else:
        record_batch = table_util.MergeRecordBatches(
            accumulator.input_record_batches)
      accumulator.partial_accumulators = self._for_each_generator(
          lambda gen, gen_acc: gen.add_input(gen_acc, record_batch),
          accumulator.partial_accumulators)
      # Reset the buffers after feeding the generators.
      del accumulator.input_record_batches[:]
      accumulator.curr_batch_size = 0
      accumulator.curr_byte_size = 0

  def setup(self):
    """Prepares each generator for combining."""
    for gen in self._generators:
      gen.setup()

  def create_accumulator(self) -> _CombinerStatsGeneratorsCombineFnAcc:
    """Returns a fresh accumulator wrapping one accumulator per generator."""
    return _CombinerStatsGeneratorsCombineFnAcc(
        [g.create_accumulator() for g in self._generators])

  def add_input(
      self, accumulator: _CombinerStatsGeneratorsCombineFnAcc,
      input_record_batch: pa.RecordBatch
  ) -> _CombinerStatsGeneratorsCombineFnAcc:
    """Buffers the record batch, flushing to the generators when full."""
    accumulator.input_record_batches.append(input_record_batch)
    num_rows = input_record_batch.num_rows
    accumulator.curr_batch_size += num_rows
    accumulator.curr_byte_size += table_util.TotalByteSize(input_record_batch)
    self._maybe_do_batch(accumulator)
    self._num_instances.inc(num_rows)
    return accumulator

  def merge_accumulators(
      self,
      accumulators: Iterable[_CombinerStatsGeneratorsCombineFnAcc]
  ) -> _CombinerStatsGeneratorsCombineFnAcc:
    """Merges accumulators in fixed-size batches to bound peak memory."""
    result = self.create_accumulator()
    # Make sure accumulators is an iterator (so it remembers its position).
    accumulators = iter(accumulators)
    while True:
      # Repeatedly take the next N from `accumulators` (an iterator).
      # If there are less than N remaining, all is taken.
      batched_accumulators = list(itertools.islice(
          accumulators, self._DESIRED_MERGE_ACCUMULATOR_BATCH_SIZE))
      if not batched_accumulators:
        break

      # Batch together remaining examples in each accumulator, and
      # feed to each generator. Note that there might still be remaining
      # examples after this, but a compact() might follow and flush the
      # remaining examples, and extract_output() in the end will flush anyways.
      batched_partial_accumulators = []
      for acc in batched_accumulators:
        result.input_record_batches.extend(acc.input_record_batches)
        result.curr_batch_size += acc.curr_batch_size
        result.curr_byte_size += acc.curr_byte_size
        self._maybe_do_batch(result)
        batched_partial_accumulators.append(acc.partial_accumulators)

      # Transpose so each generator merges its own accumulators together.
      batched_accumulators_by_generator = list(
          zip(*batched_partial_accumulators))

      result.partial_accumulators = self._for_each_generator(
          lambda gen, b, m: gen.merge_accumulators(itertools.chain((b,), m)),
          result.partial_accumulators,
          batched_accumulators_by_generator)

    return result

  def compact(
      self,
      accumulator: _CombinerStatsGeneratorsCombineFnAcc
  ) -> _CombinerStatsGeneratorsCombineFnAcc:
    """Flushes buffered examples and compacts each generator's accumulator."""
    self._maybe_do_batch(accumulator, force=True)
    accumulator.partial_accumulators = self._for_each_generator(
        lambda gen, acc: gen.compact(acc), accumulator.partial_accumulators)
    self._num_compacts.inc(1)
    return accumulator

  def extract_output(
      self,
      accumulator: _CombinerStatsGeneratorsCombineFnAcc
  ) -> statistics_pb2.DatasetFeatureStatistics:  # pytype: disable=invalid-annotation
    """Extracts and merges the per-generator statistics protos."""
    # Make sure we have processed all the examples.
    self._maybe_do_batch(accumulator, force=True)
    return _merge_dataset_feature_stats_protos(
        self._for_each_generator(lambda gen, acc: gen.extract_output(acc),
                                 accumulator.partial_accumulators))
def generate_partial_statistics_in_memory(
    record_batch: pa.RecordBatch, options: stats_options.StatsOptions,
    stats_generators: List[stats_generator.CombinerStatsGenerator]
) -> List[Any]:
  """Generates statistics for an in-memory list of examples.

  Args:
    record_batch: Arrow RecordBatch.
    options: Options for generating data statistics.
    stats_generators: A list of combiner statistics generators.

  Returns:
    A list of accumulators containing partial statistics, parallel to
    stats_generators.
  """
  if options.feature_allowlist:
    # Restrict the batch to the allowlisted features that are present.
    kept_columns = []
    kept_names = []
    for name in options.feature_allowlist:
      column = arrow_util.get_column(record_batch, name, missing_ok=True)
      if column is not None:
        kept_columns.append(column)
        kept_names.append(name)
    record_batch = pa.RecordBatch.from_arrays(kept_columns, kept_names)
  return [
      generator.add_input(generator.create_accumulator(), record_batch)
      for generator in stats_generators
  ]
def generate_statistics_in_memory(
    record_batch: pa.RecordBatch,
    options: Optional[stats_options.StatsOptions] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Generates statistics for an in-memory list of examples.

  Args:
    record_batch: Arrow RecordBatch.
    options: Options for generating data statistics. A fresh default
      StatsOptions is used when not provided.

  Returns:
    A DatasetFeatureStatisticsList proto.
  """
  # A `StatsOptions()` parameter default would be constructed once at
  # function-definition time and shared (and potentially mutated) across all
  # calls -- the classic mutable-default pitfall. Build a fresh one per call.
  if options is None:
    options = stats_options.StatsOptions()
  stats_generators = cast(List[stats_generator.CombinerStatsGenerator],
                          get_generators(options, in_memory=True))
  partial_stats = generate_partial_statistics_in_memory(record_batch, options,
                                                        stats_generators)
  return extract_statistics_output(partial_stats, stats_generators)
def extract_statistics_output(
    partial_stats: List[Any],
    stats_generators: List[stats_generator.CombinerStatsGenerator]
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Extracts final stats output from the accumulators holding partial stats.

  Args:
    partial_stats: Accumulators holding partial statistics, parallel to
      stats_generators.
    stats_generators: The combiner statistics generators that produced the
      accumulators.

  Returns:
    A DatasetFeatureStatisticsList proto containing the merged statistics.
  """
  outputs = []
  for gen, accumulator in zip(stats_generators, partial_stats):
    # Call compact before extract_output to guarantee that `compact()` is
    # called at least once, for testing coverage.
    outputs.append(gen.extract_output(gen.compact(accumulator)))  # pytype: disable=attribute-error
  merged = _merge_dataset_feature_stats_protos(outputs)
  return _make_dataset_feature_statistics_list_proto([merged])
# Type for the wrapper_accumulator of a CombinerFeatureStatsWrapperGenerator.
# Maps each feature path to a list of per-generator accumulators, indexed in
# parallel with the wrapped feature stats generators.
# See documentation below for more details.
WrapperAccumulator = Dict[types.FeaturePath, List[Any]]
class CombinerFeatureStatsWrapperGenerator(
    stats_generator.CombinerStatsGenerator):
  """A combiner that wraps multiple CombinerFeatureStatsGenerators.

  This combiner wraps multiple CombinerFeatureStatsGenerators by generating
  and updating wrapper_accumulators where:
  wrapper_accumulator[feature_path][feature_generator_index] contains the
  generator specific accumulator for the pair (feature_path,
  feature_generator_index).
  """

  def __init__(self,
               feature_stats_generators: List[
                   stats_generator.CombinerFeatureStatsGenerator],
               name: Text = 'CombinerFeatureStatsWrapperGenerator',
               schema: Optional[schema_pb2.Schema] = None,
               sample_rate: Optional[float] = None) -> None:
    """Initializes a CombinerFeatureStatsWrapperGenerator.

    Args:
      feature_stats_generators: A list of CombinerFeatureStatsGenerator.
      name: An optional unique name associated with the statistics generator.
      schema: An optional schema for the dataset.
      sample_rate: An optional sampling rate. If specified, statistics is
        computed over the sample: each input batch is processed with
        probability `sample_rate`.
    """
    super(CombinerFeatureStatsWrapperGenerator, self).__init__(name, schema)
    self._feature_stats_generators = feature_stats_generators
    self._sample_rate = sample_rate

  def _get_wrapped_accumulators(self, wrapper_accumulator: WrapperAccumulator,
                                feature_path: types.FeaturePath) -> List[Any]:
    """Returns the per-generator accumulators for feature_path.

    Initializes the feature_path key if it does not exist.
    """
    result = wrapper_accumulator.get(feature_path, None)
    if result is not None:
      return result
    # Note: This manual initialization could have been avoided if
    # wrapper_accumulator was a defaultdict, but this breaks pickling.
    result = [
        generator.create_accumulator()
        for generator in self._feature_stats_generators
    ]
    wrapper_accumulator[feature_path] = result
    return result

  def setup(self):
    """Prepares every CombinerFeatureStatsGenerator instance for combining."""
    for gen in self._feature_stats_generators:
      gen.setup()

  def create_accumulator(self) -> WrapperAccumulator:
    """Returns a fresh, empty wrapper_accumulator.

    Returns:
      An empty wrapper_accumulator.
    """
    return {}

  def add_input(self, wrapper_accumulator: WrapperAccumulator,
                input_record_batch: pa.RecordBatch) -> WrapperAccumulator:
    """Returns result of folding a batch of inputs into wrapper_accumulator.

    Args:
      wrapper_accumulator: The current wrapper accumulator.
      input_record_batch: An arrow RecordBatch representing a batch of examples,
        which should be added to the accumulator.

    Returns:
      The wrapper_accumulator after updating the statistics for the batch of
      inputs.
    """
    # BUG FIX: the condition previously skipped the batch when
    # random.random() <= sample_rate, inverting the sampling semantics (a
    # sample_rate of 1.0 would drop nearly every batch). Skip the batch only
    # with probability (1 - sample_rate), i.e. process with probability
    # sample_rate, matching the documented behavior.
    if self._sample_rate is not None and random.random() > self._sample_rate:
      return wrapper_accumulator

    for feature_path, feature_array, _ in arrow_util.enumerate_arrays(
        input_record_batch,
        example_weight_map=None,
        enumerate_leaves_only=True):
      wrapped_accumulators = self._get_wrapped_accumulators(
          wrapper_accumulator, feature_path)
      for index, generator in enumerate(self._feature_stats_generators):
        wrapped_accumulators[index] = generator.add_input(
            wrapped_accumulators[index], feature_path, feature_array)

    return wrapper_accumulator

  def merge_accumulators(
      self,
      wrapper_accumulators: Iterable[WrapperAccumulator]) -> WrapperAccumulator:
    """Merges several wrapper_accumulators to a single one.

    Args:
      wrapper_accumulators: The wrapper accumulators to merge.

    Returns:
      The merged accumulator.
    """
    result = self.create_accumulator()
    for wrapper_accumulator in wrapper_accumulators:
      for feature_path, accumulator_for_feature in wrapper_accumulator.items():
        wrapped_accumulators = self._get_wrapped_accumulators(
            result, feature_path)
        for index, generator in enumerate(self._feature_stats_generators):
          wrapped_accumulators[index] = generator.merge_accumulators(
              [wrapped_accumulators[index], accumulator_for_feature[index]])
    return result

  def compact(self,
              wrapper_accumulator: WrapperAccumulator) -> WrapperAccumulator:
    """Returns a compacted wrapper_accumulator.

    This overrides the base class's implementation. This is optionally called
    before an accumulator is sent across the wire.

    Args:
      wrapper_accumulator: The wrapper accumulator to compact. Updated in
        place and returned.

    Returns:
      The compacted wrapper accumulator.
    """
    for accumulator_for_feature in wrapper_accumulator.values():
      for index, generator in enumerate(self._feature_stats_generators):
        accumulator_for_feature[index] = generator.compact(
            accumulator_for_feature[index])
    return wrapper_accumulator

  def extract_output(self, wrapper_accumulator: WrapperAccumulator
                    ) -> statistics_pb2.DatasetFeatureStatistics:
    """Returns result of converting wrapper_accumulator into the output value.

    Args:
      wrapper_accumulator: The final wrapper_accumulator value.

    Returns:
      A proto representing the result of this stats generator.
    """
    result = statistics_pb2.DatasetFeatureStatistics()

    for feature_path, accumulator_for_feature in wrapper_accumulator.items():
      feature_stats = result.features.add()
      feature_stats.path.CopyFrom(feature_path.to_proto())
      for index, generator in enumerate(self._feature_stats_generators):
        feature_stats.MergeFrom(
            generator.extract_output(accumulator_for_feature[index]))
    return result
| 41.826396 | 110 | 0.738513 |
4bb3905cafc7d6da97b446e666c04ee2d4ba8cc2 | 5,527 | py | Python | scripts/generate_client_publish_job.py | craigcomstock/mender-qa | 56d7d528d6a831b0936fdd84624643bdeda8c653 | [
"Apache-2.0"
] | 7 | 2016-11-16T01:00:55.000Z | 2018-07-05T13:06:45.000Z | scripts/generate_client_publish_job.py | craigcomstock/mender-qa | 56d7d528d6a831b0936fdd84624643bdeda8c653 | [
"Apache-2.0"
] | 262 | 2016-10-05T11:53:53.000Z | 2022-03-28T13:49:40.000Z | scripts/generate_client_publish_job.py | craigcomstock/mender-qa | 56d7d528d6a831b0936fdd84624643bdeda8c653 | [
"Apache-2.0"
] | 23 | 2016-08-31T11:52:10.000Z | 2022-02-09T19:04:45.000Z | #!/usr/bin/python3
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os, subprocess
import re
import tempfile
import shutil
import yaml
def initWorkspace():
    """Clone the mender integration repository into a new temporary directory.

    Returns:
        Path of the temporary checkout. The caller is responsible for
        removing it when done.
    """
    workdir = tempfile.mkdtemp()
    subprocess.check_output(
        ["git", "clone", "https://github.com/mendersoftware/integration.git", workdir],
    )
    return workdir
def generate(integration_repo, args):
    """Write a GitLab CI YAML file with client-publish trigger jobs.

    For every integration version that includes args.trigger (as reported by
    integration's release_tool.py), a trigger job for the mender-qa pipeline
    is emitted, pinned to the component versions of that integration release.
    Versions that would push build or final tags are skipped entirely.

    Args:
        integration_repo: Path to a checkout of the integration repository.
        args: Parsed command line arguments; uses trigger, version,
            feature_branches, meta_mender_version and filename.
    """
    release_tool = os.path.join(integration_repo, "extra", "release_tool.py")
    release_tool_args = [
        release_tool,
        "--integration-versions-including",
        args.trigger,
        "--version",
        args.version,
    ]
    if args.feature_branches:
        release_tool_args.append("--feature-branches")
    integration_versions = subprocess.check_output(
        release_tool_args,
    )

    # Filter out saas-* versions
    # Historically, there have been some saas- releases using "master" of independent components
    # (namely: mender-connect), but we certainly don't want these versions in the generated jobs
    integration_versions_list = [
        ver
        for ver in integration_versions.decode("utf-8").splitlines()
        if not ver.startswith("saas-")
    ]

    # Matches build tags (X.Y.Z-buildN) and final tags (X.Y.Z). Use a raw
    # string: "\." in a plain literal is an invalid escape sequence
    # (SyntaxWarning from Python 3.12). Compile once instead of per repo.
    release_tag_re = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+(-build[0-9]+)?$")

    stage_name = "trigger"
    document = {
        "stages": [stage_name],
    }

    for integ_version in integration_versions_list:
        subprocess.check_output(
            ["git", "checkout", integ_version],
            cwd=integration_repo,
        )
        all_repos = subprocess.check_output(
            [release_tool, "--list", "git"]
        )
        job_key = "trigger:mender-qa:" + integ_version.split("/")[1]
        repos = {}
        any_tag = False
        for repo in all_repos.decode("utf-8").splitlines():
            repo_version = subprocess.check_output(
                [
                    release_tool,
                    "--version-of",
                    repo,
                    "--in-integration-version",
                    integ_version,
                ],
            )
            # For origin/master, the tool returns origin/master, but for
            # releases like origin/2.7.x, the tool returns 2.7.x (?)
            repo_version = repo_version.decode("utf-8").rstrip()
            if len(repo_version.split("/")) > 1:
                repo_version = repo_version.split("/")[1]
            repos[repo.replace("-", "_").upper()] = repo_version

            # Do not allow any job which will push build or final tags. These
            # should never be done outside of manual releases.
            if release_tag_re.match(repo_version) is not None:
                any_tag = True
                break
        if any_tag:
            continue

        repos["META_MENDER"] = args.meta_mender_version

        document[job_key] = {
            "stage": stage_name,
            "trigger": {
                "project": "Northern.tech/Mender/mender-qa",
                "branch": "master",
                "strategy": "depend",
            },
            "variables": {
                "PUBLISH_DOCKER_CLIENT_IMAGES": "true",
                "BUILD_CLIENT": "true",
                "BUILD_SERVERS": "false",
                "BUILD_QEMUX86_64_UEFI_GRUB": "false",
                "TEST_QEMUX86_64_UEFI_GRUB": "false",
                "BUILD_QEMUX86_64_BIOS_GRUB": "false",
                "TEST_QEMUX86_64_BIOS_GRUB": "false",
                "BUILD_QEMUX86_64_BIOS_GRUB_GPT": "false",
                "TEST_QEMUX86_64_BIOS_GRUB_GPT": "false",
                "BUILD_VEXPRESS_QEMU_UBOOT_UEFI_GRUB": "false",
                "TEST_VEXPRESS_QEMU_UBOOT_UEFI_GRUB": "false",
                "BUILD_VEXPRESS_QEMU": "false",
                "TEST_VEXPRESS_QEMU": "false",
                "BUILD_VEXPRESS_QEMU_FLASH": "false",
                "TEST_VEXPRESS_QEMU_FLASH": "false",
                "BUILD_BEAGLEBONEBLACK": "false",
                "TEST_BEAGLEBONEBLACK": "false",
                "BUILD_RASPBERRYPI3": "false",
                "TEST_RASPBERRYPI3": "false",
                "RUN_INTEGRATION_TESTS": "false",
            },
        }
        for repo, version in repos.items():
            document[job_key]["variables"][f"{repo}_REV"] = version

    with open(args.filename, "w") as f:
        yaml.dump(document, f)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Component/repository name passed to release_tool's
    # --integration-versions-including option.
    parser.add_argument("--trigger", required=True)
    # Optional pre-existing integration checkout; when omitted, a temporary
    # clone is made and removed afterwards.
    parser.add_argument("--workspace", default=None)
    parser.add_argument("--version", default="master")
    parser.add_argument("--meta-mender-version", default="master")
    parser.add_argument("--feature-branches", action="store_true")
    parser.add_argument("--filename", default="gitlab-ci-client-qemu-publish-job.yml")
    args = parser.parse_args()

    if args.workspace:
        generate(args.workspace, args)
    else:
        # No workspace given: clone integration into a temp dir and clean up.
        integration_repo = initWorkspace()
        generate(integration_repo, args)
        shutil.rmtree(integration_repo)
af49c4bc7d316d3bc651adc4ef18addecc347df6 | 11,042 | py | Python | courses/machine_learning/deepdive/10_recommend/labs/endtoend/airflow/plugins/gae_admin_plugin.py | pranaynanda/training-data-analyst | f10ab778589129239fd5b277cfdefb41638eded5 | [
"Apache-2.0"
] | null | null | null | courses/machine_learning/deepdive/10_recommend/labs/endtoend/airflow/plugins/gae_admin_plugin.py | pranaynanda/training-data-analyst | f10ab778589129239fd5b277cfdefb41638eded5 | [
"Apache-2.0"
] | null | null | null | courses/machine_learning/deepdive/10_recommend/labs/endtoend/airflow/plugins/gae_admin_plugin.py | pranaynanda/training-data-analyst | f10ab778589129239fd5b277cfdefb41638eded5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Airflow plugin for Google App Engine Admin functions."""
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
from apiclient.discovery import build
from datetime import datetime
from googleapiclient import errors
import logging
from oauth2client.client import GoogleCredentials
import time
class AppEngineAdminHook(GoogleCloudBaseHook):
  """Hook for App Engine Flex Admin.

  Wraps the App Engine Admin API (version deployment, traffic migration) and
  the Service Management API (Cloud Endpoints config lookup).
  """

  def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
    super(AppEngineAdminHook, self).__init__(gcp_conn_id, delegate_to)
    self._gaeadmin = self.get_ae_conn()
    self._svcadmin = self.get_svc_conn()

  def get_ae_conn(self):
    """Returns: an App Engine Admin API service object."""
    credentials = GoogleCredentials.get_application_default()
    return build('appengine', 'v1', credentials=credentials)

  def get_svc_conn(self):
    """Returns: a Service Management API service object."""
    credentials = GoogleCredentials.get_application_default()
    return build('servicemanagement', 'v1', credentials=credentials)

  def create_version(self, project_id, service_id, version_spec):
    """Creates new service version on App Engine Engine.

    Blocks until the create operation reaches a terminal state.

    Args:
      project_id: project id
      service_id: service id
      version_spec: app version spec

    Returns:
      The operation if the version was created successfully and
      raises an error otherwise.
    """
    create_request = self._gaeadmin.apps().services().versions().create(
        appsId=project_id, servicesId=service_id, body=version_spec)
    response = create_request.execute()
    # The operation id is the last path segment of the operation name.
    op_name = response['name'].split('/')[-1]
    return self._wait_for_operation_done(project_id, op_name)

  def migrate_traffic(self, project_id, service_id, new_version):
    """Migrate AE traffic from current version to new version.

    Allocates 100% of the service's traffic to new_version and blocks until
    the operation reaches a terminal state.

    Args:
      project_id: project id
      service_id: service id
      new_version: new version id

    Returns:
      the operation if the migration was successful and
      raises an error otherwise.
    """
    split_config = {'split': {'allocations': {new_version: '1'}}}
    migrate_request = self._gaeadmin.apps().services().patch(
        appsId=project_id,
        servicesId=service_id,
        updateMask='split',
        body=split_config)
    response = migrate_request.execute()
    op_name = response['name'].split('/')[-1]
    return self._wait_for_operation_done(project_id, op_name)

  def get_endpoint_config(self, service_id):
    """Get latest endpoint config for an endpoint service.

    Args:
      service_id: service id

    Returns:
      the config version if successful and raises an error otherwise.
    """
    resource = self._svcadmin.services().rollouts()
    list_request = resource.list(serviceName=service_id)
    response = list_request.execute()
    # NOTE(review): assumes at least one rollout exists and that entry 0 is
    # the most recent -- confirm against the Service Management API ordering.
    config_id = response['rollouts'][0]['rolloutId']
    return config_id

  def get_version(self, project_id, service_id, version):
    """Get spec for a version of a service on App Engine Engine.

    Args:
      project_id: project id
      service_id: service id
      version: version id

    Returns:
      the version spec if successful and raises an error otherwise.
    """
    resource = self._gaeadmin.apps().services().versions()
    get_request = resource.get(appsId=project_id,
                               servicesId=service_id,
                               versionsId=version,
                               view='FULL')
    response = get_request.execute()
    return response

  def get_version_identifiers(self, project_id, service_id):
    """Get list of versions of a service on App Engine Engine.

    Args:
      project_id: project id
      service_id: service id

    Returns:
      the list of version identifiers if successful and raises an error otherwise.
    """
    request = self._gaeadmin.apps().services().versions().list(
        appsId=project_id, servicesId=service_id)
    versions = []
    # Follow the list pagination until list_next returns None.
    while request is not None:
      versions_doc = request.execute()
      versions.extend([v['id'] for v in versions_doc['versions']])
      request = self._gaeadmin.apps().services().versions().list_next(
          request, versions_doc)
    return versions

  def _get_operation(self, project_id, op_name):
    """Gets an AppEngine operation based on the operation name.

    Args:
      project_id: project id
      op_name: operation name

    Returns:
      AppEngine operation object if succeed.

    Raises:
      apiclient.errors.HttpError: if HTTP error is returned from server
    """
    resource = self._gaeadmin.apps().operations()
    request = resource.get(appsId=project_id, operationsId=op_name)
    return request.execute()

  def _wait_for_operation_done(self, project_id, op_name, interval=30):
    """Waits for the Operation to reach a terminal state.

    This method will periodically check the job state until the operation reaches
    a terminal state.

    Args:
      project_id: project id
      op_name: operation name
      interval: check interval in seconds

    Returns:
      AppEngine operation object if succeed.

    Raises:
      ValueError: if interval is not positive
      apiclient.errors.HttpError: if HTTP error is returned when getting
      the operation
    """
    # Validate explicitly rather than with `assert`: asserts are stripped
    # when Python runs with -O, which would silently allow a hot polling loop.
    if interval <= 0:
      raise ValueError('interval must be positive, got %r' % (interval,))
    while True:
      operation = self._get_operation(project_id, op_name)
      if 'done' in operation and operation['done']:
        return operation
      time.sleep(interval)
class AppEngineVersionOperator(BaseOperator):
  """Operator for creating a new AppEngine flex service version.

  Deploys a new version of the given service (either from an explicit spec or
  cloned from the latest existing version) and then migrates all traffic to
  it.
  """

  @apply_defaults
  def __init__(self,
               project_id,
               service_id,
               region,
               service_spec=None,
               gcp_conn_id='google_cloud_default',
               delegate_to=None,
               *args,
               **kwargs):
    """Initializes the operator.

    Args:
      project_id: GCP project id.
      service_id: App Engine service name.
      region: Compute Engine region.
      service_spec: Optional version spec; when None, the spec of the latest
        deployed version is reused.
      gcp_conn_id: Airflow connection id for GCP.
      delegate_to: Account to impersonate, if any.
      *args: Passed through to BaseOperator.
      **kwargs: Passed through to BaseOperator.
    """
    super(AppEngineVersionOperator, self).__init__(*args, **kwargs)
    self._project_id = project_id
    self._service_id = service_id
    self._region = region
    self._service_spec = service_spec
    self._gcp_conn_id = gcp_conn_id
    self._delegate_to = delegate_to

    if not self._project_id:
      raise AirflowException('Cloud project id is required.')
    if not self._service_id:
      raise AirflowException('App Engine service name is required.')
    if not self._region:
      raise AirflowException('Compute Engine region is required.')

  def execute(self, context):
    """Deploys a new version and migrates all traffic to it.

    Raises:
      RuntimeError: if the deploy or traffic-migration operation fails.
      apiclient.errors.HttpError: on API errors.
    """
    hook = AppEngineAdminHook(gcp_conn_id=self._gcp_conn_id,
                              delegate_to=self._delegate_to)

    # if version spec is not provided, use spec for latest version
    if self._service_spec is None:
      # get version spec for latest version, assuming version ids are sortable
      version_list = hook.get_version_identifiers(
          self._project_id, self._service_id)
      latest_version = max(version_list)
      version_spec = hook.get_version(self._project_id, self._service_id,
                                      latest_version)

      # get endpoints config id
      endpoint_service = '{0}.appspot.com'.format(self._project_id)
      config_id = hook.get_endpoint_config(endpoint_service)

      # clean irrelevant params (output-only/server-assigned fields that must
      # not be present in a create request)
      version_spec.pop('name', None)
      version_spec.pop('threadsafe', None)
      version_spec.pop('servingStatus', None)
      version_spec.pop('createTime', None)
      version_spec.pop('createdBy', None)
      version_spec.pop('runtimeApiVersion', None)
      version_spec.pop('versionUrl', None)
      version_spec.pop('betaSettings', None)

      # fix docker container ref: strip the resolved digest so the tag is
      # re-resolved on deploy
      container_ref = version_spec['deployment']['container']['image']
      container_ref = container_ref.split('@sha')[0]
      version_spec['deployment']['container']['image'] = container_ref

      # add endpoint service params
      version_spec.update({
          'endpointsApiService': {
              'name': '{0}.appspot.com'.format(self._project_id),
              'configId': config_id
          }
      })
    else:
      # BUG FIX: this previously read `self._version_spec`, an attribute that
      # is never assigned (the constructor stores the spec as
      # `self._service_spec`), raising AttributeError whenever a spec was
      # supplied.
      version_spec = self._service_spec

    if 'id' not in version_spec:
      # generate version id and add to params
      now = datetime.now()
      version_spec['id'] = '{0}t{1}'.format(now.strftime('%Y%m%d'),
                                            now.strftime('%H%M%S'))

    # deploy new version
    try:
      finished_version_op = hook.create_version(self._project_id,
                                                self._service_id,
                                                version_spec)
    except errors.HttpError:
      raise

    if 'error' in finished_version_op:
      logging.error('AppEngine version deploy failed: %s',
                    str(finished_version_op))
      raise RuntimeError(finished_version_op['error']['message'])

    # migrate traffic to new version
    new_version = version_spec['id']
    try:
      finished_migrate_op = hook.migrate_traffic(self._project_id,
                                                 self._service_id,
                                                 new_version)
    except errors.HttpError:
      raise

    if 'error' in finished_migrate_op:
      # BUG FIX: the failed *migrate* operation is logged here; previously
      # this logged finished_version_op (the deploy operation) by mistake.
      logging.error('AppEngine version migrate failed: %s',
                    str(finished_migrate_op))
      raise RuntimeError(finished_migrate_op['error']['message'])
# Plugin class for GAEAdmin
class AppEngineAdminPlugin(AirflowPlugin):
  """Airflow plugin registering the App Engine admin hook and operator."""
  name = 'app_engine_admin_plugin'
  operators = [AppEngineVersionOperator]
  hooks = [AppEngineAdminHook]
  # No custom executors, macros, views, blueprints or menu links are
  # provided; the empty lists satisfy the AirflowPlugin interface.
  executors = []
  macros = []
  admin_views = []
  flask_blueprints = []
  menu_links = []
742575a3b31fd2a768cec60be82e2ca8d6257ab6 | 5,607 | py | Python | python/blockchain.py | selvendiranj/pyth-coin | 84ec07cf99e0e3adcbc2550cd4e04f495aa88b9b | [
"MIT"
] | null | null | null | python/blockchain.py | selvendiranj/pyth-coin | 84ec07cf99e0e3adcbc2550cd4e04f495aa88b9b | [
"MIT"
] | null | null | null | python/blockchain.py | selvendiranj/pyth-coin | 84ec07cf99e0e3adcbc2550cd4e04f495aa88b9b | [
"MIT"
] | null | null | null | """
blockchain
"""
import hashlib
import json
from time import time
from urllib.parse import urlparse
import requests
class Blockchain:
    """
    blockchain core functions

    Holds the chain of blocks, the pool of pending transactions and the set
    of peer nodes; implements proof-of-work mining and a longest-chain
    consensus over HTTP peers.
    """
    def __init__(self):
        # Transactions waiting to be included in the next mined block.
        self.current_transactions = []
        # Ordered list of block dicts; index 0 is the genesis block.
        self.chain = []
        # Peer addresses (netloc strings); a set keeps them unique.
        self.nodes = set()

        # Create the genesis block
        self.create_block(previous_hash='1', proof=100)

    def register_node(self, address):
        """
        Add a new node to the list of nodes

        :param address: Address of node. Eg. 'http://192.168.0.5:5000'
        :raises ValueError: if *address* carries neither a netloc nor a path.
        """
        parsed_url = urlparse(address)
        if parsed_url.netloc:
            self.nodes.add(parsed_url.netloc)
        elif parsed_url.path:
            # Accepts an URL without scheme like '192.168.0.5:5000'.
            self.nodes.add(parsed_url.path)
        else:
            raise ValueError('Invalid URL')

    def is_chain_valid(self, chain):
        """
        Determine if a given blockchain is valid

        :param chain: A blockchain
        :return: True if valid, False if not
        """
        last_block = chain[0]
        current_index = 1

        while current_index < len(chain):
            block = chain[current_index]
            # Debug output of each link as it is verified.
            print(f'{last_block}')
            print(f'{block}')
            print("\n-----------\n")
            # Check that the hash of the block is correct
            if block['previous_hash'] != self.get_hash(last_block):
                return False

            # Check that the Proof of Work is correct
            if not self.is_proof_valid(last_block['proof'], block['proof'],
                                       last_block['previous_hash']):
                return False

            last_block = block
            current_index += 1

        return True

    def resolve_conflicts(self):
        """
        This is our consensus algorithm, it resolves conflicts
        by replacing our chain with the longest one in the network.

        :return: True if our chain was replaced, False if not
        """
        neighbours = self.nodes
        new_chain = None

        # We're only looking for chains longer than ours
        max_length = len(self.chain)

        # Grab and verify the chains from all the nodes in our network
        for node in neighbours:
            response = requests.get(f'http://{node}/chain')

            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']

                # Check if the length is longer and the chain is valid
                if length > max_length and self.is_chain_valid(chain):
                    max_length = length
                    new_chain = chain

        # Replace our chain if we discovered a new, valid chain longer than ours
        if new_chain:
            self.chain = new_chain
            return True

        return False

    def create_block(self, proof, previous_hash):
        """
        Create a new Block in the Blockchain

        :param proof: The proof given by the Proof of Work algorithm
        :param previous_hash: Hash of previous Block
        :return: New Block
        """
        block = {
            'index': len(self.chain) + 1,
            'timestamp': time(),
            'transactions': self.current_transactions,
            'proof': proof,
            # Falls back to hashing the current chain tail when no hash is given.
            'previous_hash': previous_hash or self.get_hash(self.chain[-1]),
        }

        # Reset the current list of transactions
        self.current_transactions = []

        self.chain.append(block)
        return block

    def create_transaction(self, sender, recipient, amount):
        """
        Creates a new transaction to go into the next mined Block

        :param sender: Address of the Sender
        :param recipient: Address of the Recipient
        :param amount: Amount
        :return: The index of the Block that will hold this transaction
        """
        self.current_transactions.append({
            'sender': sender,
            'recipient': recipient,
            'amount': amount,
        })

        # The transaction lands in the block after the current tail.
        return self.get_last_block['index'] + 1

    @property
    def get_last_block(self):
        """The most recently appended block (the chain tail)."""
        return self.chain[-1]

    @staticmethod
    def get_hash(block):
        """
        Creates a SHA-256 hash of a Block

        :param block: Block
        """
        # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
        block_string = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(block_string).hexdigest()

    def get_proof_of_work(self, last_block):
        """
        Simple Proof of Work Algorithm:

        - Find a number p' such that hash(pp') contains leading 4 zeroes
        - Where p is the previous proof, and p' is the new proof

        :param last_block: <dict> last Block
        :return: <int>
        """
        last_proof = last_block['proof']
        last_hash = self.get_hash(last_block)

        # Brute-force search: increment the candidate until the hash matches.
        proof = 0
        while self.is_proof_valid(last_proof, proof, last_hash) is False:
            proof += 1

        return proof

    @staticmethod
    def is_proof_valid(last_proof, proof, last_hash):
        """
        Validates the Proof

        :param last_proof: <int> Previous Proof
        :param proof: <int> Current Proof
        :param last_hash: <str> The hash of the Previous Block
        :return: <bool> True if correct, False if not.
        """
        guess = f'{last_proof}{proof}{last_hash}'.encode()
        guess_hash = hashlib.sha256(guess).hexdigest()
        # Difficulty target: four leading zero hex digits.
        return guess_hash[:4] == "0000"
| 28.461929 | 93 | 0.576779 |
a01db959b13d1e9d9fd2b4d0f05e2e6db0337d21 | 672 | py | Python | knx_stack/definition/knxnet_ip/__init__.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 2 | 2021-07-28T07:42:28.000Z | 2022-01-25T18:56:05.000Z | knx_stack/definition/knxnet_ip/__init__.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 6 | 2021-07-25T21:36:01.000Z | 2022-02-20T21:11:31.000Z | knx_stack/definition/knxnet_ip/__init__.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | null | null | null | from knx_stack.definition.knxnet_ip.definitions import (
Services,
ConnectionTypes,
ErrorCodes,
ConnectResponseStatusCode,
ConnectionstateResponseStatusCode,
KNXNETIP_VERSION_10,
HEADER_SIZE_10,
IPV4_UDP,
DISCOVERY_MULTICAST_ADDR,
DISCOVERY_MULTICAST_PORT,
CONNECT_REQUEST_TIMEOUT,
CONNECTION_ALIVE_TIME,
CONNECTIONSTATE_REQUEST_TIMEOUT,
DEVICE_CONFIGURATION_REQUEST_TIMEOUT,
TUNNELING_REQUEST_TIMEOUT,
)
from knx_stack.definition.knxnet_ip.msg import Msg
from knx_stack.definition.knxnet_ip.state import State
from knx_stack.definition.knxnet_ip import core
from knx_stack.definition.knxnet_ip import tunneling
| 30.545455 | 56 | 0.8125 |
8bd8956648011d2b4603fc8ed217c58c918bd151 | 3,005 | py | Python | airflow/providers/email/operators/email.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | [
"Apache-2.0"
] | 3 | 2015-08-25T13:56:44.000Z | 2020-03-21T10:26:58.000Z | airflow/providers/email/operators/email.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | [
"Apache-2.0"
] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z | airflow/providers/email/operators/email.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | [
"Apache-2.0"
] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, Optional, Union
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
class EmailOperator(BaseOperator):
    """
    Sends an email.

    :param to: list of emails to send the email to. (templated)
    :type to: list or string (comma or semicolon delimited)
    :param subject: subject line for the email. (templated)
    :type subject: str
    :param html_content: content of the email, html markup
        is allowed. (templated)
    :type html_content: str
    :param files: file names to attach in email
    :type files: list
    :param cc: list of recipients to be added in CC field
    :type cc: list or string (comma or semicolon delimited)
    :param bcc: list of recipients to be added in BCC field
    :type bcc: list or string (comma or semicolon delimited)
    :param mime_subtype: MIME sub content type
    :type mime_subtype: str
    :param mime_charset: character set parameter added to the Content-Type
        header.
    :type mime_charset: str
    """

    template_fields = ('to', 'subject', 'html_content')
    template_ext = ('.html',)
    ui_color = '#e6faf9'

    @apply_defaults
    def __init__(
            self,
            to: Union[List[str], str],
            subject: str,
            html_content: str,
            files: Optional[List] = None,
            cc: Optional[Union[List[str], str]] = None,
            bcc: Optional[Union[List[str], str]] = None,
            mime_subtype: str = 'mixed',
            mime_charset: str = 'utf-8',
            *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Recipient fields; the short names trip pylint's naming rules.
        self.to = to  # pylint: disable=invalid-name
        self.cc = cc  # pylint: disable=invalid-name
        self.bcc = bcc
        # Message content and attachments.
        self.subject = subject
        self.html_content = html_content
        self.files = files or []
        # MIME envelope settings.
        self.mime_subtype = mime_subtype
        self.mime_charset = mime_charset

    def execute(self, context):
        """Deliver the configured email via Airflow's email backend."""
        send_email(
            self.to,
            self.subject,
            self.html_content,
            files=self.files,
            cc=self.cc,
            bcc=self.bcc,
            mime_subtype=self.mime_subtype,
            mime_charset=self.mime_charset,
        )
| 38.037975 | 82 | 0.671547 |
3a7393201a08074aaae3247c3483abaea37d1f31 | 979 | py | Python | argparse/setup.py | psclafer/micropython-lib-1 | 52950a56a5f8e2d674ee668fc1dc2a319088ec26 | [
"PSF-2.0"
] | null | null | null | argparse/setup.py | psclafer/micropython-lib-1 | 52950a56a5f8e2d674ee668fc1dc2a319088ec26 | [
"PSF-2.0"
] | null | null | null | argparse/setup.py | psclafer/micropython-lib-1 | 52950a56a5f8e2d674ee668fc1dc2a319088ec26 | [
"PSF-2.0"
] | null | null | null | import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
# Make the shared sdist helper (one directory up in micropython-lib) importable.
sys.path.append("..")
import sdist_upip

# Package metadata; the custom sdist command produces upip-compatible archives.
setup(name='micropython-argparse',
      version='0.4',
      description='argparse module for MicroPython',
      long_description="This is a module reimplemented specifically for MicroPython standard library,\nwith efficient and lean design in mind. Note that this module is likely work\nin progress and likely supports just a subset of CPython's corresponding\nmodule. Please help with the development if you are interested in this\nmodule.",
      url='https://github.com/pfalcon/micropython-lib',
      author='Damien George',
      author_email='micropython-lib@googlegroups.com',
      maintainer='Paul Sokolovsky',
      maintainer_email='micropython-lib@googlegroups.com',
      license='MIT',
      cmdclass={'sdist': sdist_upip.sdist},
      py_modules=['argparse'])
| 46.619048 | 336 | 0.742594 |
560f56c8af89dfedcf33d3309438d485ddcd4f7f | 81 | py | Python | web/apps.py | oogou11/analysis | 1dd9b3869f1f63d699a4fc68c8090ae09a1e3162 | [
"MIT"
] | 270 | 2019-09-05T05:51:19.000Z | 2022-03-12T18:26:13.000Z | web/apps.py | oogou11/analysis | 1dd9b3869f1f63d699a4fc68c8090ae09a1e3162 | [
"MIT"
] | 295 | 2019-08-19T12:40:29.000Z | 2022-01-24T14:03:20.000Z | machines/web/apps.py | Guilehm/machines | 859b909cebfd6b72e3f4e0bb6e1a6f4b7afcd255 | [
"MIT"
] | 70 | 2019-03-03T22:21:45.000Z | 2022-02-25T12:55:23.000Z | from django.apps import AppConfig
class WebConfig(AppConfig):
    """Django application configuration for the ``web`` app."""
    name = 'web'
| 13.5 | 33 | 0.728395 |
7d64a076ba266fc5f00d1d18b7ca5a00638fc937 | 7,165 | py | Python | alloy2vec/processing/raw-data-processing/fun_read_process_abstract.py | peizong/alloy2vec | df3515f50a112d3b950a7a31eaeea8f51bdcb978 | [
"MIT"
] | null | null | null | alloy2vec/processing/raw-data-processing/fun_read_process_abstract.py | peizong/alloy2vec | df3515f50a112d3b950a7a31eaeea8f51bdcb978 | [
"MIT"
] | null | null | null | alloy2vec/processing/raw-data-processing/fun_read_process_abstract.py | peizong/alloy2vec | df3515f50a112d3b950a7a31eaeea8f51bdcb978 | [
"MIT"
] | null | null | null |
import json
from collections import OrderedDict
from gensim.parsing.preprocessing import remove_stopwords,strip_punctuation
import numpy as np
def remove_stopword_punctuations(text):
    """Return *text* with stopwords removed and punctuation stripped.

    Applies gensim's preprocessing in the same order as before: stopword
    removal first, punctuation stripping second.
    """
    return strip_punctuation(remove_stopwords(text))
def write(abstracts, filename):
    """Append each item of *abstracts* to *filename*, one per line.

    The file is opened in append mode ('a+') with UTF-8 encoding, so repeated
    calls accumulate lines instead of overwriting earlier output. Items are
    converted with ``str()`` before writing.
    """
    with open(filename, 'a+', encoding="utf-8") as fw:
        # Iterate the items directly instead of indexing by position.
        for abstract in abstracts:
            fw.write(str(abstract) + '\n')
    # The original's explicit fw.close() inside the with-block was redundant
    # and has been removed; the context manager closes the file.
def extract_abstract_all(json_dir):
    """Parse a Scopus JSON-lines dump and collect abstracts and DOIs.

    :param json_dir: path to a file with one JSON object per line, each
        holding an ``entry`` field whose value is the string repr of a dict
        with at least ``description`` and ``doi`` keys.
    :return: tuple ``(corpus, corpus_unique_doi, dois, dois_unique)``; the
        ``*_unique*`` lists are de-duplicated by DOI and skip records whose
        abstract is missing (the string 'None').
    """
    corpus = []
    corpus_unique_doi = []
    dois = []
    dois_unique = []
    # Set mirror of dois_unique for O(1) membership tests; the list itself is
    # still returned so the interface is unchanged.
    seen_dois = set()
    with open(json_dir, encoding="utf-8") as f:
        for json_obj in f:
            entry = json.loads(json_obj)['entry']
            # SECURITY: ``entry`` is the repr of a dict and is eval'd, as in
            # the original pipeline; eval on untrusted input is dangerous —
            # consider ast.literal_eval if the data is guaranteed literal.
            # (The original eval'd the same payload three times per record.)
            record = dict(eval(entry))
            abstract = record['description']
            corpus.append(abstract)
            doi = record['doi']
            dois.append(doi)
            if doi not in seen_dois and str(abstract) != 'None':
                seen_dois.add(doi)
                dois_unique.append(doi)
                corpus_unique_doi.append(abstract)
    return corpus, corpus_unique_doi, dois, dois_unique
def extract_abstract_double_materials(json_dir,journals):
    """Collect abstracts, double-counting those from the given journals.

    Same parsing as ``extract_abstract_all``, but a record whose
    ``publicationName`` appears in *journals* is appended to the unique lists
    a second time — presumably to double-weight those (materials) journals in
    the training corpus; TODO confirm intent.

    :param json_dir: path to a JSON-lines Scopus dump.
    :param journals: collection of publication names to double-weight.
    :return: (corpus, corpus_unique_doi, dois, dois_unique)
    """
    corpus=[]
    corpus_unique_doi=[]
    dois=[]
    dois_unique=[]
    with open(json_dir,encoding="utf-8") as f:
        for jsonObj in f:
            entry = json.loads(jsonObj)['entry']
            # NOTE(review): ``entry`` is the repr of a dict and is eval'd
            # (several times per record); eval on untrusted data is unsafe.
            abstract = dict(eval(entry))['description']
            corpus.append(abstract)
            #cited=dict(eval(entry))['citedby_count']
            #print(cited)
            journal=dict(eval(entry))['publicationName']
            #print(journal)
            data = dict(eval(entry))['coverDate']
            doi = dict(eval(entry))['doi']
            dois.append(doi)
            #print(abstract)
            #print(data)
            if doi not in dois_unique:
                if str(abstract) != 'None':
                    dois_unique.append(doi)
                    corpus_unique_doi.append(abstract)
                    if journal in journals:
                        # Second append: deliberate doubling for these journals
                        # (indentation reconstructed — verify nesting).
                        dois_unique.append(doi)
                        corpus_unique_doi.append(abstract)
    return corpus,corpus_unique_doi, dois, dois_unique
def init_list_of_objects(size):
    """Return a list of *size* distinct empty lists.

    A comprehension is used (rather than ``[[]] * size``) so every slot is a
    different object and appends to one bucket do not alias the others.
    """
    return [[] for _ in range(size)]
def extract_abstract_time(json_dir,years):
    """Bucket unique abstracts and DOIs by publication year.

    :param json_dir: path to a JSON-lines Scopus dump (see extract_abstract_all).
    :param years: list of 4-character year strings; assumed sorted ascending —
        records older than years[0] are folded into the first bucket.
    :return: (corpus, dois, dois_unique_years, corpus_unique_doi_years) where
        the last two are per-year buckets aligned with *years*.
    """
    corpus=[]
    dois=[]
    dois_unique=[]
    dois_unique_years=init_list_of_objects(len(years))
    corpus_unique_doi_years=init_list_of_objects(len(years))
    #corpus_unique_doi_years.append([np.zeros(1,len(years))])
    #dois_unique_years.append([np.zeros(1,len(years))])
    with open(json_dir,encoding="utf-8") as f:
        print("f: ",f)
        for jsonObj in f:
            entry = json.loads(jsonObj)['entry']
            # NOTE(review): ``entry`` is the repr of a dict and is eval'd;
            # eval on untrusted data is unsafe (consider ast.literal_eval).
            abstract = dict(eval(entry))['description']
            corpus.append(abstract)
            #cited=dict(eval(entry))['citedby_count']
            #print(cited)
            data = dict(eval(entry))['coverDate']
            # The first four characters of coverDate are used as the year.
            year=data[0:4]
            print("year ",year)
            doi = dict(eval(entry))['doi']
            dois.append(doi)
            #print(abstract)
            #print(data)
            if doi not in dois_unique:
                if str(abstract) != 'None':
                    dois_unique.append(doi)
                    # corpus_unique_doi.append(abstract)
                    if year in years:
                        index=years.index(year)
                        #print("index ",index,corpus_unique_doi_years)
                        corpus_unique_doi_years[index].append(abstract)
                        dois_unique_years[index].append(doi)
                        #corpus_unique_doi.append(abstract)
                    # Records predating the first tracked year land in bucket 0
                    # (indentation reconstructed — TODO confirm nesting).
                    if int(year) < int(years[0]):
                        index=0
                        corpus_unique_doi_years[index].append(abstract)
                        dois_unique_years[index].append(doi)
    #for index in range(len(years)):
    # corpus_unique_doi_years[index].pop(0)
    return corpus,dois, dois_unique_years,corpus_unique_doi_years
def extract_authors_keywords_citations(json_dir,years,keyword):
    """Bucket citation counts by year for records matching any of *keyword*.

    Keywords for each record are built from its author keywords ('|'-separated)
    and/or its title tokens, lower-cased and stripped.

    :param json_dir: path to a JSON-lines Scopus dump.
    :param years: list of 4-character year strings, assumed sorted ascending.
    :param keyword: collection of search keywords; a record matches when at
        least one of them appears in the record's derived keyword list.
    :return: (citations, dois, dois_unique_years, citations_unique_doi_years)
    """
    citations=[]
    dois=[]
    dois_unique=[]
    dois_unique_years=init_list_of_objects(len(years))
    citations_unique_doi_years=init_list_of_objects(len(years))
    with open(json_dir,encoding="utf-8") as f:
        for jsonObj in f:
            entry = json.loads(jsonObj)['entry']
            # NOTE(review): ``entry`` is the repr of a dict and is eval'd
            # repeatedly; eval on untrusted data is unsafe.
            abstract = dict(eval(entry))['description']
            #corpus.append(abstract)
            cited=dict(eval(entry))['citedby_count']
            #print(cited)
            citations.append(cited)
            data = dict(eval(entry))['coverDate']
            # First four characters of coverDate are used as the year.
            year=data[0:4]
            #print("year ",year)
            doi = dict(eval(entry))['doi']
            dois.append(doi)
            title = dict(eval(entry))['title']
            keywords= dict(eval(entry))['authkeywords']
            #print("title, type(title): ", title, type(title), str(title))
            # Build the keyword list from whichever of title/authkeywords exist.
            if str(title) != 'None' and str(keywords) == 'None':
                title = title.lower()
                keywords = title.split()
                keywords =[i.strip().lower() for i in keywords]
                #print("title, type(title): ", keywords, type(keywords), str(keywords))
            elif str(title) == 'None' and str(keywords) != 'None':
                keywords=keywords.lower()
                keywords=keywords.split('|')
                keywords =[i.strip().lower() for i in keywords]
            elif str(title) != 'None' and str(keywords) != 'None':
                keywords=keywords.lower()
                keywords=keywords.split('|')
                title = title.lower()
                keywords += title.split()
                keywords =[i.strip().lower() for i in keywords]
            else:
                keywords = [' ']
            #print(title.split(),keywords.split('|'))
            #else: keywords = title.split()
            #print(keywords)
            if doi not in dois_unique:
                if str(abstract) != 'None':
                    dois_unique.append(doi)
                    if year in years:
                        index=years.index(year)
                        #print("index ",index,corpus_unique_doi_years)
                        #print("check keyword ", keyword[0] == any(keywords), keyword[0], any(keywords))
                        # Intersection of search keywords with the record's keywords.
                        keyword_logic=[x for x in keyword if x in keywords]
                        #print("keyword logic,keyword,keywords: ",keyword_logic,keyword,keywords)
                        if keyword_logic != []:
                            citations_unique_doi_years[index].append(cited)
                            dois_unique_years[index].append(doi)
                    if int(year) < int(years[0]):
                        index=0
                        # NOTE(review): ``any(keyword) == any(keywords)`` compares
                        # truthiness only, not keyword overlap — it is almost
                        # always True and looks like a bug; the keyword_logic
                        # test above was probably intended. Left unchanged.
                        if any(keyword) == any(keywords):
                            citations_unique_doi_years[index].append(cited)
                            dois_unique_years[index].append(doi)
    return citations,dois, dois_unique_years,citations_unique_doi_years
if __name__=="__main__":
    # Quick end-to-end run: parse a Scopus dump and report corpus statistics.
    import sys
    # Fixes over the original script body:
    #  * ``json_dir`` was referenced but never defined (NameError) — it is now
    #    taken from argv, with a placeholder default (TODO confirm filename);
    #  * the 4-tuple returned by extract_abstract_all was unpacked into only
    #    3 names, dropping ``corpus`` and raising ValueError.
    json_dir = sys.argv[1] if len(sys.argv) > 1 else "all_abstracts.json"
    corpus, corpus_unique_doi, dois, dois_unique = extract_abstract_all(json_dir)
    print("Total #dois: ",len(dois))
    new = list(set(dois))
    print("Unique #dois: ",len(new))
    print("use set funtion")
    print("Total #abstracts: ",len(corpus))
    write(corpus,'all_abstracts_11_29.dat') #,encoding="utf-8")
    new = list(set(corpus))
    print("Unique #abstracts: ",len(new))
    print("use dos screening for unique")
    print("Unique #dois: ",len(dois_unique))
    print("Unique #abstracts: ",len(corpus_unique_doi))
    write(corpus_unique_doi,'all_abstracts_doi_unique.dat') #,encoding="utf-8")
| 37.317708 | 90 | 0.651779 |
7a280e144ed597c9361248edc50a24c0f1b30d36 | 2,228 | py | Python | relationshipTree/sqlite_Neo4j/importsqlite2neo4j.py | KellyShang/Python-Pilots | e6539bdc1419bcd8f76e8765b841d7bc633aec79 | [
"MIT"
] | null | null | null | relationshipTree/sqlite_Neo4j/importsqlite2neo4j.py | KellyShang/Python-Pilots | e6539bdc1419bcd8f76e8765b841d7bc633aec79 | [
"MIT"
] | null | null | null | relationshipTree/sqlite_Neo4j/importsqlite2neo4j.py | KellyShang/Python-Pilots | e6539bdc1419bcd8f76e8765b841d7bc633aec79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Create By: Kelly Shang
# Create Date: Apr. 2017
# Purpose: import data from sqlite to Neo4j database
import operator
import os
import sys
import sqlite3
from py2neo import Graph
class ImportSQLite2Neo4j(object):
    """Copy staff records from a SQLite database into Neo4j.

    NOTE(review): all of the work below lives directly in the class body,
    not in methods, so merely importing this module performs the whole
    migration; instantiating the class afterwards is a no-op.
    """
    # Directory holding the SQLite database (sibling 'csv_sqlite' directory).
    dbaddr = os.path.dirname(os.path.dirname(__file__)) + '\csv_sqlite'

    def trans_dict(rows, col_name_list):
        """Turn row tuples into dicts keyed by column name, sorted by staff_id.

        NOTE(review): defined without ``self`` and called straight from the
        class body below, where it behaves as a plain function.
        """
        dict_all = []
        for i in rows:
            m = 0
            dict_one = {} ## note location
            for k in i:
                dict_one[col_name_list[m]] = i[m]
                m = m + 1
            print('dict_one: ', dict_one)
            dict_all.append(dict_one)
        print('dict_all: ', dict_all)
        sorted_dict = sorted(dict_all, key=operator.itemgetter('staff_id'))
        return sorted_dict

    # link to Sqlite
    conn = sqlite3.connect(dbaddr + '\\callTree.db')
    cur = conn.cursor()
    # ---------------- staff (user) table ----------------
    cur.execute('select * from staff')
    # fetchall returns a list; each element is a tuple (one table row)
    rows_user = cur.fetchall()
    cur.close()
    print('rows staff: ',rows_user) # staff info rows
    # Column names from the cursor metadata; ``tuple`` shadows the builtin here.
    user_col_name_list = [tuple[0] for tuple in cur.description] # ['staff_id', 'staff_name', 'phone', 'report_to']
    print('user_col_name_list: ', user_col_name_list)
    sorted_user_dict = trans_dict(rows_user, user_col_name_list)
    print('sorted_user_dict after def: ', sorted_user_dict)

    # link to Neo4j
    link = "http://neo4j:neo4j@localhost:7474/db/data/"
    gdb = Graph(link)
    rec = gdb.run("MATCH(staff:Staff) return staff limit 1")
    print('length1: ', len(rec.data()))
    # Upsert one Staff node per row.
    for line in sorted_user_dict:
        query = "Merge (staff:Staff {staff_id: %s, staff_name: '%s', phone: '%s', report_to:'%s'})" % (
            line['staff_id'], line['staff_name'], line['phone'], line['report_to'])
        print('query: ', query)
        insert = gdb.run(query)
    # Link each staff member to the person they report to.
    query2 = """ MATCH (s1:Staff),(s2:Staff)
    where s1.staff_name = s2.report_to
    MERGE (s2) -[:Report_To]-> (s1)"""
    insert2 = gdb.run(query2)
    rec = gdb.run("MATCH(staff:Staff) return staff ")
    print('length2: ', len(rec.data()))
if __name__ == '__main__':
    # The migration already ran when the class body above executed at import
    # time; instantiating here merely creates an (unused) instance.
    ImportSQLite2Neo4j()
| 30.520548 | 116 | 0.598743 |
6a3d3c853c1ff3cff4db57b0a57726426fc11128 | 4,343 | py | Python | ibm_whcs_sdk/annotator_for_clinical_data/tests/common/test_attribute_values.py | Bhaskers-Blu-Org1/whcs-python-sdk | df71cf68471061a0975dda5121a2ea933c80fdfa | [
"Apache-2.0"
] | null | null | null | ibm_whcs_sdk/annotator_for_clinical_data/tests/common/test_attribute_values.py | Bhaskers-Blu-Org1/whcs-python-sdk | df71cf68471061a0975dda5121a2ea933c80fdfa | [
"Apache-2.0"
] | null | null | null | ibm_whcs_sdk/annotator_for_clinical_data/tests/common/test_attribute_values.py | Bhaskers-Blu-Org1/whcs-python-sdk | df71cf68471061a0975dda5121a2ea933c80fdfa | [
"Apache-2.0"
] | 1 | 2020-07-30T10:28:30.000Z | 2020-07-30T10:28:30.000Z | # coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibm_whcs_sdk.annotator_for_clinical_data.tests.common import test_concept_annotation as tc
from ibm_whcs_sdk.annotator_for_clinical_data.tests.common import test_disambiguation as td
from ibm_whcs_sdk.annotator_for_clinical_data.tests.common import test_insight_model as ti
class TestAttributeValueAnnotation(object):
    """Structural checks for attribute-value annotations in ACD responses."""

    @staticmethod
    def test_attribute_values(annotation_list=None):
        """Assert validity of every annotation in *annotation_list*.

        Does nothing when *annotation_list* is None. Optional string/list
        fields must be non-empty when present; span fields are mandatory;
        nested structures are delegated to their dedicated validators.
        """
        if annotation_list is None:
            return
        # Optional fields that, when present, must have a positive length.
        leading_fields = ('id', 'type')
        code_fields = (
            'preferred_name', 'source', 'source_version', 'name',
            'icd9_code', 'icd10_code', 'nci_code', 'snomed_concept_id',
            'mesh_id', 'rx_norm_id', 'loinc_id', 'vocabs',
            'section_normalized_name', 'section_surface_form', 'cpt_code',
        )
        trailing_fields = ('ccs_code', 'hcc_code', 'rule_id')
        for annotation in annotation_list:
            for field in leading_fields:
                value = getattr(annotation, field)
                if value is not None:
                    assert len(value) > 0
            # Span information is mandatory on every annotation.
            assert annotation.begin is not None
            assert annotation.end is not None
            assert annotation.covered_text is not None
            if annotation.uid is not None:
                assert annotation.uid > 0
            for field in code_fields:
                value = getattr(annotation, field)
                if value is not None:
                    assert len(value) > 0
            if annotation.values is not None:
                for value in annotation.values:
                    assert value is not None
            # Nested structures get their own validators.
            if annotation.concept is not None:
                tc.TestConceptAnnotation.test_concept_annotation(annotation.concept)
            if annotation.disambiguation_data is not None:
                td.TestDisambiguation.test_disambiguation_data(annotation.disambiguation_data)
            if annotation.insight_model_data is not None:
                ti.TestInsightModel.test_insight_model_data(annotation.insight_model_data)
            for field in trailing_fields:
                value = getattr(annotation, field)
                if value is not None:
                    assert len(value) > 0
            if annotation.derived_from is not None:
                for entry in annotation.derived_from:
                    assert entry is not None
| 51.702381 | 98 | 0.616164 |
b51a02ba7bf96a5361aef5a08fcb2be98d4026e0 | 260 | py | Python | tests/artificial/transf_None/trend_PolyTrend/cycle_0/ar_/test_artificial_1024_None_PolyTrend_0__20.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_None/trend_PolyTrend/cycle_0/ar_/test_artificial_1024_None_PolyTrend_0__20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_None/trend_PolyTrend/cycle_0/ar_/test_artificial_1024_None_PolyTrend_0__20.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Auto-generated benchmark case: 1024 daily points, polynomial trend, no
# cycle, no transform, zero noise, 20 exogenous variables, AR order 0.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | 37.142857 | 160 | 0.726923 |
ebd2700c9cd697ba4cb796ca3b2a3e41115d078b | 24,100 | py | Python | frappe/commands/site.py | gwhitney/frappe | d0fbc17261be24d8347a49bcbbf9d13119e310f1 | [
"MIT"
] | null | null | null | frappe/commands/site.py | gwhitney/frappe | d0fbc17261be24d8347a49bcbbf9d13119e310f1 | [
"MIT"
] | null | null | null | frappe/commands/site.py | gwhitney/frappe | d0fbc17261be24d8347a49bcbbf9d13119e310f1 | [
"MIT"
] | null | null | null | # imports - standard imports
import os
import sys
import shutil
# imports - third party imports
import click
# imports - module imports
import frappe
from frappe.commands import get_site, pass_context
from frappe.exceptions import SiteNotSpecifiedError
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--db-password', help='Database password')
@click.option('--db-type', default='mariadb', type=click.Choice(['mariadb', 'postgres']), help='Optional "postgres" or "mariadb". Default is "mariadb"')
@click.option('--db-host', help='Database Host')
@click.option('--db-port', type=int, help='Database Port')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--no-mariadb-socket', is_flag=True, default=False, help='Set MariaDB host to % and use TCP/IP Socket instead of using the UNIX Socket')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None,
        verbose=False, install_apps=None, source_sql=None, force=None, no_mariadb_socket=False,
        install_app=None, db_name=None, db_password=None, db_type=None, db_host=None, db_port=None):
    "Create a new site"
    # NOTE(review): ``install_apps`` is not wired to any click option and is
    # unused below (``install_app`` carries --install-app); it is kept only
    # for signature compatibility.
    from frappe.installer import _new_site

    # Bootstrap a bare frappe context for a site that does not exist yet.
    frappe.init(site=site, new_site=True)

    _new_site(db_name, site, mariadb_root_username=mariadb_root_username,
        mariadb_root_password=mariadb_root_password, admin_password=admin_password,
        verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force,
        no_mariadb_socket=no_mariadb_socket, db_password=db_password, db_type=db_type, db_host=db_host,
        db_port=db_port, new_site=True)

    # If this is the only site on the bench, select it as the current site
    # (``use`` is defined elsewhere in this module).
    if len(frappe.utils.get_sites()) == 1:
        use(site)
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@click.option('--with-public-files', help='Restores the public files of the site, given path to its tar file')
@click.option('--with-private-files', help='Restores the private files of the site, given path to its tar file')
@click.option('--force', is_flag=True, default=False, help='Ignore the validations and downgrade warnings. This action is not recommended')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None, with_public_files=None, with_private_files=None):
    "Restore site database from an sql file"
    from frappe.installer import (
        _new_site,
        extract_sql_from_archive,
        extract_files,
        is_downgrade,
        is_partial,
        validate_database_sql
    )

    # Either the command's own --force or the global bench --force enables it.
    force = context.force or force
    # Decompress .gz/.sql.gz style archives; returns the original path for
    # plain .sql input.
    decompressed_file_name = extract_sql_from_archive(sql_file_path)

    # check if partial backup
    if is_partial(decompressed_file_name):
        click.secho(
            "Partial Backup file detected. You cannot use a partial file to restore a Frappe Site.",
            fg="red"
        )
        click.secho(
            "Use `bench partial-restore` to restore a partial backup to an existing site.",
            fg="yellow"
        )
        sys.exit(1)

    # check if valid SQL file; raises unless --force was given
    validate_database_sql(decompressed_file_name, _raise=not force)

    site = get_site(context)
    frappe.init(site=site)

    # don't allow downgrading to older versions of frappe without force
    if not force and is_downgrade(decompressed_file_name, verbose=True):
        warn_message = (
            "This is not recommended and may lead to unexpected behaviour. "
            "Do you want to continue anyway?"
        )
        click.confirm(warn_message, abort=True)

    # Rebuild the site from the SQL dump (force=True: database already exists).
    _new_site(frappe.conf.db_name, site, mariadb_root_username=mariadb_root_username,
        mariadb_root_password=mariadb_root_password, admin_password=admin_password,
        verbose=context.verbose, install_apps=install_app, source_sql=decompressed_file_name,
        force=True, db_type=frappe.conf.db_type)

    # Extract public and/or private files to the restored site, if user has given the path
    if with_public_files:
        public = extract_files(site, with_public_files)
        os.remove(public)

    if with_private_files:
        private = extract_files(site, with_private_files)
        os.remove(private)

    # Removing temporarily created file (only when decompression happened)
    if decompressed_file_name != sql_file_path:
        os.remove(decompressed_file_name)

    success_message = "Site {0} has been restored{1}".format(
        site,
        " with files" if (with_public_files or with_private_files) else ""
    )
    click.secho(success_message, fg="green")
@click.command('partial-restore')
@click.argument('sql-file-path')
@click.option("--verbose", "-v", is_flag=True)
@pass_context
def partial_restore(context, sql_file_path, verbose):
    # Alias the installer helper so it does not shadow this command's name.
    from frappe.installer import partial_restore as run_partial_restore

    be_verbose = context.verbose or verbose
    target_site = get_site(context)
    # Bring up a full, connected site context before touching the database.
    frappe.init(site=target_site)
    frappe.connect(site=target_site)
    run_partial_restore(sql_file_path, be_verbose)
    frappe.destroy()
@click.command('reinstall')
@click.option('--admin-password', help='Administrator Password for reinstalled site')
@click.option('--mariadb-root-username', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--yes', is_flag=True, default=False, help='Pass --yes to skip confirmation')
@pass_context
def reinstall(context, admin_password=None, mariadb_root_username=None, mariadb_root_password=None, yes=False):
    "Reinstall site ie. wipe all data and start over"
    # Resolve the target site from the CLI context, then hand the destructive
    # work to the shared _reinstall helper.
    target = get_site(context)
    _reinstall(
        target,
        admin_password=admin_password,
        mariadb_root_username=mariadb_root_username,
        mariadb_root_password=mariadb_root_password,
        yes=yes,
        verbose=context.verbose,
    )
def _reinstall(site, admin_password=None, mariadb_root_username=None, mariadb_root_password=None, yes=False, verbose=False):
    """Wipe the site's database and recreate it with the same installed apps.

    The current app list is read first so it can be reinstalled afterwards;
    if the site is too broken to connect to, the app list falls back to empty.
    """
    from frappe.installer import _new_site
    if not yes:
        click.confirm('This will wipe your database. Are you sure you want to reinstall?', abort=True)
    try:
        frappe.init(site=site)
        frappe.connect()
        frappe.clear_cache()
        installed = frappe.get_installed_apps()
        frappe.clear_cache()
    except Exception:
        # Site unreachable/corrupt -- reinstall with no additional apps.
        installed = []
    finally:
        # Always tear the connection down before recreating the site.
        if frappe.db:
            frappe.db.close()
        frappe.destroy()
    frappe.init(site=site)
    _new_site(frappe.conf.db_name, site, verbose=verbose, force=True, reinstall=True, install_apps=installed,
        mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password,
        admin_password=admin_password)
@click.command('install-app')
@click.argument('apps', nargs=-1)
@pass_context
def install_app(context, apps):
    "Install a new app to site, supports multiple apps"
    from frappe.installer import install_app as _install_app
    # Non-zero exit code is reported at the end so one failing app does not
    # stop the remaining installs.
    exit_code = 0
    if not context.sites:
        raise SiteNotSpecifiedError
    for site in context.sites:
        frappe.init(site=site)
        frappe.connect()
        for app in apps:
            try:
                _install_app(app, verbose=context.verbose)
            except frappe.IncompatibleApp as err:
                err_msg = ":\n{}".format(err) if str(err) else ""
                print("App {} is Incompatible with Site {}{}".format(app, site, err_msg))
                exit_code = 1
            except Exception as err:
                # Fall back to the full traceback when the exception carries no message.
                err_msg = ":\n{}".format(err if str(err) else frappe.get_traceback())
                print("An error occurred while installing {}{}".format(app, err_msg))
                exit_code = 1
        frappe.destroy()
    sys.exit(exit_code)
@click.command("list-apps")
@click.option("--format", "-f", type=click.Choice(["text", "json"]), default="text")
@pass_context
def list_apps(context, format):
    "List apps in site"
    # site -> list of app names; emitted at the end when --format json.
    summary_dict = {}

    def fix_whitespaces(text):
        # Closes over ``site``: trim the trailing newline on the last site and
        # the leading (empty) title line when only one site is listed.
        if site == context.sites[-1]:
            text = text.rstrip()
        if len(context.sites) == 1:
            text = text.lstrip()
        return text

    for site in context.sites:
        frappe.init(site=site)
        frappe.connect()
        # Only print a site header when listing more than one site.
        site_title = (
            click.style(f"{site}", fg="green") if len(context.sites) > 1 else ""
        )
        apps = frappe.get_single("Installed Applications").installed_applications
        if apps:
            # Column widths for aligned text output. Use generator expressions:
            # no need to materialize a throwaway list just to take max().
            name_len, ver_len = [
                max(len(x.get(y)) for x in apps)
                for y in ["app_name", "app_version"]
            ]
            template = "{{0:{0}}} {{1:{1}}} {{2}}".format(name_len, ver_len)
            installed_applications = [
                template.format(app.app_name, app.app_version, app.git_branch)
                for app in apps
            ]
            applications_summary = "\n".join(installed_applications)
            summary = f"{site_title}\n{applications_summary}\n"
            summary_dict[site] = [app.app_name for app in apps]
        else:
            # Older sites without the "Installed Applications" single: fall
            # back to the plain app list (no version/branch info available).
            installed_applications = frappe.get_installed_apps()
            applications_summary = "\n".join(installed_applications)
            summary = f"{site_title}\n{applications_summary}\n"
            summary_dict[site] = installed_applications
        summary = fix_whitespaces(summary)
        if format == "text" and applications_summary and summary:
            print(summary)
        frappe.destroy()
    if format == "json":
        import json
        click.echo(json.dumps(summary_dict))
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@click.option('--password')
@click.option('--send-welcome-email', default=False, is_flag=True)
@pass_context
def add_system_manager(context, email, first_name, last_name, send_welcome_email, password):
    "Add a new system manager to a site"
    import frappe.utils.user
    for site in context.sites:
        # NOTE(review): connect() is called without a prior frappe.init();
        # presumably connect(site=...) initializes the site itself -- confirm.
        frappe.connect(site=site)
        try:
            frappe.utils.user.add_system_manager(email, first_name, last_name,
                send_welcome_email, password)
            frappe.db.commit()
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('disable-user')
@click.argument('email')
@pass_context
def disable_user(context, email):
    # Disable the User record identified by *email* on the current site.
    site = get_site(context)
    with frappe.init_site(site):
        frappe.connect()
        user_doc = frappe.get_doc("User", email)
        user_doc.enabled = 0
        user_doc.save(ignore_permissions=True)
        frappe.db.commit()
@click.command('migrate')
@click.option('--skip-failing', is_flag=True, help="Skip patches that fail to run")
@click.option('--skip-search-index', is_flag=True, help="Skip search indexing for web documents")
@pass_context
def migrate(context, skip_failing=False, skip_search_index=False):
    "Run patches, sync schema and rebuild files/translations"
    # The unused `import re` that used to live here has been removed.
    from frappe.migrate import migrate
    for site in context.sites:
        print('Migrating', site)
        frappe.init(site=site)
        frappe.connect()
        try:
            migrate(
                context.verbose,
                skip_failing=skip_failing,
                skip_search_index=skip_search_index
            )
        finally:
            # Always tear down the site context, even if a patch failed.
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('migrate-to')
@click.argument('frappe_provider')
@pass_context
def migrate_to(context, frappe_provider):
    "Migrates site to the specified provider"
    from frappe.integrations.frappe_providers import migrate_to
    for site in context.sites:
        frappe.init(site=site)
        frappe.connect()
        # Delegates the whole migration to the provider integration.
        migrate_to(site, frappe_provider)
        frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('run-patch')
@click.argument('module')
@click.option('--force', is_flag=True)
@pass_context
def run_patch(context, module, force):
    "Run a particular patch"
    import frappe.modules.patch_handler
    for site in context.sites:
        frappe.init(site=site)
        try:
            frappe.connect()
            # Either this command's --force or the global bench --force applies.
            frappe.modules.patch_handler.run_single(module, force=force or context.force)
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
    "Reload schema for a DocType"
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.reload_doc(module, doctype, docname, force=context.force)
            frappe.db.commit()
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('reload-doctype')
@click.argument('doctype')
@pass_context
def reload_doctype(context, doctype):
    "Reload schema for a DocType"
    # Reloads the DocType definition on every selected site.
    for site_name in context.sites:
        try:
            frappe.init(site=site_name)
            frappe.connect()
            frappe.reload_doctype(doctype, force=context.force)
            frappe.db.commit()
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('add-to-hosts')
@pass_context
def add_to_hosts(context):
    "Add site to hosts"
    for site in context.sites:
        # Appends "127.0.0.1<TAB><site>" to /etc/hosts via sudo tee -a.
        frappe.commands.popen('echo 127.0.0.1\t{0} | sudo tee -a /etc/hosts'.format(site))
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
    "Set a default site"
    # CLI wrapper around use(); sites_path stays at the bench's cwd default.
    use(site, sites_path=sites_path)
def use(site, sites_path='.'):
    # Persist *site* as the default site in <sites_path>/currentsite.txt,
    # but only when a directory for that site actually exists.
    site_directory = os.path.join(sites_path, site)
    if not os.path.exists(site_directory):
        print("Site {} does not exist".format(site))
        return
    with open(os.path.join(sites_path, "currentsite.txt"), "w") as sitefile:
        sitefile.write(site)
    print("Current Site set to {}".format(site))
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@click.option('--include', '--only', '-i', default="", type=str, help="Specify the DocTypes to backup seperated by commas")
@click.option('--exclude', '-e', default="", type=str, help="Specify the DocTypes to not backup seperated by commas")
@click.option('--backup-path', default=None, help="Set path for saving all the files in this operation")
@click.option('--backup-path-db', default=None, help="Set path for saving database file")
@click.option('--backup-path-files', default=None, help="Set path for saving public file")
@click.option('--backup-path-private-files', default=None, help="Set path for saving private file")
@click.option('--backup-path-conf', default=None, help="Set path for saving config file")
@click.option('--ignore-backup-conf', default=False, is_flag=True, help="Ignore excludes/includes set in config")
@click.option('--verbose', default=False, is_flag=True, help="Add verbosity")
@click.option('--compress', default=False, is_flag=True, help="Compress private and public files")
@pass_context
def backup(context, with_files=False, backup_path=None, backup_path_db=None, backup_path_files=None,
        backup_path_private_files=None, backup_path_conf=None, ignore_backup_conf=False, verbose=False,
        compress=False, include="", exclude=""):
    "Backup"
    from frappe.utils.backups import scheduled_backup
    verbose = verbose or context.verbose
    # Keep going over all sites; report a non-zero exit if any backup failed.
    exit_code = 0
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            odb = scheduled_backup(
                ignore_files=not with_files,
                backup_path=backup_path,
                backup_path_db=backup_path_db,
                backup_path_files=backup_path_files,
                backup_path_private_files=backup_path_private_files,
                backup_path_conf=backup_path_conf,
                ignore_conf=ignore_backup_conf,
                include_doctypes=include,
                exclude_doctypes=exclude,
                compress=compress,
                verbose=verbose,
                force=True
            )
        except Exception:
            click.secho("Backup failed for Site {0}. Database or site_config.json may be corrupted".format(site), fg="red")
            if verbose:
                print(frappe.get_traceback())
            exit_code = 1
            # NOTE(review): this continue skips frappe.destroy() for the
            # failed site -- confirm whether teardown is needed here too.
            continue
        odb.print_summary()
        click.secho("Backup for Site {0} has been successfully completed{1}".format(site, " with files" if with_files else ""), fg="green")
        frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
    sys.exit(exit_code)
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
    "Remove app from site's installed-apps list"
    from frappe.installer import remove_from_installed_apps
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            # Only removes the list entry; does not uninstall the app's data.
            remove_from_installed_apps(app)
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('uninstall-app')
@click.argument('app')
@click.option('--yes', '-y', help='To bypass confirmation prompt for uninstalling the app', is_flag=True, default=False, multiple=True)
@click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False)
@click.option('--no-backup', help='Do not backup the site', is_flag=True, default=False)
@click.option('--force', help='Force remove app from site', is_flag=True, default=False)
@pass_context
def uninstall(context, app, dry_run, yes, no_backup, force):
    "Remove app and linked modules from site"
    from frappe.installer import remove_app
    # NOTE(review): --yes uses multiple=True, so `yes` arrives as a tuple
    # (empty == falsy); confirm remove_app only treats it as a boolean.
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            remove_app(app_name=app, dry_run=dry_run, yes=yes, no_backup=no_backup, force=force)
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('drop-site')
@click.argument('site')
@click.option('--root-login', default='root')
@click.option('--root-password')
@click.option('--archived-sites-path')
@click.option('--no-backup', is_flag=True, default=False)
@click.option('--force', help='Force drop-site even if an error is encountered', is_flag=True, default=False)
def drop_site(site, root_login='root', root_password=None, archived_sites_path=None, force=False, no_backup=False):
    # Thin CLI wrapper; the actual work happens in _drop_site().
    _drop_site(site, root_login, root_password, archived_sites_path, force, no_backup)
def _drop_site(site, root_login='root', root_password=None, archived_sites_path=None, force=False, no_backup=False):
    "Remove site from database and filesystem"
    from frappe.database import drop_user_and_database
    from frappe.utils.backups import scheduled_backup
    frappe.init(site=site)
    frappe.connect()
    try:
        # Take a final full backup (db + files) unless explicitly skipped.
        if not no_backup:
            scheduled_backup(ignore_files=False, force=True)
    except Exception as err:
        if force:
            # --force: proceed with the drop even though the backup failed.
            pass
        else:
            messages = [
                "=" * 80,
                "Error: The operation has stopped because backup of {0}'s database failed.".format(site),
                "Reason: {0}\n".format(str(err)),
                "Fix the issue and try again.",
                "Hint: Use 'bench drop-site {0} --force' to force the removal of {0}".format(site)
            ]
            click.echo("\n".join(messages))
            sys.exit(1)
    # Drop the database and its dedicated DB user.
    drop_user_and_database(frappe.conf.db_name, root_login, root_password)
    # Default archive location: ../../../archived_sites relative to the frappe app.
    if not archived_sites_path:
        archived_sites_path = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites')
        if not os.path.exists(archived_sites_path):
            os.mkdir(archived_sites_path)
    move(archived_sites_path, site)
def move(dest_dir, site):
    """Move the site's directory into *dest_dir* and return the final path.

    When a dump with the same name already exists in *dest_dir*, a numeric
    suffix is appended (site, site1, site2, ...) until a free name is found.
    Raises Exception when *dest_dir* is not an existing directory.
    """
    if not os.path.isdir(dest_dir):
        raise Exception("destination is not a directory or does not exist")

    frappe.init(site)
    old_path = frappe.utils.get_site_path()
    new_path = os.path.join(dest_dir, site)

    # Replaces the old Python-2-era `count and str(count) or ""` /
    # `int(count or 0)` idioms with a straightforward collision probe.
    count = 0
    final_new_path = new_path
    while os.path.exists(final_new_path):
        count += 1
        final_new_path = new_path + str(count)

    shutil.move(old_path, final_new_path)
    frappe.destroy()
    return final_new_path
@click.command('set-admin-password')
@click.argument('admin-password')
@click.option('--logout-all-sessions', help='Logout from all sessions', is_flag=True, default=False)
@pass_context
def set_admin_password(context, admin_password, logout_all_sessions=False):
    "Set Administrator password for a site"
    import getpass
    from frappe.utils.password import update_password
    for site in context.sites:
        try:
            frappe.init(site=site)
            # Prompt if no password was given on the command line.
            while not admin_password:
                admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
            frappe.connect()
            update_password(user='Administrator', pwd=admin_password, logout_all_sessions=logout_all_sessions)
            frappe.db.commit()
            # Reset so each subsequent site prompts for its own password.
            admin_password = None
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('set-last-active-for-user')
@click.option('--user', help="Setup last active date for user")
@pass_context
def set_last_active_for_user(context, user=None):
    "Set users last active date to current datetime"
    from frappe.core.doctype.user.user import get_system_users
    from frappe.utils.user import set_last_active_to_now
    site = get_site(context)
    with frappe.init_site(site):
        frappe.connect()
        if not user:
            # Default to the first system user; silently do nothing if none exist.
            user = get_system_users(limit=1)
            if len(user) > 0:
                user = user[0]
            else:
                return
        set_last_active_to_now(user)
        frappe.db.commit()
@click.command('publish-realtime')
@click.argument('event')
@click.option('--message')
@click.option('--room')
@click.option('--user')
@click.option('--doctype')
@click.option('--docname')
@click.option('--after-commit')
@pass_context
def publish_realtime(context, event, message, room, user, doctype, docname, after_commit):
    "Publish realtime event from bench"
    from frappe import publish_realtime
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            publish_realtime(event, message=message, room=room, user=user, doctype=doctype, docname=docname,
                after_commit=after_commit)
            # Commit so events queued with after_commit actually fire.
            frappe.db.commit()
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('browse')
@click.argument('site', required=False)
@pass_context
def browse(context, site):
    '''Opens the site on web browser'''
    import webbrowser
    # A site given via `bench --site` takes precedence over the argument.
    site = context.sites[0] if context.sites else site
    if not site:
        click.echo('''Please provide site name\n\nUsage:\n\tbench browse [site-name]\nor\n\tbench --site [site-name] browse''')
        return
    site = site.lower()
    if site in frappe.utils.get_sites():
        # new=2: open in a new browser tab where possible.
        webbrowser.open(frappe.utils.get_site_url(site), new=2)
    else:
        click.echo("\nSite named \033[1m{}\033[0m doesn't exist\n".format(site))
@click.command('start-recording')
@pass_context
def start_recording(context):
    # Turn on frappe.recorder profiling for every selected site.
    import frappe.recorder
    if not context.sites:
        raise SiteNotSpecifiedError
    for site_name in context.sites:
        frappe.init(site=site_name)
        frappe.set_user("Administrator")
        frappe.recorder.start()
@click.command('stop-recording')
@pass_context
def stop_recording(context):
    # Turn off frappe.recorder profiling for every selected site.
    import frappe.recorder
    if not context.sites:
        raise SiteNotSpecifiedError
    for site_name in context.sites:
        frappe.init(site=site_name)
        frappe.set_user("Administrator")
        frappe.recorder.stop()
@click.command('ngrok')
@pass_context
def start_ngrok(context):
    # Expose the local bench webserver through an ngrok tunnel.
    from pyngrok import ngrok
    site = get_site(context)
    frappe.init(site=site)
    # Prefer the configured http_port; fall back to webserver_port.
    port = frappe.conf.http_port or frappe.conf.webserver_port
    tunnel = ngrok.connect(addr=str(port), host_header=site)
    print(f'Public URL: {tunnel.public_url}')
    print('Inspect logs at http://localhost:4040')
    ngrok_process = ngrok.get_ngrok_process()
    try:
        # Block until CTRL-C or some other terminating event
        ngrok_process.proc.wait()
    except KeyboardInterrupt:
        print("Shutting down server...")
        frappe.destroy()
        ngrok.kill()
@click.command('build-search-index')
@pass_context
def build_search_index(context):
    # Rebuild the website search index for all routes of the current site.
    from frappe.search.website_search import build_index_for_all_routes
    site = get_site(context)
    if not site:
        raise SiteNotSpecifiedError
    print('Building search index for {}'.format(site))
    frappe.init(site=site)
    frappe.connect()
    try:
        build_index_for_all_routes()
    finally:
        frappe.destroy()
# Click commands exported by this module; collected by the bench CLI loader.
commands = [
    add_system_manager,
    backup,
    drop_site,
    install_app,
    list_apps,
    migrate,
    migrate_to,
    new_site,
    reinstall,
    reload_doc,
    reload_doctype,
    remove_from_installed_apps,
    restore,
    run_patch,
    set_admin_password,
    uninstall,
    disable_user,
    _use,
    set_last_active_for_user,
    publish_realtime,
    browse,
    start_recording,
    stop_recording,
    add_to_hosts,
    start_ngrok,
    build_search_index,
    partial_restore
]
| 32.219251 | 220 | 0.748423 |
363c79a6c1025a673237492e8cb6a3b410a1ac2f | 2,388 | py | Python | tests/e2e/process_mira/tests.py | actris-cloudnet/data-processing | 8ab6fccd5cf48e10e985addcf339b9698a9b09cd | [
"MIT"
] | null | null | null | tests/e2e/process_mira/tests.py | actris-cloudnet/data-processing | 8ab6fccd5cf48e10e985addcf339b9698a9b09cd | [
"MIT"
] | 5 | 2020-08-27T12:34:08.000Z | 2021-09-28T14:49:20.000Z | tests/e2e/process_mira/tests.py | actris-cloudnet/data-processing | 8ab6fccd5cf48e10e985addcf339b9698a9b09cd | [
"MIT"
] | null | null | null | import netCDF4
from os import path
from data_processing import utils
import pytest
from test_utils.utils import count_strings, read_log_file
SCRIPT_PATH = path.dirname(path.realpath(__file__))
class TestMIRAProcessing:
product = 'radar'
instrument = 'mira'
n_img = 4
@pytest.fixture(autouse=True)
def _fetch_params(self, params):
self.full_path = params['full_path']
def test_that_does_not_call_pid_api(self):
f = open(f'{SCRIPT_PATH}/pid.log')
data = f.readlines()
assert len(data) == 0
def test_attributes(self):
nc = netCDF4.Dataset(self.full_path)
assert nc.year == '2021'
assert nc.month == '01'
assert nc.day == '27'
assert nc.cloudnet_file_type == self.product
assert nc.Conventions == 'CF-1.8'
assert nc.source == 'METEK MIRA-35'
assert nc.references == 'https://doi.org/10.21105/joss.02123'
assert hasattr(nc, 'pid') is False
nc.close()
def test_data_values(self):
nc = netCDF4.Dataset(self.full_path)
assert (nc.variables['latitude'][:] - 50.906) < 0.01
assert (nc.variables['longitude'][:] - 6.407) < 0.01
assert (nc.variables['altitude'][:] - 108) < 0.01
nc.close()
def test_that_calls_metadata_api(self):
data = read_log_file(SCRIPT_PATH)
n_raw_files = 2
n_gets = 4 # instrument checks (2) + product check (1) + mira raw (1)
n_puts = 2 + self.n_img
n_posts = n_raw_files
assert len(data) == n_gets + n_puts + n_posts
prefix = '?dateFrom=2021-01-27&dateTo=2021-01-27&site=juelich&developer=True&'
# Check product status
assert f'"GET /api/files{prefix}product=radar&showLegacy=True HTTP/1.1" 200 -' in data[0]
# Two instrument API calls...
# GET MIRA raw data
assert f'"GET /upload-metadata{prefix}instrument=mira&status%5B%5D=uploaded&status%5B%5D=processed HTTP/1.1" 200 -' in data[3]
# PUT file
assert '"PUT /files/20210127_juelich_mira.nc HTTP/1.1" 201 -' in data[4]
# PUT images
img_put = '"PUT /visualizations/20210127_juelich_mira-'
assert count_strings(data, img_put) == self.n_img
# POST metadata
file_put = '"POST /upload-metadata HTTP/1.1" 200 -'
assert count_strings(data, file_put) == n_raw_files
| 32.712329 | 134 | 0.629397 |
a87eb9fb536c63190d4a7bb78ed5f178d154ae34 | 568 | py | Python | packit/constants.py | jscotka/packit | 3e86d3047b36e196374ad95796e7417f4a9a788d | [
"MIT"
] | null | null | null | packit/constants.py | jscotka/packit | 3e86d3047b36e196374ad95796e7417f4a9a788d | [
"MIT"
] | null | null | null | packit/constants.py | jscotka/packit | 3e86d3047b36e196374ad95796e7417f4a9a788d | [
"MIT"
] | null | null | null | DG_PR_COMMENT_KEY_SG_PR = "Source-git pull request ID"
DG_PR_COMMENT_KEY_SG_COMMIT = "Source-git commit"
CONFIG_FILE_NAMES = [
".packit.yaml",
".packit.yml",
".packit.json",
"packit.yaml",
"packit.yml",
"packit.json",
]
# fedmsg topics
URM_NEW_RELEASE_TOPIC = "org.release-monitoring.prod.anitya.project.version.update"
# example:
# https://apps.fedoraproject.org/datagrepper/id?id=2019-a5034b55-339d-4fa5-a72b-db74579aeb5a
GH2FED_RELEASE_TOPIC = "org.fedoraproject.prod.github.release"
DEFAULT_BODHI_NOTE = "New upstream release: {version}"
| 29.894737 | 92 | 0.746479 |
8bb5d673d39e74c66d736fd20e2b047d60e3d765 | 827 | py | Python | tests/test_spyd/test_authentication/test_services/test_vanilla/test_punitive_model_adapter.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | 4 | 2015-05-05T16:44:42.000Z | 2020-10-27T09:45:23.000Z | tests/test_spyd/test_authentication/test_services/test_vanilla/test_punitive_model_adapter.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | null | null | null | tests/test_spyd/test_authentication/test_services/test_vanilla/test_punitive_model_adapter.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | 2 | 2016-12-13T22:21:08.000Z | 2020-03-14T16:44:20.000Z | import unittest
from mock import Mock
from spyd.authentication.services.vanilla import punitive_model_adapter
from spyd.authentication.services.vanilla.punitive_model_adapter import PunitiveModelAdapter
class TestPunitiveModelAdapter(unittest.TestCase):
def setUp(self):
self.effect_info = Mock()
punitive_model_adapter.EffectInfo = Mock(return_value=self.effect_info)
self.punitive_model = Mock()
self.pma = PunitiveModelAdapter(self.punitive_model)
def test_add_ban(self):
effect_desc = "127.0.0.1"
self.pma.add_ban(effect_desc)
self.punitive_model.add_effect.assert_called_once_with('ban', effect_desc, self.effect_info)
def test_clear_bans(self):
self.pma.clear_bans()
self.punitive_model.clear_effects.assert_called_once_with('ban')
| 35.956522 | 100 | 0.755744 |
c78a923d6f9ae611e27edd3f0d5446c5333aa5a4 | 862 | py | Python | setup.py | Rishav1/Bootstrapped-DQN | fa354d2966d4908eac2d22041178eec351f33710 | [
"MIT"
] | 66 | 2017-09-27T21:40:56.000Z | 2022-02-22T13:58:41.000Z | setup.py | Rishav1/Bootstrapped-DQN | fa354d2966d4908eac2d22041178eec351f33710 | [
"MIT"
] | 4 | 2017-09-27T19:29:26.000Z | 2021-02-22T10:01:33.000Z | setup.py | Rishav1/Bootstrapped-DQN | fa354d2966d4908eac2d22041178eec351f33710 | [
"MIT"
] | 14 | 2017-09-27T22:13:16.000Z | 2021-07-12T10:01:58.000Z | from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print("This Python is only compatible with Python 3, but you are running "
"Python {}. The installation will likely fail.".format(sys.version_info.major))
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('baselines')],
install_requires=[
'gym>=0.9.1',
'scipy',
'tqdm',
'joblib',
'zmq',
'dill',
'tensorflow >= 1.0.0',
'azure==1.0.3',
'progressbar2',
],
description="OpenAI baselines: high quality implementations of reinforcement learning algorithms",
author="OpenAI",
url='https://github.com/openai/baselines',
author_email="gym@openai.com",
version="0.1.3")
| 31.925926 | 104 | 0.597448 |
48e5fe76f77a2fb9ff4f720bb5459b2dfc0c9314 | 1,823 | py | Python | bitex/api/REST/cryptopia.py | ligggooo/quant2018 | adbf68da414f422157dff8b744df214fc6631342 | [
"MIT"
] | 312 | 2018-01-06T13:51:48.000Z | 2022-03-01T21:14:21.000Z | bitex/api/REST/cryptopia.py | ligggooo/quant2018 | adbf68da414f422157dff8b744df214fc6631342 | [
"MIT"
] | 111 | 2016-06-14T18:44:12.000Z | 2018-01-06T00:58:31.000Z | bitex/api/REST/cryptopia.py | ligggooo/quant2018 | adbf68da414f422157dff8b744df214fc6631342 | [
"MIT"
] | 98 | 2018-01-06T15:24:36.000Z | 2022-01-13T03:00:05.000Z | """
Contains all API Client sub-classes, which store exchange specific details
and feature the respective exchanges authentication method (sign()).
"""
# Import Built-ins
import logging
import json
import hashlib
import hmac
import base64
import urllib
import urllib.parse
# Import Homebrew
from bitex.api.REST.api import APIClient
log = logging.getLogger(__name__)
class CryptopiaREST(APIClient):
    """REST API client for the Cryptopia exchange."""

    def __init__(self, key=None, secret=None, api_version=None,
                 url='https://www.cryptopia.co.nz/api', timeout=5):
        super(CryptopiaREST, self).__init__(url, api_version=api_version, key=key,
                                            secret=secret, timeout=timeout)

    def sign(self, uri, endpoint, endpoint_path, method_verb, *args, **kwargs):
        """Build the signed Authorization header and JSON body for a request.

        As implemented below: the JSON-encoded POST body is MD5-hashed and
        base64-encoded; the string ``key + 'POST' + quoted-lowercased-uri +
        nonce + body-hash`` is HMAC-SHA256-signed with the base64-decoded
        secret, and sent as an ``amx key:signature:nonce`` header.
        Returns ``(uri, {'headers': ..., 'data': ...})``.
        """
        # NOTE(review): method_verb is accepted but the signature string
        # hardcodes 'POST' -- presumably all private endpoints are POST.
        nonce = self.nonce()

        try:
            params = kwargs['params']
        except KeyError:
            params = {}

        post_data = json.dumps(params)

        # generate signature
        md5 = hashlib.md5()
        md5.update(post_data.encode('utf-8'))
        request_content_b64_string = base64.b64encode(md5.digest()).decode('utf-8')
        signature = (self.key + 'POST' +
                     urllib.parse.quote_plus(uri).lower() +
                     nonce + request_content_b64_string)

        hmac_sig = base64.b64encode(hmac.new(base64.b64decode(self.secret),
                                             signature.encode('utf-8'),
                                             hashlib.sha256).digest())
        header_data = 'amx ' + self.key + ':' + hmac_sig.decode('utf-8') + ':' + nonce

        # Update req_kwargs keys
        headers = {'Authorization': header_data,
                   'Content-Type': 'application/json; charset=utf-8'}

        return uri, {'headers': headers, 'data': post_data}
| 33.145455 | 86 | 0.595173 |
fa33fe9bb2ddbf5c7ccb7b8a48f35767dfd07c54 | 16,427 | py | Python | test/test_tag_matcher.py | DisruptiveLabs/behave | 04ef02550bdf90fad4e073fe39d1730ee2152d31 | [
"BSD-2-Clause"
] | null | null | null | test/test_tag_matcher.py | DisruptiveLabs/behave | 04ef02550bdf90fad4e073fe39d1730ee2152d31 | [
"BSD-2-Clause"
] | null | null | null | test/test_tag_matcher.py | DisruptiveLabs/behave | 04ef02550bdf90fad4e073fe39d1730ee2152d31 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from behave.tag_matcher import *
from mock import Mock
from unittest import TestCase
class TestOnlyWithCategoryTagMatcher(TestCase):
    """Tests for OnlyWithCategoryTagMatcher for category "xxx", value "alice".

    Fixture tags: active = matching value, similar = same prefix but a value
    that merely starts with the active one, other = different value.
    """
    TagMatcher = OnlyWithCategoryTagMatcher

    def setUp(self):
        category = "xxx"
        self.tag_matcher = OnlyWithCategoryTagMatcher(category, "alice")
        self.active_tag = self.TagMatcher.make_category_tag(category, "alice")
        self.similar_tag = self.TagMatcher.make_category_tag(category, "alice2")
        self.other_tag = self.TagMatcher.make_category_tag(category, "other")
        self.category = category

    def test_should_exclude_with__returns_false_with_active_tag(self):
        tags = [ self.active_tag ]
        self.assertEqual(False, self.tag_matcher.should_exclude_with(tags))

    def test_should_exclude_with__returns_false_with_active_tag_and_more(self):
        # Active tag wins regardless of its position among other tags.
        test_patterns = [
            ([ self.active_tag, self.other_tag ],   "CASE: first"),
            ([ self.other_tag, self.active_tag ],   "CASE: last"),
            ([ "foo", self.active_tag, self.other_tag, "bar" ], "CASE: middle"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(False, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_true_with_other_tag(self):
        tags = [ self.other_tag ]
        self.assertEqual(True, self.tag_matcher.should_exclude_with(tags))

    def test_should_exclude_with__returns_true_with_other_tag_and_more(self):
        test_patterns = [
            ([ self.other_tag, "foo" ],     "CASE: first"),
            ([ "foo", self.other_tag ],     "CASE: last"),
            ([ "foo", self.other_tag, "bar" ], "CASE: middle"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(True, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_true_with_similar_tag(self):
        # "alice2" must not be accepted just because it starts with "alice".
        tags = [ self.similar_tag ]
        self.assertEqual(True, self.tag_matcher.should_exclude_with(tags))

    def test_should_exclude_with__returns_true_with_similar_and_more(self):
        test_patterns = [
            ([ self.similar_tag, "foo" ],   "CASE: first"),
            ([ "foo", self.similar_tag ],   "CASE: last"),
            ([ "foo", self.similar_tag, "bar" ], "CASE: middle"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(True, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_false_without_category_tag(self):
        # Untagged (for this category) items are never excluded.
        test_patterns = [
            ([ ],           "CASE: No tags"),
            ([ "foo" ],     "CASE: One tag"),
            ([ "foo", "bar" ], "CASE: Two tags"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(False, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_run_with__negates_result_of_should_exclude_with(self):
        # should_run_with and should_exclude_with must always disagree.
        test_patterns = [
            ([ ],                   "CASE: No tags"),
            ([ "foo" ],             "CASE: One non-category tag"),
            ([ "foo", "bar" ],      "CASE: Two non-category tags"),
            ([ self.active_tag ],   "CASE: active tag"),
            ([ self.active_tag, self.other_tag ],   "CASE: active and other tag"),
            ([ self.active_tag, "foo" ],            "CASE: active and foo tag"),
            ([ self.other_tag ],                    "CASE: other tag"),
            ([ self.other_tag, "foo" ],             "CASE: other and foo tag"),
            ([ self.similar_tag ],                  "CASE: similar tag"),
            ([ "foo", self.similar_tag ],           "CASE: foo and similar tag"),
        ]
        for tags, case in test_patterns:
            result1 = self.tag_matcher.should_run_with(tags)
            result2 = self.tag_matcher.should_exclude_with(tags)
            self.assertEqual(result1, not result2, "%s: tags=%s" % (case, tags))
            self.assertEqual(not result1, result2, "%s: tags=%s" % (case, tags))

    def test_make_category_tag__returns_category_tag_prefix_without_value(self):
        category = "xxx"
        tag1 = OnlyWithCategoryTagMatcher.make_category_tag(category)
        tag2 = OnlyWithCategoryTagMatcher.make_category_tag(category, None)
        tag3 = OnlyWithCategoryTagMatcher.make_category_tag(category, value=None)
        self.assertEqual("only.with_xxx=", tag1)
        self.assertEqual("only.with_xxx=", tag2)
        self.assertEqual("only.with_xxx=", tag3)
        self.assertTrue(tag1.startswith(OnlyWithCategoryTagMatcher.tag_prefix))

    def test_make_category_tag__returns_category_tag_with_value(self):
        category = "xxx"
        tag1 = OnlyWithCategoryTagMatcher.make_category_tag(category, "alice")
        tag2 = OnlyWithCategoryTagMatcher.make_category_tag(category, "bob")
        self.assertEqual("only.with_xxx=alice", tag1)
        self.assertEqual("only.with_xxx=bob", tag2)

    def test_make_category_tag__returns_category_tag_with_tag_prefix(self):
        # A custom prefix replaces the default "only.with_" entirely.
        my_tag_prefix = "ONLY_WITH."
        category = "xxx"
        TagMatcher = OnlyWithCategoryTagMatcher
        tag0 = TagMatcher.make_category_tag(category, tag_prefix=my_tag_prefix)
        tag1 = TagMatcher.make_category_tag(category, "alice", my_tag_prefix)
        tag2 = TagMatcher.make_category_tag(category, "bob", tag_prefix=my_tag_prefix)
        self.assertEqual("ONLY_WITH.xxx=", tag0)
        self.assertEqual("ONLY_WITH.xxx=alice", tag1)
        self.assertEqual("ONLY_WITH.xxx=bob", tag2)
        self.assertTrue(tag1.startswith(my_tag_prefix))

    def test_ctor__with_tag_prefix(self):
        # Only tags carrying the configured prefix are selected.
        tag_prefix = "ONLY_WITH."
        tag_matcher = OnlyWithCategoryTagMatcher("xxx", "alice", tag_prefix)
        tags = ["foo", "ONLY_WITH.xxx=foo", "only.with_xxx=bar", "bar"]
        actual_tags = tag_matcher.select_category_tags(tags)
        self.assertEqual(["ONLY_WITH.xxx=foo"], actual_tags)
class TestOnlyWithAnyCategoryTagMatcher(TestCase):
    """Tests for OnlyWithAnyCategoryTagMatcher.

    A scenario/feature is run only when every category tag it carries matches
    the active value configured in the category_value_provider; tags from
    unknown categories are ignored.
    """
    TagMatcher = OnlyWithAnyCategoryTagMatcher

    def setUp(self):
        # Active value per category: "foo" -> "alice", "bar" -> "BOB".
        category_value_provider = {
            "foo": "alice",
            "bar": "BOB",
        }
        TagMatcher = OnlyWithCategoryTagMatcher
        self.tag_matcher = OnlyWithAnyCategoryTagMatcher(category_value_provider)
        # -- Fixture tags: active (matches provider), similar (prefix of the
        # active value, must still be excluded), other (different value).
        self.category1_active_tag = TagMatcher.make_category_tag("foo", "alice")
        self.category1_similar_tag = TagMatcher.make_category_tag("foo", "alice2")
        self.category1_other_tag = TagMatcher.make_category_tag("foo", "bob")
        self.category2_active_tag = TagMatcher.make_category_tag("bar", "BOB")
        self.category2_similar_tag = TagMatcher.make_category_tag("bar", "BOB2")
        self.category2_other_tag = TagMatcher.make_category_tag("bar", "CHARLY")
        self.unknown_category_tag = TagMatcher.make_category_tag("UNKNOWN", "one")

    def test_should_exclude_with__returns_false_with_active_tag(self):
        tags1 = [ self.category1_active_tag ]
        tags2 = [ self.category2_active_tag ]
        self.assertEqual(False, self.tag_matcher.should_exclude_with(tags1))
        self.assertEqual(False, self.tag_matcher.should_exclude_with(tags2))

    def test_should_exclude_with__returns_false_with_active_tag_and_more(self):
        test_patterns = [
            ([ self.category1_active_tag, self.category1_other_tag ], "CASE: first"),
            ([ self.category1_other_tag, self.category1_active_tag ], "CASE: last"),
            ([ "foo", self.category1_active_tag, self.category1_other_tag, "bar" ], "CASE: middle"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(False, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_true_with_other_tag(self):
        tags = [ self.category1_other_tag ]
        self.assertEqual(True, self.tag_matcher.should_exclude_with(tags))

    def test_should_exclude_with__returns_true_with_other_tag_and_more(self):
        test_patterns = [
            ([ self.category1_other_tag, "foo" ], "CASE: first"),
            ([ "foo", self.category1_other_tag ], "CASE: last"),
            ([ "foo", self.category1_other_tag, "bar" ], "CASE: middle"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(True, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_true_with_similar_tag(self):
        tags = [ self.category1_similar_tag ]
        self.assertEqual(True, self.tag_matcher.should_exclude_with(tags))

    def test_should_exclude_with__returns_true_with_similar_and_more(self):
        test_patterns = [
            ([ self.category1_similar_tag, "foo" ], "CASE: first"),
            ([ "foo", self.category1_similar_tag ], "CASE: last"),
            ([ "foo", self.category1_similar_tag, "bar" ], "CASE: middle"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(True, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_false_without_category_tag(self):
        test_patterns = [
            ([ ], "CASE: No tags"),
            ([ "foo" ], "CASE: One tag"),
            ([ "foo", "bar" ], "CASE: Two tags"),
        ]
        for tags, case in test_patterns:
            self.assertEqual(False, self.tag_matcher.should_exclude_with(tags),
                             "%s: tags=%s" % (case, tags))

    def test_should_exclude_with__returns_false_with_unknown_category_tag(self):
        """Tags from unknown categories, not supported by category_value_provider,
        should not be excluded.
        """
        tags = [ self.unknown_category_tag ]
        self.assertEqual("only.with_UNKNOWN=one", self.unknown_category_tag)
        self.assertEqual(None, self.tag_matcher.category_value_provider.get("UNKNOWN"))
        self.assertEqual(False, self.tag_matcher.should_exclude_with(tags))

    def test_should_exclude_with__combinations_of_2_categories(self):
        # -- Truth table: excluded unless every known category tag is active.
        test_patterns = [
            ("CASE 00: 2 inactive category tags", True,
             [ self.category1_other_tag, self.category2_other_tag]),
            ("CASE 01: inactive and active category tags", True,
             [ self.category1_other_tag, self.category2_active_tag]),
            ("CASE 10: active and inactive category tags", True,
             [ self.category1_active_tag, self.category2_other_tag]),
            ("CASE 11: 2 active category tags", False,  # -- SHOULD-RUN
             [ self.category1_active_tag, self.category2_active_tag]),
            # -- SPECIAL CASE: With unknown category
            ("CASE 0x: inactive and unknown category tags", True,
             [ self.category1_other_tag, self.unknown_category_tag]),
            ("CASE 1x: active and unknown category tags", False,  # SHOULD-RUN
             [ self.category1_active_tag, self.unknown_category_tag]),
        ]
        for case, expected, tags in test_patterns:
            actual_result = self.tag_matcher.should_exclude_with(tags)
            self.assertEqual(expected, actual_result,
                             "%s: tags=%s" % (case, tags))

    def test_should_run_with__negates_result_of_should_exclude_with(self):
        test_patterns = [
            ([ ], "CASE: No tags"),
            ([ "foo" ], "CASE: One non-category tag"),
            ([ "foo", "bar" ], "CASE: Two non-category tags"),
            ([ self.category1_active_tag ], "CASE: active tag"),
            ([ self.category1_active_tag, self.category1_other_tag ], "CASE: active and other tag"),
            ([ self.category1_active_tag, "foo" ], "CASE: active and foo tag"),
            ([ self.category1_other_tag ], "CASE: other tag"),
            ([ self.category1_other_tag, "foo" ], "CASE: other and foo tag"),
            ([ self.category1_similar_tag ], "CASE: similar tag"),
            ([ "foo", self.category1_similar_tag ], "CASE: foo and similar tag"),
        ]
        for tags, case in test_patterns:
            result1 = self.tag_matcher.should_run_with(tags)
            result2 = self.tag_matcher.should_exclude_with(tags)
            self.assertEqual(result1, not result2, "%s: tags=%s" % (case, tags))
            self.assertEqual(not result1, result2, "%s: tags=%s" % (case, tags))
class TestPredicateTagMatcher(TestCase):
    """Tests for PredicateTagMatcher: excludes whenever its predicate is true."""

    def test_exclude_with__mechanics(self):
        # -- Verify via mock that the predicate receives the tags unchanged.
        blueprint = lambda tags: False
        predicate = Mock(blueprint)
        predicate.return_value = True
        matcher = PredicateTagMatcher(predicate)
        tags = [ "foo", "bar" ]
        self.assertEqual(True, matcher.should_exclude_with(tags))
        predicate.assert_called_once_with(tags)
        self.assertEqual(True, predicate(tags))

    def test_should_exclude_with__returns_true_when_predicate_is_true(self):
        always_true = lambda tags: True
        matcher = PredicateTagMatcher(always_true)
        tags = [ "foo", "bar" ]
        self.assertEqual(True, matcher.should_exclude_with(tags))
        self.assertEqual(True, always_true(tags))

    def test_should_exclude_with__returns_true_when_predicate_is_true2(self):
        # -- CASE: Use a named predicate function instead of a lambda.
        def predicate_contains_foo(tags):
            return any(tag == "foo" for tag in tags)
        matcher = PredicateTagMatcher(predicate_contains_foo)
        tags = [ "foo", "bar" ]
        self.assertEqual(True, matcher.should_exclude_with(tags))
        self.assertEqual(True, predicate_contains_foo(tags))

    def test_should_exclude_with__returns_false_when_predicate_is_false(self):
        always_false = lambda tags: False
        matcher = PredicateTagMatcher(always_false)
        tags = [ "foo", "bar" ]
        self.assertEqual(False, matcher.should_exclude_with(tags))
        self.assertEqual(False, always_false(tags))
class TestCompositeTagMatcher(TestCase):
@staticmethod
def count_tag_matcher_with_result(tag_matchers, tags, result_value):
count = 0
for tag_matcher in tag_matchers:
current_result = tag_matcher.should_exclude_with(tags)
if current_result == result_value:
count += 1
return count
def setUp(self):
predicate_false = lambda tags: False
predicate_contains_foo = lambda tags: any(x == "foo" for x in tags)
self.tag_matcher_false = PredicateTagMatcher(predicate_false)
self.tag_matcher_foo = PredicateTagMatcher(predicate_contains_foo)
tag_matchers = [
self.tag_matcher_foo,
self.tag_matcher_false
]
self.ctag_matcher = CompositeTagMatcher(tag_matchers)
def test_should_exclude_with__returns_true_when_any_tag_matcher_returns_true(self):
test_patterns = [
("CASE: with foo", ["foo", "bar"]),
("CASE: with foo2", ["foozy", "foo", "bar"]),
]
for case, tags in test_patterns:
actual_result = self.ctag_matcher.should_exclude_with(tags)
self.assertEqual(True, actual_result,
"%s: tags=%s" % (case, tags))
actual_true_count = self.count_tag_matcher_with_result(
self.ctag_matcher.tag_matchers, tags, True)
self.assertEqual(1, actual_true_count)
def test_should_exclude_with__returns_false_when_no_tag_matcher_return_true(self):
test_patterns = [
("CASE: without foo", ["fool", "bar"]),
("CASE: without foo2", ["foozy", "bar"]),
]
for case, tags in test_patterns:
actual_result = self.ctag_matcher.should_exclude_with(tags)
self.assertEqual(False, actual_result,
"%s: tags=%s" % (case, tags))
actual_true_count = self.count_tag_matcher_with_result(
self.ctag_matcher.tag_matchers, tags, True)
self.assertEqual(0, actual_true_count)
| 49.182635 | 101 | 0.638644 |
3277076cb56c2421806f01507eb9e278de8d29d5 | 722 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/EXT/polygon_offset_clamp.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/EXT/polygon_offset_clamp.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/EXT/polygon_offset_clamp.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_EXT_polygon_offset_clamp'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_EXT_polygon_offset_clamp',error_checker=_errors._error_checker)
GL_POLYGON_OFFSET_CLAMP_EXT=_C('GL_POLYGON_OFFSET_CLAMP_EXT',0x8E1B)
@_f
@_p.types(None,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glPolygonOffsetClampEXT(factor,units,clamp):pass
| 40.111111 | 128 | 0.797784 |
c1e6761d0d9f5cd5f528aab0415b2c3d1afe9475 | 1,760 | py | Python | onnxmltools/convert/coreml/shape_calculators/OneHotEncoder.py | szha/onnxmltools | b04d05bda625cbc006955ce0a220277739a95825 | [
"MIT"
] | 3 | 2019-02-27T21:03:43.000Z | 2020-04-07T22:16:50.000Z | onnxmltools/convert/coreml/shape_calculators/OneHotEncoder.py | szha/onnxmltools | b04d05bda625cbc006955ce0a220277739a95825 | [
"MIT"
] | null | null | null | onnxmltools/convert/coreml/shape_calculators/OneHotEncoder.py | szha/onnxmltools | b04d05bda625cbc006955ce0a220277739a95825 | [
"MIT"
] | 2 | 2020-10-01T09:24:55.000Z | 2021-04-17T13:57:31.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ...common._registration import register_shape_calculator
from ...common.data_types import FloatTensorType, StringTensorType
from ...common.utils import check_input_and_output_numbers
def calculate_one_hot_encoder_output_shapes(operator):
    '''
    Allowed input/output patterns are
        1. [N, 1] ---> [N, C']

    C' is the total number of categorical values.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    # Validate the rank before indexing shape[1]: the original check indexed
    # shape[1] first, so a 1-D input raised IndexError instead of the
    # intended, descriptive RuntimeError.
    shape = operator.inputs[0].type.shape
    if len(shape) != 2 or shape[1] != 1:
        raise RuntimeError('Input must be [N, 1]-tensor')

    int_categories = operator.raw_operator.oneHotEncoder.int64Categories.vector
    str_categories = operator.raw_operator.oneHotEncoder.stringCategories.vector

    N = shape[0]
    # The encoder widens the single input column into one column per category.
    if len(int_categories) > 0:
        operator.outputs[0].type = FloatTensorType([N, len(int_categories)],
                                                   doc_string=operator.outputs[0].type.doc_string)
    elif len(str_categories) > 0 and type(operator.inputs[0].type) == StringTensorType:
        operator.outputs[0].type = FloatTensorType([N, len(str_categories)],
                                                   doc_string=operator.outputs[0].type.doc_string)
    else:
        raise ValueError('Categorical indexes are missing')


register_shape_calculator('oneHotEncoder', calculate_one_hot_encoder_output_shapes)
| 44 | 98 | 0.642614 |
69808839a7924ef85d67067045c935be9d37ae4d | 1,048 | py | Python | src/util/session.py | mbecker8600/FinancialTools | e91acda460e2839e265d0f566a4fe6458b391713 | [
"MIT"
] | null | null | null | src/util/session.py | mbecker8600/FinancialTools | e91acda460e2839e265d0f566a4fe6458b391713 | [
"MIT"
] | 3 | 2021-03-31T19:16:19.000Z | 2021-12-13T20:00:56.000Z | src/util/session.py | mbecker8600/FinancialTools | e91acda460e2839e265d0f566a4fe6458b391713 | [
"MIT"
] | null | null | null | import pickle
from definitions import ROOT_DATA
import os
class Session:
    """Container for a named portfolio that can be pickled to disk.

    Attributes:
        portfolio: the Portfolio object held by this session (set by caller).
        name: identifier used as the on-disk file name (set by caller).
    """

    def __init__(self):
        # Both fields are populated by the caller before saving.
        self.portfolio = None
        self.name = None
class SessionManager:
    """Persists Session objects as pickle files under ROOT_DATA."""

    def __init__(self):
        pass

    def load(self, name):
        """Load and return the pickled Session stored under ``name``.

        :param name: session name (file stem under ROOT_DATA).
        """
        file = os.path.join(ROOT_DATA, "{}.txt".format(name))
        # Use a context manager so the handle is closed even on error;
        # the original left the file handle open (resource leak).
        with open(os.path.expanduser(file), 'rb') as filehandler:
            return pickle.load(filehandler)

    def save(self, session):
        """Pickle ``session`` to ROOT_DATA/<session.name>.txt."""
        file = os.path.join(ROOT_DATA, "{}.txt".format(session.name))
        with open(os.path.expanduser(file), 'wb') as filehandler:
            pickle.dump(session, filehandler)
# -- Demo entry point: build a sample portfolio and persist it as a session.
if __name__ == '__main__':
    from src.instrument import Portfolio
    session = Session()
    # (ticker symbol, share count) pairs for the starting positions.
    initial_holdings = [
        ('VTI', 106.323),
        ('BND', 108.796),
        ('VXUS', 128.178),
        ('BNDX', 71.368)
    ]
    portfolio = Portfolio(initial_holdings, initial_cash=11012.38)
    session.portfolio = portfolio
    session.name = 'becker-session'
    session_manager = SessionManager()
    # Writes ROOT_DATA/becker-session.txt as a pickle.
    session_manager.save(session)
| 22.782609 | 69 | 0.628817 |
fcbe184c459fe31e4c7e385805bb926b6f1c0468 | 764 | py | Python | test/plot.py | lgrignon/flame-detection-system | fff0a1fc1f9eaab66af6ef0ae3a0f506d8ebcdd8 | [
"MIT"
] | 49 | 2015-01-12T11:59:16.000Z | 2021-12-24T06:59:28.000Z | test/plot.py | lgrignon/flame-detection-system | fff0a1fc1f9eaab66af6ef0ae3a0f506d8ebcdd8 | [
"MIT"
] | 4 | 2016-10-31T08:21:02.000Z | 2017-09-22T12:27:30.000Z | test/plot.py | lgrignon/flame-detection-system | fff0a1fc1f9eaab66af6ef0ae3a0f506d8ebcdd8 | [
"MIT"
] | 33 | 2015-04-06T10:50:11.000Z | 2021-09-28T11:58:38.000Z | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
def plot(vals):
    """Show a two-panel figure: the raw signal and its FFT amplitude spectrum.

    :param vals: sequence of sampled values (e.g. per-frame flame areas).
    """
    l = len(vals)
    x = np.arange(0, l)
    y = vals
    # Fix: on Python 3 ``map()`` returns a lazy iterator, so the original
    # ``z[0] = 0`` assignment below raised TypeError.  Build a real array
    # of FFT magnitudes instead.
    z = np.abs(np.fft.fft(vals))

    f, (ax1, ax2) = plt.subplots(2, 1)

    # original
    ax1.plot(x, y, 'o-')
    ax1.grid()
    ax1.set_xlabel("n")
    ax1.set_ylabel("Area")
    ax1.set_title("Time Domain")
    ax1.set_xlim(0, np.max(x))
    ax1.xaxis.set_ticks([i * l / 8 for i in range(8)])

    # after fft: zero the DC component so it does not dwarf the spectrum
    z[0] = 0
    ax2.plot(x, z, 'o-')
    ax2.grid()
    ax2.set_xlabel("n")
    ax2.set_ylabel("Amplitude")
    ax2.set_title("Frequency Domain")
    ax2.set_xlim(0, np.max(x))
    ax2.xaxis.set_ticks([i * l / 8 for i in range(8)])

    # show result
    f.tight_layout()
    plt.show()
17c6bd751fe2f5d04fe0cbd171aa5345f3a6a924 | 27,849 | py | Python | python/ccxt/kuna.py | ttodua/ccxt | 571bce1e66fb0d7a03879f7b45954043c261da3d | [
"MIT"
] | null | null | null | python/ccxt/kuna.py | ttodua/ccxt | 571bce1e66fb0d7a03879f7b45954043c261da3d | [
"MIT"
] | 1 | 2022-01-27T19:54:13.000Z | 2022-01-27T19:54:13.000Z | python/ccxt/kuna.py | ttodua/ccxt | 571bce1e66fb0d7a03879f7b45954043c261da3d | [
"MIT"
] | 1 | 2022-03-15T22:51:08.000Z | 2022-03-15T22:51:08.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.precise import Precise
class kuna(Exchange):
def describe(self):
return self.deep_extend(super(kuna, self).describe(), {
'id': 'kuna',
'name': 'Kuna',
'countries': ['UA'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'fetchL3OrderBook': True,
'cancelOrder': True,
'CORS': None,
'createOrder': True,
'fetchBalance': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'withdraw': None,
},
'timeframes': None,
'urls': {
'extension': '.json',
'referral': 'https://kuna.io?r=kunaid-gvfihe8az7o4',
'logo': 'https://user-images.githubusercontent.com/51840849/87153927-f0578b80-c2c0-11ea-84b6-74612568e9e1.jpg',
'api': {
'xreserve': 'https://api.xreserve.fund',
'v3': 'https://api.kuna.io',
'public': 'https://kuna.io', # v2
'private': 'https://kuna.io', # v2
},
'www': 'https://kuna.io',
'doc': 'https://kuna.io/documents/api',
'fees': 'https://kuna.io/documents/api',
},
'api': {
'xreserve': {
'get': {
'nonce': 1,
'fee': 1,
'delegated-transactions': 1,
},
'post': {
'delegate-transfer': 1,
},
},
'v3': {
'public': {
'get': {
'timestamp': 1,
'currencies': 1,
'markets': 1,
'tickers': 1,
'k': 1,
'trades_history': 1,
'fees': 1,
'exchange-rates': 1,
'exchange-rates/currency': 1,
'book/market': 1,
'kuna_codes/code/check': 1,
'landing_page_statistic': 1,
'translations/locale': 1,
'trades/market/hist': 1,
},
'post': {
'http_test': 1,
'deposit_channels': 1,
'withdraw_channels': 1,
'subscription_plans': 1,
'send_to': 1,
'confirm_token': 1,
'kunaid': 1,
'withdraw/prerequest': 1,
'deposit/prerequest': 1,
'deposit/exchange-rates': 1,
},
},
'sign': {
'get': {
'reset_password/token': 1,
},
'post': {
'signup/google': 1,
'signup/resend_confirmation': 1,
'signup': 1,
'signin': 1,
'signin/two_factor': 1,
'signin/resend_confirm_device': 1,
'signin/confirm_device': 1,
'reset_password': 1,
'cool-signin': 1,
},
'put': {
'reset_password/token': 1,
'signup/code/confirm': 1,
},
},
'private': {
'post': {
'auth/w/order/submit': 1,
'auth/r/orders': 1,
'auth/r/orders/market': 1,
'auth/r/orders/markets': 1,
'auth/api_tokens/delete': 1,
'auth/api_tokens/create': 1,
'auth/api_tokens': 1,
'auth/signin_history/uniq': 1,
'auth/signin_history': 1,
'auth/disable_withdraw_confirmation': 1,
'auth/change_password': 1,
'auth/deposit_address': 1,
'auth/announcements/accept': 1,
'auth/announcements/unaccepted': 1,
'auth/otp/deactivate': 1,
'auth/otp/activate': 1,
'auth/otp/secret': 1,
'auth/r/order/market/:order_id/trades': 1,
'auth/r/orders/market/hist': 1,
'auth/r/orders/hist': 1,
'auth/r/orders/hist/markets': 1,
'auth/r/orders/details': 1,
'auth/assets-history': 1,
'auth/assets-history/withdraws': 1,
'auth/assets-history/deposits': 1,
'auth/r/wallets': 1,
'auth/markets/favorites': 1,
'auth/markets/favorites/list': 1,
'auth/me/update': 1,
'auth/me': 1,
'auth/fund_sources': 1,
'auth/fund_sources/list': 1,
'auth/withdraw/resend_confirmation': 1,
'auth/withdraw': 1,
'auth/withdraw/details': 1,
'auth/withdraw/info': 1,
'auth/payment_addresses': 1,
'auth/deposit/prerequest': 1,
'auth/deposit/exchange-rates': 1,
'auth/deposit': 1,
'auth/deposit/details': 1,
'auth/deposit/info': 1,
'auth/kuna_codes/count': 1,
'auth/kuna_codes/details': 1,
'auth/kuna_codes/edit': 1,
'auth/kuna_codes/send-pdf': 1,
'auth/kuna_codes': 1,
'auth/kuna_codes/redeemed-by-me': 1,
'auth/kuna_codes/issued-by-me': 1,
'auth/payment_requests/invoice': 1,
'auth/payment_requests/type': 1,
'auth/referral_program/weekly_earnings': 1,
'auth/referral_program/stats': 1,
'auth/merchant/payout_services': 1,
'auth/merchant/withdraw': 1,
'auth/merchant/payment_services': 1,
'auth/merchant/deposit': 1,
'auth/verification/auth_token': 1,
'auth/kunaid_purchase/create': 1,
'auth/devices/list': 1,
'auth/sessions/list': 1,
'auth/subscriptions/reactivate': 1,
'auth/subscriptions/cancel': 1,
'auth/subscriptions/prolong': 1,
'auth/subscriptions/create': 1,
'auth/subscriptions/list': 1,
'auth/kuna_ids/list': 1,
'order/cancel/multi': 1,
'order/cancel': 1,
},
'put': {
'auth/fund_sources/id': 1,
'auth/kuna_codes/redeem': 1,
},
'delete': {
'auth/markets/favorites': 1,
'auth/fund_sources': 1,
'auth/devices': 1,
'auth/devices/list': 1,
'auth/sessions/list': 1,
'auth/sessions': 1,
},
},
},
'public': {
'get': [
'depth', # Get depth or specified market Both asks and bids are sorted from highest price to lowest.
'k_with_pending_trades', # Get K data with pending trades, which are the trades not included in K data yet, because there's delay between trade generated and processed by K data generator
'k', # Get OHLC(k line) of specific market
'markets', # Get all available markets
'order_book', # Get the order book of specified market
'order_book/{market}',
'tickers', # Get ticker of all markets
'tickers/{market}', # Get ticker of specific market
'timestamp', # Get server current time, in seconds since Unix epoch
'trades', # Get recent trades on market, each trade is included only once Trades are sorted in reverse creation order.
'trades/{market}',
],
},
'private': {
'get': [
'members/me', # Get your profile and accounts info
'deposits', # Get your deposits history
'deposit', # Get details of specific deposit
'deposit_address', # Where to deposit The address field could be empty when a new address is generating(e.g. for bitcoin), you should try again later in that case.
'orders', # Get your orders, results is paginated
'order', # Get information of specified order
'trades/my', # Get your executed trades Trades are sorted in reverse creation order.
'withdraws', # Get your cryptocurrency withdraws
'withdraw', # Get your cryptocurrency withdraw
],
'post': [
'orders', # Create a Sell/Buy order
'orders/multi', # Create multiple sell/buy orders
'orders/clear', # Cancel all my orders
'order/delete', # Cancel an order
'withdraw', # Create a withdraw
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.25 / 100,
'maker': 0.25 / 100,
},
'funding': {
'withdraw': {
'UAH': '1%',
'BTC': 0.001,
'BCH': 0.001,
'ETH': 0.01,
'WAVES': 0.01,
'GOL': 0.0,
'GBG': 0.0,
# 'RMC': 0.001 BTC
# 'ARN': 0.01 ETH
# 'R': 0.01 ETH
# 'EVR': 0.01 ETH
},
'deposit': {
# 'UAH': (amount) => amount * 0.001 + 5
},
},
},
'commonCurrencies': {
'PLA': 'Plair',
},
'exceptions': {
'2002': InsufficientFunds,
'2003': OrderNotFound,
},
})
def fetch_time(self, params={}):
    """Fetch the exchange server time.

    :returns: server timestamp in milliseconds.
    """
    # The endpoint returns seconds since epoch, e.g. 1594911427.
    seconds = self.publicGetTimestamp(params)
    return seconds * 1000
def fetch_markets(self, params={}):
    """Build the list of market structures from the public tickers endpoint.

    Kuna market ids are plain concatenations like 'btcuah', so the base/quote
    split is reconstructed by scanning for known quote currency suffixes.
    """
    quotes = ['btc', 'rub', 'uah', 'usd', 'usdt', 'usdc']
    markets = []
    response = self.publicGetTickers(params)
    ids = list(response.keys())
    for i in range(0, len(ids)):
        id = ids[i]
        for j in range(0, len(quotes)):
            quoteId = quotes[j]
            # usd gets matched before usdt in usdtusd USDT/USD
            # https://github.com/ccxt/ccxt/issues/9868
            # Search in id[1:] so a quote that is also a prefix of the base
            # (e.g. 'usdt' in 'usdtusd') is not matched at position 0.
            slicedId = id[1:]
            index = slicedId.find(quoteId)
            slice = slicedId[index:]
            if (index > 0) and (slice == quoteId):
                # usd gets matched before usdt in usdtusd USDT/USD
                # https://github.com/ccxt/ccxt/issues/9868
                baseId = id[0] + slicedId.replace(quoteId, '')
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
                markets.append({
                    'id': id,
                    'symbol': symbol,
                    'base': base,
                    'quote': quote,
                    'baseId': baseId,
                    'quoteId': quoteId,
                    'type': 'spot',
                    'spot': True,
                    'active': None,
                    'precision': {
                        'amount': None,
                        'price': None,
                    },
                    'limits': {
                        'amount': {
                            'min': None,
                            'max': None,
                        },
                        'price': {
                            'min': None,
                            'max': None,
                        },
                        'cost': {
                            'min': None,
                            'max': None,
                        },
                    },
                    'info': None,
                })
    return markets
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book for *symbol*.

    :param limit: maximum number of levels per side (server default is 300).
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'market': market['id']}
    if limit is not None:
        request['limit'] = limit  # default = 300
    response = self.publicGetDepth(self.extend(request, params))
    timestamp = self.safe_timestamp(response, 'timestamp')
    return self.parse_order_book(response, symbol, timestamp)
def parse_ticker(self, ticker, market=None):
    """Convert a raw ticker (wrapped as {'at': ts, 'ticker': {...}}) into
    the unified ccxt ticker structure."""
    timestamp = self.safe_timestamp(ticker, 'at')
    ticker = ticker['ticker']
    symbol = market['symbol'] if market else None
    last = self.safe_number(ticker, 'last')
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_number(ticker, 'high'),
        'low': self.safe_number(ticker, 'low'),
        'bid': self.safe_number(ticker, 'buy'),
        'bidVolume': None,
        'ask': self.safe_number(ticker, 'sell'),
        'askVolume': None,
        'vwap': None,
        'open': self.safe_number(ticker, 'open'),
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': None,
        'baseVolume': self.safe_number(ticker, 'vol'),
        'quoteVolume': None,
        'info': ticker,
    }
def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for all markets, optionally filtered by *symbols*."""
    self.load_markets()
    response = self.publicGetTickers(params)
    ids = list(response.keys())
    result = {}
    for i in range(0, len(ids)):
        id = ids[i]
        market = None
        symbol = id
        if id in self.markets_by_id:
            market = self.markets_by_id[id]
            symbol = market['symbol']
        else:
            # Unknown market id: fall back to a naive 3+3 character split
            # (e.g. 'btcuah' -> BTC/UAH); may be wrong for longer codes.
            base = id[0:3]
            quote = id[3:6]
            base = base.upper()
            quote = quote.upper()
            base = self.safe_currency_code(base)
            quote = self.safe_currency_code(quote)
            symbol = base + '/' + quote
        result[symbol] = self.parse_ticker(response[id], market)
    return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
    """Fetch the ticker for a single market."""
    self.load_markets()
    market = self.market(symbol)
    request = {'market': market['id']}
    response = self.publicGetTickersMarket(self.extend(request, params))
    return self.parse_ticker(response, market)
def fetch_l3_order_book(self, symbol, limit=None, params={}):
    # Delegates to fetch_order_book: the same depth endpoint serves as the
    # L3 book here.
    return self.fetch_order_book(symbol, limit, params)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch recent public trades for a market."""
    self.load_markets()
    market = self.market(symbol)
    request = {'market': market['id']}
    response = self.publicGetTrades(self.extend(request, params))
    return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
    """Convert a raw Kuna trade into the unified ccxt trade structure."""
    timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
    symbol = market['symbol'] if market else None
    side = self.safe_string_2(trade, 'side', 'trend')
    if side is not None:
        # Public trades report the taker side as 'ask'/'bid'.
        side = self.safe_string({'ask': 'sell', 'bid': 'buy'}, side, side)
    priceString = self.safe_string(trade, 'price')
    amountString = self.safe_string(trade, 'volume')
    cost = self.safe_number(trade, 'funds')
    if cost is None:
        # Fall back to price * amount using precise string math.
        cost = self.parse_number(Precise.string_mul(priceString, amountString))
    return {
        'id': self.safe_string(trade, 'id'),
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'type': None,
        'side': side,
        'order': self.safe_string(trade, 'order_id'),
        'takerOrMaker': None,
        'price': self.parse_number(priceString),
        'amount': self.parse_number(amountString),
        'cost': cost,
        'fee': None,
    }
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Emulate OHLCV candles by aggregating recent public trades."""
    self.load_markets()
    trades = self.fetch_trades(symbol, since, limit, params)
    ohlcvc = self.build_ohlcvc(trades, timeframe, since, limit)
    # Keep only [timestamp, open, high, low, close, volume]; build_ohlcvc
    # appends a trailing trade-count element that the unified API drops.
    return [candle[0:6] for candle in ohlcvc]
def parse_balance(self, response):
    """Convert the raw 'accounts' list into a unified balance structure."""
    balances = self.safe_value(response, 'accounts')
    result = {'info': balances}
    for entry in balances:
        code = self.safe_currency_code(self.safe_string(entry, 'currency'))
        account = self.account()
        account['free'] = self.safe_string(entry, 'balance')
        account['used'] = self.safe_string(entry, 'locked')
        result[code] = account
    return self.safe_balance(result)
def fetch_balance(self, params={}):
    """Fetch the account balances."""
    self.load_markets()
    response = self.privateGetMembersMe(params)
    return self.parse_balance(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place a market or limit order and return the parsed order."""
    self.load_markets()
    request = {
        'market': self.market_id(symbol),
        'side': side,
        'volume': str(amount),
        'ord_type': type,
    }
    if type == 'limit':
        request['price'] = str(price)
    response = self.privatePostOrders(self.extend(request, params))
    # Resolve the market from the response so parse_order can set the symbol.
    marketId = self.safe_value(response, 'market')
    return self.parse_order(response, self.safe_value(self.markets_by_id, marketId))
def cancel_order(self, id, symbol=None, params={}):
    """Cancel an open order by id.

    Raises OrderNotFound when the order is already closed or canceled.
    """
    self.load_markets()
    request = {
        'id': id,
    }
    response = self.privatePostOrderDelete(self.extend(request, params))
    order = self.parse_order(response)
    status = order['status']
    # The endpoint echoes the order even when it can no longer be canceled;
    # surface that case as OrderNotFound.
    if status == 'closed' or status == 'canceled':
        raise OrderNotFound(self.id + ' ' + self.json(order))
    return order
def parse_order_status(self, status):
    """Map an exchange order state onto the unified ccxt status string."""
    status_map = {
        'done': 'closed',
        'wait': 'open',
        'cancel': 'canceled',
    }
    # Unknown states pass through unchanged (third argument is the default).
    return self.safe_string(status_map, status, status)
def parse_order(self, order, market=None):
    """Convert a raw Kuna order into the unified ccxt order structure."""
    marketId = self.safe_string(order, 'market')
    symbol = self.safe_symbol(marketId, market)
    timestamp = self.parse8601(self.safe_string(order, 'created_at'))
    status = self.parse_order_status(self.safe_string(order, 'state'))
    type = self.safe_string(order, 'type')
    side = self.safe_string(order, 'side')
    id = self.safe_string(order, 'id')
    # safe_order fills in derived fields (cost, average, ...) from the
    # string amounts supplied here.
    return self.safe_order({
        'id': id,
        'clientOrderId': None,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'status': status,
        'symbol': symbol,
        'type': type,
        'timeInForce': None,
        'postOnly': None,
        'side': side,
        'price': self.safe_string(order, 'price'),
        'stopPrice': None,
        'amount': self.safe_string(order, 'volume'),
        'filled': self.safe_string(order, 'executed_volume'),
        'remaining': self.safe_string(order, 'remaining_volume'),
        'trades': None,
        'fee': None,
        'info': order,
        'cost': None,
        'average': None,
    }, market)
def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by its exchange id."""
    self.load_markets()
    request = {'id': int(id)}
    response = self.privateGetOrder(self.extend(request, params))
    return self.parse_order(response)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch all open orders for a market (symbol is required)."""
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {'market': market['id']}
    response = self.privateGetOrders(self.extend(request, params))
    # todo: emulate fetchClosedOrders / fetchOrders / fetchOrder with an
    # order cache on top of fetchOpenOrders
    # (as in BTC-e, Liqui, Yobit, DSX, Tidex, WEX)
    return self.parse_orders(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's executed trades for a market (symbol is required)."""
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
    self.load_markets()
    market = self.market(symbol)
    request = {'market': market['id']}
    response = self.privateGetTradesMy(self.extend(request, params))
    return self.parse_trades(response, market, since, limit)
def nonce(self):
    # Current time in milliseconds serves as the request nonce ('tonce').
    return self.milliseconds()
def encode_params(self, params):
    """URL-encode request params, expanding an 'orders' list into the
    Rails-style ``orders[][key]=value`` array notation.
    """
    if 'orders' in params:
        orders = params['orders']
        query = self.urlencode(self.keysort(self.omit(params, 'orders')))
        for i in range(0, len(orders)):
            order = orders[i]
            keys = list(order.keys())
            for k in range(0, len(keys)):
                key = keys[k]
                value = order[key]
                # '%5B%5D%5B' + key + '%5D' is the pre-encoded form of
                # 'orders[][<key>]' so the brackets survive signing as-is.
                query += '&orders%5B%5D%5B' + key + '%5D=' + str(value)
        return query
    return self.urlencode(self.keysort(params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build url/method/body/headers for a request.

    ``api`` is either a string ('public'/'private', the legacy v2 API) or a
    [version, access] pair such as ['v3', 'public'] for newer endpoints.
    """
    url = None
    if isinstance(api, list):
        # -- Versioned (v3 / xreserve) endpoints.
        version, access = api
        url = self.urls['api'][version] + '/' + version + '/' + self.implode_params(path, params)
        if access == 'public':
            if method == 'GET':
                if params:
                    url += '?' + self.urlencode(params)
            elif (method == 'POST') or (method == 'PUT'):
                headers = {'Content-Type': 'application/json'}
                body = self.json(params)
        elif access == 'private':
            raise NotSupported(self.id + ' private v3 API is not supported yet')
    else:
        # -- Legacy v2 API.
        request = '/api/' + self.version + '/' + self.implode_params(path, params)
        if 'extension' in self.urls:
            request += self.urls['extension']
        query = self.omit(params, self.extract_params(path))
        url = self.urls['api'][api] + request
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            query = self.encode_params(self.extend({
                'access_key': self.apiKey,
                'tonce': nonce,
            }, params))
            # Signature is an HMAC over "METHOD|request_path|query"
            # (ccxt's default hmac digest — confirm against exchange docs).
            auth = method + '|' + request + '|' + query
            signed = self.hmac(self.encode(auth), self.encode(self.secret))
            suffix = query + '&signature=' + signed
            if method == 'GET':
                url += '?' + suffix
            else:
                body = suffix
                headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 400:
error = self.safe_value(response, 'error')
errorCode = self.safe_string(error, 'code')
feedback = self.id + ' ' + self.json(response)
self.throw_exactly_matched_exception(self.exceptions, errorCode, feedback)
# fallback to default error handler
| 42.259484 | 212 | 0.443391 |
5a917d413979b02d7a3a2ca6af2fc56164e625f1 | 12,541 | py | Python | yellowbrick/cluster/silhouette.py | mmorrison1670/yellowbrick | c2028de2b7703e563503f7e85fdd65ad08de1ef6 | [
"Apache-2.0"
] | 2 | 2019-04-17T18:19:08.000Z | 2019-12-09T03:53:59.000Z | yellowbrick/cluster/silhouette.py | mmorrison1670/yellowbrick | c2028de2b7703e563503f7e85fdd65ad08de1ef6 | [
"Apache-2.0"
] | 1 | 2021-01-20T11:17:04.000Z | 2021-01-20T11:17:04.000Z | yellowbrick/cluster/silhouette.py | mmorrison1670/yellowbrick | c2028de2b7703e563503f7e85fdd65ad08de1ef6 | [
"Apache-2.0"
] | 2 | 2019-05-17T06:35:37.000Z | 2019-06-30T16:17:22.000Z | # yellowbrick.cluster.silhouette
# Implements visualizers using the silhouette metric for cluster evaluation.
#
# Author: Benjamin Bengfort
# Author: Rebecca Bilbro
# Created: Mon Mar 27 10:09:24 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: silhouette.py [57b563b] benjamin@bengfort.com $
"""
Implements visualizers that use the silhouette metric for cluster evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.ticker as ticker
from sklearn.metrics import silhouette_score, silhouette_samples
from yellowbrick.utils import check_fitted
from yellowbrick.style import resolve_colors
from yellowbrick.cluster.base import ClusteringScoreVisualizer
## Packages for export
__all__ = ["SilhouetteVisualizer", "silhouette_visualizer"]
##########################################################################
## Silhouette Method for K Selection
##########################################################################
class SilhouetteVisualizer(ClusteringScoreVisualizer):
    """
    The Silhouette Visualizer displays the silhouette coefficient for each
    sample on a per-cluster basis, visually evaluating the density and
    separation between clusters. The score is calculated by averaging the
    silhouette coefficient for each sample, computed as the difference
    between the average intra-cluster distance and the mean nearest-cluster
    distance for each sample, normalized by the maximum value. This produces a
    score between -1 and +1, where scores near +1 indicate high separation
    and scores near -1 indicate that the samples may have been assigned to
    the wrong cluster.
    In SilhouetteVisualizer plots, clusters with higher scores have wider
    silhouettes, but clusters that are less cohesive will fall short of the
    average score across all clusters, which is plotted as a vertical dotted
    red line.
    This is particularly useful for determining cluster imbalance, or for
    selecting a value for K by comparing multiple visualizers.
    Parameters
    ----------
    model : a Scikit-Learn clusterer
        Should be an instance of a centroidal clustering algorithm (``KMeans``
        or ``MiniBatchKMeans``). If the estimator is not fitted, it is fit when
        the visualizer is fitted, unless otherwise specified by ``is_fitted``.
    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If None is passed in the current axes
        will be used (or generated if required).
    colors : iterable or string, default: None
        A collection of colors to use for each cluster group. If there are
        fewer colors than cluster groups, colors will repeat. May also be a
        Yellowbrick or matplotlib colormap string.
    is_fitted : bool or str, default='auto'
        Specify if the wrapped estimator is already fitted. If False, the
        estimator will be fit when the visualizer is fit, otherwise, the
        estimator will not be modified. If 'auto' (default), a helper method
        will check if the estimator is fitted before fitting it again.
    kwargs : dict
        Keyword arguments that are passed to the base class and may influence
        the visualization as defined in other Visualizers.
    Attributes
    ----------
    silhouette_score_ : float
        Mean Silhouette Coefficient for all samples. Computed via scikit-learn
        `sklearn.metrics.silhouette_score`.
    silhouette_samples_ : array, shape = [n_samples]
        Silhouette Coefficient for each samples. Computed via scikit-learn
        `sklearn.metrics.silhouette_samples`.
    n_samples_ : integer
        Number of total samples in the dataset (X.shape[0])
    n_clusters_ : integer
        Number of clusters (e.g. n_clusters or k value) passed to internal
        scikit-learn model.
    y_tick_pos_ : array of shape (n_clusters,)
        The computed center positions of each cluster on the y-axis
    Examples
    --------
    >>> from yellowbrick.cluster import SilhouetteVisualizer
    >>> from sklearn.cluster import KMeans
    >>> model = SilhouetteVisualizer(KMeans(10))
    >>> model.fit(X)
    >>> model.show()
    """
    def __init__(self, model, ax=None, colors=None, is_fitted="auto", **kwargs):
        # Initialize the visualizer bases
        super(SilhouetteVisualizer, self).__init__(model, ax=ax, **kwargs)
        # Visual Properties
        # Use colors if it is given, otherwise attempt to use colormap which
        # which will override colors. If neither is found, default to None.
        # The colormap may yet still be found in resolve_colors
        self.colors = colors
        if "colormap" in kwargs:
            self.colors = kwargs["colormap"]
    def fit(self, X, y=None, **kwargs):
        """
        Fits the model and generates the silhouette visualization.
        """
        # TODO: decide to use this method or the score method to draw.
        # NOTE: Probably this would be better in score, but the standard score
        # is a little different and I'm not sure how it's used.
        if not check_fitted(self.estimator, is_fitted_by=self.is_fitted):
            # Fit the wrapped estimator
            self.estimator.fit(X, y, **kwargs)
        # Get the properties of the dataset
        self.n_samples_ = X.shape[0]
        # NOTE(review): reads ``n_clusters`` from the estimator — assumes a
        # KMeans-style attribute name; confirm for other centroidal models.
        self.n_clusters_ = self.estimator.n_clusters
        # Compute the scores of the cluster
        # NOTE(review): labels are recomputed with ``predict()`` rather than
        # read from ``labels_`` — assumes the estimator supports predict().
        labels = self.estimator.predict(X)
        self.silhouette_score_ = silhouette_score(X, labels)
        self.silhouette_samples_ = silhouette_samples(X, labels)
        # Draw the silhouette figure
        self.draw(labels)
        # Return the estimator
        return self
    def draw(self, labels):
        """
        Draw the silhouettes for each sample and the average score.
        Parameters
        ----------
        labels : array-like
            An array with the cluster label for each silhouette sample,
            usually computed with ``predict()``. Labels are not stored on the
            visualizer so that the figure can be redrawn with new data.
        """
        # Track the positions of the lines being drawn
        y_lower = 10  # The bottom of the silhouette
        # Get the colors from the various properties
        color_kwargs = {"n_colors": self.n_clusters_}
        if self.colors is None:
            color_kwargs["colormap"] = "Set1"
        elif isinstance(self.colors, str):
            color_kwargs["colormap"] = self.colors
        else:
            color_kwargs["colors"] = self.colors
        colors = resolve_colors(**color_kwargs)
        # For each cluster, plot the silhouette scores
        self.y_tick_pos_ = []
        for idx in range(self.n_clusters_):
            # Collect silhouette scores for samples in the current cluster .
            values = self.silhouette_samples_[labels == idx]
            # Sorted ascending so each cluster renders as a smooth wedge.
            values.sort()
            # Compute the size of the cluster and find upper limit
            size = values.shape[0]
            y_upper = y_lower + size
            color = colors[idx]
            self.ax.fill_betweenx(
                np.arange(y_lower, y_upper),
                0,
                values,
                facecolor=color,
                edgecolor=color,
                alpha=0.5,
            )
            # Collect the tick position for each cluster
            self.y_tick_pos_.append(y_lower + 0.5 * size)
            # Compute the new y_lower for next plot
            y_lower = y_upper + 10
        # The vertical line for average silhouette score of all the values
        self.ax.axvline(
            x=self.silhouette_score_,
            color="red",
            linestyle="--",
            label="Average Silhouette Score",
        )
        return self.ax
    def finalize(self):
        """
        Prepare the figure for rendering by setting the title and adjusting
        the limits on the axes, adding labels and a legend.
        """
        # Set the title
        self.set_title(
            ("Silhouette Plot of {} Clustering for {} Samples in {} Centers").format(
                self.name, self.n_samples_, self.n_clusters_
            )
        )
        # Set the X and Y limits
        # The silhouette coefficient can range from -1, 1;
        # but here we scale the plot according to our visualizations
        # l_xlim and u_xlim are lower and upper limits of the x-axis,
        # set according to our calculated max and min score with necessary padding
        l_xlim = max(-1, min(-0.1, round(min(self.silhouette_samples_) - 0.1, 1)))
        u_xlim = min(1, round(max(self.silhouette_samples_) + 0.1, 1))
        self.ax.set_xlim([l_xlim, u_xlim])
        # The (n_clusters_+1)*10 is for inserting blank space between
        # silhouette plots of individual clusters, to demarcate them clearly.
        self.ax.set_ylim([0, self.n_samples_ + (self.n_clusters_ + 1) * 10])
        # Set the x and y labels
        self.ax.set_xlabel("silhouette coefficient values")
        self.ax.set_ylabel("cluster label")
        # Set the ticks on the axis object.
        self.ax.set_yticks(self.y_tick_pos_)
        self.ax.set_yticklabels(str(idx) for idx in range(self.n_clusters_))
        # Set the ticks at multiples of 0.1
        self.ax.xaxis.set_major_locator(ticker.MultipleLocator(0.1))
        # Show legend (Average Silhouette Score axis)
        self.ax.legend(loc="best")
##########################################################################
## Quick Method
##########################################################################
def silhouette_visualizer(
    model, X, y=None, ax=None, colors=None, is_fitted="auto", show=True, **kwargs
):
    """Quick Method:
    Fit a ``SilhouetteVisualizer`` on the given clusterer and data, render it,
    and return the finalized visualizer.

    The silhouette plot shows the silhouette coefficient of every sample,
    grouped per cluster, to visually evaluate cluster density and separation;
    see :class:`SilhouetteVisualizer` for a full description of the metric.

    Parameters
    ----------
    model : a Scikit-Learn clusterer
        Centroidal clustering estimator (``KMeans`` or ``MiniBatchKMeans``);
        fitted during the call if needed, depending on ``is_fitted``.

    X : array-like of shape (n, m)
        A matrix or data frame with n instances and m features

    y : array-like of shape (n,), optional
        A vector or series representing the target for each instance

    ax : matplotlib Axes, default: None
        The axes to draw on; the current axes are used (or created) if None.

    colors : iterable or string, default: None
        Colors for the cluster groups (repeated if fewer than clusters), or a
        Yellowbrick / matplotlib colormap name.

    is_fitted : bool or str, default='auto'
        Whether the wrapped estimator is already fitted; 'auto' detects it
        before fitting again.

    show : bool, default: True
        If True, calls ``show()`` (which in turn calls ``plt.show()``);
        if False, only ``finalize()`` is called.

    kwargs : dict
        Extra keyword arguments forwarded to the visualizer base class.

    Returns
    -------
    viz : SilhouetteVisualizer
        The silhouette visualizer, fitted and finalized.
    """
    visualizer = SilhouetteVisualizer(
        model, ax=ax, colors=colors, is_fitted=is_fitted, **kwargs
    )
    visualizer.fit(X, y)

    if show:
        visualizer.show()
    else:
        visualizer.finalize()

    return visualizer
| 37.435821 | 85 | 0.638386 |
fb71150e6ba6b709ef8c48b997400387f344a14c | 3,781 | py | Python | formative/registry.py | jaap3/django-formative | 0f6bd2dc8a78e59a842513b49efc7768d1ffc3a3 | [
"MIT"
] | null | null | null | formative/registry.py | jaap3/django-formative | 0f6bd2dc8a78e59a842513b49efc7768d1ffc3a3 | [
"MIT"
] | null | null | null | formative/registry.py | jaap3/django-formative | 0f6bd2dc8a78e59a842513b49efc7768d1ffc3a3 | [
"MIT"
] | null | null | null | from collections import Container, Iterable, Sized
from django.forms.models import modelform_factory
from django.utils.encoding import python_2_unicode_compatible
from django.utils import six
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces
from formative.exceptions import FormativeTypeNotRegistered
from formative.forms import FormativeTypeForm
from formative.utils import formative_form_factory, add_field_to_fieldsets
class FormativeTypeBase(type):
    """Metaclass that fills in default ``name``/``verbose_name`` attributes
    and normalizes declared ``fieldsets`` for FormativeType subclasses."""

    def __new__(mcs, name, bases, attrs):
        if name != 'FormativeType':
            # Derive defaults from the class name ("MyType" -> "my type").
            attrs.setdefault('name', camel_case_to_spaces(name).lower())
            attrs.setdefault('verbose_name', attrs['name'].title())
            # Declared fieldsets always gain the formative_type field.
            if 'fieldsets' in attrs:
                attrs['fieldsets'] = add_field_to_fieldsets(
                    'formative_type', attrs['fieldsets'])
        # Explicit super() arguments preserved for Python 2 compatibility
        # (this module still imports six).
        return super(FormativeTypeBase, mcs).__new__(mcs, name, bases, attrs)
@python_2_unicode_compatible
@six.add_metaclass(FormativeTypeBase)
class FormativeType(object):
    """A registered formative type: binds a model to a generated form class.

    Subclasses get ``name``/``verbose_name`` defaults from the metaclass.
    """

    def __init__(self, model, exclude=None):
        self.model = model
        self.exclude = exclude

    @cached_property
    def form(self):
        """Default form class, built once with the configured exclusions."""
        return self.get_form(exclude=self.exclude)

    def get_form(self, exclude=None):
        """Build a model form for this type, optionally excluding fields."""
        form_class = formative_form_factory(
            self.model, self.form_class, exclude=exclude)
        form_class.formative_type = self
        return form_class

    @cached_property
    def fieldsets(self):
        """
        Fallback fieldsets for types that don't declare their own: a single
        unnamed fieldset with every base form field plus formative_type.
        """
        field_names = list(self.form_class.base_fields)
        return add_field_to_fieldsets(
            'formative_type', [(None, {'fields': field_names})])

    def __str__(self):
        return self.verbose_name
class FormativeTypeRegistry(Sized, Iterable, Container):
    """
    Registry mapping formative type names to their instantiated types.

    Supports ``in``, ``len()`` and iteration (in name order).
    """

    def __init__(self, model):
        self.model = model
        self._types = {}

    def __contains__(self, name):
        return name in self._types

    def __iter__(self):
        # Yield instances sorted by registered name for stable ordering.
        for _, instance in sorted(self._types.items()):
            yield instance

    def __len__(self):
        return len(self._types)

    @cached_property
    def type_select_form(self):
        """Model form used to pick one of the registered types."""
        return modelform_factory(self.model, form=FormativeTypeForm)

    def register(self, cls, model, exclude=None):
        """
        Instantiate *cls* for *model* and store it under ``cls.name``.
        """
        self._types[cls.name] = cls(model, exclude=exclude)

    def get(self, name):
        """
        Return the registered type called *name*.

        Raises FormativeTypeNotRegistered if the name is not found in the
        registry.
        """
        try:
            return self._types[name]
        except KeyError:
            raise FormativeTypeNotRegistered
def autodiscover():
    """
    Auto-discover INSTALLED_APPS formative_types.py modules and fail silently
    if not present.

    Real import errors *inside* an existing formative_types module are
    re-raised so they are not hidden.
    """
    # stdlib importlib: django.utils.importlib was only a deprecated alias
    # and was removed in Django 1.9.
    from importlib import import_module
    from django.conf import settings
    from django.utils.module_loading import module_has_submodule

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's formative_types module.
        try:
            import_module('%s.formative_types' % app)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are never swallowed. Decide whether to bubble up
            # this error: if the app simply has no formative_types module,
            # ignore it; otherwise the module exists and failed to import,
            # so the error must surface.
            if module_has_submodule(mod, 'formative_types'):
                raise  # pragma: nocover
| 32.316239 | 77 | 0.656969 |
c8a3f4d9f019d5939b824f712b97c3101e1f4fe3 | 1,718 | py | Python | src/line_detection.py | jdg1837/Spotlight | 91bb0d4b07f782e90fe4b374d951ce1965422c27 | [
"MIT"
] | null | null | null | src/line_detection.py | jdg1837/Spotlight | 91bb0d4b07f782e90fe4b374d951ce1965422c27 | [
"MIT"
] | null | null | null | src/line_detection.py | jdg1837/Spotlight | 91bb0d4b07f782e90fe4b374d951ce1965422c27 | [
"MIT"
] | null | null | null | '''Testing openCV line detection'''
# Found on https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv
import cv2
import numpy as np
def detect_lines(filename):
# Get gray image
imgInput = filename
imgOutput = '../images/' + filename.replace('.png', '') + '_lines.png'
img = cv2.imread(imgInput)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Process gray image with GaussianBlur
kernal_size = 5
blur_gray = cv2.GaussianBlur(gray, (kernal_size, kernal_size), 0)
# Process edge detection using Canny
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Get lines with HoughLinesP
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 50 # minimum number of pixels making up a line
max_line_gap = 150 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) # creating a blank to draw lines on
line_overlay = np.copy(img)
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
min_line_length, max_line_gap)
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)
# Draw the lines on the image
lines_edges = cv2.addWeighted(img, 0.8, line_overlay, 1, 0)
cv2.imwrite(imgOutput, line_image)
return imgOutput
| 35.061224 | 85 | 0.679278 |
62b1b04b19f61940ed46a0258fffc6244c00c659 | 3,708 | py | Python | contrib/macdeploy/custom_dsstore.py | slicecoincore/slice | 51e1cdf1bc109a40488e7aaf612e6554659b0167 | [
"MIT"
] | 3 | 2019-03-13T08:02:36.000Z | 2020-05-06T12:05:08.000Z | contrib/macdeploy/custom_dsstore.py | slicecoincore/slice | 51e1cdf1bc109a40488e7aaf612e6554659b0167 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | slicecoincore/slice | 51e1cdf1bc109a40488e7aaf612e6554659b0167 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
# Usage: custom_dsstore.py <output .DS_Store path> <volume/package name>
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
# Finder window settings for the mounted disk image (size, hidden chrome).
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
# Icon-view settings; backgroundImageAlias is a serialized macOS Alias record
# pointing at .background/background.tiff (patched below before writing).
icvp = {
    'gridOffsetX': 0.0,
    'textSize': 12.0,
    'viewOptionsVersion': 1,
    'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
    'backgroundColorBlue': 1.0,
    'iconSize': 96.0,
    'backgroundColorGreen': 1.0,
    'arrangeBy': 'none',
    'showIconPreview': True,
    'gridSpacing': 100.0,
    'gridOffsetY': 0.0,
    'showItemInfo': False,
    'labelOnBottom': True,
    'backgroundType': 2,
    'backgroundColorRed': 1.0
}
# Rewrite the template alias so its volume and dmg paths match this package.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
# Icon positions (x, y) inside the Finder window.
ds['Applications']['Iloc'] = (370, 156)
ds['Slice-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.8 | 1,817 | 0.72411 |
041369213e6820fd7201752bef6db803d57db630 | 1,313 | py | Python | Binary Tree/kSmallestElementInBST.py | hoaiphun96/Leet-Code-Problems | bc4651fa9c8eae261bb280bb25b7537722d3b1f9 | [
"MIT",
"Unlicense"
] | 8 | 2019-01-17T23:45:41.000Z | 2021-07-08T02:06:16.000Z | Binary Tree/kSmallestElementInBST.py | hoaiphun96/Leet-Code-Problems | bc4651fa9c8eae261bb280bb25b7537722d3b1f9 | [
"MIT",
"Unlicense"
] | null | null | null | Binary Tree/kSmallestElementInBST.py | hoaiphun96/Leet-Code-Problems | bc4651fa9c8eae261bb280bb25b7537722d3b1f9 | [
"MIT",
"Unlicense"
] | 9 | 2018-04-27T04:50:06.000Z | 2022-03-03T14:17:13.000Z | """
Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
Note:
You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
Follow up:
What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently? How would you optimize the kthSmallest routine?
Credits:
Special thanks to @ts for adding this problem and creating all test cases.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def kthSmallest(self, root, k):
        """
        Return the k-th smallest value (1-indexed) in a binary search tree.

        Standard iterative in-order traversal: push the left spine onto an
        explicit stack, pop nodes in ascending order and stop after k pops.
        Runs in O(h + k) time with O(h) extra space (h = tree height).

        Fixes the original implementation, which tracked visited nodes by
        *value* (breaking on trees with duplicate values) and paid O(n) list
        membership checks per step.

        :type root: TreeNode
        :type k: int
        :rtype: int
        """
        stack = []
        node = root
        while stack or node:
            # Descend to the smallest not-yet-visited node.
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            # Continue with the in-order successor.
            node = node.right
4b86d01dfa0fc3d8d0f3465c65cea7eae8522818 | 4,256 | py | Python | app/core/__init__.py | codestrange/cerebro | 7d0ea0d9d41349a3324a9bf80b9a36f22be08b3c | [
"MIT"
] | 7 | 2019-04-10T20:40:48.000Z | 2020-12-07T14:46:20.000Z | app/core/__init__.py | cbermudez97/cerebro | f343c50146200d84631fa3f782a44bb45d257021 | [
"MIT"
] | null | null | null | app/core/__init__.py | cbermudez97/cerebro | f343c50146200d84631fa3f782a44bb45d257021 | [
"MIT"
] | 1 | 2018-09-10T22:45:24.000Z | 2018-09-10T22:45:24.000Z | from json import dumps
from queue import Queue, Empty, Full
from time import sleep
from logging import debug, error
from requests.api import post
from requests.exceptions import ConnectionError
from .config import get_config
from ..models import Message
config = get_config()
queue = Queue()
# El módulo dummy_threading.threading provee una interfaz igual a la del módulo
# threading porque threading utilizá el módulo _thread y este no es provisto por
# todas las plataformas.
try:
from threading import Thread
except ImportError:
from dummy_threading.threading import Thread
def process(message):
    """Update the message's tags in the database and forward the message to
    every next module whose transition query matches those tags.

    Arguments:
        message {tuple} -- Three elements, in order: tag of the source
        module, message id, and list of message tags.
    """
    try:
        prev_module_tag = message[0]
        id_message = message[1]
        tags_message = message[2]
    except IndexError:
        # Malformed tuple: log and bail out. (The original fell through and
        # crashed with NameError on the undefined variables.)
        error('La tupla "message" pasada como argumento no tiene 3 elementos.')
        return
    try:
        # Fetch the message with id = id_message from the database.
        message = Message.objects.get(id=id_message)
    except Exception:
        # Message not found: nothing to update or forward.
        error(f'El mensaje con id={id_message} no existe en la base de datos.')
        return
    # Persist any new tags on the message.
    for tag in tags_message:
        if tag not in message.tags:
            message.tags.append(tag)
    message.save()
    tags_message = message.tags
    text_message = message.text
    # Look up the source module in the configuration.
    try:
        prev_module = config[prev_module_tag]
    except KeyError:
        error(f'No existe el módulo {prev_module_tag} en el archivo de configuración.')
        return
    # Evaluate each outgoing transition and POST to the ones that match.
    for transition in prev_module.transitions:
        # url_next_module is the address to POST to when the transition fires.
        try:
            url_next_module = config[transition.next_module].url
        except KeyError:
            error('No existe el módulo {0} en el archivo de configuración.'
                  .format(transition.next_module))
            # Skip this transition; the remaining ones may still be valid.
            continue
        # transition.query is a predicate over the tag list: True means the
        # transition should be taken.
        if transition.query(tags_message):
            obj = {'id': str(id_message), 'text': text_message, 'tags': tags_message}
            try:
                post(url_next_module, json=dumps(obj))
            except ConnectionError:
                error('No se pudo establecer la conexión con el módulo "{0}" con la url: "{1}"'
                      .format(transition.next_module, url_next_module))
def get_message():
    """Pop one message from the processing queue (waiting up to 1 second)
    and hand it to ``process``; do nothing if the queue stays empty.
    """
    debug('Obteniendo mensaje ...')
    try:
        pending = queue.get(timeout=1)
    except Empty:
        debug('Cola sin mensajes ...')
        return
    debug('Procesando mensaje ...')
    process(pending)
def put_message(message):
    """Push a message onto the processing queue.

    Arguments:
        message {tuple} -- (source module tag, message id, list of tags).
    """
    debug('Insertando mensaje ...')
    try:
        queue.put(message)
    except Full:
        debug('Cola completamente llena ...')
# TODO: make this method asynchronous
def start_core():
    """Main loop: once per second, spawn a worker thread that pulls one
    message from the queue and processes it.
    """
    while True:
        sleep(1)
        worker = Thread(target=get_message)
        worker.start()
| 34.322581 | 95 | 0.664474 |
ced2aa6972f4b58876e43affaf8b4558cf7d1dfb | 352 | py | Python | tap_ringcentral/streams/meetings.py | volodymyr-mykhailyk/tap-ringcentral | 54c0038ede52e270a09c70e3d100d0a0ee1b11e3 | [
"Apache-2.0"
] | 5 | 2019-05-02T04:49:05.000Z | 2020-04-15T15:00:14.000Z | tap_ringcentral/streams/meetings.py | volodymyr-mykhailyk/tap-ringcentral | 54c0038ede52e270a09c70e3d100d0a0ee1b11e3 | [
"Apache-2.0"
] | 4 | 2019-12-31T13:16:19.000Z | 2020-06-09T13:18:07.000Z | tap_ringcentral/streams/meetings.py | volodymyr-mykhailyk/tap-ringcentral | 54c0038ede52e270a09c70e3d100d0a0ee1b11e3 | [
"Apache-2.0"
] | 3 | 2019-06-11T13:26:17.000Z | 2020-01-20T13:47:24.000Z | from tap_ringcentral.streams.base import ContactBaseStream
import singer
import json
LOGGER = singer.get_logger() # noqa
class MeetingStream(ContactBaseStream):
    """Singer stream for RingCentral meetings, scoped per extension."""

    TABLE = 'meetings'
    API_METHOD = 'GET'
    KEY_PROPERTIES = ['id']

    @property
    def api_path(self):
        # {extensionId} is presumably substituted by the base stream when the
        # request is built — confirm against ContactBaseStream.
        return '/restapi/v1.0/account/~/extension/{extensionId}/meeting'
| 20.705882 | 72 | 0.713068 |
d927a5fb7c6f1fe4e6a01bf9a29fb333a6aa9a22 | 1,813 | py | Python | psydac/polar/c1_spaces.py | GabrielJie/psydac | 51814f04501fa14bc100f0ab224f50a2bbe86612 | [
"MIT"
] | 1 | 2022-01-19T02:26:49.000Z | 2022-01-19T02:26:49.000Z | psydac/polar/c1_spaces.py | GabrielJie/psydac | 51814f04501fa14bc100f0ab224f50a2bbe86612 | [
"MIT"
] | null | null | null | psydac/polar/c1_spaces.py | GabrielJie/psydac | 51814f04501fa14bc100f0ab224f50a2bbe86612 | [
"MIT"
] | null | null | null | # coding: utf-8
#
# Copyright 2018 Yaman Güçlü
from psydac.linalg.stencil import StencilVectorSpace
from psydac.linalg.block import ProductSpace
from psydac.polar .dense import DenseVectorSpace
from psydac.polar .c1_cart import C1_Cart
__all__ = ['new_c1_vector_space']
#==============================================================================
def new_c1_vector_space( V, radial_dim=0, angle_dim=1 ):
    """
    Create a new product space from a given stencil vector space.

    Parameters
    ----------
    V : StencilVectorSpace
        Space of the coefficients of a tensor-product finite-element space
        built on a mapping with a polar singularity (O-point).

    radial_dim : int
        Index of the dimension that corresponds to the 'radial' direction.

    angle_dim : int
        Index of the dimension that corresponds to the 'angle' direction.

    Returns
    -------
    P : ProductSpace
        Space of the coefficients of a new finite-element space which has
        C^1 continuity at the O-point.

    """
    assert isinstance( V, StencilVectorSpace )
    assert isinstance( radial_dim, int )
    assert isinstance( angle_dim, int )
    assert 0 <= radial_dim < V.ndim
    assert 0 <= angle_dim < V.ndim
    assert V.ndim >= 2
    # The radial direction must be non-periodic and the angular one periodic.
    assert V.periods[radial_dim] == False
    assert V.periods[ angle_dim] == True
    if V.parallel:
        # Distributed (MPI) case: build the C1 Cartesian decomposition and a
        # dense space aware of the cart layout.
        c1_cart = C1_Cart( V.cart, radial_dim )
        S = StencilVectorSpace( cart=c1_cart, dtype=V.dtype )
        D = DenseVectorSpace( 3, cart=V.cart, radial_dim=radial_dim, angle_dim=angle_dim )
    else:
        # Serial case: drop the two innermost radial coefficients, which are
        # replaced by the 3 dense C1 degrees of freedom.
        c1_npts = [(n-2 if d==radial_dim else n) for (d,n) in enumerate( V.npts )]
        S = StencilVectorSpace( c1_npts, V.pads, V.periods, V.dtype )
        D = DenseVectorSpace( 3 )
    P = ProductSpace( D, S )
    return P
854222700f569e9dce1037acf725ade98c98c66b | 4,926 | py | Python | common/global_variables.py | bluebibi/trade | 32145c2a139f4172159ace8dfad11591ad8839ea | [
"MIT"
] | 2 | 2021-02-03T19:03:13.000Z | 2021-03-06T15:58:08.000Z | common/global_variables.py | bluebibi/trade | 32145c2a139f4172159ace8dfad11591ad8839ea | [
"MIT"
] | 10 | 2020-01-28T23:09:34.000Z | 2022-02-10T00:23:12.000Z | common/global_variables.py | bluebibi/trade | 32145c2a139f4172159ace8dfad11591ad8839ea | [
"MIT"
] | 1 | 2019-11-03T11:37:37.000Z | 2019-11-03T11:37:37.000Z | from enum import Enum
import configparser
import ast
import torch
import sys, os
from common.slack import PushSlack
idx = os.getcwd().index("trade")
PROJECT_HOME = os.getcwd()[:idx] + "trade"
sys.path.append(PROJECT_HOME)
class CoinStatus(Enum):
    # Lifecycle states of a coin position.
    # NOTE(review): integer values are presumably persisted in the sqlite
    # buy/sell DB — confirm before renumbering.
    bought = 0
    trailed = 1
    success_sold = 2
    gain_sold = 3
    loss_sold = 4
    up_trailed = 5
class Period(Enum):
    # Time-window granularities referenced elsewhere in the project
    # (semantics inferred from member names — verify at usage sites).
    daily = 0
    half_daily = 1
    quater_daily = 2
    every_hour = 3
class BuyType(Enum):
    # Kind of buy order: 'normal' vs 'prompt' (immediate) — verify semantics
    # at the call sites.
    normal = 0
    prompt = 1
# GENERAL
fmt = "%Y-%m-%dT%H:%M:%S"

sqlite3_buy_sell_db_filename = os.path.join(PROJECT_HOME, 'web/db/upbit_buy_sell.db')
sqlite3_order_book_db_filename = os.path.join(PROJECT_HOME, 'web/db/upbit_order_book_info.db')

config = configparser.ConfigParser()
read_ok = config.read(os.getcwd()[:idx] + "trade/common/config.ini")
if not read_ok:
    # Fail fast with a clear message instead of opaque KeyErrors from the
    # section lookups below (previously read_ok was computed but never used).
    raise FileNotFoundError(
        "config.ini could not be read; expected it at {}/common/config.ini".format(PROJECT_HOME))

# USER
USER_ID = int(config['USER']['user_id'])
USERNAME = config['USER']['username']
HOST_IP = config['USER']['host_ip']
SYSTEM_USERNAME = config['USER']['system_username']
SYSTEM_PASSWORD = config['USER']['system_password']
EXCHANGE = config['USER']['exchange']
SOURCE = config['USER']['source']
INITIAL_TOTAL_KRW = int(config['USER']['initial_total_krw'])

# UPBIT
CLIENT_ID_UPBIT = config['UPBIT']['access_key']
CLIENT_SECRET_UPBIT = config['UPBIT']['secret_key']

# BINANCE
API_KEY_BINANCE = config['BINANCE']['api_key']
SECRET_KEY_BINANCE = config['BINANCE']['secret_key']

# TELEGRAM
TELEGRAM_API_ID = config['TELEGRAM']['api_id']
TELEGRAM_API_HASH = config['TELEGRAM']['api_hash']
TELEGRAM_APP_TITLE = config['TELEGRAM']['app_title']

# SLACK
PUSH_SLACK_MESSAGE = config.getboolean('SLACK', 'push_slack_message')
SLACK_WEBHOOK_URL_1 = config['SLACK']['webhook_url_1']
SLACK_WEBHOOK_URL_2 = config['SLACK']['webhook_url_2']

# GOOGLE
GOOGLE_APP_PASSWORD = config['GOOGLE']['app_password']

# TRAIN
NUM_EPOCHS = int(config['TRAIN']['num_epochs'])

# DATA
WINDOW_SIZE = int(config['DATA']['window_size'])
FUTURE_TARGET_SIZE = int(config['DATA']['future_target_size'])
UP_RATE = float(config['DATA']['up_rate'])

#INPUT_SIZE = 125 # 1 (daily_base_timestamp) + 30 (ask_price) + 30 (ask_price_btc) + 30 (bid_price) + 30 (bid_price_btc) + 2 (total ask, total bid) + 2 (total_btc ask, total_btc bid)
INPUT_SIZE = 63 # 1 (daily_base_timestamp) + 15 (ask_price) + 15 (ask_price_btc) + 15 (bid_price) + 15 (bid_price_btc) + 2 (total ask, total bid)
VERBOSE = True

# Use the GPU when available; the Slack client is built once at import time.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
SLACK = PushSlack(SLACK_WEBHOOK_URL_1, SLACK_WEBHOOK_URL_2)

# EVALUATION
MIN_VALID_LOSS_THRESHOLD = float(config['EVALUATION']['min_valid_loss_threshold'])
LAST_VALID_ACCURACY_THRESHOLD = float(config['EVALUATION']['last_valid_accuracy_threshold'])
LAST_SAVE_EPOCH_THRESHOLD = int(config['EVALUATION']['last_save_epoch_threshold'])
ONE_RATE_VALID_THRESHOLD = float(config['EVALUATION']['one_rate_valid_threshold'])
VALID_SIZE_THRESHOLD = int(config['EVALUATION']['valid_size_threshold'])
BUY_PROB_THRESHOLD = float(config['EVALUATION']['buy_prob_threshold'])

# BUY_SELL
BUY_CONTROL_CONSTANT = float(config['BUY_SELL']['buy_control_constant'])
BANNED_BUY_COIN_LIST = ast.literal_eval(config['BUY_SELL']['banned_buy_coin_list'])
SELL_RATE = float(config['BUY_SELL']['sell_rate'])
DOWN_FORCE_SELL_RATE = float(config['BUY_SELL']['down_force_sell_rate'])
TRANSACTION_FEE_RATE = float(config['BUY_SELL']['transaction_fee_rate'])
SELL_PERIOD = int(config['BUY_SELL']['sell_period'])
UP_TRAIL_COUNT_BOUND = int(config['BUY_SELL']['up_trail_count_bound'])

# PULL_MODELS
REMOTE_SOURCE_HOST = config['PULL_MODELS']['remote_source_host']
REMOTE_SOURCE = config['PULL_MODELS']['remote_source']
SSH_SCP_SOURCE_PORT = config['PULL_MODELS']['ssh_scp_source_port']
SSH_SCP_SOURCE_ID = config['PULL_MODELS']['ssh_scp_source_id']
SSH_SCP_SOURCE_PASSWORD = config['PULL_MODELS']['ssh_scp_source_password']
LOCAL_TARGET = config['PULL_MODELS']['local_target']

# PUSH_MODELS
IS_PUSH_AFTER_MAKE_MODELS = config.getboolean('PUSH_MODELS', 'is_push_after_make_models')
REMOTE_TARGET_HOST = config['PUSH_MODELS']['remote_target_host']
REMOTE_TARGET = config['PUSH_MODELS']['remote_target']
SSH_SCP_TARGET_PORT = config['PUSH_MODELS']['ssh_scp_target_port']
SSH_SCP_TARGET_ID = config['PUSH_MODELS']['ssh_scp_target_id']
SSH_SCP_TARGET_PEM_FILE_PATH = config['PUSH_MODELS']['ssh_scp_target_pem_file_path']
LOCAL_MODEL_SOURCE = config['PUSH_MODELS']['local_model_source']

# RL
MAX_TRADING_SESSION = config['RL']['max_trading_session']

# WEB
WEB_DEBUG = config['WEB']['web_debug']

# MYSQL
MYSQL_ROOT_PASSWORD = config['MYSQL']['root_password']
MYSQL_ID = config['MYSQL']['mysql_id']
MYSQL_PASSWORD = config['MYSQL']['mysql_password']
MYSQL_HOST = config['MYSQL']['mysql_host']

# MYSQL_NAVER
NAVER_MYSQL_ID = config['MYSQL_NAVER']['mysql_id']
NAVER_MYSQL_PASSWORD = config['MYSQL_NAVER']['mysql_password']
NAVER_MYSQL_HOST = config['MYSQL_NAVER']['mysql_host']
# Restored: this line was corrupted by fused dataset statistics, which made
# the module unparsable (a trailing `|` is a syntax error).
S3_BUCKET_NAME = 'invest-thinkonweb'
1966911a3e9b1d962875a466c17e1aaf3dddf6dd | 9,700 | py | Python | urba.py | VoytRuslan/The-Best-Program-Of-Afisha | 97ab867620719ce26e32b014347c63a0f0b58f40 | [
"Apache-2.0"
] | null | null | null | urba.py | VoytRuslan/The-Best-Program-Of-Afisha | 97ab867620719ce26e32b014347c63a0f0b58f40 | [
"Apache-2.0"
] | null | null | null | urba.py | VoytRuslan/The-Best-Program-Of-Afisha | 97ab867620719ce26e32b014347c63a0f0b58f40 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#from parserFirst import parser1
from functools import partial
from PIL import ImageTk, Image # $ pip install pillow
import webbrowser
#from parserSecond import parser2
from urllib.request import urlopen
import requests, html2text, bs4
try:
# Tkinter for Python 2.xx
import Tkinter as tk
except ImportError:
# Tkinter for Python 3.xx
import tkinter as tk
def siteOpen1(y):
    """Open the culture.ru page for event path *y* in a new browser window."""
    webbrowser.open_new(f'https://www.culture.ru/{y}')
def siteOpen2(y):
    """Open the gorodzovet.ru page for event path *y* in a new browser window."""
    webbrowser.open_new(f'https://gorodzovet.ru{y}')
def splitter(text):
    """Wrap *text* onto lines of roughly 20 characters, breaking after spaces.

    Mirrors the original greedy algorithm: every 20 characters the text
    accumulated since the last break is flushed up to (and including) the
    most recent space, and the remainder is carried over to the next line.

    Fixes: the original shadowed the builtin ``iter`` and crashed with
    ``UnboundLocalError`` when the text contained no space in the first
    21 characters; such text is now hard-broken at the window edge instead.
    """
    lines = []
    chunk = ''       # characters accumulated since the last emitted line
    pending = ''     # chunk as it looked just after the most recent space
    last_space = -1  # absolute index of the most recent space, -1 if none yet
    for i, ch in enumerate(text):
        chunk += ch
        if ch == ' ':
            last_space = i
            pending = chunk
        if i % 20 == 0 and i > 0:
            if last_space >= 0:
                # Flush up to the last seen space and carry the tail over.
                # (If no new space occurred in this window, ``pending`` is
                # empty and an empty line is emitted — same as the original.)
                lines.append(pending)
                chunk = text[last_space + 1:i + 1]
            else:
                # No space seen at all yet: hard-break instead of crashing.
                lines.append(chunk)
                chunk = ''
            pending = ''
    lines.append(chunk)
    return '\n'.join(lines)
def parser1():
    """Scrape culture.ru's Izhevsk afisha page.

    Returns a 3-tuple ``(titles, siteList, photos)`` where titles are the
    event-card titles wrapped with :func:`splitter`, ``siteList`` holds
    ``events/...`` URL fragments and ``photos`` holds thumbnail image URLs.
    NOTE(review): parsing relies on the current page markup (CSS classes
    ``.entity-card_title`` / ``.thumbnail``) — fragile if the site changes.
    """
    buttons = []
    def listTitle():
        # Appends every non-empty line of the enclosing loop's ``title``
        # list to ``buttons``. NOTE(review): ``title`` is resolved lazily
        # from the enclosing scope (set inside the loop further below).
        #print(s28)
        for i in range(0,len(title)):
            if title[i] != '':
                #print(title[i])
                buttons.append(title[i])
        #scrollbar.config( command = b1.yview )
        #scrollbar = Scrollbar(root)
        #scrollbar.pack( side = RIGHT, fill = Y )
    s = requests.get('https://www.culture.ru/afisha/izhevsk')
    b = bs4.BeautifulSoup(s.text, 'html.parser')
    t = b.select('.entity-card_title')
    images = b.select('.thumbnail')
    #print(images)
    #print(t)
    # Flatten the matched tags to raw HTML strings for manual scanning.
    s = ''.join(list(map(str, t)))
    imagesList = ''.join(list(map(str, images)))
    siteList = []
    a = False
    # Character-by-character scan: collect everything from each 'events/'
    # marker up to the next double quote as a URL fragment.
    for i in range(len(s)):
        if s[i:i+7] == 'events/':
            a = True
            k = ''
        if a:
            if s[i] == '"':
                siteList.append(k)
                a = False
            else:
                k += s[i]
    #print(siteList)
    #t = html2text.HTML2Text().handle(s.text)
    for i in range(len(t)):
        y = i  # unused
        title = t[i].getText().split('\n')
        listTitle()
    imagesList = ''.join(list(map(str, images)))
    a = False
    photos = []
    # Extract each src="..." value; k[5:] strips the leading 'src="'.
    for i in range(len(imagesList)):
        if imagesList[i:i + 5] == 'src="':
            a = True
            k = ''
            count = 0
        if a:
            if imagesList[i] == '"':
                count += 1
                if count == 2:
                    a = False
                    photos.append(k[5:])
            else:
                k += imagesList[i]
    return list(splitter(i) for i in buttons), siteList, photos
def parser2():
    """Scrape gorodzovet.ru's Izhevsk event listing.

    Returns a 3-tuple ``(titles, siteList, photos)`` with titles wrapped by
    :func:`splitter`, event URL fragments starting with '/izhevsk/', and
    photo URLs prefixed with 'https:'. NOTE(review): relies on the current
    markup (CSS classes ``.eventBox__title`` / ``.eventBox``).
    """
    def listTitle():
        # Appends every non-empty line of the enclosing loop's ``title``
        # list to ``buttons`` (both resolved lazily from enclosing scope).
        #print(s28)
        for i in range(0,len(title)):
            if title[i] != '':
                buttons.append(title[i])
        #scrollbar.config( command = b1.yview )
        #scrollbar = Scrollbar(root)
        #scrollbar.pack( side = RIGHT, fill = Y )
    buttons = []
    s = requests.get('https://gorodzovet.ru/izhevsk/')
    b = bs4.BeautifulSoup(s.text, 'html.parser')
    t = b.select('.eventBox__title')
    silka = b.select('.eventBox')
    # Flatten the event boxes to one raw HTML string for manual scanning.
    s = ''.join(list(map(str, silka)))
    siteList = []
    photos=[]
    a = False
    phurl = False
    for i in range(len(s)):
        # Protocol-relative URLs ('//...') are collected up to the closing
        # quote and prefixed with 'https:'.
        if s[i:i+2] == '//':
            phurl = True
            p= ''
        if phurl:
            if s[i] == '"':
                photos.append('https:' + p)
                phurl= False
            else:
                p += s[i]
        # Event links start with '/izhevsk/'; the third '/' aborts the
        # capture (deeper paths are skipped), a quote terminates it.
        if s[i:i+9] == '/izhevsk/':
            a = True
            count = 0
            k = ''
        if a:
            if s[i] == '/':
                count += 1
                if count == 3:
                    a = False
                    continue
            if s[i] == '"':
                siteList.append(k)
                a = False
            else:
                k += s[i]
    #t = html2text.HTML2Text().handle(s.text)
    for i in range(len(t)):
        y = i  # unused
        title = t[i].getText().split('\n')
        listTitle()
    return list(splitter(i) for i in buttons), siteList, photos
# Application-wide constants. APP_XPOS/APP_YPOS/NUM_OF_BUTTONS appear unused
# in this file — TODO confirm before removing.
APP_TITLE = "THE BEST PROGRAMM EVER OF AFISHA"
APP_XPOS = 0
APP_YPOS = 0
NUM_OF_BUTTONS = 20

# Both sites are scraped once at import time (network I/O happens here).
a = parser1()
buttons1, site1, photos1 = a[0], a[1], a[2]
b = parser2()
buttons2, site2, photos2 = b[0], b[1], b[2]
#print(parser1())
class Application(tk.Frame):
    """Scrollable grid of event buttons; each button opens its event page."""

    def __init__(self, master, **options):
        """Build the canvas + scrollbars and one button per scraped event.

        NOTE(review): mutates the module-level global ``X`` (screen width),
        which is also read by the button sizing expressions below.
        """
        global X
        X = master.winfo_screenwidth()
        self.master = master
        self.master.protocol("WM_DELETE_WINDOW", self.close)
        tk.Frame.__init__(self, master, **options)
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.canvas = tk.Canvas(self, bg='steelblue', highlightthickness=0)
        self.canvas.grid(row=0, column=0, sticky='wesn')
        self.yscrollbar = tk.Scrollbar(self, orient="vertical",
                                       width=14, command=self.canvas.yview)
        self.yscrollbar.grid(row=0, column=1, sticky='ns')
        self.xscrollbar = tk.Scrollbar(self, orient="horizontal",
                                       width=14, command=self.canvas.xview)
        self.xscrollbar.grid(row=1, column=0, sticky='we')
        self.canvas.configure(
            xscrollcommand=self.xscrollbar.set,
            yscrollcommand=self.yscrollbar.set)
        self.button_frame = tk.Frame(self.canvas, bg=self.canvas['bg'])
        self.button_frame.pack()
        self.canvas.create_window((0,0), window=self.button_frame, anchor="nw")
        # Lay the buttons out in a 3-column grid; the matching URL is bound
        # at creation time via the lambda's default argument.
        c = 0
        r = 1
        self.BUTTONS1 = []
        for button in buttons1 + buttons2:
            if button in buttons1:
                '''
                im = Image.open(urlopen(photos1[buttons1.index(button)]))
                im = im.resize((400, 160))
                im.save('Foto.png')
                image1 = ImageTk.PhotoImage(file='Foto.png')'''
                button = tk.Button(self.button_frame, text=button,
                                   highlightthickness=0, bg='#6b6262', fg = '#ffde27', font = 'Verdana 15',width = round(X) // 60, padx=round(X) // 15, height = 7, command=lambda x = site1[buttons1.index(button)]: siteOpen1(x))
                self.BUTTONS1.append(button)
                # button.config(image=image1)
                button.grid(row = r,column= c)
                # button.image = image1
                self.bind_mouse_scroll(button, self.yscroll)
            else:
                '''
                im = Image.open(urlopen(photos2[buttons2.index(button)]))
                im = im.resize((400, 160))
                im.save('Foto.png')
                image1 = ImageTk.PhotoImage(file='Foto.png')'''
                button = tk.Button(self.button_frame, text=button,
                                   highlightthickness=0, bg='#6b6262', fg = '#ffde27', font = 'Verdana 15',width = round(X) // 60, padx=round(X) // 15, height = 7, command=lambda x = site2[buttons2.index(button)]: siteOpen2(x))
                button.grid(row = r, column = c)
                # button.image=image1
                # button.bind("<Enter>", view)
                self.bind_mouse_scroll(button, self.yscroll)
            c+=1
            if c % 3 == 0:
                r +=1
                c=0
        self.canvas.bind('<Configure>', self.update)
        self.bind_mouse_scroll(self.canvas, self.yscroll)
        self.bind_mouse_scroll(self.xscrollbar, self.xscroll)
        self.bind_mouse_scroll(self.yscrollbar, self.yscroll)
        self.bind_mouse_scroll(self.button_frame, self.yscroll)
        #self.canvas.focus_set()

    def bind_mouse_scroll(self, parent, mode):
        """Bind wheel events for both Windows and Unix to handler *mode*."""
        #~~ Windows only
        parent.bind("<MouseWheel>", mode)
        #~~ Unix only
        parent.bind("<Button-4>", mode)
        parent.bind("<Button-5>", mode)

    def yscroll(self, event):
        """Scroll the canvas vertically by one unit per wheel event."""
        if event.num == 5 or event.delta < 0:
            self.canvas.yview_scroll(1, "unit")
        elif event.num == 4 or event.delta > 0:
            self.canvas.yview_scroll(-1, "unit")

    def xscroll(self, event):
        """Scroll the canvas horizontally by one unit per wheel event."""
        if event.num == 5 or event.delta < 0:
            self.canvas.xview_scroll(1, "unit")
        elif event.num == 4 or event.delta > 0:
            self.canvas.xview_scroll(-1, "unit")

    def update(self, event):
        """Resize the scroll region to cover all canvas content."""
        if self.canvas.bbox('all') != None:
            region = self.canvas.bbox('all')
            self.canvas.config(scrollregion=region)

    def button_callback(self, button):
        # Placeholder callback; currently unused.
        pass
        #print(button)

    def close(self):
        """Destroy the root window when the WM close button is pressed."""
        #print("Application-Shutdown")
        self.master.destroy()
def main():
    """Create the root window sized to the screen and run the Tk loop."""
    root = tk.Tk()
    root.iconbitmap(r'icona.ico')
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    root.title(APP_TITLE)
    # Pin the window to the top-left corner, then stretch it over the screen.
    root.geometry("+{}+{}".format(0, 0))
    root.geometry("{}x{}".format(round(screen_w), round(screen_h)))
    Application(root).pack(fill='both', expand=True)
    root.mainloop()


if __name__ == '__main__':
    main()
| 32.881356 | 209 | 0.501443 |
449ad4eb8e518d71a9f4da4e00897af24457ab5a | 821 | py | Python | configs/sefa_mstar.py | McHz1s/genforce | c808edc5fe897c27076c4588de3466669399e5e6 | [
"MIT"
] | null | null | null | configs/sefa_mstar.py | McHz1s/genforce | c808edc5fe897c27076c4588de3466669399e5e6 | [
"MIT"
] | null | null | null | configs/sefa_mstar.py | McHz1s/genforce | c808edc5fe897c27076c4588de3466669399e5e6 | [
"MIT"
] | null | null | null | # python3.7
"""Configuration for training StyleGAN on FF-HQ (256) dataset.
All settings are particularly used for one replica (GPU), such as `batch_size`
and `num_workers`.
"""
viz_size = 128
gpus = '2'
work_dir = '/data3/lyz/cache/sefa'
checkpoint_path = '/data3/lyz/cache/genforce/stylegan_mstar_24z_degree1-2/2021-4-14-16-48-35/checkpoint_iter140000.pth'
gt_data_cfg = dict(root_dir='/data3/lyz/dataset/mstar/TRAINT72_132INF.MAT',
degree_interval_list=[[0, 90]])
generator_config = dict(
gan_type='stylegan',
resolution=128,
z_space_dim=24, w_space_dim=24,
image_channels=1, final_sigmoid=True
)
sefa_cfg = dict(num_samples=4, num_semantics=2, start_distance=-6, end_distance=6, step=118, seed_range=[0, 1, 2, 3],
trunc_psi=1.0, trunc_layers=0, layer_idx='all')
| 32.84 | 119 | 0.714982 |
52481849a6d206347c8fcf75b4635bc6edcd6d37 | 3,015 | py | Python | benchmark/startCirq2553.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2553.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2553.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=35
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit benchmark circuit and measure all qubits.

    ``n`` is kept for signature compatibility with the sibling generated
    benchmarks; the gate list below is hard-coded. The ``number=...`` tags
    are the generator's original operation indices.
    """
    gate_sequence = [
        cirq.H.on(input_qubit[0]),  # number=9
        cirq.H.on(input_qubit[1]),  # number=2
        cirq.H.on(input_qubit[2]),  # number=3
        cirq.H.on(input_qubit[3]),  # number=4
        cirq.Y.on(input_qubit[3]),  # number=12
        cirq.H.on(input_qubit[1]),  # number=26
        cirq.H.on(input_qubit[0]),  # number=5
        cirq.H.on(input_qubit[1]),  # number=6
        cirq.CNOT.on(input_qubit[0], input_qubit[3]),  # number=32
        cirq.X.on(input_qubit[3]),  # number=33
        cirq.CNOT.on(input_qubit[0], input_qubit[3]),  # number=34
        cirq.H.on(input_qubit[2]),  # number=7
        cirq.CNOT.on(input_qubit[3], input_qubit[0]),  # number=20
        cirq.CNOT.on(input_qubit[3], input_qubit[0]),  # number=23
        cirq.Z.on(input_qubit[3]),  # number=24
        cirq.CNOT.on(input_qubit[3], input_qubit[0]),  # number=25
        cirq.CNOT.on(input_qubit[3], input_qubit[0]),  # number=22
        cirq.H.on(input_qubit[3]),  # number=8
        cirq.Z.on(input_qubit[3]),  # number=28
        cirq.CNOT.on(input_qubit[0], input_qubit[3]),  # number=13
        cirq.CNOT.on(input_qubit[0], input_qubit[3]),  # number=17
        cirq.X.on(input_qubit[3]),  # number=18
        cirq.rx(-3.1101767270538954).on(input_qubit[1]),  # number=27
        cirq.CNOT.on(input_qubit[0], input_qubit[3]),  # number=19
        cirq.CNOT.on(input_qubit[0], input_qubit[3]),  # number=15
        cirq.Y.on(input_qubit[2]),  # number=10
        cirq.Y.on(input_qubit[2]),  # number=11
        cirq.X.on(input_qubit[1]),  # number=30
        cirq.X.on(input_qubit[1]),  # number=31
    ]
    c = cirq.Circuit()
    for op in gate_sequence:
        # One-by-one append, exactly as the generated original did, so the
        # resulting moment structure is unchanged.
        c.append(op)
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' digits."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]

    # Build the benchmark circuit and optimize it for the Sycamore gate set.
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # Sample the circuit and histogram the measured bitstrings.
    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)

    # 'with' guarantees the file is closed even if a print fails (the
    # original leaked the handle on error and its last line was corrupted
    # by fused dataset statistics).
    with open("../data/startCirq2553.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        print(len(circuit), file=writefile)
        print(circuit, file=writefile)
3f35ffe0f09ab1087dd1f014e2da04dc6e8a765c | 15,140 | py | Python | examples/lightning_base.py | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed | [
"Apache-2.0"
] | 12 | 2021-09-13T18:31:09.000Z | 2022-03-31T12:10:28.000Z | examples/lightning_base.py | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed | [
"Apache-2.0"
] | 5 | 2021-12-01T04:34:07.000Z | 2022-01-28T08:28:18.000Z | examples/lightning_base.py | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed | [
"Apache-2.0"
] | 3 | 2022-01-18T10:56:05.000Z | 2022-01-28T01:46:43.000Z | import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
import pkg_resources
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
logger = logging.getLogger(__name__)

# Warn (but do not fail) when the installed pytorch_lightning is older than
# the minimum this module was written against.
try:
    pkg = "pytorch_lightning"
    min_ver = "1.0.4"
    pkg_resources.require(f"{pkg}>={min_ver}")
except pkg_resources.VersionConflict:
    logger.warning(
        f"{pkg}>={min_ver} is required for a normal functioning of this module, but found {pkg}=={pkg_resources.get_distribution(pkg).version}. Try pip install -r examples/requirements.txt"
    )


# Maps the BaseTransformer ``mode`` argument to the matching Auto* factory.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Derived strings used by argparse for --lr_scheduler choices/metavar.
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    """LightningModule wrapper around a HuggingFace transformers model.

    Subclasses must implement :meth:`get_dataloader`; several tasks also
    provide ``validation_step``/``validation_end`` (called by the test hooks
    below — TODO confirm against each subclass).
    """

    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs
    ):
        """Initialize a model, tokenizer and config.

        Each of ``config``/``tokenizer``/``model`` may be passed in directly;
        otherwise it is loaded from ``hparams.model_name_or_path`` (or the
        dedicated ``config_name``/``tokenizer_name`` overrides).
        """
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        # Copy selected dropout/layerdrop hparams onto the model config when
        # they are set (truthy) on the command line.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Replace ``self.model`` by loading a HuggingFace checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the per-step LR scheduler selected by ``--lr_scheduler``."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        # Weight decay is disabled for biases and LayerNorm weights.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )

        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        # Delegates to validation_step (expected to be defined by subclasses).
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        # Delegates to validation_end (expected to be defined by subclasses).
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, mode):
        """Record the dataset size (and cache the train loader) per stage."""
        if mode == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        """Task-specific hook: return a DataLoader for the given split."""
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Path of the cached-features file for *mode* under ``data_dir``."""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """Also save the HF model + tokenizer next to the Lightning ckpt."""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register the model/optimization CLI options on *parser*."""
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default="",
            type=str,
            help="Where do you want to store the pre-trained models downloaded from s3",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class LoggingCallback(pl.Callback):
    """Logs learning rates per batch and prints/saves eval metrics."""

    def on_batch_end(self, trainer, pl_module):
        """Log the current LR of every parameter group after each batch."""
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """Print all validation metrics (rank zero only)."""
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """Print all test metrics and save them to test_results.txt."""
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    """Register the CLI options shared by every example script on *parser*.

    ``root_dir`` is accepted for signature compatibility but not used here.
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    # (flag, keyword-arguments) pairs, registered in declaration order so the
    # generated --help output is unchanged.
    generic_args = [
        ("--output_dir", dict(default=None, type=str, required=True,
                              help="The output directory where the model predictions and checkpoints will be written.")),
        ("--fp16", dict(action="store_true",
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")),
        ("--fp16_opt_level", dict(type=str, default="O2",
                                  help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                                  "See details at https://nvidia.github.io/apex/amp.html")),
        ("--n_tpu_cores", dict(dest="tpu_cores", type=int)),
        ("--max_grad_norm", dict(dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")),
        ("--do_train", dict(action="store_true", help="Whether to run training.")),
        ("--do_predict", dict(action="store_true", help="Whether to run predictions on the test set.")),
        ("--gradient_accumulation_steps", dict(dest="accumulate_grad_batches", type=int, default=1,
                                               help="Number of updates steps to accumulate before performing a backward/update pass.")),
        ("--seed", dict(type=int, default=42, help="random seed for initialization")),
        ("--data_dir", dict(default=None, type=str, required=True,
                            help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")),
    ]
    for flag, kwargs in generic_args:
        parser.add_argument(flag, **kwargs)
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs
):
    """Configure a pl.Trainer for *model* and (optionally) run training.

    Returns the trainer; ``trainer.fit`` is only invoked when
    ``args.do_train`` is set.

    Fix: ``extra_callbacks`` previously defaulted to a shared mutable list
    ``[]`` that was also mutated in place, so callbacks leaked between calls
    (and into caller-provided lists). It now defaults to None and is copied.
    """
    pl.seed_everything(args.seed)

    # Copy to avoid mutating the caller's list when appending below.
    extra_callbacks = list(extra_callbacks) if extra_callbacks else []

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    # TODO: remove with PyTorch 1.6 since pl uses native amp
    if args.fp16:
        train_params["precision"] = 16
        train_params["amp_level"] = args.fp16_opt_level

    if args.gpus > 1:
        train_params["distributed_backend"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks,
        logger=logger,
        checkpoint_callback=checkpoint_callback,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    return trainer
| 38.040201 | 189 | 0.648547 |
1e4951e85c3c45fe7ac687f04406ed42b48603bf | 2,703 | py | Python | examples/round_lcd_gc9a01.py | charkster/u2if | ceab7e038435fc212641ecc35683fd05523dd5cd | [
"BSD-3-Clause"
] | 79 | 2021-04-18T14:03:24.000Z | 2022-03-27T06:03:44.000Z | examples/round_lcd_gc9a01.py | charkster/u2if | ceab7e038435fc212641ecc35683fd05523dd5cd | [
"BSD-3-Clause"
] | 10 | 2021-04-21T20:28:42.000Z | 2022-03-19T21:47:15.000Z | examples/round_lcd_gc9a01.py | charkster/u2if | ceab7e038435fc212641ecc35683fd05523dd5cd | [
"BSD-3-Clause"
] | 13 | 2021-04-18T14:32:25.000Z | 2022-03-26T01:22:48.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Round lcd GC9A01 (240x240) example, from Waveshare team. Modified to work on micropython
#import chardet
import os
import sys
import time
import logging
from external.gc9a01 import Lcd
from PIL import Image, ImageDraw,ImageFont
from machine import SPI, Pin, u2if
# Draw a demo clock face on the round GC9A01 LCD, then show a sample image.
# IOError is reported; Ctrl-C shuts the display down cleanly.
try:
    # display with hardware SPI:
    ''' Warning!!!Don't creation of multiple displayer objects!!! '''
    #disp = LCD_1inch28.LCD_1inch28(spi=SPI.SpiDev(bus, device),spi_freq=10000000,rst=RST,dc=DC,bl=BL)
    # Wire up the SPI bus and control pins (reset / data-command / chip
    # select / backlight) on the u2if GPIOs.
    spi = SPI(spi_index=0)
    spi.init(baudrate=1000000)
    rst = Pin(u2if.GP6)
    dc = Pin(u2if.GP7)
    cs = Pin(u2if.GP8)
    bl = Pin(u2if.GP9)
    disp = Lcd(spi, dc, rst, cs, bl, bl_freq=1000)
    # Initialize library.
    disp.Init()
    disp.bl_DutyCycle(65000)
    # Clear display.
    disp.clear()

    # Create blank image for drawing.
    image1 = Image.new("RGB", (disp.width, disp.height), "BLACK")
    draw = ImageDraw.Draw(image1)

    #logging.info("draw point")
    #draw.rectangle((Xstart,Ystart,Xend,Yend), fill = "color")
    # Three concentric blue arcs form the outer rim of the 240x240 face.
    print("draw circle")
    draw.arc((1,1,239,239),0, 360, fill =(0,0,255))
    draw.arc((2,2,238,238),0, 360, fill =(0,0,255))
    draw.arc((3,3,237,237),0, 360, fill =(0,0,255))

    # Tick marks at 12, 6, 9 and 3 o'clock.
    print("draw dial line")
    draw.line([(120, 1),(120, 12)], fill = (128,255,128),width = 4)
    draw.line([(120, 227),(120, 239)], fill = (128,255,128),width = 4)
    draw.line([(1,120),(12,120)], fill = (128,255,128),width = 4)
    draw.line([(227,120),(239,120)], fill = (128,255,128),width = 4)

    # print("draw text")
    Font1 = ImageFont.truetype("./external/ressources/Font/Font01.ttf",25)
    Font2 = ImageFont.truetype("./external/ressources/Font/Font01.ttf",35)
    Font3 = ImageFont.truetype("./external/ressources/Font/Font02.ttf",32)
    #
    draw.text((40, 50), 'WaveShare', fill = (128,255,128),font=Font2)
    text= u"微雪电子"
    draw.text((74, 150),text, fill = "WHITE",font=Font3)

    # Three clock hands radiating from the center (120, 120).
    print("draw pointer line")
    draw.line([(120, 120),(70, 70)], fill = "YELLOW",width = 3)
    draw.line([(120, 120),(176, 64)], fill = "BLUE",width = 3)
    draw.line([(120, 120),(120 ,210)], fill = "RED",width = 3)
    # im_r=image1.rotate(180)
    # disp.ShowImage(im_r)
    disp.ShowImage(image1)
    time.sleep(3)

    # Then display a pre-rendered sample image for 3 seconds.
    print("show image")
    image = Image.open('./external/ressources/pic/LCD_1inch28_1.jpg')
    #im_r=image.rotate(180)
    #disp.ShowImage(im_r)
    disp.ShowImage(image)
    time.sleep(3)
    #disp.module_exit()
    #logging.info("quit:")
    print("quit")
except IOError as e:
    #logging.info(e)
    print(e)
except KeyboardInterrupt:
    # Ctrl-C: power the display module down before exiting.
    disp.module_exit()
    #logging.info("quit:")
    print("quit:")
    exit()
| 31.430233 | 102 | 0.630041 |
7cb7921508ec3cc126cd9fd0b6d91bade887ecf5 | 1,985 | py | Python | tools/src/main/python/dlpx/virtualization/_internal/commands/upload.py | ankursarin/virtualization-sdk | 28d600ab9abcaa77161f4d303ae169f907bcc096 | [
"Apache-2.0"
] | null | null | null | tools/src/main/python/dlpx/virtualization/_internal/commands/upload.py | ankursarin/virtualization-sdk | 28d600ab9abcaa77161f4d303ae169f907bcc096 | [
"Apache-2.0"
] | null | null | null | tools/src/main/python/dlpx/virtualization/_internal/commands/upload.py | ankursarin/virtualization-sdk | 28d600ab9abcaa77161f4d303ae169f907bcc096 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019 by Delphix. All rights reserved.
#
import errno
import json
import logging
import os
from dlpx.virtualization._internal import delphix_client, exceptions
logger = logging.getLogger(__name__)
UNKNOWN_ERR = 'UNKNOWN_ERR'
def upload(engine, user, upload_artifact, password):
    """Log in to a Delphix Engine and upload a plugin artifact.

    The upload artifact is expected to be the JSON file produced by the
    plugin build command (it embeds the Delphix API version). Any problem
    is surfaced as an exception so the caller can report it cleanly.

    Raises specifically:
        UserError
        InvalidArtifactError
        HttpError
        UnexpectedError
    """
    logger.debug('Upload parameters include'
                 ' engine: {},'
                 ' user: {},'
                 ' upload_artifact: {}'.format(engine, user, upload_artifact))
    logger.info('Uploading plugin artifact {} ...'.format(upload_artifact))

    # Parse the artifact file. An IOError means the file itself could not
    # be read; a ValueError from json means the content is not valid JSON.
    try:
        with open(upload_artifact, 'rb') as artifact_file:
            try:
                content = json.load(artifact_file)
            except ValueError:
                raise exceptions.UserError(
                    'Upload failed because the upload artifact was not a valid'
                    ' json file. Verify the file was built using the delphix'
                    ' build command.')
    except IOError as io_err:
        error_code = io_err.errno
        raise exceptions.UserError(
            'Unable to read upload artifact file \'{}\''
            '\nError code: {}. Error message: {}'.format(
                upload_artifact, error_code,
                errno.errorcode.get(error_code, UNKNOWN_ERR)))

    # Authenticate against the engine and push the plugin up.
    client = delphix_client.DelphixClient(engine)
    engine_api = client.get_engine_api(content)
    client.login(engine_api, user, password)
    client.upload_plugin(os.path.basename(upload_artifact), content)
| 34.224138 | 79 | 0.641814 |
b18bc3ffcd9ffe2b338595a73ef1ddf1ce69f634 | 14,156 | py | Python | fact/storage/__init__.py | unicornunicode/FACT | fcdda7e7a2f6615d4b4cdb2afcdd230194bdc935 | [
"MIT"
] | 17 | 2021-11-24T14:20:02.000Z | 2022-03-22T11:45:44.000Z | fact/storage/__init__.py | unicornunicode/FACT | fcdda7e7a2f6615d4b4cdb2afcdd230194bdc935 | [
"MIT"
] | null | null | null | fact/storage/__init__.py | unicornunicode/FACT | fcdda7e7a2f6615d4b4cdb2afcdd230194bdc935 | [
"MIT"
] | 2 | 2022-01-26T02:14:18.000Z | 2022-01-31T20:06:39.000Z | from .types import ArtifactType
from fact.exceptions import (
TaskExistsError,
TaskNotFoundError,
TaskInvalidUUID,
ArtifactExistsError,
ArtifactNotFoundError,
ArtifactInvalidName,
ArtifactInvalidType,
)
from pathlib import Path
from uuid import UUID
from typing import BinaryIO, List, Union
import logging
log = logging.getLogger(__name__)
# TODO: Rewrite the storage API to always produce writestreams, for
# compatibility with S3 buckets
class Artifact:
    """
    Stores information about an artifact
    """

    def __init__(self, artifact_name: str = "", artifact_type: str = "") -> None:
        """Initialises an Artifact object

        :param artifact_name: Name of artifact
        :param artifact_type: Type of artifact
        :raises:
            ArtifactInvalidName: Invalid name. Cannot be empty.
            ArtifactInvalidType: Invalid artifact type, needs to be found in
                ArtifactType
        """
        # Route validation through the public static validators so that
        # construction and validation cannot drift apart
        # (is_valid_artifact_name was previously defined but never used).
        if not Artifact.is_valid_artifact_name(artifact_name):
            raise ArtifactInvalidName("Invalid empty name", artifact_name)
        if not artifact_type:
            # Default to the catch-all type rather than rejecting.
            artifact_type = ArtifactType.unknown.name
        elif not Artifact.is_valid_artifact_type(artifact_type):
            valid_types = "{" + ", ".join(ArtifactType.__members__.keys()) + "}"
            err_msg = f"Invalid artifact type. Select from: {valid_types}"
            raise ArtifactInvalidType(err_msg, artifact_type)
        self.artifact_name = artifact_name
        self.artifact_type = ArtifactType[artifact_type]

    def get_artifact_type(self) -> str:
        """Gets artifact type of instance

        :return: Artifact type
        """
        return self.artifact_type.name

    def get_artifact_path(self) -> tuple:
        """Gets artifact path of instance

        :return: (Artifact path, Artifact name) ->
            ( {artifact_type}, {artifact_name} )
        """
        artifact_type = self.get_artifact_type()
        artifact_path = Path(artifact_type)
        return artifact_path, Path(self.artifact_name)

    def get_artifact_info(self) -> dict:
        """Gets artifact info of instance

        :return: Artifact info (Attributes of instance)
        """
        artifact_type: str = self.get_artifact_type()
        return {"artifact_name": self.artifact_name, "artifact_type": artifact_type}

    @staticmethod
    def is_valid_artifact_name(artifact_name: str) -> bool:
        """Checks if artifact name is not empty

        :param artifact_name: Name of artifact
        :return: Validation result
        """
        return bool(artifact_name)

    @staticmethod
    def is_valid_artifact_type(artifact_type: str) -> bool:
        """Checks if artifact type exists in ArtifactType

        :param artifact_type: Type of artifact
        :return: Validation result
        """
        return artifact_type in ArtifactType.__members__
class Task:
    """
    Stores information about a task
    """

    def __init__(self, task_uuid_str: str = "") -> None:
        """Initialises a Task object

        :param task_uuid_str: UUID string of task
        :raises:
            TaskInvalidUUID: Invalid task UUID
        """
        if not Task.is_valid_uuid(task_uuid_str):
            raise TaskInvalidUUID("Invalid Task UUID", task_uuid_str)
        self.task_uuid = UUID(task_uuid_str)
        self.artifacts: list = []

    def get_artifact(self, artifact_info: dict) -> "Union[Artifact, None]":
        """Return the first stored artifact whose info matches, or None.

        :param artifact_info: Artifact info
        :return: The artifact matched or None
        """
        matches = (candidate for candidate in self.artifacts
                   if candidate.get_artifact_info() == artifact_info)
        return next(matches, None)

    def is_artifact_exists(self, artifact_info: dict) -> bool:
        """Report whether an artifact with the given info is stored here.

        :param artifact_info: Artifact info
        :return: Whether the artifact exists
        """
        return self.get_artifact(artifact_info) is not None

    def add_artifact(self, artifact: "Artifact") -> None:
        """Add an artifact to this task, rejecting duplicates.

        :param artifact: The artifact to add to instance
        :raises:
            ArtifactExistsError: Artifact exists already
        """
        info = artifact.get_artifact_info()
        if self.is_artifact_exists(info):
            raise ArtifactExistsError("Artifact exists already", info)
        self.artifacts.append(artifact)

    def get_task_uuid(self) -> str:
        """Return this task's UUID as a string."""
        return str(self.task_uuid)

    def get_task_path(self) -> Path:
        """Return this task's relative path (its UUID as a directory name)."""
        return Path(self.get_task_uuid())

    def _get_artifacts(self) -> list:
        """Return the info dicts of every artifact stored on this task."""
        return [stored.get_artifact_info() for stored in self.artifacts]

    def get_task_info(self) -> dict:
        """Return this task's attributes as a plain dict."""
        return {
            "task_uuid": self.get_task_uuid(),
            "artifacts": self._get_artifacts(),
        }

    @staticmethod
    def is_valid_uuid(uuid_str: str) -> bool:
        """Report whether the given string parses as a UUID.

        :param uuid_str: UUID of task
        :return: Validation result
        """
        try:
            UUID(uuid_str)
        except ValueError:
            return False
        return True
class Storage:
    """
    Manages the file system and maintains a structure with
    Task and Artifact objects

    Structure:
    Storage
    |__ Task
        |__ Artifact
        |__ Artifact
    |__ Task
    """

    def __init__(self, data_dir: Path) -> None:
        """Initialises a Storage object

        If data_dir already exists on disk, the in-memory structure is
        rebuilt from its contents; otherwise the directory is created.

        :param data_dir: Data directory for storage
        :raises:
            DirectoryExistsError: Directory exists already
            PermissionError: Insufficient permission to create directory
        """
        self.data_dir = data_dir
        self.tasks: List[Task] = []
        if self.data_dir.exists():
            # NOTE(review): despite the docstring, an existing directory is
            # restored rather than rejected — confirm intended behavior.
            log.info("Existing directory found. Attempting to restore Storage.")
            self._restore_storage()
        else:
            try:
                self.data_dir.mkdir(parents=True, exist_ok=True)
            except PermissionError as e:
                raise e

    def get_task(self, task_uuid: str) -> Union[Task, None]:
        """Gets task of instance matching the task UUID

        :param task_uuid: Task UUID
        :return: The task matched or None
        """
        for t in self.tasks:
            if t.get_task_uuid() == task_uuid:
                return t
        return None

    def is_task_uuid_exists(self, task_uuid: str) -> bool:
        """Checks if task exists in instance based on task UUID

        :param task_uuid: Task UUID
        :return: Whether the task exists
        """
        task: Union[Task, None] = self.get_task(task_uuid)
        if task is not None:
            return True
        return False

    def add_task(self, task: Task) -> None:
        """Adds task to instance if it is does not exist already

        :param task: The task to add to instance
        :raises:
            TaskExistsError: Task exists already
        """
        task_uuid: str = task.get_task_uuid()
        if self.is_task_uuid_exists(task_uuid):
            raise TaskExistsError("Task exists already", task.get_task_uuid())
        self.tasks.append(task)

    def add_task_artifact(self, task_uuid: str, artifact: Artifact) -> Path:
        """Adds artifact to a task that exists in instance already
        and provides path to store in file system

        :param task_uuid: UUID of task to add to instance
        :param artifact: Artifact to store in instance
        :returns: Artifact path to store in file system
        :raises:
            TaskNotFoundError: Task does not exist in instance
            PermissionError: Insufficient permission to create directory
        """
        task: Union[Task, None] = self.get_task(task_uuid)
        if task is None:
            storage_path = str(self.get_storage_path())
            raise TaskNotFoundError(
                f"Task does not exists in {storage_path}", task_uuid
            )
        # Register in memory first; raises ArtifactExistsError on duplicates.
        task.add_artifact(artifact)
        artifact_type_path, artifact_name = artifact.get_artifact_path()
        # On-disk layout: {data_dir}/{task_uuid}/{artifact_type}/{name}
        artifact_path: Path = (
            self.get_storage_path() / task.get_task_path() / artifact_type_path
        )
        if not artifact_path.exists():
            try:
                artifact_path.mkdir(parents=True, exist_ok=True)
            except PermissionError as e:
                raise e
        artifact_full_path: Path = artifact_path / artifact_name
        return artifact_full_path

    def get_task_artifact_path(self, task_uuid: str, artifact: Artifact) -> Path:
        """Gets path to where artifact of task in instance is stored on file system

        :param task_uuid: UUID of task to add to instance
        :param artifact: Artifact to store in instance
        :returns: Artifact path where it is store on file system
        :raises:
            TaskNotFoundError: Task does not exist in instance
            ArtifactNotFoundError: Artifact does not exist in instance
        """
        task: Union[Task, None] = self.get_task(task_uuid)
        if task is None:
            storage_path = str(self.get_storage_path())
            raise TaskNotFoundError(
                f"Task does not exists in {storage_path}", task_uuid
            )
        artifact_info = artifact.get_artifact_info()
        task_artifact: Union[Artifact, None] = task.get_artifact(artifact_info)
        if task_artifact is None:
            raise ArtifactNotFoundError(
                f"Artifact does not exist in {task_uuid}", artifact_info
            )
        artifact_type_path, artifact_name = task_artifact.get_artifact_path()
        artifact_path: Path = (
            self.get_storage_path()
            / task.get_task_path()
            / artifact_type_path
            / artifact_name
        )
        return artifact_path

    def get_storage_path(self) -> Path:
        """Gets path of instance in file system

        :return: The data directory of instance
        """
        return self.data_dir

    def get_storage_info(self) -> dict:
        """Gets storage info of instance

        :returns: Storage info (Attributes of instance)
        """
        data_dir: str = str(self.get_storage_path())
        tasks = [task.get_task_info() for task in self.tasks]
        return {"data_dir": data_dir, "tasks": tasks}

    def _restore_storage(self) -> None:
        """Restores instance from files and folders in self.data_dir

        Walks every path under data_dir; the first path component is taken
        as a task UUID, and three-component paths
        ({uuid}/{artifact_type}/{name}) are registered as artifacts.
        Malformed entries are logged and skipped, never raised.
        """
        file_paths = self.data_dir.rglob("*")
        for fpath in file_paths:
            pruned_fpath = fpath.relative_to(self.data_dir)
            fpath_parts = pruned_fpath.parts
            num_of_parts = len(fpath_parts)
            if num_of_parts > 3:
                log.warning(
                    f"Unknown folder structure. Skipping reconstruction of {pruned_fpath}."
                )
                continue
            try:
                task_uuid_str = fpath_parts[0]
                task = Task(task_uuid_str)
                self.add_task(task)
            except TaskInvalidUUID:
                log.warning(
                    f"Invalid task UUID: {task_uuid_str}. "
                    + f"Skipping reconstruction of {pruned_fpath}."
                )
                continue
            except TaskExistsError:
                # Same task seen via an earlier path; keep the existing one.
                pass
            if num_of_parts == 3:
                _, artifact_type, artifact_name = fpath_parts
                try:
                    artifact = Artifact(artifact_name, artifact_type)
                except ArtifactInvalidName:
                    log.warning(
                        f"Invalid artifact name: {artifact_name}. "
                        + f"Skipping reconstruction of {pruned_fpath}."
                    )
                    continue
                except ArtifactInvalidType:
                    log.warning(
                        f"Invalid artifact type: {artifact_type}. "
                        + f"Skipping reconstruction of {pruned_fpath}."
                    )
                    continue
                else:
                    self.add_task_artifact(task_uuid_str, artifact)
class Session:
    """Provides a session to interact with storage and manage the file system

    Intended to be used as a context manager: on entry the task/artifact
    are registered with the storage and a binary write handle is opened;
    on exit the handle is closed.
    """

    def __init__(self, storage: "Storage", task: "Task", artifact: "Artifact"):
        self.storage = storage
        self.task = task
        self.artifact = artifact
        # Set by setup(); using write()/close() before setup() raises
        # AttributeError.
        self.file_io: BinaryIO

    def __enter__(self):
        self.setup()
        return self

    def __exit__(self, *exc):
        self.close()

    def setup(self):
        """Add self.task to self.storage and self.artifact to that task
        and open Binary IO to that artifact path."""
        try:
            self.storage.add_task(self.task)
        except TaskExistsError:
            # The task may already be registered; reuse it.
            pass
        task_uuid = self.task.get_task_uuid()
        artifact_path = self.storage.add_task_artifact(task_uuid, self.artifact)
        self.file_io = open(artifact_path, "wb")

    def write(self, data: bytes):
        """Write data to self.file_io

        :param data: Data to be written to artifact
        """
        # An AttributeError here means setup() was never called; it
        # propagates unchanged (the previous `except AttributeError: raise`
        # was a no-op and has been removed).
        self.file_io.write(data)

    def close(self):
        """Close self.file_io"""
        self.file_io.close()
| 33.465721 | 91 | 0.602501 |
59f3acfcaaa367c86f272516284c4b16a5edfa1d | 1,599 | py | Python | flask_with_spark/user/form.py | todhm/have_you_read_this_book | 905cb1934bafc987b76b6e57dbc63285f491ac88 | [
"MIT"
] | 2 | 2019-01-17T08:05:49.000Z | 2019-01-17T08:06:00.000Z | flask_with_spark/user/form.py | todhm/have_you_read_this_book | 905cb1934bafc987b76b6e57dbc63285f491ac88 | [
"MIT"
] | 8 | 2020-03-24T15:53:34.000Z | 2021-04-30T20:40:38.000Z | flask_with_spark/user/form.py | todhm/have_you_read_this_book | 905cb1934bafc987b76b6e57dbc63285f491ac88 | [
"MIT"
] | null | null | null | import bcrypt
from flask_wtf import FlaskForm
from wtforms import validators, StringField, PasswordField
from wtforms.validators import ValidationError
from wtforms.fields.html5 import EmailField
from user.models import User
duplicate_error = "Email is already in use"
class SignUpForm(FlaskForm):
    """Registration form: email, username and a double-entered password."""
    email = EmailField('Email address', [
        validators.DataRequired(),
        validators.Email()
    ])
    username = StringField('Username', [
        validators.DataRequired(),
        validators.Length(min=4, max=25)
    ])
    # Must match `confirm` and be 4-80 characters long.
    password = PasswordField('New Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match'),
        validators.Length(min=4, max=80)
    ])
    confirm = PasswordField('Repeat Password')

    # wtforms invokes validate_<fieldname> automatically during
    # form.validate(); the first parameter is the form instance
    # (conventionally named `self`, here `form`).
    def validate_email(form, field):
        # Reject registration when the email is already taken.
        if User.objects.filter(email=field.data).first():
            raise ValidationError(duplicate_error)
class LoginForm(FlaskForm):
    """Login form: email and password, both checked against the database."""
    email = StringField('Email', [
        validators.DataRequired(),
        validators.Length(min=4, max=25)
    ])
    password = PasswordField('Password', [
        validators.DataRequired(),
        validators.Length(min=4, max=80)
    ])

    # wtforms invokes validate_<fieldname> automatically during
    # form.validate().
    # NOTE(review): this hook also verifies the password, so a wrong
    # password is reported as an error on the email field — confirm this
    # is the intended UX.
    def validate_email(form, field):
        user = User.objects.filter(email=field.data).first()
        if not user:
            raise ValidationError("Email does not exists")
        # bcrypt.hashpw with the stored hash as salt reproduces the stored
        # hash only when the plaintext password matches.
        if bcrypt.hashpw(form.password.data.encode(), user.password.encode())!= user.password.encode():
            raise ValidationError("Invalid Password")
7118ebf098b69be17cbac836b259787b94d247cd | 23,765 | py | Python | responsibleai/responsibleai/modelanalysis/model_analysis.py | hrisheekeshr/responsible-ai-widgets | eac619cdf9a77409210402e2da961254b1783939 | [
"MIT"
] | null | null | null | responsibleai/responsibleai/modelanalysis/model_analysis.py | hrisheekeshr/responsible-ai-widgets | eac619cdf9a77409210402e2da961254b1783939 | [
"MIT"
] | null | null | null | responsibleai/responsibleai/modelanalysis/model_analysis.py | hrisheekeshr/responsible-ai-widgets | eac619cdf9a77409210402e2da961254b1783939 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Defines the ModelAnalysis class."""
import json
import pandas as pd
import numpy as np
import pickle
from pathlib import Path
from responsibleai._internal.constants import\
ManagerNames, Metadata, SKLearn
from responsibleai._managers.causal_manager import CausalManager
from responsibleai._managers.counterfactual_manager import (
CounterfactualManager)
from responsibleai._managers.error_analysis_manager import ErrorAnalysisManager
from responsibleai._managers.explainer_manager import ExplainerManager
from responsibleai._interfaces import ModelAnalysisData, Dataset
from responsibleai._input_processing import _convert_to_list
from responsibleai.modelanalysis.constants import ModelTask
from responsibleai.exceptions import UserConfigValidationException
_DTYPES = 'dtypes'
_TRAIN = 'train'
_TEST = 'test'
_TARGET_COLUMN = 'target_column'
_TASK_TYPE = 'task_type'
_MODEL = Metadata.MODEL
_MODEL_PKL = _MODEL + '.pkl'
_SERIALIZER = 'serializer'
_CLASSES = 'classes'
_MANAGERS = 'managers'
_CATEGORICAL_FEATURES = 'categorical_features'
_TRAN_LABELS = 'train_labels'
_META_JSON = Metadata.META_JSON
class ModelAnalysis(object):
"""Defines the top-level Model Analysis API.
Use ModelAnalysis to analyze errors, explain the most important
features, compute counterfactuals and run causal analysis in a
single API.
:param model: The model to compute RAI insights for.
A model that implements sklearn.predict or sklearn.predict_proba
or function that accepts a 2d ndarray.
:type model: object
:param train: The training dataset including the label column.
:type train: pandas.DataFrame
:param test: The test dataset including the label column.
:type test: pandas.DataFrame
:param target_column: The name of the label column.
:type target_column: str
:param task_type: The task to run, can be `classification` or
`regression`.
:type task_type: str
:param categorical_features: The categorical feature names.
:type categorical_features: list[str]
:param train_labels: The class labels in the training dataset
:type train_labels: ndarray
:param serializer: Picklable custom serializer with save and load
methods defined for model that is not serializable. The save
method returns a dictionary state and load method returns the model.
:type serializer: object
"""
    def __init__(self, model, train, test, target_column,
                 task_type, categorical_features=None, train_labels=None,
                 serializer=None):
        """Defines the top-level Model Analysis API.

        Use ModelAnalysis to analyze errors, explain the most important
        features, compute counterfactuals and run causal analysis in a
        single API.

        :param model: The model to compute RAI insights for.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param train: The training dataset including the label column.
        :type train: pandas.DataFrame
        :param test: The test dataset including the label column.
        :type test: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run, can be `classification` or
            `regression`.
        :type task_type: str
        :param categorical_features: The categorical feature names.
        :type categorical_features: list[str]
        :param train_labels: The class labels in the training dataset
        :type train_labels: ndarray
        :param serializer: Picklable custom serializer with save and load
            methods defined for model that is not serializable. The save
            method returns a dictionary state and load method returns the
            model.
        :type serializer: object
        """
        # Fail fast on inconsistent inputs before any manager is built.
        self._validate_model_analysis_input_parameters(
            model=model, train=train, test=test,
            target_column=target_column, task_type=task_type,
            categorical_features=categorical_features,
            train_labels=train_labels,
            serializer=serializer)
        self.model = model
        self.train = train
        self.test = test
        self.target_column = target_column
        self.task_type = task_type
        self.categorical_features = categorical_features
        self._serializer = serializer
        self._train_labels = train_labels
        # None for regression; explicit train_labels win over the unique
        # values found in the training target.
        self._classes = ModelAnalysis._get_classes(
            task_type=self.task_type,
            train=self.train,
            target_column=self.target_column,
            train_labels=self._train_labels
        )
        self._causal_manager = CausalManager(
            train, test, target_column, task_type, categorical_features)
        self._counterfactual_manager = CounterfactualManager(
            model=model, train=train, test=test,
            target_column=target_column, task_type=task_type,
            categorical_features=categorical_features)
        self._error_analysis_manager = ErrorAnalysisManager(
            model, test, target_column,
            categorical_features)
        self._explainer_manager = ExplainerManager(
            model, train, test,
            target_column,
            self._classes,
            categorical_features=categorical_features)
        # Order matters only for display; compute()/list()/get() iterate it.
        self._managers = [self._causal_manager,
                          self._counterfactual_manager,
                          self._error_analysis_manager,
                          self._explainer_manager]
@staticmethod
def _get_classes(task_type, train, target_column, train_labels):
if task_type == ModelTask.CLASSIFICATION:
if train_labels is None:
return train[target_column].unique()
else:
return train_labels
else:
return None
    def _validate_model_analysis_input_parameters(
            self, model, train, test, target_column,
            task_type, categorical_features=None, train_labels=None,
            serializer=None):
        """
        Validate the inputs for ModelAnalysis class.

        :param model: The model to compute RAI insights for.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param train: The training dataset including the label column.
        :type train: pandas.DataFrame
        :param test: The test dataset including the label column.
        :type test: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run, can be `classification` or
            `regression`.
        :type task_type: str
        :param categorical_features: The categorical feature names.
        :type categorical_features: list[str]
        :param train_labels: The class labels in the training dataset
        :type train_labels: ndarray
        :param serializer: Picklable custom serializer with save and load
            methods defined for model that is not serializable. The save
            method returns a dictionary state and load method returns the
            model.
        :type serializer: object
        """
        # --- task type must be one of the two supported values ---
        if task_type != ModelTask.CLASSIFICATION and \
                task_type != ModelTask.REGRESSION:
            raise UserConfigValidationException(
                'Unsupported task type. Should be one of {0} or {1}'.format(
                    ModelTask.CLASSIFICATION, ModelTask.REGRESSION)
            )

        # --- custom serializer must expose save/load and be picklable ---
        if serializer is not None:
            if not hasattr(serializer, 'save'):
                raise UserConfigValidationException(
                    'The serializer does not implement save()')

            if not hasattr(serializer, 'load'):
                raise UserConfigValidationException(
                    'The serializer does not implement load()')

            try:
                pickle.dumps(serializer)
            except Exception:
                raise UserConfigValidationException(
                    'The serializer should be serializable via pickle')

        # --- dataset checks apply only when both splits are DataFrames ---
        if isinstance(train, pd.DataFrame) and isinstance(test, pd.DataFrame):
            # Column sets must match in both directions. (The second
            # comparison relies on truthiness of a non-zero length.)
            if len(set(train.columns) - set(test.columns)) != 0 or \
                    len(set(test.columns) - set(train.columns)):
                raise UserConfigValidationException(
                    'The features in train and test data do not match')

            if target_column not in list(train.columns) or \
                    target_column not in list(test.columns):
                raise UserConfigValidationException(
                    'Target name {0} not present in train/test data'.format(
                        target_column)
                )

            # The target may not be listed as categorical, and every
            # categorical feature must exist in the training data.
            if categorical_features is not None and \
                    len(categorical_features) > 0:
                if target_column in categorical_features:
                    raise UserConfigValidationException(
                        'Found target name {0} in '
                        'categorical feature list'.format(
                            target_column)
                    )

                if not set(categorical_features).issubset(set(train.columns)):
                    raise UserConfigValidationException(
                        'Found some feature names in categorical feature which'
                        ' do not occur in train data'
                    )

            # Explicit train_labels must exactly cover the distinct target
            # values in both splits (classification only).
            if train_labels is not None and task_type == \
                    ModelTask.CLASSIFICATION:
                if len(set(train[target_column].unique()) -
                       set(train_labels)) != 0 or \
                        len(set(train_labels) -
                            set(train[target_column].unique())) != 0:
                    raise UserConfigValidationException(
                        'The train labels and distinct values in '
                        'target (train data) do not match')

                if len(set(test[target_column].unique()) -
                       set(train_labels)) != 0 or \
                        len(set(train_labels) -
                            set(test[target_column].unique())) != 0:
                    raise UserConfigValidationException(
                        'The train labels and distinct values in '
                        'target (test data) do not match')

            # --- smoke-test the model on a single row of each split ---
            if model is not None:
                # Run predict() of the model
                try:
                    small_train_data = train.iloc[0:1].drop(
                        [target_column], axis=1)
                    small_test_data = test.iloc[0:1].drop(
                        [target_column], axis=1)
                    model.predict(small_train_data)
                    model.predict(small_test_data)
                except Exception:
                    raise UserConfigValidationException(
                        'The model passed cannot be used for'
                        ' getting predictions via predict()'
                    )

                # Run predict_proba() of the model
                if task_type == ModelTask.CLASSIFICATION:
                    try:
                        small_train_data = train.iloc[0:1].drop(
                            [target_column], axis=1)
                        small_test_data = test.iloc[0:1].drop(
                            [target_column], axis=1)
                        model.predict_proba(small_train_data)
                        model.predict_proba(small_test_data)
                    except Exception:
                        raise UserConfigValidationException(
                            'The model passed cannot be used for'
                            ' getting predictions via predict_proba()'
                        )
    # The four managers below are created in __init__ and exposed read-only.

    @property
    def causal(self) -> CausalManager:
        """Get the causal manager.

        :return: The causal manager.
        :rtype: CausalManager
        """
        return self._causal_manager

    @property
    def counterfactual(self) -> CounterfactualManager:
        """Get the counterfactual manager.

        :return: The counterfactual manager.
        :rtype: CounterfactualManager
        """
        return self._counterfactual_manager

    @property
    def error_analysis(self) -> ErrorAnalysisManager:
        """Get the error analysis manager.

        :return: The error analysis manager.
        :rtype: ErrorAnalysisManager
        """
        return self._error_analysis_manager

    @property
    def explainer(self) -> ExplainerManager:
        """Get the explainer manager.

        :return: The explainer manager.
        :rtype: ExplainerManager
        """
        return self._explainer_manager
def compute(self):
"""Calls compute on each of the managers."""
for manager in self._managers:
manager.compute()
def list(self):
"""List information about each of the managers.
:return: Information about each of the managers.
:rtype: dict
"""
configs = {}
for manager in self._managers:
configs[manager.name] = manager.list()
return configs
def get(self):
"""List information about each of the managers.
:return: Information about each of the managers.
:rtype: dict
"""
configs = {}
for manager in self._managers:
configs[manager.name] = manager.get()
return configs
    def get_data(self):
        """Get all data as ModelAnalysisData object

        Bundles the dashboard dataset together with the output of every
        manager into one serializable object.

        :return: Model Analysis Data
        :rtype: ModelAnalysisData
        """
        data = ModelAnalysisData()
        data.dataset = self._get_dataset()
        data.modelExplanationData = self.explainer.get_data()
        data.errorAnalysisConfig = self.error_analysis.get_data()
        data.causalAnalysisData = self.causal.get_data()
        data.counterfactualData = self.counterfactual.get_data()
        return data
    def _get_dataset(self):
        """Build the Dataset payload for the dashboard from self.test.

        Converts the test features, true labels, predictions and (for
        classification) class probabilities into plain lists, enforcing
        the dashboard's row/feature limits.
        """
        dashboard_dataset = Dataset()
        dashboard_dataset.task_type = self.task_type
        dashboard_dataset.class_names = _convert_to_list(
            self._classes)

        predicted_y = None
        feature_length = None

        # Features only: the label column is dropped from the test split.
        dataset: pd.DataFrame = self.test.drop(
            [self.target_column], axis=1)

        if isinstance(dataset, pd.DataFrame) and hasattr(dataset, 'columns'):
            self._dataframeColumns = dataset.columns
        try:
            list_dataset = _convert_to_list(dataset)
        except Exception as ex:
            raise ValueError(
                "Unsupported dataset type") from ex
        if dataset is not None and self.model is not None:
            try:
                predicted_y = self.model.predict(dataset)
            except Exception as ex:
                # NOTE(review): missing space between the concatenated
                # fragments — renders as "...for givendataset type".
                msg = ("Model does not support predict method for given"
                       "dataset type")
                raise ValueError(msg) from ex
            try:
                predicted_y = _convert_to_list(predicted_y)
            except Exception as ex:
                raise ValueError(
                    "Model prediction output of unsupported type,") from ex
        if predicted_y is not None:
            # Classification predictions are mapped to class indices.
            if(self.task_type == "classification" and
                    dashboard_dataset.class_names is not None):
                predicted_y = [dashboard_dataset.class_names.index(
                    y) for y in predicted_y]
            dashboard_dataset.predicted_y = predicted_y
        row_length = 0

        if list_dataset is not None:
            row_length, feature_length = np.shape(list_dataset)
            if row_length > 100000:
                # NOTE(review): missing space — renders as
                # "...rowsfor visualization (100000)".
                raise ValueError(
                    "Exceeds maximum number of rows"
                    "for visualization (100000)")
            if feature_length > 1000:
                raise ValueError("Exceeds maximum number of features for"
                                 " visualization (1000). Please regenerate the"
                                 " explanation using fewer features or"
                                 " initialize the dashboard without passing a"
                                 " dataset.")
            dashboard_dataset.features = list_dataset

        true_y = self.test[self.target_column]

        if true_y is not None and len(true_y) == row_length:
            # Same class-index mapping as predictions, for consistency.
            if(self.task_type == "classification" and
                    dashboard_dataset.class_names is not None):
                true_y = [dashboard_dataset.class_names.index(
                    y) for y in true_y]
            dashboard_dataset.true_y = _convert_to_list(true_y)

        features = dataset.columns

        if features is not None:
            features = _convert_to_list(features)
            if feature_length is not None and len(features) != feature_length:
                raise ValueError("Feature vector length mismatch:"
                                 " feature names length differs"
                                 " from local explanations dimension")
            dashboard_dataset.feature_names = features
        dashboard_dataset.target_column = self.target_column
        # Probabilities are only attempted when the model exposes
        # predict_proba (classification models).
        if (self.model is not None and
                hasattr(self.model, SKLearn.PREDICT_PROBA) and
                self.model.predict_proba is not None and
                dataset is not None):
            try:
                probability_y = self.model.predict_proba(dataset)
            except Exception as ex:
                raise ValueError("Model does not support predict_proba method"
                                 " for given dataset type,") from ex
            try:
                probability_y = _convert_to_list(probability_y)
            except Exception as ex:
                raise ValueError(
                    "Model predict_proba output of unsupported type,") from ex
            dashboard_dataset.probability_y = probability_y

        return dashboard_dataset
def _write_to_file(self, file_path, content):
"""Save the string content to the given file path.
:param file_path: The file path to save the content to.
:type file_path: str
:param content: The string content to save.
:type content: str
"""
with open(file_path, 'w') as file:
file.write(content)
    def save(self, path):
        """Save the ModelAnalysis to the given path.

        Writes each manager's state, the train/test frames (as JSON plus a
        dtype map so load() can restore exact column types), a metadata
        JSON, and the model (via the custom serializer when supplied,
        otherwise pickle).

        :param path: The directory path to save the ModelAnalysis to.
        :type path: str
        """
        top_dir = Path(path)
        # save each of the individual managers
        for manager in self._managers:
            manager._save(top_dir / manager.name)
        # save current state
        dtypes = self.train.dtypes.astype(str).to_dict()
        self._write_to_file(top_dir / (_TRAIN + _DTYPES),
                            json.dumps(dtypes))
        self._write_to_file(top_dir / _TRAIN, self.train.to_json())
        dtypes = self.test.dtypes.astype(str).to_dict()
        self._write_to_file(top_dir / (_TEST + _DTYPES),
                            json.dumps(dtypes))
        self._write_to_file(top_dir / _TEST, self.test.to_json())
        meta = {
            _TARGET_COLUMN: self.target_column,
            _TASK_TYPE: self.task_type,
            _CATEGORICAL_FEATURES: self.categorical_features,
            _TRAN_LABELS: self._train_labels
        }
        with open(top_dir / _META_JSON, 'w') as file:
            json.dump(meta, file)
        if self._serializer is not None:
            # NOTE(review): serializer.save() output is written via
            # _write_to_file, which writes text — confirm serializers
            # return str, not bytes.
            model_data = self._serializer.save(self.model)
            # save the serializer
            with open(top_dir / _SERIALIZER, 'wb') as file:
                pickle.dump(self._serializer, file)
            # save the model
            self._write_to_file(top_dir / _MODEL_PKL, model_data)
        else:
            # Without a serializer the model must round-trip through pickle.
            has_setstate = hasattr(self.model, '__setstate__')
            has_getstate = hasattr(self.model, '__getstate__')
            if not (has_setstate and has_getstate):
                raise ValueError(
                    "Model must be picklable or a custom serializer must"
                    " be specified")
            with open(top_dir / _MODEL_PKL, 'wb') as file:
                pickle.dump(self.model, file)
@staticmethod
def load(path):
"""Load the ModelAnalysis from the given path.
:param path: The directory path to load the ModelAnalysis from.
:type path: str
"""
# create the ModelAnalysis without any properties using the __new__
# function, similar to pickle
inst = ModelAnalysis.__new__(ModelAnalysis)
top_dir = Path(path)
# load current state
with open(top_dir / (_TRAIN + _DTYPES), 'r') as file:
types = json.load(file)
with open(top_dir / _TRAIN, 'r') as file:
train = pd.read_json(file, dtype=types)
inst.__dict__[_TRAIN] = train
with open(top_dir / (_TEST + _DTYPES), 'r') as file:
types = json.load(file)
with open(top_dir / _TEST, 'r') as file:
test = pd.read_json(file, dtype=types)
inst.__dict__[_TEST] = test
with open(top_dir / _META_JSON, 'r') as meta_file:
meta = meta_file.read()
meta = json.loads(meta)
inst.__dict__[_TARGET_COLUMN] = meta[_TARGET_COLUMN]
inst.__dict__[_TASK_TYPE] = meta[_TASK_TYPE]
inst.__dict__[_CATEGORICAL_FEATURES] = meta[_CATEGORICAL_FEATURES]
inst.__dict__['_' + _TRAN_LABELS] = meta[_TRAN_LABELS]
inst.__dict__['_' + _CLASSES] = ModelAnalysis._get_classes(
task_type=meta[_TASK_TYPE],
train=train,
target_column=meta[_TARGET_COLUMN],
train_labels=meta[_TRAN_LABELS]
)
serializer_path = top_dir / _SERIALIZER
if serializer_path.exists():
with open(serializer_path) as file:
serializer = pickle.load(file)
inst.__dict__['_' + _SERIALIZER] = serializer
with open(top_dir / _MODEL_PKL, 'rb') as file:
inst.__dict__[_MODEL] = serializer.load(file)
else:
inst.__dict__['_' + _SERIALIZER] = None
with open(top_dir / _MODEL_PKL, 'rb') as file:
inst.__dict__[_MODEL] = pickle.load(file)
# load each of the individual managers
managers = []
cm_name = '_' + ManagerNames.CAUSAL + '_manager'
causal_dir = top_dir / ManagerNames.CAUSAL
causal_manager = CausalManager._load(causal_dir, inst)
inst.__dict__[cm_name] = causal_manager
managers.append(causal_manager)
cfm_name = '_' + ManagerNames.COUNTERFACTUAL + '_manager'
cf_dir = top_dir / ManagerNames.COUNTERFACTUAL
counterfactual_manager = CounterfactualManager._load(cf_dir, inst)
inst.__dict__[cfm_name] = counterfactual_manager
managers.append(counterfactual_manager)
eam_name = '_' + ManagerNames.ERROR_ANALYSIS + '_manager'
ea_dir = top_dir / ManagerNames.ERROR_ANALYSIS
error_analysis_manager = ErrorAnalysisManager._load(ea_dir, inst)
inst.__dict__[eam_name] = error_analysis_manager
exm_name = '_' + ManagerNames.EXPLAINER + '_manager'
exp_dir = top_dir / ManagerNames.EXPLAINER
explainer_manager = ExplainerManager._load(exp_dir, inst)
inst.__dict__[exm_name] = explainer_manager
managers.append(explainer_manager)
inst.__dict__['_' + _MANAGERS] = managers
return inst
| 41.619965 | 79 | 0.606648 |
27a431bf2a9e5c5b54b972af83ad74e0da481078 | 7,725 | py | Python | byceps/blueprints/admin/shop/storefront/views.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | null | null | null | byceps/blueprints/admin/shop/storefront/views.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | null | null | null | byceps/blueprints/admin/shop/storefront/views.py | GSH-LAN/byceps | ab8918634e90aaa8574bd1bb85627759cef122fe | [
"BSD-3-Clause"
] | null | null | null | """
byceps.blueprints.admin.shop.storefront.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, request
from flask_babel import gettext
from .....permissions.shop import ShopPermission
from .....services.brand import service as brand_service
from .....services.shop.catalog import service as catalog_service
from .....services.shop.order import sequence_service as order_sequence_service
from .....services.shop.shop import service as shop_service
from .....services.shop.storefront import service as storefront_service
from .....util.framework.blueprint import create_blueprint
from .....util.framework.flash import flash_error, flash_success
from .....util.framework.templating import templated
from .....util.views import permission_required, redirect_to
from .forms import StorefrontCreateForm, StorefrontUpdateForm
blueprint = create_blueprint('shop_storefront_admin', __name__)
@blueprint.get('/for_shop/<shop_id>')
@permission_required(ShopPermission.view)
@templated
def index_for_shop(shop_id):
    """List storefronts for that shop."""
    shop = _get_shop_or_404(shop_id)
    brand = brand_service.get_brand(shop.brand_id)
    storefronts = storefront_service.get_storefronts_for_shop(shop.id)
    prefixes_by_sequence_id = _get_order_number_prefixes_by_sequence_id(
        storefronts, shop.id
    )
    return {
        'shop': shop,
        'brand': brand,
        'storefronts': storefronts,
        'order_number_prefixes_by_sequence_id': prefixes_by_sequence_id,
    }
def _get_order_number_prefixes_by_sequence_id(storefronts, shop_id):
    """Map order number sequence ID to its prefix for the sequences that
    are referenced by the given storefronts.
    """
    sequence_ids = {sf.order_number_sequence_id for sf in storefronts}
    sequences = order_sequence_service.get_order_number_sequences_for_shop(
        shop_id
    )
    # Previously `sequence_ids` was computed but never used; apply it so
    # only sequences actually referenced by the storefronts are returned.
    return {seq.id: seq.prefix for seq in sequences if seq.id in sequence_ids}
@blueprint.get('/<storefront_id>')
@permission_required(ShopPermission.view)
@templated
def view(storefront_id):
    """Show a single storefront."""
    storefront = _get_storefront_or_404(storefront_id)
    shop = shop_service.get_shop(storefront.shop_id)
    brand = brand_service.get_brand(shop.brand_id)
    sequence = order_sequence_service.get_order_number_sequence(
        storefront.order_number_sequence_id
    )
    return {
        'storefront': storefront,
        'shop': shop,
        'brand': brand,
        'order_number_prefix': sequence.prefix,
    }
@blueprint.get('/for_shop/<shop_id>/create')
@permission_required(ShopPermission.create)
@templated
def create_form(shop_id, erroneous_form=None):
    """Show form to create a storefront."""
    shop = _get_shop_or_404(shop_id)
    brand = brand_service.get_brand(shop.brand_id)
    catalogs = catalog_service.get_all_catalogs()
    sequences = (
        order_sequence_service.get_order_number_sequences_for_shop(shop.id)
    )
    if erroneous_form:
        form = erroneous_form
    else:
        form = StorefrontCreateForm()
    form.set_catalog_choices(catalogs)
    form.set_order_number_sequence_choices(sequences)
    return {
        'shop': shop,
        'brand': brand,
        'order_number_sequence_available': bool(sequences),
        'form': form,
    }
@blueprint.post('/for_shop/<shop_id>')
@permission_required(ShopPermission.create)
def create(shop_id):
    """Create a storefront."""
    shop = _get_shop_or_404(shop_id)
    form = StorefrontCreateForm(request.form)
    catalogs = catalog_service.get_all_catalogs()
    order_number_sequences = (
        order_sequence_service.get_order_number_sequences_for_shop(shop.id)
    )
    # A storefront cannot exist without an order number sequence.
    if not order_number_sequences:
        flash_error(
            gettext('No order number sequences are defined for this shop.')
        )
        return create_form(shop_id, form)
    form.set_catalog_choices(catalogs)
    form.set_order_number_sequence_choices(order_number_sequences)
    if not form.validate():
        return create_form(shop_id, form)
    storefront_id = form.id.data.strip()
    catalog_id = form.catalog_id.data or None
    order_number_sequence_id = form.order_number_sequence_id.data
    if not order_number_sequence_id:
        flash_error(gettext('No valid order number sequence was specified.'))
        return create_form(shop_id, form)
    order_number_sequence = order_sequence_service.get_order_number_sequence(
        order_number_sequence_id
    )
    # Reject sequences that belong to a different shop.
    if order_number_sequence.shop_id != shop.id:
        flash_error(gettext('No valid order number sequence was specified.'))
        return create_form(shop_id, form)
    try:
        # NOTE(review): the generated number is never used afterwards, so
        # this appears to be a pre-flight check of the sequence that also
        # consumes one value of it -- confirm that this is intentional.
        item_number = order_sequence_service.generate_order_number(
            order_number_sequence.id
        )
    except order_sequence_service.OrderNumberGenerationFailed as e:
        abort(500, e.message)
    storefront = storefront_service.create_storefront(
        storefront_id,
        shop.id,
        order_number_sequence.id,
        closed=False,
        catalog_id=catalog_id,
    )
    flash_success(
        gettext(
            'Storefront "%(storefront_id)s" has been created.',
            storefront_id=storefront.id,
        )
    )
    return redirect_to('.view', storefront_id=storefront.id)
@blueprint.get('/<storefront_id>/update')
@permission_required(ShopPermission.update)
@templated
def update_form(storefront_id, erroneous_form=None):
    """Show form to update a storefront."""
    storefront = _get_storefront_or_404(storefront_id)
    shop = shop_service.get_shop(storefront.shop_id)
    brand = brand_service.get_brand(shop.brand_id)
    catalogs = catalog_service.get_all_catalogs()
    sequences = (
        order_sequence_service.get_order_number_sequences_for_shop(shop.id)
    )
    if erroneous_form:
        form = erroneous_form
    else:
        form = StorefrontUpdateForm(obj=storefront)
    form.set_catalog_choices(catalogs)
    form.set_order_number_sequence_choices(sequences)
    return {
        'storefront': storefront,
        'shop': shop,
        'brand': brand,
        'form': form,
    }
@blueprint.post('/<storefront_id>')
@permission_required(ShopPermission.update)
def update(storefront_id):
    """Update a storefront."""
    storefront = _get_storefront_or_404(storefront_id)
    catalogs = catalog_service.get_all_catalogs()
    sequences = (
        order_sequence_service.get_order_number_sequences_for_shop(
            storefront.shop_id
        )
    )
    form = StorefrontUpdateForm(request.form)
    form.set_catalog_choices(catalogs)
    form.set_order_number_sequence_choices(sequences)
    if not form.validate():
        return update_form(storefront_id, form)
    storefront = storefront_service.update_storefront(
        storefront.id,
        form.catalog_id.data or None,
        form.order_number_sequence_id.data,
        form.closed.data,
    )
    flash_success(
        gettext(
            'Storefront "%(storefront_id)s" has been updated.',
            storefront_id=storefront.id,
        )
    )
    return redirect_to('.view', storefront_id=storefront.id)
def _get_shop_or_404(shop_id):
    """Return the shop with that ID, aborting with 404 if it is unknown."""
    found_shop = shop_service.find_shop(shop_id)
    if found_shop is None:
        abort(404)
    return found_shop
def _get_storefront_or_404(storefront_id):
    """Return the storefront with that ID, aborting with 404 if unknown."""
    found_storefront = storefront_service.find_storefront(storefront_id)
    if found_storefront is None:
        abort(404)
    return found_storefront
| 29.94186 | 85 | 0.723236 |
ce56027bba1dafd0566245479a05ec73c1dc496b | 2,603 | py | Python | ValAgents.py | Mister-SOSA/ValAgents | 06fcc47c24dc1ded6b8d79710f485c24d726ca76 | [
"MIT"
] | null | null | null | ValAgents.py | Mister-SOSA/ValAgents | 06fcc47c24dc1ded6b8d79710f485c24d726ca76 | [
"MIT"
] | null | null | null | ValAgents.py | Mister-SOSA/ValAgents | 06fcc47c24dc1ded6b8d79710f485c24d726ca76 | [
"MIT"
] | null | null | null | """ Dictionaries of all VALORANT agents with their respective UUIDs, categorized by role. """
# All playable agents, independent of role (name -> UUID).
list_all_agents = {
    "Jett" : "add6443a-41bd-e414-f6ad-e58d267f4e95",
    "Reyna" : "a3bfb853-43b2-7238-a4f1-ad90e9e46bcc",
    "Raze" : "f94c3b30-42be-e959-889c-5aa313dba261",
    "Yoru" : "7f94d92c-4234-0a36-9646-3a87eb8b5c89",
    "Phoenix" : "eb93336a-449b-9c1b-0a54-a891f7921d69",
    "Neon" : "bb2a4828-46eb-8cd1-e765-15848195d751",
    "Breach" : "5f8d3a7f-467b-97f3-062c-13acf203c006",
    "Skye" : "6f2a04ca-43e0-be17-7f36-b3908627744d",
    "Sova" : "320b2a48-4d9b-a075-30f1-1f93a9b638fa",
    "KAY/O" : "601dbbe7-43ce-be57-2a40-4abd24953621",
    "Killjoy" : "1e58de9c-4950-5125-93e9-a0aee9f98746",
    "Cypher" : "117ed9e3-49f3-6512-3ccf-0cada7e3823b",
    "Sage" : "569fdd95-4d10-43ab-ca70-79becc718b46",
    "Chamber" : "22697a3d-45bf-8dd7-4fec-84a9e28c69d7",
    "Omen" : "8e253930-4c05-31dd-1b6c-968525494517",
    "Brimstone" : "9f0d8ba9-4140-b941-57d3-a7ad57c6b417",
    "Astra" : "41fb69c1-4189-7b37-f117-bcaf1e96f1bf",
    "Viper" : "707eab51-4836-f488-046a-cda6bf494859"
}

# Per-role agent dictionaries.  (The original spelling 'deulists' is
# kept for backwards compatibility with existing callers.)
list_deulists = {
    "Jett" : "add6443a-41bd-e414-f6ad-e58d267f4e95",
    "Reyna" : "a3bfb853-43b2-7238-a4f1-ad90e9e46bcc",
    "Raze" : "f94c3b30-42be-e959-889c-5aa313dba261",
    "Yoru" : "7f94d92c-4234-0a36-9646-3a87eb8b5c89",
    "Phoenix" : "eb93336a-449b-9c1b-0a54-a891f7921d69",
    "Neon" : "bb2a4828-46eb-8cd1-e765-15848195d751"
}

list_initiators = {
    "Breach" : "5f8d3a7f-467b-97f3-062c-13acf203c006",
    "Skye" : "6f2a04ca-43e0-be17-7f36-b3908627744d",
    "Sova" : "320b2a48-4d9b-a075-30f1-1f93a9b638fa",
    "KAY/O" : "601dbbe7-43ce-be57-2a40-4abd24953621"
}

list_sentinels = {
    "Killjoy" : "1e58de9c-4950-5125-93e9-a0aee9f98746",
    "Cypher" : "117ed9e3-49f3-6512-3ccf-0cada7e3823b",
    "Sage" : "569fdd95-4d10-43ab-ca70-79becc718b46",
    "Chamber" : "22697a3d-45bf-8dd7-4fec-84a9e28c69d7"
}

list_controllers = {
    "Omen" : "8e253930-4c05-31dd-1b6c-968525494517",
    "Brimstone" : "9f0d8ba9-4140-b941-57d3-a7ad57c6b417",
    "Astra" : "41fb69c1-4189-7b37-f117-bcaf1e96f1bf",
    "Viper" : "707eab51-4836-f488-046a-cda6bf494859"
}

# Role name -> agent dictionary; table dispatch for returnAgents().
_AGENTS_BY_ROLE = {
    'deulists': list_deulists,
    'initiators': list_initiators,
    'sentinels': list_sentinels,
    'controllers': list_controllers,
}


def deulists():
    """Return the duelist agents (name -> UUID)."""
    return list_deulists


def initiators():
    """Return the initiator agents (name -> UUID)."""
    return list_initiators


def sentinels():
    """Return the sentinel agents (name -> UUID)."""
    return list_sentinels


def controllers():
    """Return the controller agents (name -> UUID)."""
    return list_controllers


def returnAgents(role):
    """Return the agent dictionary for *role*.

    :param role: one of 'deulists', 'initiators', 'sentinels',
        'controllers'
    :return: the matching name -> UUID dictionary, or None for an
        unknown role (same implicit fall-through behavior as the
        previous if-chain)
    """
    return _AGENTS_BY_ROLE.get(role)
| 34.706667 | 93 | 0.683058 |
bb05eaaa2175c553512274020610bb321a67949d | 1,651 | py | Python | examples/8-add-an-attribute-to-the-list.py | fossabot/lucid-dynamodb | 497726ff5c3929efa95512d41b86d0c9035dc463 | [
"MIT"
] | null | null | null | examples/8-add-an-attribute-to-the-list.py | fossabot/lucid-dynamodb | 497726ff5c3929efa95512d41b86d0c9035dc463 | [
"MIT"
] | null | null | null | examples/8-add-an-attribute-to-the-list.py | fossabot/lucid-dynamodb | 497726ff5c3929efa95512d41b86d0c9035dc463 | [
"MIT"
] | null | null | null | from LucidDynamodb import DynamoDb
import logging

logging.basicConfig(level=logging.INFO)

if __name__ == "__main__":
    db = DynamoDb()
    # Append a new location to the job item's `locations` list attribute.
    # BUGFIX: the try blocks now wrap the DynamoDB calls themselves; the
    # original code only wrapped the logging statements, so failures in
    # update_item()/read_item() were never caught by these handlers.
    try:
        db.update_item(
            table_name="dev_jobs",
            key={
                "company_name": "Google",
                "role_id": "111"
            },
            attributes_to_update={
                'locations': "Detroit, Michigan"
            },
            operation="ADD_ATTRIBUTE_TO_LIST"
        )
        logging.info("Update is successful")
    except Exception as e:
        logging.warning("Update failed - {}".format(e))
    # Read the item back to show the updated list.
    try:
        item = db.read_item(
            table_name="dev_jobs",
            key={
                "company_name": "Google",
                "role_id": "111"
            })
        logging.info("Item: {}".format(item))
    except Exception as e:
        logging.warning("Item doesn't exist - {}".format(e))
    """
dineshsonachalam@macbook examples % python 8-add-an-attribute-to-the-list.py
INFO:botocore.credentials:Found credentials in environment variables.
INFO:root:Update is successful
INFO:root:Item: {
    "locations": [
        "Mountain View, California",
        "Austin, Texas",
        "Chicago, IL",
        "Detroit, Michigan"
    ],
    "role_id": "111",
    "overall_review": {
        "compensation_and_benefits": "3.9/5",
        "overall_rating": "4/5",
        "yearly_bonus_percent": "12"
    },
    "company_name": "Google",
    "role": "Staff Software Engineer 2",
    "yearly_hike_percent": "13",
    "salary": "$1,50,531",
    "benefits": [
        "Internet, Medical, Edu reimbursements",
        "Health insurance",
        "Travel reimbursements"
    ]
}
"""
c7e548d994915f4ddba31bf9d9338d4d8a9988a6 | 5,107 | py | Python | tb_rest_client/models/models_ce/asset_search_query.py | maksonlee/python_tb_rest_client | a6cd17ef4de31f68c3226b7a9835292fbac4b1fa | [
"Apache-2.0"
] | 1 | 2021-07-19T10:09:04.000Z | 2021-07-19T10:09:04.000Z | tb_rest_client/models/models_ce/asset_search_query.py | moravcik94/python_tb_rest_client | 985361890cdf4ccce93d2b24905ad9003c8dfcaa | [
"Apache-2.0"
] | null | null | null | tb_rest_client/models/models_ce/asset_search_query.py | moravcik94/python_tb_rest_client | 985361890cdf4ccce93d2b24905ad9003c8dfcaa | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pprint
import re # noqa: F401
import six
class AssetSearchQuery(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Model describing a relations-based search query for assets.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'asset_types': 'list[str]',
        'parameters': 'RelationsSearchParameters',
        'relation_type': 'str'
    }

    attribute_map = {
        'asset_types': 'assetTypes',
        'parameters': 'parameters',
        'relation_type': 'relationType'
    }

    def __init__(self, asset_types=None, parameters=None, relation_type=None):  # noqa: E501
        """AssetSearchQuery - a model defined in Swagger"""  # noqa: E501
        # Private backing fields for the properties defined below.
        self._asset_types = None
        self._parameters = None
        self._relation_type = None
        self.discriminator = None
        # Only assign through the property setters when a value was given.
        if asset_types is not None:
            self.asset_types = asset_types
        if parameters is not None:
            self.parameters = parameters
        if relation_type is not None:
            self.relation_type = relation_type

    @property
    def asset_types(self):
        """Gets the asset_types of this AssetSearchQuery.  # noqa: E501

        :return: The asset_types of this AssetSearchQuery.  # noqa: E501
        :rtype: list[str]
        """
        return self._asset_types

    @asset_types.setter
    def asset_types(self, asset_types):
        """Sets the asset_types of this AssetSearchQuery.

        :param asset_types: The asset_types of this AssetSearchQuery.  # noqa: E501
        :type: list[str]
        """
        self._asset_types = asset_types

    @property
    def parameters(self):
        """Gets the parameters of this AssetSearchQuery.  # noqa: E501

        :return: The parameters of this AssetSearchQuery.  # noqa: E501
        :rtype: RelationsSearchParameters
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this AssetSearchQuery.

        :param parameters: The parameters of this AssetSearchQuery.  # noqa: E501
        :type: RelationsSearchParameters
        """
        self._parameters = parameters

    @property
    def relation_type(self):
        """Gets the relation_type of this AssetSearchQuery.  # noqa: E501

        :return: The relation_type of this AssetSearchQuery.  # noqa: E501
        :rtype: str
        """
        return self._relation_type

    @relation_type.setter
    def relation_type(self, relation_type):
        """Sets the relation_type of this AssetSearchQuery.

        :param relation_type: The relation_type of this AssetSearchQuery.  # noqa: E501
        :type: str
        """
        self._relation_type = relation_type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AssetSearchQuery, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AssetSearchQuery):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.218935 | 92 | 0.599178 |
85da958075f2eca75cc4731f708a8eb5eb3d400b | 97 | py | Python | MolsonFlooringSite/localdesign/apps.py | imxiaow/MolsonFlooringWeb2021 | f7bdf5d6f0acb1c3b7337e7de01625acbe8c8243 | [
"MIT"
] | 1 | 2021-01-09T00:17:49.000Z | 2021-01-09T00:17:49.000Z | MolsonFlooringSite/localdesign/apps.py | imxiaow/MolsonFlooringWeb2021 | f7bdf5d6f0acb1c3b7337e7de01625acbe8c8243 | [
"MIT"
] | null | null | null | MolsonFlooringSite/localdesign/apps.py | imxiaow/MolsonFlooringWeb2021 | f7bdf5d6f0acb1c3b7337e7de01625acbe8c8243 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class LocaldesignConfig(AppConfig):
    """Django application configuration for the ``localdesign`` app."""

    # Label under which Django registers this application.
    name = 'localdesign'
| 16.166667 | 35 | 0.773196 |
5a0010674760846e2a33d686bb0bbeedd59eeaae | 1,893 | py | Python | kubernetes/test/test_v1_replication_controller_condition.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_replication_controller_condition.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_replication_controller_condition.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1_replication_controller_condition import V1ReplicationControllerCondition # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1ReplicationControllerCondition(unittest.TestCase):
    """V1ReplicationControllerCondition unit test stubs"""

    # No fixtures are needed; the hooks are kept as no-ops so the
    # generated test scaffold stays uniform across model tests.
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a V1ReplicationControllerCondition test instance.

        ``include_optional`` is a boolean: when False only required
        params are included, when True both required and optional
        params are included.
        """
        # model = kubernetes.client.models.v1_replication_controller_condition.V1ReplicationControllerCondition()  # noqa: E501
        if include_optional :
            # All fields populated with placeholder values.
            return V1ReplicationControllerCondition(
                last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                message = '0',
                reason = '0',
                status = '0',
                type = '0'
            )
        else :
            # Only the required fields (status, type).
            return V1ReplicationControllerCondition(
                status = '0',
                type = '0',
            )

    def testV1ReplicationControllerCondition(self):
        """Smoke-test construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
| 32.084746 | 127 | 0.66878 |
ce4aaa1d18ccbf7dc790a23b7573d6e0950910f6 | 3,980 | py | Python | test/url2md.py | puppylpg/wechat_articles_spider | fecc263dd0cfc571882eb000f55d2716d56eb8e6 | [
"Apache-2.0"
] | 1,603 | 2018-03-05T03:01:28.000Z | 2022-03-31T05:30:51.000Z | test/url2md.py | puppylpg/wechat_articles_spider | fecc263dd0cfc571882eb000f55d2716d56eb8e6 | [
"Apache-2.0"
] | 42 | 2018-03-09T03:06:57.000Z | 2021-12-31T02:30:13.000Z | test/url2md.py | puppylpg/wechat_articles_spider | fecc263dd0cfc571882eb000f55d2716d56eb8e6 | [
"Apache-2.0"
] | 491 | 2018-03-05T03:22:31.000Z | 2022-03-30T10:10:59.000Z | # coding: utf-8
import requests
from bs4 import BeautifulSoup as bs
from bs4.element import NavigableString
import re
# NOTE: This file is for testing purposes only and may no longer work.
# (translated from the original Chinese comment)
path = "F:\\wechat_articles_spider\\test\\imgs\\{}"
# Tag names whose children are recursively parsed for content.
parse_lst = ["article", "a"]
# Tag names that are serialized verbatim via str().
str_lst = ["hr", "span", "ul"]
def extract_text(p):
    # TODO: unimplemented stub -- never called anywhere in this script.
    pass
def download_img(url, name):
    """Download the image at *url* and store it as *name* under `path`."""
    img_bytes = requests.get(url).content
    with open(path.format(name), "wb") as out:
        out.write(img_bytes)
def parse_section(sections):
    """Recursively serialize a list of BeautifulSoup nodes to markdown-ish
    text.

    :param sections: iterable of bs4 nodes (Tags or text nodes).
    :return: the concatenated textual content.
    """
    content = ""
    # NOTE: kept global on purpose -- the module-level code below reads
    # the loop variable leaked by this function; do not remove.
    global section
    for section in sections:
        if section.name == None:
            # Bare text node.
            content += section
        elif section.name == "section":
            # BUGFIX: this branch previously referenced the unrelated
            # module-level name `item` instead of `section`, and appended
            # `section_str` twice.
            section_str = str(section)
            for img in section.find_all("img"):
                # BUGFIX: the replacement string had no '{}' placeholder,
                # silently dropping the image URL; emit markdown images.
                section_str = section_str.replace(
                    str(img), "\n\n![]({})\n\n".format(img["data-src"])
                )
            content += section_str
        elif section.name in str_lst:
            content += str(section)
        elif section.name == "p":
            # Renamed the genexp variable: it previously shadowed `content`.
            content += "".join(str(child) for child in section.contents)
        elif section.name in parse_lst:
            content += parse_section(section.contents)
        elif section.name == "img":
            # BUGFIX: same missing-placeholder issue as above.
            url = section["data-src"]
            content += "\n![]({})\n".format(url)
        elif section.name == "br":
            content += "</br>"
        elif section.name == "strong":
            content += "<strong>{}</strong>".format(section.string)
        elif section.name == "iframe":
            content += "iframe\n"
        else:
            print(section.name)
    return content
# Article URL(s) to convert; processed in reverse order below.
url_lst = ["https://mp.weixin.qq.com/s/3Xqrn52jObN-M524Jld1yw"]
with open("t.md", "w", encoding="utf-8") as f:
    for url in url_lst[::-1]:
        html = requests.get(url)
        soup = bs(html.text, "lxml")
        # try:
        # Locate the WeChat article container, title and author nickname.
        body = soup.find(class_="rich_media_area_primary_inner")
        title = body.find(class_="rich_media_title").text.strip()
        author = body.find(
            class_="rich_media_meta rich_media_meta_nickname"
        ).a.text.strip()
        content_p = body.find(class_="rich_media_content")
        content_lst = content_p.contents
        content = ""
        # Walk the article's top-level nodes and serialize each one.
        for item in content_lst:
            if item.name == None:
                # Bare text node.
                content += item
            elif item.name == "section":
                section_str = str(item)
                for img in item.find_all("img"):
                    # NOTE(review): this format string has no '{}'
                    # placeholder, so the image URL is dropped -- looks
                    # like broken markdown image syntax; verify intent.
                    section_str = section_str.replace(
                        str(img), "\n\n\n\n".format(img["data-src"])
                    )
                content += section_str
            elif item.name in str_lst:
                content += str(item)
            elif item.name == "p":
                # NOTE(review): the genexp variable shadows `content`;
                # harmless in Python 3 (own scope) but confusing.
                tmp = "".join(str(content) for content in item.contents)
                content += tmp
            elif item.name in parse_lst:
                content += parse_section(item.contents)
            elif item.name == "br":
                content += "</br>"
            elif item.name == "strong":
                content += "<strong>{}</strong>".format(item.string)
            elif item.name == "iframe":
                content += "iframe\n"
            elif section.name == "img":
                # NOTE(review): this branch tests the module-global
                # `section` leaked from parse_section(), not `item` --
                # almost certainly a bug; img items never match reliably.
                url = section["data-src"]
                content += "\n".format(url)
            else:
                print(item.name)
        f.write("## " + title + "\n")
        f.write(author + "\n")
        f.write(content + "\n")
        f.write('<div style="page-break-after: always;"></div>\n')
        # except:
        #     print(url)
        #     pass
| 33.166667 | 78 | 0.507538 |
6b9099fd3a804bf68461c0a158bdb6c0611d04bc | 4,112 | py | Python | repo2docker/docker.py | data-workspaces/dws-repo2docker | 4d8736d7e3d79b8cdfa1f644f590aa7fdede183b | [
"BSD-3-Clause"
] | 1,047 | 2017-05-25T03:37:21.000Z | 2020-08-09T19:36:56.000Z | repo2docker/docker.py | data-workspaces/dws-repo2docker | 4d8736d7e3d79b8cdfa1f644f590aa7fdede183b | [
"BSD-3-Clause"
] | 810 | 2017-05-24T20:50:49.000Z | 2020-08-05T15:56:38.000Z | repo2docker/docker.py | data-workspaces/dws-repo2docker | 4d8736d7e3d79b8cdfa1f644f590aa7fdede183b | [
"BSD-3-Clause"
] | 253 | 2017-06-02T20:23:05.000Z | 2020-08-04T17:23:22.000Z | """
Docker container engine for repo2docker
"""
import docker
from traitlets import Dict
from iso8601 import parse_date
from .engine import Container, ContainerEngine, ContainerEngineException, Image
class DockerContainer(Container):
    """Adapter exposing a docker-py container through the repo2docker
    ``Container`` interface by delegating to the wrapped object.
    """

    def __init__(self, container):
        # The underlying docker-py container object all calls delegate to.
        self._c = container

    def reload(self):
        return self._c.reload()

    def logs(self, *, stream=False, timestamps=False, since=None):
        # `since` arrives as an ISO 8601 string and is converted to the
        # integer epoch timestamp that docker expects.
        if since:
            # docker only accepts integer timestamps
            # this means we will usually replay logs from the last second
            # of the container
            # we should check if this ever returns anything new,
            # since we know it ~always returns something redundant
            since = int(parse_date(since).timestamp())
        return self._c.logs(stream=stream, timestamps=timestamps, since=since)

    def kill(self, *, signal="KILL"):
        return self._c.kill(signal=signal)

    def remove(self):
        return self._c.remove()

    def stop(self, *, timeout=10):
        return self._c.stop(timeout=timeout)

    def wait(self):
        return self._c.wait()

    @property
    def exitcode(self):
        # Exit code as recorded in the container's inspect data.
        return self._c.attrs["State"]["ExitCode"]

    @property
    def status(self):
        return self._c.status
class DockerEngine(ContainerEngine):
    """
    Container engine backed by docker-py.

    https://docker-py.readthedocs.io/en/4.2.0/api.html#module-docker.api.build
    """

    # Build output is decoded (decode=True below), not raw strings.
    string_output = False

    extra_init_args = Dict(
        {},
        help="""
        Extra kwargs to pass to docker client when initializing it.
        Dictionary that allows users to specify extra parameters to pass
        to APIClient, parameters listed in https://docker-py.readthedocs.io/en/stable/api.html#docker.api.client.APIClient.
        Parameters here are merged with whatever is picked up from the
        environment.
        """,
        config=True,
    )

    def __init__(self, *, parent):
        super().__init__(parent=parent)
        try:
            # Connection settings come from the environment (DOCKER_HOST
            # etc.) and may be overridden via `extra_init_args`.
            kwargs = docker.utils.kwargs_from_env()
            kwargs.update(self.extra_init_args)
            kwargs.setdefault("version", "auto")
            self._apiclient = docker.APIClient(**kwargs)
        except docker.errors.DockerException as e:
            raise ContainerEngineException("Check if docker is running on the host.", e)

    def build(
        self,
        *,
        buildargs=None,
        cache_from=None,
        container_limits=None,
        tag="",
        custom_context=False,
        dockerfile="",
        fileobj=None,
        path="",
        labels=None,
        **kwargs,
    ):
        """Build an image via docker-py's low-level API client.

        Intermediate containers are always removed (rm/forcerm) and the
        build output stream is JSON-decoded (decode=True).
        """
        return self._apiclient.build(
            buildargs=buildargs,
            cache_from=cache_from,
            container_limits=container_limits,
            forcerm=True,
            rm=True,
            tag=tag,
            custom_context=custom_context,
            decode=True,
            dockerfile=dockerfile,
            fileobj=fileobj,
            path=path,
            labels=labels,
            **kwargs,
        )

    def images(self):
        """List local images as engine-level ``Image`` objects."""
        images = self._apiclient.images()
        return [Image(tags=image["RepoTags"]) for image in images]

    def inspect_image(self, image):
        """Return tags and container config for a single image."""
        image = self._apiclient.inspect_image(image)
        return Image(tags=image["RepoTags"], config=image["ContainerConfig"])

    def push(self, image_spec):
        """Push an image, returning the streamed progress output."""
        return self._apiclient.push(image_spec, stream=True)

    def run(
        self,
        image_spec,
        *,
        command=None,
        environment=None,
        ports=None,
        publish_all_ports=False,
        remove=False,
        volumes=None,
        **kwargs,
    ):
        """Start a detached container and wrap it in ``DockerContainer``.

        Note: uses a fresh high-level client from the environment, not
        ``self._apiclient``.
        """
        client = docker.from_env(version="auto")
        container = client.containers.run(
            image_spec,
            command=command,
            environment=(environment or []),
            detach=True,
            ports=(ports or {}),
            publish_all_ports=publish_all_ports,
            remove=remove,
            volumes=(volumes or {}),
            **kwargs,
        )
        return DockerContainer(container)
| 27.972789 | 123 | 0.594844 |
297fbc764a904d6315a8307b05fe64044995d930 | 35,765 | py | Python | lib/axon/_loader.py | intellimath/pyaxon | fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb | [
"MIT"
] | 19 | 2015-03-02T19:38:51.000Z | 2021-11-16T13:48:04.000Z | lib/axon/_loader.py | intellimath/pyaxon | fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb | [
"MIT"
] | null | null | null | lib/axon/_loader.py | intellimath/pyaxon | fcadf741bedd71fdb21d6e8b865da2a22f2bd1fb | [
"MIT"
] | 4 | 2015-02-07T13:29:43.000Z | 2020-01-01T19:20:53.000Z | # coding: utf-8
#cython: boundscheck=False
#cython: wraparound=False
#cython: nonecheck=False
#cython: language_level=3
#cython: embedsignature=True
#cython: optimize.use_switch=False
# The MIT License (MIT)
#
# Copyright (c) <2011-2016> <Shibzukhov Zaur, szport at gmail dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import axon.errors as errors
import array
from collections import OrderedDict as odict
###
### Exceptions
###
import cython
#
#######################################################################
#
# `array.array` type code for signed int: Python 3 expects a text string,
# Python 2 a byte string.
if sys.version_info.major == 3:
    int_mode = 'i'
else:
    int_mode = b'i'
# Registry of builder instances keyed by mode name.
_builder_dict = {
    'safe': SafeBuilder(),
    'strict': StrictBuilder(),
    'mixed': MixedBuilder()
}


def register_builder(mode, builder):
    """Register a builder instance under the given mode name."""
    _builder_dict[mode] = builder


def get_builder(mode):
    """Return the builder registered for ``mode``, or None if unknown."""
    return _builder_dict.get(mode)
#
# Loader
#
class Loader:
    '''
    Loader from line oriented unicode text inputs.

    .. py:attribute:: line

        Current input unicode line

    .. py:attribute:: pos

        Position of current unicode character

    .. py:attribute:: lnum

        Number of current input unicode line

    .. py:attribute:: errto

        Name of file for reporting errors
    '''
    #
def __init__(self, fd, mode='safe', errto=None):
'''
.. py:function:: Loader(fd, readline, builder="safe", sbuilder="default", errto=None)
:param fd:
File-like object with `.readline()` and `.close()` method
:param mode:
Specifies the method of building python objects for complex values
:param errto:
Name of file for reporting errors
'''
self.fd = fd
self.readline = fd.readline
self.bc = 0
self.bs = 0
self.bq = 0
self.ba = 0
self.labeled_objects = {}
self.builder = get_builder(mode)
if self.builder is None:
raise ValueError("Invalid mode: %s", mode)
self.sbuilder = SimpleBuilder()
self.c_constants = c_constants.copy()
if errto is None:
self.errto = sys.stderr
else:
self.errto = open(errto, 'wt')
self.da = array.array(int_mode, (0,0,0))
self.ta = array.array(int_mode, (0,0,0,0))
self.to = array.array(int_mode, (0,0))
self.is_nl = 0
self.lnum = 0
self.keyval = KeyVal('', None)
self.next_line()
#
    def _check_pairs(self):
        # Report any unbalanced {} [] () <> pairs seen during the parse.
        if self.bc > 0:
            errors.error(self, 'Missed closing }')
        elif self.bc < 0:
            errors.error(self, 'Extra closing }')
        if self.bs > 0:
            errors.error(self, 'Missed closing ]')
        elif self.bs < 0:
            errors.error(self, 'Extra closing ]')
        if self.bq > 0:
            errors.error(self, 'Missed closing )')
        elif self.bq < 0:
            errors.error(self, 'Extra closing )')
        if self.ba > 0:
            errors.error(self, 'Missed closing >')
        elif self.ba < 0:
            errors.error(self, 'Extra closing >')
    #
    def load(self):
        '''
        Load all values.

        Returns an ordered dict when the first top-level item is a
        ``key: value`` pair, otherwise a list of values.  The input file
        is closed on EOF.
        '''
        is_odict = 0
        self.skip_spaces()
        if self.eof:
            self.fd.close()
            self._check_pairs()
            if self.errto != sys.stderr:
                self.errto.close()
            return []
        # First item decides the container type for the whole stream.
        val = self.get_keyval_or_value()
        if type(val) is KeyVal:
            is_odict = 1
            mapping = c_new_odict([])
            mapping[self.keyval.key] = self.keyval.val
        else:
            sequence = [val]
        while 1:
            self.skip_spaces()
            if self.eof:
                self.fd.close()
                self._check_pairs()
                if self.errto != sys.stderr:
                    self.errto.close()
                break
            if is_odict:
                self.get_keyval_odict(mapping)
            else:
                sequence.append(self.get_value(0, 0))
        if is_odict:
            return mapping
        else:
            return sequence
    #
    def iload(self):
        '''
        Iterative get value

        Generator variant of :meth:`load`: yields ``(key, value)`` tuples
        when the stream is key/value shaped, otherwise plain values.
        '''
        self.skip_spaces()
        if self.eof:
            self.fd.close()
            self._check_pairs()
            if self.errto != sys.stderr:
                self.errto.close()
            return
        val = self.get_keyval_or_value()
        if type(val) is KeyVal:
            is_odict = 1
            yield (self.keyval.key, self.keyval.val)
        else:
            is_odict = 0
            yield val
        while 1:
            self.skip_spaces()
            if self.eof:
                self.fd.close()
                self._check_pairs()
                if self.errto != sys.stderr:
                    self.errto.close()
                break
            # if is_odict and not type(val) is KeyVal:
            #     errors.error_expected_keyval(self)
            # elif not is_odict and type(val) is KeyVal:
            #     errors.error_unexpected_keyval(self)
            if is_odict:
                self.get_keyval()
                yield (self.keyval.key, self.keyval.val)
            else:
                yield self.get_value(0, 0)
    #
    def __iter__(self):
        '''
        Return iterator for iterative loading of values.
        '''
        # NOTE(review): no __next__ is visible in this file; the iterator
        # protocol is presumably completed elsewhere (or iload() is the
        # intended iterator) -- confirm before relying on `for v in loader`.
        return self
    #
    #def skip_char(self):
    #    self.pos += 1
    #
    #def next_char(self):
    #    self.pos += 1
    #    #return self.line[self.pos]
    #    return c_unicode_char(self.line, self.pos)
    #
    #def current_char(self):
    #    #return self.line[self.pos]
    #    return c_unicode_char(self.line, self.pos)
    #
    def next_line(self):
        # Pull the next raw line; guarantee it is '\n'-terminated and
        # reset the character/column cursors.  Sets self.eof on exhaustion.
        line = self.readline()
        if line == '':
            self.eof = 1
            self.pos = 0
            self.col = 0
        else:
            ch = c_unicode_char(line, c_unicode_length(line) - 1)
            if ch != '\n':
                line += '\n'
            self.eof = 0
            self.lnum += 1
            self.line = line
            self.pos = 0
            self.col = 0
    #
    def skip_spaces(self):
        # Skip all whitespace (including newlines); returns the first
        # non-space character, or 0 at EOF.  self.is_nl records whether a
        # (non backslash-continued) newline was crossed.
        self.is_nl = 0
        if self.eof:
            return 0
        ch = current_char(self)
        prev_ch = '\0'
        while ch <= ' ':
            if ch == '\n':
                self.next_line()
                if prev_ch != '\\':
                    self.is_nl = 1
                if self.eof:
                    return 0
                prev_ch = ch
                ch = current_char(self)
            elif ch == '\t':
                prev_ch = ch
                ch = next_char(self)
                self.col += 8
            else:
                prev_ch = ch
                ch = next_char(self)
                self.col += 1
        return ch
    #
    def skip_whitespace(self):
        # Skip spaces and tabs on the current line only.
        # NOTE(review): the column increment tests the *next* character,
        # so a tab's width is credited one step late -- confirm intent.
        ch = current_char(self)
        while ch == ' ' or ch == '\t':
            ch = next_char(self)
            if ch == '\t':
                self.col += 8
            else:
                self.col += 1
    #
    #def valid_end_item(self):
    #    ch = current_char(self)
    #    return ch <= ' ' or ch == '}' or ch == ']' or ch == ')' or ch == 0
    #
    def try_get_int(self, maxsize):
        # Consume up to *maxsize* decimal digits and return their value;
        # -1 when no digit is present.  (Cython: ch0 - 48 is char math.)
        val = 0
        ch = current_char(self)
        i = 0
        while ch >= '0' and ch <= '9':
            if i == maxsize:
                skip_char(self)
                break
            ch0 = ch
            val = 10*val + (ch0 - 48)
            ch = next_char(self)
            i += 1
        if i == 0:
            return -1
        return val
    #
    def get_date(self):
        # Parse YYYY-MM-DD into self.da; return 0 on success, -1 on error.
        val = self.try_get_int(4)
        if val < 0:
            return -1
        else:
            self.da[0] = val
        ch = current_char(self)
        if ch == '-':
            skip_char(self)
        else:
            return -1
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.da[1] = val
        ch = current_char(self)
        if ch == '-':
            skip_char(self)
        else:
            return -1
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.da[2] = val
        ch = current_char(self)
        if '0' <= ch <= '9':
            return -1
        return 0
    #
    def get_time(self):
        # Parse HH:MM[:SS[.ffffff]] into self.ta; missing parts become 0.
        # Returns 0 on success, -1 on error.
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.ta[0] = val
        ch = current_char(self)
        if ch == ':':
            skip_char(self)
        else:
            return -1
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.ta[1] = val
        ch = current_char(self)
        if ch == ':':
            skip_char(self)
        elif '0' <= ch <= '9':
            return -1
        else:
            self.ta[2] = 0
            self.ta[3] = 0
            return 0
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.ta[2] = val
        ch = current_char(self)
        if ch == '.':
            skip_char(self)
        elif '0' <= ch <= '9':
            return -1
        else:
            self.ta[3] = 0
            return 0
        val = self.try_get_int(6)
        if val < 0:
            return -1
        else:
            self.ta[3] = val
        ch = current_char(self)
        if '0' <= ch <= '9':
            return -1
        return 0
    #
    def get_time_offset(self):
        # Parse HH[:MM] timezone offset digits into self.to.
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.to[0] = val
        ch = current_char(self)
        if ch == ':':
            skip_char(self)
        elif '0' <= ch <= '9':
            return -1
        else:
            self.to[1] = 0
            return 0
        val = self.try_get_int(2)
        if val < 0:
            return -1
        else:
            self.to[1] = val
        ch = current_char(self)
        if '0' <= ch <= '9':
            return -1
        return 0
    #
    def get_tzinfo(self):
        # Parse an optional [+|-]HH[:MM] suffix and build a tzinfo via the
        # simple builder; returns None when no offset is present.
        v = 0
        sign = 0
        ch = current_char(self)
        if ch == '-':
            skip_char(self)
            sign = 1
            v = self.get_time_offset()
            if v < 0:
                errors.error_invalid_time(self)
        elif ch == '+':
            skip_char(self)
            v = self.get_time_offset()
            if v < 0:
                errors.error_invalid_time(self)
        elif '0' <= ch <= '9':
            v = self.get_time_offset()
            if v < 0:
                errors.error_invalid_time(self)
        else:
            return None
        h = self.to[0]
        m = self.to[1]
        minutes = h * 60 + m
        if minutes > 1440 or minutes < 0:
            errors.error_invalid_time(self)
        if sign:
            minutes = -minutes
        tzinfo = self.sbuilder.create_tzinfo(minutes)
        return tzinfo
    #
    def get_number(self):
        # Parse an int/float/decimal literal; backtracks into date or time
        # parsing when a '-' or ':' follows the leading digits.
        numtype = 1
        pos0 = self.pos
        ch = next_char(self)
        while ch >= '0' and ch <= '9':
            ch = next_char(self)
        if ch == '.':
            numtype = 2
            ch = next_char(self)
            while ch >= '0' and ch <= '9':
                ch = next_char(self)
        elif ch == '-':
            # Digits followed by '-' -> ISO date (optionally 'T' + time).
            self.pos = pos0
            v = self.get_date()
            if v < 0:
                errors.error_invalid_datetime(self)
            ch = current_char(self)
            if ch == 'T':
                skip_char(self)
                v = self.get_time()
                if v < 0:
                    errors.error_invalid_datetime(self)
                tzinfo = self.get_tzinfo()
                val = self.sbuilder.create_datetime(
                    self.da[0], self.da[1], self.da[2],
                    self.ta[0], self.ta[1], self.ta[2], self.ta[3], tzinfo)
            else:
                val = self.sbuilder.create_date(
                    self.da[0], self.da[1], self.da[2])
            return val
        elif ch == ':':
            # Digits followed by ':' -> time literal.
            self.pos = pos0
            v = self.get_time()
            if v < 0:
                errors.error_invalid_time(self)
            tzinfo = self.get_tzinfo()
            val = self.sbuilder.create_time(self.ta[0], self.ta[1], self.ta[2], self.ta[3], tzinfo)
            return val
        if ch == 'e' or ch == 'E':
            numtype = 2
            ch = next_char(self)
            if ch == '-' or ch == '+':
                ch = next_char(self)
            if ch >= '0' and ch <= '9':
                ch = next_char(self)
                while ch >= '0' and ch <= '9':
                    ch = next_char(self)
            else:
                errors.error_getnumber(self)
        text = get_chunk(self, pos0)
        if ch == 'd' or ch == 'D': # or ch == '$':
            skip_char(self)
            return self.sbuilder.create_decimal(text)
        if numtype == 1:
            return self.sbuilder.create_int(text)
        else:
            return self.sbuilder.create_float(text)
    #
    def get_date_time(self):
        # Parse a date/datetime/time literal (used after the '^' prefix).
        # Falls through (returns None) when neither '-' nor ':' follows
        # the digits.
        pos0 = self.pos
        ch = current_char(self)
        while ch >= '0' and ch <= '9':
            ch = next_char(self)
        if ch == '-':
            self.pos = pos0
            v = self.get_date()
            if v < 0:
                errors.error_invalid_datetime(self)
            ch = current_char(self)
            if ch == 'T':
                skip_char(self)
                v = self.get_time()
                if v < 0:
                    errors.error_invalid_datetime(self)
                tzinfo = self.get_tzinfo()
                val = self.sbuilder.create_datetime(
                    self.da[0], self.da[1], self.da[2],
                    self.ta[0], self.ta[1], self.ta[2], self.ta[3], tzinfo)
            else:
                val = self.sbuilder.create_date(
                    self.da[0], self.da[1], self.da[2])
            return val
        elif ch == ':':
            self.pos = pos0
            v = self.get_time()
            if v < 0:
                errors.error_invalid_time(self)
            tzinfo = self.get_tzinfo()
            val = self.sbuilder.create_time(self.ta[0], self.ta[1], self.ta[2], self.ta[3], tzinfo)
            return val
    #
    def get_name(self):
        # Consume an identifier ([alnum_]+) and return its interned form.
        pos0 = self.pos
        ch = next_char(self)
        while ch.isalnum() or ch == '_':
            ch = next_char(self)
        # if ch == '.':
        #     ch = next_char(self)
        #     while ch.isalnum() or ch == '_':
        #         ch = next_char(self)
        name0 = get_chunk(self, pos0)
        name = c_get_cached_name(name0)
        return name
    #
    def get_key(self):
        # Like get_name() but without the name cache.
        pos0 = self.pos
        ch = next_char(self)
        while ch.isalnum() or ch == '_':
            ch = next_char(self)
        return get_chunk(self, pos0)
    #
    def get_label(self):
        # Read the identifier following '&'/'*'; errors when empty.
        pos0 = self.pos
        ch = current_char(self)
        while ch.isalnum() or ch == '_':
            ch = next_char(self)
        if self.pos == pos0:
            errors.error_unexpected_value(self, ' after &')
        return get_chunk(self, pos0)
    #
    def get_unicode_hex(self):
        # Parse exactly 4 hex digits into a unicode code point.
        # (Cython: ch0 - 48/87/55 map '0'-'9'/'a'-'f'/'A'-'F' to values.)
        flag = 1
        val = 0
        for i in range(4):
            ch = current_char(self)
            if ch >= '0' and ch <= '9':
                ch0 = ch
                val = 16*val + (ch0 - 48)
                skip_char(self)
            elif ch >= 'a' and ch <= 'f':
                ch0 = ch
                val = 16*val + (ch0 - 87)
                skip_char(self)
            elif ch >= 'A' and ch <= 'F':
                ch0 = ch
                val = 16*val + (ch0 - 55)
                skip_char(self)
            else:
                flag = 0
                break
        if flag:
            return PyUnicode_FromOrdinal(val)
        else:
            errors.error(self, 'Invalid unicode character %r' % self.line[self.pos-4:self.pos+1])
    #
    def get_string(self, endch):
        # Read a quoted string terminated by *endch*; supports multi-line
        # strings (newline becomes '\n') and backslash-escaped quotes.
        text = None
        ch = next_char(self)
        pos0 = self.pos
        while 1:
            if ch == endch:
                if text is None:
                    text = get_chunk(self, pos0)
                else:
                    text += get_chunk(self, pos0)
                skip_char(self)
                return text
            elif ch == '\n' or ch == '\r':
                if text is None:
                    text = get_chunk(self, pos0)
                else:
                    text += get_chunk(self, pos0)
                text += '\n'
                self.next_line()
                if self.eof:
                    errors.error_unexpected_end_string(self)
                #self.skip_whitespace()
                pos0 = self.pos
                ch = current_char(self)
            elif ch == '\\':
                if text is None:
                    text = get_chunk(self, pos0)
                else:
                    text += get_chunk(self, pos0)
                ch = next_char(self)
                if ch == endch:
                    # Escaped closing quote is kept literally.
                    if endch == "'":
                        text += "'"
                    elif endch == '"':
                        text += '"'
                    elif endch == '`':
                        text += '`'
                    else:
                        raise errors.error(self, "String error")
                    skip_char(self)
                elif ch == '\n' or ch == '\r':
                    # Backslash-newline: line continuation.
                    if text is None:
                        text = get_chunk(self, pos0)
                    else:
                        text += get_chunk(self, pos0)
                    self.next_line()
                    if self.eof:
                        errors.error_unexpected_end_string(self)
                # elif ch == 'n':
                #     text += "\n"
                #     skip_char(self)
                # elif ch == 'r':
                #     text += "\r"
                #     skip_char(self)
                # elif ch == 't':
                #     text += "\t"
                #     skip_char(self)
                # elif ch == 'u' or ch == 'U':
                #     skip_char(self)
                #     text += self.get_unicode_hex()
                else:
                    # Any other escape keeps the backslash verbatim.
                    text += '\\'
                pos0 = self.pos
                ch = current_char(self)
            else:
                ch = next_char(self)
    #
    def get_base64(self):
        # Read a MIME Base64 blob (after '|'); whitespace is allowed
        # between chunks, '='/'==' padding terminates it.
        text = None
        ch = next_char(self)
        pos0 = self.pos
        while 1:
            if ch >= '0' and ch <= '9':
                ch = next_char(self)
            elif ch >= 'a' and ch <= 'z':
                ch = next_char(self)
            elif ch >= 'A' and ch <= 'Z':
                ch = next_char(self)
            elif ch == '+' or ch == '/':
                ch = next_char(self)
            elif ch <= ' ':
                if text is None:
                    text = get_chunk(self, pos0)
                else:
                    text += get_chunk(self, pos0)
                self.skip_spaces()
                if self.eof:
                    errors.error(self, 'MIME Base64 string is not finished')
                pos0 = self.pos
                ch = self.line[pos0]
            elif ch == '=':
                ch = next_char(self)
                if ch == '=':
                    ch = next_char(self)
                if text is None:
                    text = get_chunk(self, pos0)
                else:
                    text += get_chunk(self, pos0)
                return self.sbuilder.create_binary(text)
            else:
                raise errors.error(self, 'Invalid character %r in MIME Base64 string' % ch)
    #
    def skip_comment(self):
        # Consume the remainder of the current line.
        ch = next_char(self)
        while 1:
            if ch == '\n' or ch == '\r':
                self.next_line()
                return
            else:
                ch = next_char(self)
    #
    def skip_comments(self):
        # Consume consecutive '#' comment lines (and surrounding spaces).
        while 1:
            self.skip_comment()
            ch = self.skip_spaces()
            if self.eof:
                break
            if ch == '#':
                skip_char(self)
            else:
                break
    #
    def get_negative_constant(self):
        # After '-', only the infinity constant is valid here
        # ('-∞' / '-∞d' for decimal negative infinity).
        ch = current_char(self)
        if ch == '∞':
            ch = next_char(self)
            if ch == 'd' or ch == 'D' or ch == '$':
                skip_char(self)
                return self.sbuilder.create_decimal_ninf()
            else:
                return self.sbuilder.create_ninf()
        else:
            errors.error_invalid_value_with_prefix(self, '-')
    #
def get_value(self, idn, idn0):
ch = current_char(self)
if ch == '#':
self.skip_comments()
ch = current_char(self)
if (ch <= '9' and ch >= '0'):
val = self.get_number()
return val
if ch == '-':
ch = c_unicode_char(self.line, self.pos+1)
if ch.isdigit():
return self.get_number()
else:
skip_char(self)
return self.get_negative_constant()
elif ch == '"':
text = self.get_string(ch)
ch = self.skip_spaces()
if ch == ':':
errors.error_unexpected_keyval()
else:
return text
elif ch == '{':
self.bc += 1
skip_char(self)
return self.get_dict_value()
elif ch == '[':
self.bs += 1
skip_char(self)
return self.get_list_value()
elif ch == '(':
self.bq += 1
skip_char(self)
return self.get_tuple_value()
elif ch == '^':
skip_char(self)
return self.get_date_time()
elif ch.isalpha() or ch == '_':
name = self.get_name()
val = reserved_name_dict.get(name, c_undefined)
if val is not c_undefined:
return val
ch = self.skip_spaces()
return self.get_named(name, idn, idn0)
elif ch == '`':
return self.get_string(ch)
elif ch == "'":
name = self.get_string(ch)
self.skip_spaces()
return self.get_named(name, idn, idn0)
elif ch == '|':
return self.get_base64()
elif ch == '∞': # \U221E
ch = next_char(self)
if ch == 'D' or ch == 'd' or ch == '$':
skip_char(self)
return self.sbuilder.create_decimal_inf()
else:
return self.sbuilder.create_inf()
elif ch == '?':
ch = next_char(self)
if ch == 'D' or ch == 'd' or ch == '$':
skip_char(self)
return self.sbuilder.create_decimal_nan()
else:
return self.sbuilder.create_nan()
elif ch == '*':
skip_char(self)
label = self.get_label()
#if self.eof:
# errors.error_unexpected_end(self)
if label is None:
errors.error_expected_label(self)
else:
return self.labeled_objects.get(label, c_undefined)
elif ch == '&':
#pos0 = self.pos
skip_char(self)
label = self.get_label()
if label is None:
errors.error_expected_label(self)
self.skip_spaces()
val = self.get_value(idn, idn0)
self.labeled_objects[label] = val
return val
elif ch == '$':
skip_char(self)
name = self.get_name()
val = self.c_constants.get(name, c_undefined)
if val is c_undefined:
errors.error(self, "Undefined name %r" % name)
return val
elif ch == '∅':
skip_char(self)
return set()
else:
errors.error_unexpected_value(self, 'Unexpected value')
#
    def get_keyval_dict(self, mapping):
        # Parse one `key: value` pair into a plain dict.
        # NOTE: body is identical to get_keyval_odict below; kept separate,
        # presumably for Cython dispatch on the mapping type.
        ch = current_char(self)
        if ch == '"':
            key = self.get_string(ch)
        elif ch.isalpha() or ch == '_':
            key = self.get_key()
        else:
            errors.error_expected_key(self)
        ch = self.skip_spaces()
        if ch == ':':
            skip_char(self)
            self.skip_spaces()
            val = self.get_value(0, 0)
            mapping[key] = val
        else:
            errors.error_expected_keyval(self)
    #
    def get_keyval_odict(self, mapping):
        # Parse one `key: value` pair into an ordered dict.
        ch = current_char(self)
        if ch == '"':
            key = self.get_string(ch)
        elif ch.isalpha() or ch == '_':
            key = self.get_key()
        else:
            errors.error_expected_key(self)
        ch = self.skip_spaces()
        if ch == ':':
            skip_char(self)
            self.skip_spaces()
            val = self.get_value(0, 0)
            mapping[key] = val
        else:
            errors.error_expected_keyval(self)
    #
    def get_keyval_or_value(self):
        # Disambiguate the first token: returns self.keyval (a KeyVal) for
        # `key: value`, a plain string for a bare quoted key, or any other
        # parsed value.
        pos0 = self.col
        is_key = 0
        ch = current_char(self)
        if ch == '"':
            is_key = 1
            key = self.get_string(ch)
        elif ch.isalpha() or ch == '_':
            key = self.get_key()
        elif ch == "'":
            key = self.get_string(ch)
        else:
            return self.get_value(0, 0)
        ch = self.skip_spaces()
        if ch == ':':
            skip_char(self)
            self.skip_spaces()
            val = self.get_value(0, 0)
            self.keyval.key = key
            self.keyval.val = val
            return self.keyval
        if is_key:
            return key
        if ch == '{':
            self.bc += 1
            skip_char(self)
            self.skip_spaces()
            val = reserved_name_dict.get(key, c_undefined)
            if val is not c_undefined:
                return val
            return self.get_complex_value(key, 0, 0)
        else:
            if is_key:
                # NOTE(review): dead code -- is_key already returned above.
                return key
            val = reserved_name_dict.get(key, c_undefined)
            if val is not c_undefined:
                return val
            if self.is_nl:
                if self.eof or self.col <= pos0:
                    return self.builder.create_node(key, None, None)
                elif self.col > pos0:
                    return self.get_complex_value(key, self.col, pos0)
            else:
                return key
    #
    def get_keyval(self):
        # Parse one `key: value` pair into self.keyval (no return value).
        ch = current_char(self)
        if ch == '"':
            is_key = 1
            key = self.get_string(ch)
        elif ch.isalpha() or ch == '_':
            key = self.get_key()
        else:
            errors.error_expected_key(self)
        ch = self.skip_spaces()
        if ch == ':':
            skip_char(self)
            self.skip_spaces()
            val = self.get_value(0, 0)
            self.keyval.key = key
            self.keyval.val = val
            #return self.keyval
        else:
            errors.error_expected_keyval(self)
    #
    def get_named(self, name, idn, idn0):
        # A bare name introduces a complex value: either an indentation
        # based form (newline + deeper column) or a braced form `name {`.
        ch = current_char(self)
        if ch == ':':
            errors.error_unexpected_keyval(self)
        if self.is_nl:
            if self.eof or self.col <= idn:
                return self.builder.create_node(name, None, None)
            elif self.col > idn:
                return self.get_complex_value(name, self.col, idn)
            else:
                errors.error_indentation(self, idn)
        ch = current_char(self)
        if ch == '{':
            self.bc += 1
            skip_char(self)
            self.skip_spaces()
            return self.get_complex_value(name, 0, idn)
        else:
            errors.error_unexpected_value(self, 'Expected attribute or complex value with the name %r' % name)
    #
    def get_complex_value(self, name, idn, idn0):
        # Build a named node: leading `key: value` attributes followed by
        # positional child values.
        attrs = c_new_odict([])
        vals = self.get_attributes(attrs, idn, idn0)
        if len(attrs) == 0:
            attrs = None
        if vals is not None:
            # get_attributes returned the first positional value; keep
            # collecting the rest.
            self.get_values(vals, idn, idn0)
        return self.builder.create_node(name, attrs, vals)
    #
    def get_attributes(self, attrs, idn, idn0):
        # Fill *attrs* with `key: value` pairs; returns None when the node
        # closes with no positional values, or a one-element list holding
        # the first positional value.
        while 1:
            ch = self.skip_spaces()
            if ch == '#':
                self.skip_comments()
                ch = current_char(self)
            if idn:
                # Indentation-based form: dedent past idn0 closes the node.
                if self.eof or self.col <= idn0:
                    return None
                elif self.col == idn:
                    pass
                elif self.is_nl:
                    errors.error_indentation(self, idn)
            elif self.eof:
                errors.error_unexpected_end_complex_value(self)
            if ch == '}':
                self.bc -= 1
                skip_char(self)
                return None
            if ch.isalpha() or ch == '_':
                key = self.get_name()
            elif ch == "'":
                key = self.get_string(ch)
            else:
                key = None
            ch = self.skip_spaces()
            if key is None:
                val = self.get_value(idn0, idn0)
                return [val]
            if ch == ':':
                skip_char(self)
                self.skip_spaces()
                val = self.get_value(idn, idn0)
                attrs[key] = val
            else:
                # Name without ':' -> first positional value is itself a
                # named complex value.
                val = self.get_named(key, idn, idn0)
                return [val]
    #
    def get_values(self, vals, idn, idn0):
        # Append positional child values to *vals* until the node closes.
        while 1:
            ch = self.skip_spaces()
            if ch == '#':
                self.skip_comments()
                ch = current_char(self)
            if idn:
                if self.eof or self.col <= idn0:
                    return 1
                elif self.col == idn:
                    pass
                elif self.is_nl:
                    errors.error_indentation(self, idn)
            elif self.eof:
                errors.error_unexpected_end_complex_value(self)
            if ch == '}':
                self.bc -= 1
                skip_char(self)
                return 1
            val = self.get_value(idn, idn0)
            # if type(val) is KeyVal:
            #     errors.error_unexpected_keyval(self)
            vals.append(val)
    #
    def get_list_value(self):
        # After '[': parse either a list or, when items are `key: value`
        # pairs, an ordered dict ('[:]' is the empty ordered dict).
        is_odict = 0
        ch = self.skip_spaces()
        if ch == '#':
            self.skip_comments()
            ch = current_char(self)
        if self.eof:
            if is_odict:
                errors.error_unexpected_end_odict(self)
            else:
                errors.error_unexpected_end_list(self)
        if ch == ']':
            skip_char(self)
            self.bs -= 1
            return []
        if ch == ':':
            ch = next_char(self)
            if ch == ']':
                skip_char(self)
                self.bs -= 1
                return c_new_odict([])
            else:
                errors.error(self, "Invalid empty ordered dict")
        # First item decides list vs ordered dict.
        val = self.get_keyval_or_value()
        if type(val) is KeyVal:
            is_odict = 1
            mapping = c_new_odict([])
            mapping[self.keyval.key] = self.keyval.val
        else:
            sequence = [val]
        ch = self.skip_spaces()
        while 1:
            if self.eof:
                if is_odict:
                    errors.error_unexpected_end_odict(self)
                else:
                    errors.error_unexpected_end_list(self)
            if ch == '#':
                self.skip_comments()
                ch = current_char(self)
            if ch == ']':
                skip_char(self)
                self.bs -= 1
                if is_odict:
                    return mapping
                else:
                    return sequence
            if is_odict:
                self.get_keyval_odict(mapping)
            else:
                sequence.append(self.get_value(0, 0))
            ch = self.skip_spaces()
    #
    def get_tuple_value(self):
        # After '(': parse values until ')' and return them as a tuple.
        sequence = []
        ch = self.skip_spaces()
        while 1:
            if ch == '#':
                self.skip_comments()
                ch = current_char(self)
            if self.eof:
                errors.error_unexpected_end_tuple(self)
            if ch == ')':
                skip_char(self)
                self.bq -= 1
                return tuple(sequence)
            val = self.get_value(0, 0)
            sequence.append(val)
            ch = self.skip_spaces()
    #
    def get_dict_value(self):
        # After '{': parse either a dict (items are `key: value` pairs)
        # or a set (plain values); '{}' is the empty dict.
        is_dict = 0
        ch = self.skip_spaces()
        if ch == '#':
            self.skip_comments()
            ch = current_char(self)
        if self.eof:
            errors.error_unexpected_end_list(self)
        if ch == '}':
            skip_char(self)
            self.bc -= 1
            return {}
        val = self.get_keyval_or_value()
        if type(val) is KeyVal:
            is_dict = 1
            keyval = val
            mapping = {}
            mapping[keyval.key] = keyval.val
        else:
            sequence = {val}
        ch = self.skip_spaces()
        while 1:
            if ch == '#':
                self.skip_comments()
                ch = current_char(self)
            if self.eof:
                errors.error_unexpected_end_list(self)
            if ch == '}':
                skip_char(self)
                self.bc -= 1
                if is_dict:
                    return mapping
                else:
                    return sequence
            if is_dict:
                self.get_keyval_dict(mapping)
            else:
                val = self.get_value(0, 0)
                # if type(val) is KeyVal:
                #     errors.error(self, "Invalid set item")
                # else:
                sequence.add(val)
            ch = self.skip_spaces()
    #
| 27.897816 | 110 | 0.441074 |
de05455c0d81744f461547510c4212e23335bba2 | 5,306 | py | Python | neutron/plugins/mlnx/agent/utils.py | kevinbenton/neutron | f27fba3ad77d907713e3e1cbfa45d33e0135c08b | [
"Apache-2.0"
] | null | null | null | neutron/plugins/mlnx/agent/utils.py | kevinbenton/neutron | f27fba3ad77d907713e3e1cbfa45d33e0135c08b | [
"Apache-2.0"
] | null | null | null | neutron/plugins/mlnx/agent/utils.py | kevinbenton/neutron | f27fba3ad77d907713e3e1cbfa45d33e0135c08b | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zmq
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import exceptions
LOG = logging.getLogger(__name__)
class EswitchUtils(object):
    """Thin ZeroMQ REQ client for the eSwitch daemon (eSwitchD).

    Requests are JSON messages sent over a lazily-created REQ socket;
    replies are validated (``status`` field) before the payload is
    returned.  A reply timeout tears the socket down so the next call
    reconnects, and raises MlnxException.
    """

    def __init__(self, daemon_endpoint, timeout):
        # Socket is created lazily on first use -- see the _conn property.
        self.__conn = None
        self.daemon = daemon_endpoint
        self.timeout = timeout

    @property
    def _conn(self):
        """Create (once) and return the REQ socket; registers a poller."""
        if self.__conn is None:
            context = zmq.Context()
            socket = context.socket(zmq.REQ)
            socket.setsockopt(zmq.LINGER, 0)
            socket.connect(self.daemon)
            self.__conn = socket
            self.poller = zmq.Poller()
            self.poller.register(self._conn, zmq.POLLIN)
        return self.__conn

    def send_msg(self, msg):
        """Send *msg* and wait up to self.timeout ms for the reply.

        :param msg: JSON-encoded request string.
        :returns: parsed response payload (or None when absent).
        :raises MlnxException: on timeout or daemon-reported failure.
        """
        self._conn.send(msg)
        socks = dict(self.poller.poll(self.timeout))
        if socks.get(self._conn) == zmq.POLLIN:
            recv_msg = self._conn.recv()
            response = self.parse_response_msg(recv_msg)
            return response
        else:
            # Drop the stuck REQ socket so the next request reconnects.
            self._conn.setsockopt(zmq.LINGER, 0)
            self._conn.close()
            self.poller.unregister(self._conn)
            self.__conn = None
            raise exceptions.MlnxException(_("eSwitchD: Request timeout"))

    def parse_response_msg(self, recv_msg):
        """Decode a JSON reply and return its payload, or raise."""
        msg = jsonutils.loads(recv_msg)
        if msg['status'] == 'OK':
            if 'response' in msg:
                return msg.get('response')
            return
        elif msg['status'] == 'FAIL':
            msg_dict = dict(action=msg['action'], reason=msg['reason'])
            error_msg = _("Action %(action)s failed: %(reason)s") % msg_dict
        else:
            error_msg = _("Unknown operation status %s") % msg['status']
        LOG.error(error_msg)
        raise exceptions.MlnxException(error_msg)

    def get_attached_vnics(self):
        """Query the daemon for all vNICs on every fabric."""
        LOG.debug(_("get_attached_vnics"))
        msg = jsonutils.dumps({'action': 'get_vnics', 'fabric': '*'})
        vnics = self.send_msg(msg)
        return vnics

    def set_port_vlan_id(self, physical_network,
                         segmentation_id, port_mac):
        """Assign *segmentation_id* as the VLAN of *port_mac* on a fabric."""
        LOG.debug(_("Set Vlan %(segmentation_id)s on Port %(port_mac)s "
                    "on Fabric %(physical_network)s"),
                  {'port_mac': port_mac,
                   'segmentation_id': segmentation_id,
                   'physical_network': physical_network})
        msg = jsonutils.dumps({'action': 'set_vlan',
                               'fabric': physical_network,
                               'port_mac': port_mac,
                               'vlan': segmentation_id})
        self.send_msg(msg)

    def define_fabric_mappings(self, interface_mapping):
        """Tell the daemon which physical interface backs each fabric."""
        for fabric, phy_interface in interface_mapping.iteritems():
            LOG.debug(_("Define Fabric %(fabric)s on interface %(ifc)s"),
                      {'fabric': fabric,
                       'ifc': phy_interface})
            msg = jsonutils.dumps({'action': 'define_fabric_mapping',
                                   'fabric': fabric,
                                   'interface': phy_interface})
            self.send_msg(msg)

    def port_up(self, fabric, port_mac):
        """Bring the port identified by *port_mac* up on *fabric*."""
        LOG.debug(_("Port Up for %(port_mac)s on fabric %(fabric)s"),
                  {'port_mac': port_mac, 'fabric': fabric})
        # Bug fix: the request used to send the literal string 'port_mac'
        # instead of the MAC address variable (cf. port_down/port_release).
        msg = jsonutils.dumps({'action': 'port_up',
                               'fabric': fabric,
                               'ref_by': 'mac_address',
                               'mac': port_mac})
        self.send_msg(msg)

    def port_down(self, fabric, port_mac):
        """Bring the port identified by *port_mac* down on *fabric*."""
        LOG.debug(_("Port Down for %(port_mac)s on fabric %(fabric)s"),
                  {'port_mac': port_mac, 'fabric': fabric})
        msg = jsonutils.dumps({'action': 'port_down',
                               'fabric': fabric,
                               'ref_by': 'mac_address',
                               'mac': port_mac})
        self.send_msg(msg)

    def port_release(self, fabric, port_mac):
        """Release the daemon's claim on the port with *port_mac*."""
        LOG.debug(_("Port Release for %(port_mac)s on fabric %(fabric)s"),
                  {'port_mac': port_mac, 'fabric': fabric})
        msg = jsonutils.dumps({'action': 'port_release',
                               'fabric': fabric,
                               'ref_by': 'mac_address',
                               'mac': port_mac})
        self.send_msg(msg)

    def get_eswitch_ports(self, fabric):
        # TODO(irena) - to implement for next phase
        return {}

    def get_eswitch_id(self, fabric):
        # TODO(irena) - to implement for next phase
        return ""
| 38.729927 | 76 | 0.566905 |
faf64c539e7f79154907c2dcf2492ab40c2daf3a | 382 | py | Python | tests/test_ext_baidu.py | terorie/music-dl | 0d7f1ff177501058d84cf270db869a5d0ea92216 | [
"MIT"
] | 18 | 2019-03-11T05:31:19.000Z | 2021-03-17T03:14:24.000Z | tests/test_ext_baidu.py | BuildDreams/music-dl | 87c9c00e7b8fe4fb9d1d2e8b74efd02a153fd2d3 | [
"MIT"
] | null | null | null | tests/test_ext_baidu.py | BuildDreams/music-dl | 87c9c00e7b8fe4fb9d1d2e8b74efd02a153fd2d3 | [
"MIT"
] | 10 | 2019-07-08T06:38:39.000Z | 2021-02-20T02:39:07.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: HJK
@file: test_ext_baidu
@time: 2019-01-30
"""
from music_dl.extractors import baidu
def test_baidu(capsys):
    """Searching Baidu music for an artist should return a (non-None) list."""
    songs = baidu.search("朴树")
    assert songs is not None
    # Download smoke-check, intentionally left disabled:
    # if songs:
    #     songs[0].download()
    #     out, err = capsys.readouterr()
    #     assert out.find("已保存到:")
8f4878d908f4be5bd928001a32c63d4eaf543e4b | 266 | py | Python | test_abots.py | aewens/abots | c462ab1be33f2bf9fb9b9afd4c280423aa53952f | [
"BSD-3-Clause"
] | null | null | null | test_abots.py | aewens/abots | c462ab1be33f2bf9fb9b9afd4c280423aa53952f | [
"BSD-3-Clause"
] | null | null | null | test_abots.py | aewens/abots | c462ab1be33f2bf9fb9b9afd4c280423aa53952f | [
"BSD-3-Clause"
] | null | null | null | #!env/bin/python3
from abots.net import SocketServer, SocketClient
host = "localhost"
port = 10401
timeout = 3
server = SocketServer(host, port, timeout=timeout)
client = SocketClient(host, port, timeout=timeout)
server.start()
server.ready.wait()
client.start() | 19 | 50 | 0.759398 |
951ca9f5d2751c0e951a9b3e7573d1319b9c75a5 | 8,293 | py | Python | Python/ocr.py | AbdulConsole/Hacktoberfest2019-2 | b9619361b6cecf9b3e734972af3b0a03dba98d2e | [
"MIT"
] | 1 | 2019-10-28T20:12:23.000Z | 2019-10-28T20:12:23.000Z | Python/ocr.py | AbdulConsole/Hacktoberfest2019-2 | b9619361b6cecf9b3e734972af3b0a03dba98d2e | [
"MIT"
] | null | null | null | Python/ocr.py | AbdulConsole/Hacktoberfest2019-2 | b9619361b6cecf9b3e734972af3b0a03dba98d2e | [
"MIT"
] | 1 | 2019-10-20T13:07:33.000Z | 2019-10-20T13:07:33.000Z | import cv2
import numpy as np
import sys
import os.path
# Command-line handling: expect exactly an input image and an output path.
if len(sys.argv) != 3:
    print("%s input_file output_file" % (sys.argv[0]))
    sys.exit()
else:
    input_file = sys.argv[1]
    output_file = sys.argv[2]
if not os.path.isfile(input_file):
    print("No such file '%s'" % input_file)
    sys.exit()
# Set to 1 for verbose per-contour diagnostics and debug images.
DEBUG = 0
# Determine pixel intensity
# Apparently human eyes register colors differently.
# TVs use this formula to determine
# pixel intensity = 0.30R + 0.59G + 0.11B
def ii(xx, yy):
    # Reads the module-level image; cv2.imread stores channels as BGR,
    # so pixel[2] is red and pixel[0] is blue.
    global img, img_y, img_x
    if yy >= img_y or xx >= img_x:
        #print "pixel out of bounds ("+str(y)+","+str(x)+")"
        return 0
    pixel = img[yy][xx]
    return 0.30 * pixel[2] + 0.59 * pixel[1] + 0.11 * pixel[0]
# A quick test to check whether the contour is
# a connected shape
def connected(contour):
    """True when the contour's first and last points are within one pixel
    of each other on both axes, i.e. the outline closes on itself."""
    start = contour[0][0]
    end = contour[-1][0]
    dx = abs(start[0] - end[0])
    dy = abs(start[1] - end[1])
    return dx <= 1 and dy <= 1
# Helper function to return a given contour
def c(index):
    # Indexes into the module-level list produced by cv2.findContours.
    global contours
    return contours[index]
# Count the number of real children
# (h_ rows follow OpenCV's layout as used throughout this file:
#  [0]=next sibling, [1]=previous sibling, [2]=first child, [3]=parent)
def count_children(index, h_, contour):
    # No children
    if h_[index][2] < 0:
        return 0
    else:
        #If the first child is a contour we care about
        # then count it, otherwise don't
        if keep(c(h_[index][2])):
            count = 1
        else:
            count = 0
        # Also count all of the child's siblings and their children
        count += count_siblings(h_[index][2], h_, contour, True)
        return count
# Quick check to test if the contour is a child
def is_child(index, h_):
    return get_parent(index, h_) > 0

# Get the first parent of the contour that we care about
def get_parent(index, h_):
    # Walk up the hierarchy (column 3 = parent) until a kept contour or
    # the root (negative index) is reached.
    parent = h_[index][3]
    while not keep(c(parent)) and parent > 0:
        parent = h_[parent][3]
    return parent
# Count the number of relevant siblings of a contour
def count_siblings(index, h_, contour, inc_children=False):
    # Include the children if necessary
    if inc_children:
        count = count_children(index, h_, contour)
    else:
        count = 0
    # Look ahead (column 0 = next sibling)
    p_ = h_[index][0]
    while p_ > 0:
        if keep(c(p_)):
            count += 1
        if inc_children:
            count += count_children(p_, h_, contour)
        p_ = h_[p_][0]
    # Look behind (column 1 = previous sibling)
    n = h_[index][1]
    while n > 0:
        if keep(c(n)):
            count += 1
        if inc_children:
            count += count_children(n, h_, contour)
        n = h_[n][1]
    return count
# Whether we care about this contour: plausible character box AND closed.
def keep(contour):
    return keep_box(contour) and connected(contour)
# Whether we should keep the containing box of this
# contour based on it's shape
def keep_box(contour):
    xx, yy, w_, h_ = cv2.boundingRect(contour)
    # width and height need to be floats
    w_ *= 1.0
    h_ *= 1.0
    # Test it's shape - if it's too oblong or tall it's
    # probably not a real character
    if w_ / h_ < 0.1 or w_ / h_ > 10:
        if DEBUG:
            print("\t Rejected because of shape: (" + str(xx) + "," + str(yy) + "," + str(w_) + "," + str(h_) + ")" + \
                str(w_ / h_))
        return False
    # check size of the box: reject anything covering more than a fifth
    # of the (module-level) image area, or smaller than 15 px^2.
    if ((w_ * h_) > ((img_x * img_y) / 5)) or ((w_ * h_) < 15):
        if DEBUG:
            print("\t Rejected because of size")
        return False
    return True
# Final keep/skip decision based on the contour's place in the hierarchy:
# interiors of letters (few-child children) and containers of many letters
# are both excluded.
def include_box(index, h_, contour):
    if DEBUG:
        print(str(index) + ":")
        if is_child(index, h_):
            print("\tIs a child")
            print("\tparent " + str(get_parent(index, h_)) + " has " + str(
                count_children(get_parent(index, h_), h_, contour)) + " children")
            print("\thas " + str(count_children(index, h_, contour)) + " children")
    if is_child(index, h_) and count_children(get_parent(index, h_), h_, contour) <= 2:
        if DEBUG:
            print("\t skipping: is an interior to a letter")
        return False
    if count_children(index, h_, contour) > 2:
        if DEBUG:
            print("\t skipping, is a container of letters")
        return False
    if DEBUG:
        print("\t keeping")
    return True
# ---------------------------------------------------------------------------
# Main script: load the image, find character-like contours on the Canny
# edges of each color channel, then rebuild a clean black-on-white image by
# thresholding every kept box against its foreground/background intensity.
# ``input_file`` / ``output_file`` and the helper ``ii`` (pixel intensity
# lookup) are defined earlier in this file.
# ---------------------------------------------------------------------------

# Load the image
orig_img = cv2.imread(input_file)

# Add a border to the image for processing sake
img = cv2.copyMakeBorder(orig_img, 50, 50, 50, 50, cv2.BORDER_CONSTANT)

# Calculate the width and height of the image
img_y = len(img)
img_x = len(img[0])

if DEBUG:
    print("Image is " + str(len(img)) + "x" + str(len(img[0])))

# Split out each channel
blue, green, red = cv2.split(img)

# Run canny edge detection on each channel
blue_edges = cv2.Canny(blue, 200, 250)
green_edges = cv2.Canny(green, 200, 250)
red_edges = cv2.Canny(red, 200, 250)

# Join edges back into image
edges = blue_edges | green_edges | red_edges

# Find the contours
contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

hierarchy = hierarchy[0]

if DEBUG:
    processed = edges.copy()
    rejected = edges.copy()

# These are the boxes that we are determining
keepers = []

# For each contour, find the bounding rectangle and decide
# if it's one we care about
for index_, contour_ in enumerate(contours):
    # BUGFIX: the progress print had been commented out, leaving the
    # ``if DEBUG:`` suite empty (and a stray trailing ``_`` in the old
    # line). Restore it so the guard has a body and the bounding-rect
    # computation below is always executed.
    if DEBUG:
        print("Processing #%d" % index_)
    x, y, w, h = cv2.boundingRect(contour_)

    # Check the contour and it's bounding box
    if keep(contour_) and include_box(index_, hierarchy, contour_):
        # It's a winner!
        keepers.append([contour_, [x, y, w, h]])
        if DEBUG:
            cv2.rectangle(processed, (x, y), (x + w, y + h), (100, 100, 100), 1)
            cv2.putText(processed, str(index_), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
    else:
        if DEBUG:
            cv2.rectangle(rejected, (x, y), (x + w, y + h), (100, 100, 100), 1)
            cv2.putText(rejected, str(index_), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))

# Make a white copy of our image
new_image = edges.copy()
new_image.fill(255)
# NOTE: a ``boxes = []`` list used to be created here but was never read
# anywhere in the file; it has been removed.

# For each box, find the foreground and background intensities
for index_, (contour_, box) in enumerate(keepers):

    # Find the average intensity of the edge pixels to
    # determine the foreground intensity
    fg_int = 0.0
    for p in contour_:
        fg_int += ii(p[0][0], p[0][1])

    fg_int /= len(contour_)

    if DEBUG:
        print("FG Intensity for #%d = %d" % (index_, fg_int))

    # Find the intensity of three pixels going around the
    # outside of each corner of the bounding box to determine
    # the background intensity
    x_, y_, width, height = box
    bg_int = \
        [
            # bottom left corner 3 pixels
            ii(x_ - 1, y_ - 1),
            ii(x_ - 1, y_),
            ii(x_, y_ - 1),

            # bottom right corner 3 pixels
            ii(x_ + width + 1, y_ - 1),
            ii(x_ + width, y_ - 1),
            ii(x_ + width + 1, y_),

            # top left corner 3 pixels
            ii(x_ - 1, y_ + height + 1),
            ii(x_ - 1, y_ + height),
            ii(x_, y_ + height + 1),

            # top right corner 3 pixels
            ii(x_ + width + 1, y_ + height + 1),
            ii(x_ + width, y_ + height + 1),
            ii(x_ + width + 1, y_ + height)
        ]

    # Find the median of the background
    # pixels determined above
    bg_int = np.median(bg_int)

    if DEBUG:
        print("BG Intensity for #%d = %s" % (index_, repr(bg_int)))

    # Determine if the box should be inverted so the output is
    # always dark text on a light background.
    if fg_int >= bg_int:
        fg = 255
        bg = 0
    else:
        fg = 0
        bg = 255

    # Loop through every pixel in the box and color the
    # pixel accordingly
    for x in range(x_, x_ + width):
        for y in range(y_, y_ + height):
            if y >= img_y or x >= img_x:
                if DEBUG:
                    print("pixel out of bounds (%d,%d)" % (y, x))
                continue
            if ii(x, y) > fg_int:
                new_image[y][x] = bg
            else:
                new_image[y][x] = fg

# blur a bit to improve ocr accuracy
new_image = cv2.blur(new_image, (2, 2))
cv2.imwrite(output_file, new_image)
if DEBUG:
    cv2.imwrite('edges.png', edges)
    cv2.imwrite('processed.png', processed)
    cv2.imwrite('rejected.png', rejected)
63fab8f6cf1759d13f46cd86c0e4f931ae8970e9 | 260 | py | Python | random.py | eunjirina/Exercicios-de-Python | 0cf42a2b54191e966b1d019483db1cd17094e866 | [
"MIT"
] | null | null | null | random.py | eunjirina/Exercicios-de-Python | 0cf42a2b54191e966b1d019483db1cd17094e866 | [
"MIT"
] | null | null | null | random.py | eunjirina/Exercicios-de-Python | 0cf42a2b54191e966b1d019483db1cd17094e866 | [
"MIT"
] | null | null | null | import random
from random import randint
# Draw five independent random integers in [1, 10]; the generator expression
# calls random.randint five times left-to-right, exactly like the original
# hand-written 5-element tuple.
n = tuple(random.randint(1, 10) for _ in range(5))
print(f'Foram sorteados os números {n}\nDesses valores o menor é o {min(n)} e maior é o {max(n)}')
c3dd5bafaa7aa9ad3da19ee612f62c5caeedc0ec | 65 | py | Python | pubmed/__init__.py | oldteng/pubmed | 7f1183ae1a3efa7536c6aa8d9398e6fff59bb3b6 | [
"MIT"
] | null | null | null | pubmed/__init__.py | oldteng/pubmed | 7f1183ae1a3efa7536c6aa8d9398e6fff59bb3b6 | [
"MIT"
] | null | null | null | pubmed/__init__.py | oldteng/pubmed | 7f1183ae1a3efa7536c6aa8d9398e6fff59bb3b6 | [
"MIT"
] | null | null | null |
from .literature import PubMed
from .pubmedcsv import OpenCsv
| 10.833333 | 30 | 0.8 |
27ad5ae8a52d06688f074e9c6051cbf6bb03f4dd | 5,084 | py | Python | tests/func/test_import_url.py | arthurcgusmao/dvc | dff27bb163419bd2f93acaa0906dfdee7359d9d6 | [
"Apache-2.0"
] | 1 | 2020-08-01T08:31:18.000Z | 2020-08-01T08:31:18.000Z | tests/func/test_import_url.py | arthurcgusmao/dvc | dff27bb163419bd2f93acaa0906dfdee7359d9d6 | [
"Apache-2.0"
] | null | null | null | tests/func/test_import_url.py | arthurcgusmao/dvc | dff27bb163419bd2f93acaa0906dfdee7359d9d6 | [
"Apache-2.0"
] | 1 | 2020-11-28T11:47:48.000Z | 2020-11-28T11:47:48.000Z | import os
from uuid import uuid4
import pytest
from dvc.dependency.base import DependencyDoesNotExistError
from dvc.main import main
from dvc.stage import Stage
from dvc.utils.fs import makedirs
from tests.basic_env import TestDvc
class TestCmdImport(TestDvc):
    """CLI-level tests for the `dvc import-url` command."""

    def test(self):
        # Importing an existing file succeeds and produces a .dvc stage file.
        ret = main(["import-url", self.FOO, "import"])
        self.assertEqual(ret, 0)
        self.assertTrue(os.path.exists("import.dvc"))

        # Importing a missing path must fail with a non-zero exit code.
        ret = main(["import-url", "non-existing-file", "import"])
        self.assertNotEqual(ret, 0)

    def test_unsupported(self):
        # An unknown URL scheme is rejected.
        ret = main(["import-url", "unsupported://path", "import_unsupported"])
        self.assertNotEqual(ret, 0)
class TestDefaultOutput(TestDvc):
    """Without an explicit output, import-url derives it from the source name."""

    def test(self):
        tmpdir = self.mkdtemp()
        filename = str(uuid4())
        tmpfile = os.path.join(tmpdir, filename)
        with open(tmpfile, "w") as fd:
            fd.write("content")

        ret = main(["import-url", tmpfile])
        self.assertEqual(ret, 0)
        # The import lands in the CWD under the source's basename.
        self.assertTrue(os.path.exists(filename))
        with open(filename) as fd:
            self.assertEqual(fd.read(), "content")
def test_should_remove_outs_before_import(tmp_dir, dvc, mocker, erepo_dir):
    """import-url must clear the stage's outputs exactly once before fetching."""
    erepo_dir.gen({"foo": "foo"})

    remove_outs_call_counter = mocker.spy(Stage, "remove_outs")
    ret = main(["import-url", os.fspath(erepo_dir / "foo")])

    assert ret == 0
    assert remove_outs_call_counter.mock.call_count == 1
class TestImportFilename(TestDvc):
    """`import-url --file` writes the stage file to the requested path."""

    def setUp(self):
        super().setUp()
        # A source file outside the workspace to import from.
        tmp_dir = self.mkdtemp()
        self.external_source = os.path.join(tmp_dir, "file")
        with open(self.external_source, "w") as fobj:
            fobj.write("content")

    def test(self):
        ret = main(["import-url", "--file", "bar.dvc", self.external_source])
        self.assertEqual(0, ret)
        self.assertTrue(os.path.exists("bar.dvc"))

        os.remove("bar.dvc")
        os.mkdir("sub")

        # The --file path may also point into a subdirectory.
        path = os.path.join("sub", "bar.dvc")
        ret = main(["import-url", "--file", path, self.external_source])
        self.assertEqual(0, ret)
        self.assertTrue(os.path.exists(path))
@pytest.mark.parametrize("dname", [".", "dir", "dir/subdir"])
def test_import_url_to_dir(dname, tmp_dir, dvc):
    """Importing into an existing directory places the file inside it."""
    tmp_dir.gen({"data_dir": {"file": "file content"}})
    src = os.path.join("data_dir", "file")

    makedirs(dname, exist_ok=True)

    stage = dvc.imp_url(src, dname)

    dst = tmp_dir / dname / "file"

    assert stage.outs[0].path_info == dst
    assert os.path.isdir(dname)
    assert dst.read_text() == "file content"
def test_import_stage_accompanies_target(tmp_dir, dvc, erepo_dir):
    """The generated .dvc stage file sits next to the imported target."""
    with erepo_dir.chdir():
        erepo_dir.dvc_gen("file1", "file1 content", commit="commit file")

    tmp_dir.gen({"dir": {}})

    erepo = {"url": os.fspath(erepo_dir)}
    dvc.imp_url("file1", out=os.path.join("dir", "imported_file"), erepo=erepo)

    assert (tmp_dir / "dir" / "imported_file").exists()
    assert (tmp_dir / "dir" / "imported_file.dvc").exists()
def test_import_url_nonexistent(dvc, erepo_dir):
    """A missing dependency raises instead of silently creating a stage."""
    with pytest.raises(DependencyDoesNotExistError):
        dvc.imp_url(os.fspath(erepo_dir / "non-existent"))
def test_import_url_with_no_exec(tmp_dir, dvc, erepo_dir):
    """With no_exec=True the stage is created but the data is not fetched."""
    tmp_dir.gen({"data_dir": {"file": "file content"}})
    src = os.path.join("data_dir", "file")

    dvc.imp_url(src, ".", no_exec=True)
    dst = tmp_dir / "file"
    assert not dst.exists()
@pytest.mark.parametrize(
    "workspace",
    [
        pytest.lazy_fixture("local_cloud"),
        pytest.lazy_fixture("s3"),
        pytest.lazy_fixture("gs"),
        pytest.lazy_fixture("hdfs"),
        pytest.param(
            pytest.lazy_fixture("ssh"),
            marks=pytest.mark.skipif(
                os.name == "nt", reason="disabled on windows"
            ),
        ),
        pytest.lazy_fixture("http"),
    ],
    indirect=True,
)
def test_import_url(tmp_dir, dvc, workspace):
    """import-url fetches a single file from every supported remote type."""
    workspace.gen("file", "file")

    assert not (tmp_dir / "file").exists()  # sanity check
    dvc.imp_url("remote://workspace/file")
    assert (tmp_dir / "file").read_text() == "file"

    # The workspace must be clean after the import.
    assert dvc.status() == {}
@pytest.mark.parametrize(
    "workspace",
    [
        pytest.lazy_fixture("local_cloud"),
        pytest.lazy_fixture("s3"),
        pytest.lazy_fixture("gs"),
        pytest.param(
            pytest.lazy_fixture("ssh"),
            marks=pytest.mark.skipif(
                os.name == "nt", reason="disabled on windows"
            ),
        ),
    ],
    indirect=True,
)
def test_import_url_dir(tmp_dir, dvc, workspace):
    """Importing a remote directory recreates its full tree locally."""
    workspace.gen({"dir": {"file": "file", "subdir": {"subfile": "subfile"}}})

    assert not (tmp_dir / "dir").exists()  # sanity check
    dvc.imp_url("remote://workspace/dir")

    assert set(os.listdir(tmp_dir / "dir")) == {"file", "subdir"}
    assert (tmp_dir / "dir" / "file").read_text() == "file"
    assert list(os.listdir(tmp_dir / "dir" / "subdir")) == ["subfile"]
    assert (tmp_dir / "dir" / "subdir" / "subfile").read_text() == "subfile"

    # The workspace must be clean after the import.
    assert dvc.status() == {}
| 30.261905 | 79 | 0.617821 |
8b224e0e375b40d95efe42edbcc4f2278c7cd48f | 6,199 | py | Python | output/models/nist_data/list_pkg/any_uri/schema_instance/nistschema_sv_iv_list_any_uri_enumeration_3_xsd/nistschema_sv_iv_list_any_uri_enumeration_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/any_uri/schema_instance/nistschema_sv_iv_list_any_uri_enumeration_3_xsd/nistschema_sv_iv_list_any_uri_enumeration_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/any_uri/schema_instance/nistschema_sv_iv_list_any_uri_enumeration_3_xsd/nistschema_sv_iv_list_any_uri_enumeration_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-list-anyURI-enumeration-3-NS"
class NistschemaSvIvListAnyUriEnumeration3Type(Enum):
    """Auto-generated enumeration (xsdata-style binding) of the legal values
    for the NISTSchema-SV-IV-list-anyURI-enumeration-3 element.

    Each member represents one whitespace-separated xs:anyURI list from the
    schema, modeled here as a tuple of URI strings; the member names are
    mechanically derived from the URI content. Do not edit by hand.
    """
    GOPHER_ANDDISCUSST_ENUNAMBIGU_USANDSUCH_G_OUPSOFS_TO_N_RL_GOV_GOPHER_TECHNICALPR_VIDEMAINTA_NS_EDU_NEWS_OURTHEINT_COM_MAILTO_SENSECO_ANDDEVELOPMENTINTOPARTICULARLYBETWEE_EDU_HTTP_WWW_LANGUAGETHA_THOSEWHICH_OMPATIBILI_YITSDATAAND_EDU_HTTP_WORLDFORTHE_ARDWARE_ELE_TRONIC_PART_COM_MAILTO_BUILDRAPID_REPOSITORYANDWITHORGA_ORG_FTP_FTP_NISTFILTER_N_VALLIESTOO_SOFT_ORG_NEWS_RELATION_GOV = (
        "gopher://anddiscusst.enunambigu.usandsuchG.oupsofsToN.RL.gov",
        "gopher://technicalpr.videmainta.ns.edu",
        "news://ourtheint.com",
        "mailto:senseco@anddevelopmentintoparticularlybetwee.edu",
        "http://www.Languagetha.thosewhich.ompatibili.yitsdataand.edu",
        "http://worldforthe.ardwareEle.tronicPart.com",
        "mailto:buildrapid@repositoryandwithorga.org",
        "ftp://ftp.NISTfilterN.valliestoo.soft.org",
        "news://relation.gov",
    )
    GOPHER_INTERNETINT_RCONN_NET_FTP_WIDEIMPLEME_TATI_COM_FTP_FTP_H_COM_FTP_CAN_THEREFER_NCE_NAVALTHE_EDU_MAILTO_BECOMERE_GROUPSSIMULATIONLANGUAGEANDTR_GOV_GOPHER_THE_GROUPS_XM_TOTHEIRMAI_TAINEDCORR_ORG_FTP_FTP_DOMAINSFURT_ERMANIPULA_ORG_FTP_FTP_NEED_GOV = (
        "gopher://Internetint.rconn.net",
        "ftp://wideimpleme.tati.com",
        "ftp://ftp.h.com",
        "ftp://canTherefer.nceNavalthe.edu",
        "mailto:becomere@groupssimulationlanguageandtr.gov",
        "gopher://theGroupsXM.totheirmai.tainedcorr.org",
        "ftp://ftp.domainsfurt.ermanipula.org",
        "ftp://ftp.need.gov",
    )
    FTP_FTP_TOMODELSTES_ASOF_COMMUN_TY_COM_TELNET_BROWSERSREQ_ESTINGPROJ_CTBE_COMPUT_NG_OASI_ORG_HTTP_ANDPARTNERS_IP_ORG_HTTP_WWW_ANDTHEFORIM_LEMENTAT_ORG_NEWS_THOSE_DOCU_EDU_FTP_DISCUSS_AOAS_SN_GOV_FTP_FTP_FORANDDOCUM_NTSSIGNATU_ESTODOCUME_TSTRANSA_ORG_NEWS_BE_WORKINGOF_ONTROLDISC_VERCOMPUTE_THATBROWSE_STH_EDU_GOPHER_THETOISBEOF_OMPUTERAND_ESOURCESBYT_GOV_GOPHER_H_GOV = (
        "ftp://ftp.tomodelstes.asofCommun.ty.com",
        "telnet://browsersreq.estingproj.ctbeComput.ngOASI.org",
        "http://andpartners.ip.org",
        "http://www.andtheforim.lementat.org",
        "news://thoseDocu.edu",
        "ftp://discussAOAS.Sn.gov",
        "ftp://ftp.foranddocum.ntssignatu.estodocume.tstransa.org",
        "news://beWorkingof.ontroldisc.vercompute.thatbrowse.sth.edu",
        "gopher://thetoisbeof.omputerand.esourcesbyt.gov",
        "gopher://h.gov",
    )
    MAILTO_F_DEVELOPMENTKEYDIAGNOSTICOTH_ORG_NEWS_CHAIRS_NSRLT_EMETHODSLA_DSCAPE_STRU_TUREDACCEL_EDU_MAILTO_ASTOA_COMPUT_WHICHANDTESTABLEBYFROMGOVER_EDU_GOPHER_FROMLESSBRO_SERSINVEST_GATIONISUSE_ORG_MAILTO_H_H_EDU_FTP_FTP_JOINT_ONLYH_EDU = (
        "mailto:f@developmentkeydiagnosticoth.org",
        "news://chairsNSRLt.emethodsla.dscapeStru.turedaccel.edu",
        "mailto:astoaComput@whichandtestablebyfromgover.edu",
        "gopher://fromlessbro.sersinvest.gationisuse.org",
        "mailto:h@h.edu",
        "ftp://ftp.jointOnlyh.edu",
    )
    HTTP_WWW_XMLRECENTCO_PUTER_XML_EDU_GOPHER_K_COM_HTTP_WWW_ECBUSINESST_BETHATOFAB_ETHEPERS_ORG_FTP_S_EDU_HTTP_WWW_ANDWAYSENSU_EMANYUSEDC_EA_GOV_FTP_FTP_C_NET = (
        "http://www.XMLrecentco.puterXML.edu",
        "gopher://k.com",
        "http://www.ECbusinesst.bethatofab.ethepers.org",
        "ftp://s.edu",
        "http://www.andwaysensu.emanyusedc.ea.gov",
        "ftp://ftp.c.net",
    )
    GOPHER_POPULARPERV_SIVEQUALIT_GUIDELINES_FOFFILE_NSR_US_ORG_FTP_FTP_ANDINARE_DEV_LOP_XSLRECO_MENDATIONS_ANANDTH_ORG_HTTP_WWW_TOWORKFILTE_THATTHEFIL_ERBEFOUR_TH_OFEFFECTIV_LY_COM_MAILTO_CO_ANY_COMPUTINGMORE_COM_GOPHER_BOTH_DELAWAR_THEINCLUDE_ACKINGWITH_UTBESOF_ORG_FTP_SOFTWAREFRO_NEEDSANDAD_RESSINGWIT_OFINCLUDIN_THEI_EDU_FTP_OFANDDIVISI_NS_EXI_STCOMP_EDU = (
        "gopher://popularperv.sivequalit.guidelines.foffileNSR.Us.org",
        "ftp://ftp.andinareDev.lopXSLReco.mendations.anandth.org",
        "http://www.toworkfilte.thatthefil.erbefourTh.ofeffectiv.ly.com",
        "mailto:Co@anyComputingmore.com",
        "gopher://bothDelawar.Theinclude.ackingwith.utbesof.org",
        "ftp://softwarefro.needsandad.ressingwit.ofincludin.Thei.edu",
        "ftp://ofanddivisi.nsEXiSTcomp.edu",
    )
    HTTP_MANAGETHETH_OFDEFINEOF_AWCONSORTI_MIN_XMLREPO_ITO_GOV_MAILTO_DATAFILETOOLSANDRECENTISSU_COM_HTTP_WWW_EXECU_GOV_GOPHER_DOMINDUSTRYP_ORG_TELNET_FILESDEVELO_MENTFILEST_EBEPROCESS_SD_COM_TELNET_BEFILTERTHE_EDESIGNEDIN_ORG = (
        "http://managetheth.ofdefineof.awconsorti.minXMLrepo.ito.gov",
        "mailto:@datafiletoolsandrecentissu.com",
        "http://www.execu.gov",
        "gopher://DOMindustryp.org",
        "telnet://filesdevelo.mentfilest.ebeprocess.sd.com",
        "telnet://befilterthe.edesignedin.org",
    )
    GOPHER_WORKINGDEVI_ESANDAIDCO_SORTIUMS_XM_HELPINFORM_TION_COM_FTP_FTP_CREATESANDU_EDNEEDSMUS_NSRLOUROF_S_NSUREOFA_COM_HTTP_EMBEDDEDTHE_EINDUSTR_GOV_MAILTO_ONLY_TOTOTHES_GOV_FTP_FTP_FORFORTOTES_EXPERIMENT_LFILESTHE_I_FORM_GOV_TELNET_THEINTOATO_COM_HTTP_WWW_ANDTRANSACT_ONSANDAREA_TOENTERPRI_E_NET = (
        "gopher://workingdevi.esandaidco.sortiumsXM.helpinform.tion.com",
        "ftp://ftp.createsandu.edneedsmus.NSRLourofS.nsureofa.com",
        "http://embeddedthe.eindustr.gov",
        "mailto:Only@totothes.gov",
        "ftp://ftp.forfortotes.Experiment.lfilestheI.form.gov",
        "telnet://theintoato.com",
        "http://www.andtransact.onsandarea.toenterpri.e.net",
    )
class NistschemaSvIvListAnyUriEnumeration3:
class Meta:
name = "NISTSchema-SV-IV-list-anyURI-enumeration-3"
namespace = "NISTSchema-SV-IV-list-anyURI-enumeration-3-NS"
value: Optional[NistschemaSvIvListAnyUriEnumeration3Type] = field(
default=None,
metadata={
"required": True,
}
)
| 63.255102 | 388 | 0.743668 |
c52e45c4ed7ad4d18ed4cc979bee9fbdb2c9f3f5 | 2,859 | py | Python | config.py | shackle-he/NessusToReport | 74f9691bd1d6b5508f79f09eb14bef995a7bc46d | [
"Apache-2.0"
] | null | null | null | config.py | shackle-he/NessusToReport | 74f9691bd1d6b5508f79f09eb14bef995a7bc46d | [
"Apache-2.0"
] | null | null | null | config.py | shackle-he/NessusToReport | 74f9691bd1d6b5508f79f09eb14bef995a7bc46d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ------------------------------------------------------------
# File: config.py
# Created Date: 2020/6/24
# Created Time: 0:16
# Author: Hypdncy
# Author Mail: hypdncy@outlook.com
# Copyright (c) 2020 Hypdncy
# ------------------------------------------------------------
# .::::.
# .::::::::.
# :::::::::::
# ..:::::::::::'
# '::::::::::::'
# .::::::::::
# '::::::::::::::..
# ..::::::::::::.
# ``::::::::::::::::
# ::::``:::::::::' .:::.
# ::::' ':::::' .::::::::.
# .::::' :::: .:::::::'::::.
# .:::' ::::: .:::::::::' ':::::.
# .::' :::::.:::::::::' ':::::.
# .::' ::::::::::::::' ``::::.
# ...::: ::::::::::::' ``::.
# ````':. ':::::::::' ::::..
# '.:::::' ':'````..
# ------------------------------------------------------------
from datetime import datetime
config_data = {
"user": {
"name": "懂王", # 客户名称
"acronym": "DW", # 客户名字缩写
"contacts": "张三", # 客户联系人
"phone": "13838383838", # 手机号
}
}
datetime_cn = ""
# datetime_cn = datetime.strptime("2021-03-23 UTC +0800", "%Y-%m-%d %Z %z")
# 配置忽略的IP,通常为扫描IP段时自己的IP
nessus_ignore_ips = {
"1.1.1.1"
}
# 配置忽略的漏洞ID
nessus_ignore_ids = [
# 不支持版本漏洞ID
# Microsoft Windows 2000 Unsupported Installation Detection
"47709",
# Unsupported Windows OS (remote)
"108797",
# Microsoft Windows Server 2003 Unsupported Installation Detection
"84729",
# Microsoft Windows XP Unsupported Installation Detection
"73182",
# Microsoft SQL Server Unsupported Version Detection (remote check)
"73756",
# Unix Operating System Unsupported Version Detection
"33850",
# Unsupported Web Server Detection
"34460",
# Microsoft SQL Server Unsupported Version Detection (remote check)
"73756",
# Oracle WebLogic Unsupported Version Detection
"109345",
# Symantec pcAnywhere Unsupported
"57859",
# Microsoft IIS 6.0 Unsupported Version Detection
"97994",
]
# 自定义漏洞信息,ID:info
nessus_vuln_self = {
# 举个栗子
# "18405": {
# "name_en": "Microsoft Windows SMBv1 Multiple Vulnerabilities",
# "name_cn": "名字......",
# "risk_en": "High",
# "risk_cn": "高危",
# "describe_en": "The remote Windows ...",
# "describe_cn": "描述......",
# "solution_en": "Apply the applicable ...",
# "solution_cn": "影响......",
# "cve": "CVE-2017-0279"
# },
}
nessus_risk_self = {
"Critical": [],
"High": [],
"Medium": []
}
| 29.474227 | 75 | 0.403987 |
59e010c0e2a3ec226658631974b96423b7613efd | 18,593 | py | Python | lib/enthought/traits/trait_notifiers.py | mattfoster/matplotlib | 0b47697b19b77226c633ec6a3d74a2199a153315 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2016-05-08T18:33:12.000Z | 2016-05-08T18:33:12.000Z | lib/enthought/traits/trait_notifiers.py | mattfoster/matplotlib | 0b47697b19b77226c633ec6a3d74a2199a153315 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | lib/enthought/traits/trait_notifiers.py | mattfoster/matplotlib | 0b47697b19b77226c633ec6a3d74a2199a153315 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Original Date: 06/21/2002
#
# Symbols defined: TraitChangeNotifyWrapper
# UITraitChangeNotifyWrapper
# NewTraitChangeNotifyWrapper
# StaticAnyTraitChangeNotifyWrapper
# StaticTraitChangeNotifyWrapper
#
# Refactored into a separate module: 07/04/2003
#------------------------------------------------------------------------------
""" Defines the classes needed to implement and support the Traits change
notification mechanism.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import weakref
import traceback
import sys
try:
# Requires Python 2.4:
from threading import local as thread_local
except:
thread_local = lambda: {}
from threading \
import Thread
from thread \
import get_ident
from types \
import MethodType
from trait_errors \
import TraitNotificationError
#-------------------------------------------------------------------------------
# Global Data:
#-------------------------------------------------------------------------------
# The thread ID for the user interface thread (-1 until set_ui_handler() is
# called from the UI thread):
ui_thread = -1

# The handler for notifications that must be run on the UI thread (installed
# by set_ui_handler()):
ui_handler = None
#-------------------------------------------------------------------------------
# Sets up the user interface thread handler:
#-------------------------------------------------------------------------------
def set_ui_handler(handler):
    """Sets up the user interface thread handler.

    Installs *handler* as the module-level dispatcher for notifications that
    must run on the UI thread, and records the calling thread's id as the UI
    thread id.
    """
    global ui_handler, ui_thread

    ui_thread = get_ident()
    ui_handler = handler
#-------------------------------------------------------------------------------
# 'NotificationExceptionHandlerState' class:
#-------------------------------------------------------------------------------
class NotificationExceptionHandlerState(object):
    """One entry on the notification exception handler stack.

    Bundles the handler callable, whether exceptions are re-raised after the
    handler runs, and whether the stack is locked against further changes.
    """

    def __init__(self, handler, reraise_exceptions, locked):
        self.handler, self.reraise_exceptions, self.locked = (
            handler, reraise_exceptions, locked)
#-------------------------------------------------------------------------------
# 'NotificationExceptionHandler' class:
#-------------------------------------------------------------------------------
class NotificationExceptionHandler ( object ):
    """Maintains a per-thread stack of notification exception handlers.

    Each thread gets its own handler stack (via threading.local or, on the
    pre-2.4 fallback, a dict keyed by thread id); the top of the stack handles
    exceptions raised inside trait change notification handlers.
    """

    def __init__ ( self ):
        # Lazily created logger used by the default handler:
        self.traits_logger = None
        # Handler stack of the designated "main" thread (inherited by new
        # threads that have no stack of their own yet):
        self.main_thread = None
        # Per-thread storage; either a threading.local instance or (fallback)
        # a plain dict keyed by thread id:
        self.thread_local = thread_local()

#-- Private Methods ------------------------------------------------------------

    def _push_handler ( self, handler = None, reraise_exceptions = False,
                              main = False, locked = False ):
        """ Pushes a new traits notification exception handler onto the stack,
            making it the new exception handler. Returns a
            NotificationExceptionHandlerState object describing the previous
            exception handler.

            Parameters
            ----------
            handler : handler
                The new exception handler, which should be a callable or
                None. If None (the default), then the default traits
                notification exception handler is used. If *handler* is not
                None, then it must be a callable which can accept four
                arguments: object, trait_name, old_value, new_value.
            reraise_exceptions : Boolean
                Indicates whether exceptions should be reraised after the
                exception handler has executed. If True, exceptions will be
                re-raised after the specified handler has been executed.
                The default value is False.
            main : Boolean
                Indicates whether the caller represents the main application
                thread. If True, then the caller's exception handler is
                made the default handler for any other threads that are
                created. Note that a thread can explictly set its own exception
                handler if desired. The *main* flag is provided to make it
                easier to set a global application policy without having to
                explicitly set it for each thread. The default value is
                False.
            locked : Boolean
                Indicates whether further changes to the Traits notification
                exception handler state should be allowed. If True, then
                any subsequent calls to _push_handler() or _pop_handler() for
                that thread will raise a TraitNotificationError. The default
                value is False.
        """
        handlers = self._get_handlers()
        self._check_lock( handlers )
        if handler is None:
            handler = self._log_exception
        handlers.append( NotificationExceptionHandlerState( handler,
                                             reraise_exceptions, locked ) )
        if main:
            self.main_thread = handlers
        # Return the previous top-of-stack entry:
        return handlers[-2]

    def _pop_handler ( self ):
        """ Pops the traits notification exception handler stack, restoring
            the exception handler in effect prior to the most recent
            _push_handler() call. If the stack is empty or locked, a
            TraitNotificationError exception is raised.

            Note that each thread has its own independent stack. See the
            description of the _push_handler() method for more information on
            this.
        """
        handlers = self._get_handlers()
        self._check_lock( handlers )
        if len( handlers ) > 1:
            handlers.pop()
        else:
            raise TraitNotificationError(
                      'Attempted to pop an empty traits notification exception '
                      'handler stack.' )

    def _handle_exception ( self, object, trait_name, old, new ):
        """ Handles a traits notification exception using the handler defined
            by the topmost stack entry for the corresponding thread.
        """
        # Must be called from inside an 'except' block -- inspect the current
        # exception to decide whether to re-raise after handling:
        excp_class, excp = sys.exc_info()[:2]
        handler_info = self._get_handlers()[-1]
        handler_info.handler( object, trait_name, old, new )
        if (handler_info.reraise_exceptions or
            isinstance( excp, TraitNotificationError )):
            raise excp

    def _get_handlers ( self ):
        """ Returns the handler stack associated with the currently executing
            thread.
        """
        thread_local = self.thread_local
        if isinstance( thread_local, dict ):
            # Pre-threading.local fallback: key the dict by thread id.
            id = get_ident()
            handlers = thread_local.get( id )
        else:
            handlers = getattr( thread_local, 'handlers', None )
        if handlers is None:
            # First access from this thread: inherit the main thread's top
            # handler if one was designated, else fall back to logging.
            if self.main_thread is not None:
                handler = self.main_thread[-1]
            else:
                handler = NotificationExceptionHandlerState(
                              self._log_exception, False, False )
            handlers = [ handler ]
            if isinstance( thread_local, dict ):
                thread_local[ id ] = handlers
            else:
                thread_local.handlers = handlers
        return handlers

    def _check_lock ( self, handlers ):
        """ Raises an exception if the specified handler stack is locked.
        """
        if handlers[-1].locked:
            raise TraitNotificationError(
                      'The traits notification exception handler is locked. '
                      'No changes are allowed.' )

    #---------------------------------------------------------------------------
    #  This method defines the default notification exception handling
    #  behavior of traits. However, it can be completely overridden by pushing
    #  a new handler using the '_push_handler' method.
    #
    #  It logs any exceptions generated in a trait notification handler.
    #---------------------------------------------------------------------------

    def _log_exception ( self, object, trait_name, old, new ):
        """ Logs any exceptions generated in a trait notification handler.
        """
        # When the stack depth is too great, the logger can't always log the
        # message. Make sure that it goes to the console at a minimum:
        excp_class, excp = sys.exc_info()[:2]
        if ((excp_class is RuntimeError) and
            (excp.args[0] == 'maximum recursion depth exceeded')):
            sys.__stderr__.write( 'Exception occurred in traits notification '
                'handler for object: %s, trait: %s, old value: %s, '
                'new value: %s.\n%s\n' % ( object, trait_name, old, new,
                ''.join( traceback.format_exception( *sys.exc_info() ) ) ) )

        logger = self.traits_logger
        if logger is None:
            # Lazily set up a dedicated logger on first use:
            import logging
            self.traits_logger = logger = logging.getLogger(
                                              'enthought.traits' )
            handler = logging.StreamHandler()
            handler.setFormatter( logging.Formatter( '%(message)s' ) )
            logger.addHandler( handler )
            print ('Exception occurred in traits notification handler.\n'
                   'Please check the log file for details.')

        try:
            logger.exception(
                'Exception occurred in traits notification handler for '
                'object: %s, trait: %s, old value: %s, new value: %s' %
                ( object, trait_name, old, new ) )
        except Exception:
            # Ignore anything we can't log the above way:
            pass
#-------------------------------------------------------------------------------
# Traits global notification exception handler:
#-------------------------------------------------------------------------------
# Single process-wide handler instance; the bound methods below are the
# public module-level API for manipulating notification exception handling.
notification_exception_handler = NotificationExceptionHandler()

push_exception_handler = notification_exception_handler._push_handler
pop_exception_handler  = notification_exception_handler._pop_handler
handle_exception       = notification_exception_handler._handle_exception
#-------------------------------------------------------------------------------
# 'StaticAnyTraitChangeNotifyWrapper' class:
#-------------------------------------------------------------------------------
class StaticAnyTraitChangeNotifyWrapper:
    """Wraps a statically-defined 'anytrait' change handler.

    The handler's arity (0-4, via func_code.co_argcount -- a Python 2 API)
    selects one of the call_N methods at construction time; the chosen method
    is installed as the instance's __call__, so the method names are part of
    the runtime contract. The argument convention here passes
    (object, trait_name, old, new) progressively.
    """

    def __init__ ( self, handler ):
        self.handler  = handler
        # Dispatch on arity: pick call_0 .. call_4 by the handler's
        # positional argument count.
        self.__call__ = getattr( self, 'call_%d' %
                                 handler.func_code.co_argcount )

    def equals ( self, handler ):
        # Static wrappers are never considered equal to any handler.
        return False

    def call_0 ( self, object, trait_name, old, new ):
        try:
            self.handler()
        except:
            handle_exception( object, trait_name, old, new )

    def call_1 ( self, object, trait_name, old, new ):
        try:
            self.handler( object )
        except:
            handle_exception( object, trait_name, old, new )

    def call_2 ( self, object, trait_name, old, new ):
        try:
            self.handler( object, trait_name )
        except:
            handle_exception( object, trait_name, old, new )

    def call_3 ( self, object, trait_name, old, new ):
        try:
            self.handler( object, trait_name, new )
        except:
            handle_exception( object, trait_name, old, new )

    def call_4 ( self, object, trait_name, old, new ):
        try:
            self.handler( object, trait_name, old, new )
        except:
            handle_exception( object, trait_name, old, new )
#-------------------------------------------------------------------------------
# 'StaticTraitChangeNotifyWrapper' class:
#-------------------------------------------------------------------------------
class StaticTraitChangeNotifyWrapper:
    """Wraps a statically-defined single-trait change handler.

    Same arity-based dispatch as StaticAnyTraitChangeNotifyWrapper, but the
    argument convention differs: 2-arg handlers get (object, new) and 3-arg
    handlers get (object, old, new) -- no trait name until the 4-arg form.
    """

    def __init__ ( self, handler ):
        self.handler  = handler
        # Dispatch on arity (Python 2 func_code API):
        self.__call__ = getattr( self, 'call_%d' %
                                 handler.func_code.co_argcount )

    def equals ( self, handler ):
        # Static wrappers are never considered equal to any handler.
        return False

    def call_0 ( self, object, trait_name, old, new ):
        try:
            self.handler()
        except:
            handle_exception( object, trait_name, old, new )

    def call_1 ( self, object, trait_name, old, new ):
        try:
            self.handler( object )
        except:
            handle_exception( object, trait_name, old, new )

    def call_2 ( self, object, trait_name, old, new ):
        try:
            self.handler( object, new )
        except:
            handle_exception( object, trait_name, old, new )

    def call_3 ( self, object, trait_name, old, new ):
        try:
            self.handler( object, old, new )
        except:
            handle_exception( object, trait_name, old, new )

    def call_4 ( self, object, trait_name, old, new ):
        try:
            self.handler( object, trait_name, old, new )
        except:
            handle_exception( object, trait_name, old, new )
#-------------------------------------------------------------------------------
# 'TraitChangeNotifyWrapper' class:
#-------------------------------------------------------------------------------
class TraitChangeNotifyWrapper:
    """Wraps a dynamically-registered trait change handler.

    Bound methods are held via a weak reference to their instance (so a
    listener does not keep its object alive) and re-bound at call time
    through the rebind_call_N methods; plain callables dispatch through
    call_N. As in the static wrappers, the handler's arity selects the
    method installed as __call__ (Python 2 im_func/im_self/func_code APIs).
    """

    def __init__ ( self, handler, owner ):
        func = handler
        if type( handler ) is MethodType:
            func = handler.im_func
            object = handler.im_self
            if object is not None:
                # Bound method: keep only a weak ref to the instance and
                # look the method up again by name on each dispatch.
                self.object = weakref.ref( object, self.listener_deleted )
                self.name = handler.__name__
                self.owner = owner
                # -1 because 'self' does not count toward the user arity:
                self.__call__ = getattr( self, 'rebind_call_%d' %
                                         (func.func_code.co_argcount - 1) )
                return
        # Plain function (or unbound method): store it directly.
        self.name = None
        self.handler = handler
        self.__call__ = getattr( self, 'call_%d' %
                                 handler.func_code.co_argcount )

    # NOTE: This method is normally the only one that needs to be overridden in
    # a subclass to implement the subclass's dispatch mechanism:
    def dispatch ( self, handler, *args ):
        handler( *args )

    def equals ( self, handler ):
        """ Returns True if this wrapper wraps *handler* (identity for plain
            callables; name + instance identity for bound methods).
        """
        if handler is self:
            return True
        if (type( handler ) is MethodType) and (handler.im_self is not None):
            return ((handler.__name__ == self.name) and
                    (handler.im_self is self.object()))
        return ((self.name is None) and (handler == self.handler))

    def listener_deleted ( self, ref ):
        # Weakref callback: the listener instance died; deregister ourselves.
        self.owner.remove( self )
        self.object = self.owner = None

    def dispose ( self ):
        # Drop the weak reference so future dispatches are inert.
        self.object = None

    def call_0 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( self.handler )
        except:
            handle_exception( object, trait_name, old, new )

    def call_1 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( self.handler, new )
        except:
            handle_exception( object, trait_name, old, new )

    def call_2 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( self.handler, trait_name, new )
        except:
            handle_exception( object, trait_name, old, new )

    def call_3 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( self.handler, object, trait_name, new )
        except:
            handle_exception( object, trait_name, old, new )

    def call_4 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( self.handler, object, trait_name, old, new )
        except:
            handle_exception( object, trait_name, old, new )

    def rebind_call_0 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( getattr( self.object(), self.name ) )
        except:
            handle_exception( object, trait_name, old, new )

    def rebind_call_1 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( getattr( self.object(), self.name ), new )
        except:
            handle_exception( object, trait_name, old, new )

    def rebind_call_2 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( getattr( self.object(), self.name ),
                           trait_name, new )
        except:
            handle_exception( object, trait_name, old, new )

    def rebind_call_3 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( getattr( self.object(), self.name ),
                           object, trait_name, new )
        except:
            handle_exception( object, trait_name, old, new )

    def rebind_call_4 ( self, object, trait_name, old, new ):
        try:
            self.dispatch( getattr( self.object(), self.name ),
                           object, trait_name, old, new )
        except:
            handle_exception( object, trait_name, old, new )
#-------------------------------------------------------------------------------
# 'UITraitChangeNotifyUIWrapper' class:
#-------------------------------------------------------------------------------
class UITraitChangeNotifyWrapper ( TraitChangeNotifyWrapper ):
    """Notification wrapper that marshals handler calls onto the UI thread.

    On the UI thread the handler runs immediately; from any other thread
    the call is forwarded through ui_handler(), which is responsible for
    executing it on the UI thread.
    """
    def dispatch ( self, handler, *args ):
        # Guard clause: off the UI thread, delegate and return early.
        if get_ident() != ui_thread:
            ui_handler( handler, *args )
            return
        handler( *args )
#-------------------------------------------------------------------------------
# 'NewTraitChangeNotifyWrapper' class:
#-------------------------------------------------------------------------------
class NewTraitChangeNotifyWrapper ( TraitChangeNotifyWrapper ):
    """Notification wrapper that runs each handler on a fresh thread.

    dispatch() returns immediately; the handler executes asynchronously,
    so callers must not rely on it having completed.
    """
    def dispatch ( self, handler, *args ):
        worker = Thread( target = handler, args = args )
        worker.start()
| 39.225738 | 80 | 0.521971 |
fea26ae8833faf495baa3e4f4cd7694fd6a8a782 | 6,115 | py | Python | trainingbar/bar.py | trisongz/trainingbar | e33923d7314a8bc893bfb141519ec980df8c6778 | [
"MIT"
] | null | null | null | trainingbar/bar.py | trisongz/trainingbar | e33923d7314a8bc893bfb141519ec980df8c6778 | [
"MIT"
] | null | null | null | trainingbar/bar.py | trisongz/trainingbar | e33923d7314a8bc893bfb141519ec980df8c6778 | [
"MIT"
] | null | null | null | import os
import sys
import time
from threading import Thread, Lock
from trainingbar import env, auths
from trainingbar.logger import get_logger
from trainingbar.handlers.host import config_host, HostMonitor
from trainingbar.config.styles import configure_trainingbars
from trainingbar.utils import _timer_formats
logger = get_logger()
class TrainingBar:
    """Live resource monitor rendering progress-bar gauges for host
    resources (CPU, RAM, disk) and, when detected, an XLA accelerator
    (GPU or TPU) while a training job runs.

    Parameters
    ----------
    refresh_secs : int
        Seconds between updates when running as a background daemon.
    disabled : list or None
        Subset of ['cpu', 'ram', 'disk', 'gpu', 'tpu'] to turn off.
    xla : str or None
        'auto' to autodetect accelerators; None disables XLA monitoring.
    xla_params : dict or None
        Extra configuration forwarded to ``config_host``.
    authenticate : bool
        Whether to authenticate against cloud services during host setup.
    disk_path : str
        Mount point whose usage is reported by the disk gauge.
    reinit : bool
        Force re-initialisation of the host configuration.
    daemon : bool
        When True, spawn a daemon thread that refreshes the gauges every
        ``refresh_secs`` seconds until :meth:`stop` is called.
    """

    def __init__(self, refresh_secs=10, disabled=None, xla='auto', xla_params=None, authenticate=True, disk_path='/', reinit=False, daemon=False):
        self.enabled = ['cpu', 'ram', 'disk']
        self.refresh_secs = refresh_secs
        self.bg_run = daemon
        self.time = time.time()
        self.hooks = {}
        if disabled:
            self.enabled = [e for e in self.enabled if e not in disabled]
            # Disabling either accelerator implies no XLA autodetection.
            if ('gpu' in disabled or 'tpu' in disabled) and xla == 'auto':
                xla = None
        self.host = config_host(xla, xla_params, authenticate, disk_path, reinit)
        self.enabled_xla = None
        if xla:
            if self.host['xla'].get('gpus', None):
                self.enabled_xla = 'gpu'
            elif self.host['xla'].get('tpu_name', None):
                self.enabled_xla = 'tpu'
            # BUG FIX: only register an accelerator gauge when one was
            # actually detected; the original could append None to
            # self.enabled when xla was truthy but nothing was found.
            if self.enabled_xla:
                self.enabled.append(self.enabled_xla)
        self.bars, self.ops = configure_trainingbars(self.host, self.enabled)
        self.started, self.stopped = False, False
        self._lock = Lock()
        if self.bg_run:
            _bg = Thread(target=self.background, daemon=True)
            _bg.start()

    def background(self):
        """Daemon loop: refresh all gauges every ``refresh_secs`` seconds
        until :meth:`stop` sets ``self.stopped``."""
        self.start()
        while not self.stopped:
            with self._lock:
                self.update()
            time.sleep(self.refresh_secs)

    def update(self):
        """Poll every handler once, push the fresh readings into the
        progress bars, then fire the registered hooks with the stats."""
        if not self.started:
            self.start()
        self.all_stats['host'] = self.handlers['host'].stats()
        if 'cpu' in self.enabled:
            self.bars.update(self.ops['cpu'], completed=self.all_stats['host']['cpu_util'])
            self.all_stats['cpu']['cpu_util'] = self.all_stats['host'].pop('cpu_util')
        if 'disk' in self.enabled:
            self.bars.update(self.ops['disk'], completed=self.all_stats['host']['disk_used'])
            for d in ['disk_total', 'disk_used', 'disk_util']:
                self.all_stats['disk'][d] = self.all_stats['host'].pop(d)
        if 'ram' in self.enabled:
            self.bars.update(self.ops['ram'], completed=self.all_stats['host']['ram_used'])
            for r in ['ram_total', 'ram_used', 'ram_util']:
                self.all_stats['ram'][r] = self.all_stats['host'].pop(r)
        self.idx += 1
        if self.enabled_xla:
            self.all_stats[self.enabled_xla] = self.handlers[self.enabled_xla].stats()
            if self.enabled_xla == 'gpu':
                for gpu in self.all_stats['gpu']:
                    self.bars.update(self.ops['gpu'][gpu], completed=self.all_stats['gpu'][gpu].get('vram_used', 0))
            elif self.enabled_xla == 'tpu':
                self.bars.update(self.ops['tpu']['tpu_mxu'], completed=int(self.all_stats['tpu'].get('tpu_mxu_util', 0)))
                self.bars.update(self.ops['tpu']['tpu_memory'], completed=int(self.all_stats['tpu'].get('tpu_mem_used', 0)), total=int(self.all_stats['tpu'].get('tpu_mem_total', 0)))
        self.fire_hooks(self.all_stats)

    def stats(self):
        """Return the most recent readings, keyed by resource name."""
        return self.all_stats

    def stop(self):
        """Stop the bars and all handlers and signal the background thread
        (if any) to exit."""
        # BUG FIX: the original never set `stopped`, so the daemon thread
        # started in __init__ could never terminate.
        self.stopped = True
        self.bars.stop()
        for op in self.handlers:
            self.handlers[op].stop()

    def start(self):
        """Reset counters and per-resource stat dicts, build the handlers,
        then start rendering the bars."""
        self.idx = 0
        self.all_stats = {x: {} for x in self.enabled}
        self.configure_handlers()
        self.bars.start()
        self.started = True

    def configure_handlers(self):
        """Instantiate the host monitor plus the GPU/TPU monitor matching
        the detected accelerator (accelerator modules imported lazily)."""
        self.handlers = {}
        self.handlers['host'] = HostMonitor(self.client, self.enabled, self.refresh_secs, self.bg_run)
        if self.enabled_xla == 'tpu':
            from trainingbar.handlers.tpu import TPUMonitor
            self.handlers['tpu'] = TPUMonitor(self.client, self.refresh_secs, self.bg_run)
        elif self.enabled_xla == 'gpu':
            from trainingbar.handlers.gpu import GPUMonitor
            self.handlers['gpu'] = GPUMonitor(self.client, self.refresh_secs, self.bg_run)

    def client(self, config=False, ops=None, **args):
        """Callback handed to the monitors: return the host config when
        ``config`` is True, or this bar's logger when ``ops == 'logger'``."""
        if config:
            return self.host
        if ops == 'logger':
            return self.log

    def add_hook(self, name, hook, freq=10):
        """Register callable *hook* to be invoked with the stats dict once
        every *freq* updates."""
        self.hooks[name] = {'freq': freq, 'function': hook}
        self.log(f'Added new hook {name}. Will call hook once every {freq} updates.')

    def rm_hook(self, name):
        """Deregister the hook named *name* (logs a message when absent)."""
        if self.hooks.get(name, None):
            _ = self.hooks.pop(name)
            self.log(f'Removing hook {name}')
        else:
            self.log(f'Hook {name} not found')

    def fire_hooks(self, message, force=False, *args, **kwargs):
        """Call each registered hook whose frequency divides the current
        update index; ``force=True`` fires every hook unconditionally."""
        if self.hooks:
            for hook_name in self.hooks:
                hook = self.hooks[hook_name]
                if self.idx % hook['freq'] == 0 or force:
                    # BUG FIX: hooks are stored under the 'function' key in
                    # add_hook(); the original indexed a nonexistent 'func'
                    # key and raised KeyError whenever a hook fired.
                    hook['function'](message, *args, **kwargs)

    def create_timeout_hook(self, hook, device='auto', *args):
        """Attach a timeout hook to the accelerator monitor ('auto' targets
        whichever accelerator was detected)."""
        if device == 'auto' and self.enabled_xla:
            self.handlers[self.enabled_xla].create_timeout_hook(hook=hook, *args)
        elif device == self.enabled_xla:
            self.handlers[device].create_timeout_hook(hook=hook, *args)

    def log(self, message):
        """Log *message* (stringified if necessary) followed by a rule."""
        if not isinstance(message, str):
            message = str(message)
        message = message + '\n' + ('------' * 15)
        logger.info(message)

    def get_time(self, fmt='mins'):
        """Return elapsed time since construction, converted to *fmt*
        (an alias listed in `_timer_formats`: wks/days/hrs/mins)."""
        _stoptime = time.time()
        total_time = _stoptime - self.time
        if fmt in _timer_formats['wks']:
            total_time /= 604800
        elif fmt in _timer_formats['days']:
            total_time /= 86400
        elif fmt in _timer_formats['hrs']:
            total_time /= 3600
        elif fmt in _timer_formats['mins']:
            total_time /= 60
        return total_time

    def __exit__(self, *_):
        self.stop()

    def __enter__(self):
        return self
| 38.702532 | 182 | 0.589534 |
683f42651579fd37523de268f8831ec16749cd2a | 3,362 | py | Python | op/fused_act.py | morzh/stylegan2-pytorch | afb60a3c5c4ab902f38c510d24eb4f3d1b827454 | [
"Apache-2.0",
"MIT"
] | null | null | null | op/fused_act.py | morzh/stylegan2-pytorch | afb60a3c5c4ab902f38c510d24eb4f3d1b827454 | [
"Apache-2.0",
"MIT"
] | null | null | null | op/fused_act.py | morzh/stylegan2-pytorch | afb60a3c5c4ab902f38c510d24eb4f3d1b827454 | [
"Apache-2.0",
"MIT"
] | null | null | null | import os
import torch
from torch import nn
from torch.nn import functional as F
# from torch.autograd import Function
# from torch.utils.cpp_extension import load
'''
module_path = os.path.dirname(__file__)
fused = load(
"fused",
sources=[
os.path.join(module_path, "fused_bias_act.cpp"),
os.path.join(module_path, "fused_bias_act_kernel.cu"),
],
)
'''
'''
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, bias, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(
grad_output, empty, out, 3, 1, negative_slope, scale
)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
if bias:
grad_bias = grad_input.sum(dim).detach()
else:
grad_bias = empty
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(
gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
)
return gradgrad_out, None, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale
)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
'''
class FusedLeakyReLU(nn.Module):
    """Leaky-ReLU activation with an optional learnable per-channel bias
    and a constant output gain, mirroring the fused StyleGAN2 op.

    Parameters
    ----------
    channel : int
        Number of channels; sets the size of the bias vector.
    bias : bool
        When True, register a zero-initialised learnable bias.
    negative_slope : float
        Slope of the activation for negative inputs.
    scale : float
        Constant multiplier applied to the activation output.
    """

    def __init__(self, channel, bias=True, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()
        # Register a learnable, zero-initialised bias only when requested.
        self.bias = nn.Parameter(torch.zeros(channel)) if bias else None
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        # Delegate to the functional form defined alongside this class.
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
    """Apply optional bias, leaky ReLU and a constant gain in one step.

    CPU-only fallback for the original fused CUDA kernel: on CPU tensors
    the op is computed with ``F.leaky_relu``; on any other device the CUDA
    extension is unavailable in this build, so the input is returned
    unchanged after a warning.

    Parameters
    ----------
    input : torch.Tensor
        Input of shape (N, C, ...) when `bias` is given.
    bias : torch.Tensor or None
        Per-channel bias of shape (C,), broadcast over trailing dims.
    negative_slope : float
        Slope of the activation for negative values.
    scale : float
        Constant multiplier applied after the activation.
    """
    if input.device.type == "cpu":
        if bias is not None:
            # Reshape the (C,) bias to (1, C, 1, ...) so it broadcasts
            # across the batch and all trailing spatial dimensions.
            rest_dim = [1] * (input.ndim - bias.ndim - 1)
            return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale
        else:
            # BUG FIX: the slope was hard-coded to 0.2 in this branch,
            # silently ignoring the caller-supplied `negative_slope`.
            return F.leaky_relu(input, negative_slope=negative_slope) * scale
    else:
        # CUDA kernel support has been stripped from this build: warn and
        # pass the input through unchanged (original behavior preserved).
        print('cpu only version')
        return input
| 29.234783 | 119 | 0.608269 |
ef700e08b8631cf4f5d03872e7a2e1c13a5f31f4 | 50,478 | py | Python | shwirl/shaders/render_volume.py | macrocosme/shwirl | 87147ba1e99463e96b7f4295fd24ab57440d9981 | [
"BSD-3-Clause"
] | 3 | 2018-05-09T17:55:53.000Z | 2019-07-22T09:14:41.000Z | shwirl/shaders/render_volume.py | macrocosme/shwirl | 87147ba1e99463e96b7f4295fd24ab57440d9981 | [
"BSD-3-Clause"
] | 9 | 2017-04-07T01:44:15.000Z | 2018-12-16T20:47:08.000Z | shwirl/shaders/render_volume.py | macrocosme/shwirl | 87147ba1e99463e96b7f4295fd24ab57440d9981 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division
# This file implements a RenderVolumeVisual class. It is derived from the
# VolumeVisual class in vispy.visuals.volume, which is released under a BSD
# license included here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the (new) BSD license:
#
# Copyright (c) 2015, Dany Vohl
# All rights reserved.
#
# A copy of the license is available in the root directory of this project.
#
from ..extern.vispy.gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from ..extern.vispy.visuals import Visual
from ..extern.vispy.visuals.shaders import Function
from ..extern.vispy.color import get_colormap
from ..extern.vispy.scene.visuals import create_visual_node
from ..extern.vispy.io import load_spatial_filters
import numpy as np
# Vertex shader
VERT_SHADER = """
attribute vec3 a_position;
// attribute vec3 a_texcoord;
uniform vec3 u_shape;
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
void main() {
// v_texcoord = a_texcoord;
v_position = a_position;
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) and project back. Voila, we get our ray vector.
vec4 pos_in_cam = $viewtransformf(vec4(v_position, 1));
// intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = $viewtransformi(pos_in_cam);
// intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = $viewtransformi(pos_in_cam);
gl_Position = $transform(vec4(v_position, 1.0));
}
""" # noqa
# Fragment shader
FRAG_SHADER = """
// uniforms
uniform $sampler_type u_volumetex;
uniform vec3 u_shape;
uniform vec3 u_resolution;
uniform float u_threshold;
uniform float u_relative_step_size;
//uniform int u_color_scale;
//uniform float u_data_min;
//uniform float u_data_max;
// Moving box filter variables
uniform int u_filter_size;
uniform float u_filter_coeff;
uniform int u_filter_arm;
uniform int u_filter_type;
uniform int u_use_gaussian_filter;
uniform int u_gaussian_filter_size;
//uniform int u_log_scale;
// Volume Stats
uniform float u_volume_mean;
uniform float u_volume_std;
//uniform float u_volume_madfm;
uniform float u_high_discard_filter_value;
uniform float u_low_discard_filter_value;
uniform float u_density_factor;
uniform int u_color_method;
//varyings
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
float rand(vec2 co)
{{
// Create a pseudo-random number between 0 and 1.
// http://stackoverflow.com/questions/4200224
return fract(sin(dot(co.xy ,vec2(12.9898, 78.233))) * 43758.5453);
}}
float colorToVal(vec4 color1)
{{
return color1.g;
}}
vec4 movingAverageFilter_line_of_sight(vec3 loc, vec3 step)
{{
// Initialise variables
vec4 partial_color = vec4(0.0, 0.0, 0.0, 0.0);
for ( int i=1; i<=u_filter_arm; i++ )
{{
partial_color += $sample(u_volumetex, loc-i*step);
partial_color += $sample(u_volumetex, loc+i*step);
}}
partial_color += $sample(u_volumetex, loc);
// Evaluate mean
partial_color *= u_filter_coeff;
return partial_color;
}}
vec4 Gaussian_5(vec4 color_original, vec3 loc, vec3 direction) {{
vec4 color = vec4(0.0);
vec3 off1 = 1.3333333333333333 * direction;
color += color_original * 0.29411764705882354;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.35294117647058826;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.35294117647058826;
return color;
}}
vec4 Gaussian_9(vec4 color_original, vec3 loc, vec3 direction)
{{
vec4 color = vec4(0.0);
vec3 off1 = 1.3846153846 * direction;
vec3 off2 = 3.2307692308 * direction;
color += color_original * 0.2270270270;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.3162162162;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.3162162162;
color += $sample(u_volumetex, loc + (off2 * u_resolution)) * 0.0702702703;
color += $sample(u_volumetex, loc - (off2 * u_resolution)) * 0.0702702703;
return color;
}}
vec4 Gaussian_13(vec4 color_original, vec3 loc, vec3 direction) {{
vec4 color = vec4(0.0);
vec3 off1 = 1.411764705882353 * direction;
vec3 off2 = 3.2941176470588234 * direction;
vec3 off3 = 5.176470588235294 * direction;
color += color_original * 0.1964825501511404;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.2969069646728344;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.2969069646728344;
color += $sample(u_volumetex, loc + (off2 * u_resolution)) * 0.09447039785044732;
color += $sample(u_volumetex, loc - (off2 * u_resolution)) * 0.09447039785044732;
color += $sample(u_volumetex, loc + (off3 * u_resolution)) * 0.010381362401148057;
color += $sample(u_volumetex, loc - (off3 * u_resolution)) * 0.010381362401148057;
return color;
}}
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Edge detection Pass
// (adapted from https://www.shadertoy.com/view/MscSzf#)
// ----------------------------------------------------------------
float checkSame(vec4 center, vec4 sample, vec3 resolution) {{
vec2 centerNormal = center.xy;
float centerDepth = center.z;
vec2 sampleNormal = sample.xy;
float sampleDepth = sample.z;
vec2 sensitivity = (vec2(0.3, 1.5) * resolution.y / 50.0);
vec2 diffNormal = abs(centerNormal - sampleNormal) * sensitivity.x;
bool isSameNormal = (diffNormal.x + diffNormal.y) < 0.1;
float diffDepth = abs(centerDepth - sampleDepth) * sensitivity.y;
bool isSameDepth = diffDepth < 0.1;
return (isSameNormal && isSameDepth) ? 1.0 : 0.0;
}}
vec4 edge_detection(vec4 color_original, vec3 loc, vec3 step, vec3 resolution) {{
vec4 sample1 = $sample(u_volumetex, loc + (vec3(1., 1., 0.) / resolution));
vec4 sample2 = $sample(u_volumetex, loc + (vec3(-1., -1., 0.) / resolution));
vec4 sample3 = $sample(u_volumetex, loc + (vec3(-1., 1., 0.) / resolution));
vec4 sample4 = $sample(u_volumetex, loc + (vec3(1., -1., 0.) / resolution));
float edge = checkSame(sample1, sample2, resolution) *
checkSame(sample3, sample4, resolution);
return vec4(color_original.rgb, 1-edge);
}}
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Used with iso surface
vec4 calculateColor(vec4 betterColor, vec3 loc, vec3 step)
{{
// Calculate color by incorporating lighting
vec4 color1;
vec4 color2;
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N; // normal
color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
N[0] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
N[1] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
N[2] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N,V) > 0.0);
N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
// Get color of the texture (albeido)
color1 = betterColor;
color2 = color1;
// todo: parametrise color1_to_color2
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 final_color;
// todo: allow multiple light, define lights on viewvox or subscene
int nlights = 1;
for (int i=0; i<nlights; i++)
{{
// Get light direction (make sure to prevent zero devision)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L+(1.0-lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow( max(dot(H,N),0.0), u_shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * u_specular;
}}
// Calculate final color by componing different components
final_color = color2 * ( ambient_color + diffuse_color) + specular_color;
final_color.a = color2.a;
// Done
return final_color;
}}
// for some reason, this has to be the last function in order for the
// filters to be inserted in the correct place...
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
//distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
// (u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_relative_step_size + 0.5);
if( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//gl_FragColor = vec4(0.0, nsteps / 3.0 / u_shape.x, 1.0, 1.0);
//return;
{before_loop}
vec3 loc = start_loc;
int iter = 0;
float discard_ratio = 1.0 / (u_high_discard_filter_value - u_low_discard_filter_value);
float low_discard_ratio = 1.0 / u_low_discard_filter_value;
for (iter=0; iter<nsteps; iter++)
{{
// Get sample color
vec4 color;
if (u_filter_size == 1)
color = $sample(u_volumetex, loc);
else {{
color = movingAverageFilter_line_of_sight(loc, step);
}}
if (u_use_gaussian_filter==1) {{
vec4 temp_color;
vec3 direction;
if (u_gaussian_filter_size == 5){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_5(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_5(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_5(temp_color, loc, direction);
}}
if (u_gaussian_filter_size == 9){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_9(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_9(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_9(temp_color, loc, direction);
}}
if (u_gaussian_filter_size == 13){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_13(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_13(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_13(temp_color, loc, direction);
}}
color = temp_color;
}}
float val = color.g;
// To force activating the uniform - this should be done differently
float density_factor = u_density_factor;
if (u_filter_type == 1) {{
// Get rid of very strong signal values
if (val > u_high_discard_filter_value)
{{
val = 0.;
}}
// Don't consider noisy values
//if (val < u_volume_mean - 3*u_volume_std)
if (val < u_low_discard_filter_value)
{{
val = 0.;
}}
if (u_low_discard_filter_value == u_high_discard_filter_value)
{{
if (u_low_discard_filter_value != 0.)
{{
val *= low_discard_ratio;
}}
}}
else {{
val -= u_low_discard_filter_value;
val *= discard_ratio;
}}
}}
else {{
if (val > u_high_discard_filter_value)
{{
val = 0.;
}}
if (val < u_low_discard_filter_value)
{{
val = 0.;
}}
}}
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
{after_loop}
//gl_FragColor = edge_detection(gl_FragColor, loc, step, u_shape);
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
""" # noqa
MIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
int maxi = 0; // Where the maximum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
// Refine search for max value
loc = start_loc + step * (float(maxi) - 0.5);
for (int i=0; i<10; i++) {
maxval = max(maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (maxval > u_high_discard_filter_value || maxval < u_low_discard_filter_value)
{{
maxval = 0.;
}}
// Color is associated to voxel intensity
// Moment 0
if (u_color_method == 0) {
gl_FragColor = $cmap(maxval);
}
// Moment 1
else if (u_color_method == 1) {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = maxval;
}
// Color is associated to RGB cube
else if (u_color_method == 2) {
gl_FragColor.r = loc.y;
gl_FragColor.g = loc.z;
gl_FragColor.b = loc.x;
gl_FragColor.a = maxval;
}
// Color by sigma values
else if (u_color_method == 3) {
if ( (maxval < (u_volume_mean + (3.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 0., 1., maxval);
}
// < 3 sigmas
if ( (maxval >= (u_volume_mean + (3.0 * u_volume_std))) &&
(maxval < (u_volume_mean + (4.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 1., 0., maxval);
}
if ( (maxval >= (u_volume_mean + (4.0 * u_volume_std))) &&
(maxval < (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 0., 0., maxval);
}
if ( (maxval >= (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 1., 1., maxval);
}
}
else {
// Moment 2
// TODO: verify implementation of MIP-mom2.
gl_FragColor = $cmap((maxval * ((maxval - loc.y) * (maxval - loc.y))) / maxval);
}
""",
)
MIP_FRAG_SHADER = FRAG_SHADER.format(**MIP_SNIPPETS)
LMIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
float local_maxval = -99999.0; // The local maximum encountered value
int maxi = 0; // Where the maximum value was encountered
int local_maxi = 0; // Where the local maximum value was encountered
bool local_max_found = false;
""",
in_loop="""
if( val > u_threshold && !local_max_found ) {
local_maxval = val;
local_maxi = iter;
local_max_found = true;
}
if( val > maxval) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
if (!local_max_found) {
local_maxval = maxval;
local_maxi = maxi;
}
// Refine search for max value
loc = start_loc + step * (float(local_maxi) - 0.5);
for (int i=0; i<10; i++) {
local_maxval = max(local_maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (local_maxval > u_high_discard_filter_value) {
local_maxval = 0.;
}
if (local_maxval < u_low_discard_filter_value) {
local_maxval = 0.;
}
// Color is associated to voxel intensity
if (u_color_method == 0) {
gl_FragColor = $cmap(local_maxval);
gl_FragColor.a = local_maxval;
}
// Color is associated to redshift/velocity
else {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = local_maxval;
}
""",
)
LMIP_FRAG_SHADER = FRAG_SHADER.format(**LMIP_SNIPPETS)
TRANSLUCENT_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
float mom0 = 0.;
float mom1 = 0.;
float ratio = 1/nsteps; // final average
float a1 = 0.;
float a2 = 0.;
""",
in_loop="""
float alpha;
// Case 1: Color is associated to voxel intensity
if (u_color_method == 0) {
/*color = $cmap(val);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;*/
color = $cmap(val);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;
}
else{
// Case 2: Color is associated to redshift/velocity
if (u_color_method == 1) {
color = $cmap(loc.y);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 3: Color is associated to RGB cube
else {
if (u_color_method == 2){
color.r = loc.y;
color.g = loc.z;
color.b = loc.x;
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 4: Mom2
// TODO: Finish implementation of mom2 (not correct in its present form).
else {
// mom0
a1 = mom0;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
mom0 *= a1 / alpha;
mom0 += val * a2 / alpha;
// mom1
a1 = mom1;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
mom1 *= a1 / alpha;
mom1 += loc.y * a2 / alpha;
}
}
}
integrated_color.a = alpha;
// stop integrating if the fragment becomes opaque
if( alpha > 0.99 ){
iter = nsteps;
}
""",
after_loop="""
if (u_color_method != 3){
gl_FragColor = integrated_color;
}
else {
gl_FragColor = $cmap((mom0 * (mom0-mom1 * mom0-mom1)) / mom0);
}
""",
)
TRANSLUCENT_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT_SNIPPETS)
TRANSLUCENT2_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
float ratio = 1/nsteps; // final average
""",
in_loop="""
float alpha;
// Case 1: Color is associated to voxel intensity
if (u_color_method == 0) {
color = $cmap(val);
integrated_color = (val * density_factor + integrated_color.a * (1 - density_factor)) * color;
alpha = integrated_color.a;
//alpha = a1+a2;
// integrated_color *= a1 / alpha;
// integrated_color += color * a2 / alpha;
}
else{
// Case 2: Color is associated to redshift/velocity
if (u_color_method == 1) {
color = $cmap(loc.y);
float a1 = integrated_color.a;
float a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 3: Color is associated to RGB cube
else {
color.r = loc.x;
color.g = loc.z;
color.b = loc.y;
float a1 = integrated_color.a;
float a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
}
integrated_color.a = alpha;
// stop integrating if the fragment becomes opaque
if( alpha > 0.99 ){
iter = nsteps;
}
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
TRANSLUCENT2_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT2_SNIPPETS)
ADDITIVE_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
integrated_color = 1.0 - (1.0 - integrated_color) * (1.0 - color);
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
ADDITIVE_FRAG_SHADER = FRAG_SHADER.format(**ADDITIVE_SNIPPETS)
ISO_SNIPPETS = dict(
before_loop="""
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_shape; // step to sample derivative
gl_FragColor = vec4(0.0);
""",
in_loop="""
if (val > u_threshold-0.2) {
// Take the last interval in smaller steps
vec3 iloc = loc - step;
for (int i=0; i<10; i++) {
val = $sample(u_volumetex, iloc).g;
if (val > u_threshold) {
color = $cmap(val);
gl_FragColor = calculateColor(color, iloc, dstep);
iter = nsteps;
break;
}
iloc += step * 0.1;
}
}
""",
after_loop="""
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
MINIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // maximum encountered
float minval = 99999.0; // The minimum encountered value
int mini = 0; // Where the minimum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
}
if( val < minval ) {
minval = val;
mini = iter;
}
""",
after_loop="""
// Refine search for min value
loc = start_loc + step * (float(mini) - 0.5);
for (int i=0; i<10; i++) {
minval = min(minval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (minval > u_high_discard_filter_value || minval < u_low_discard_filter_value)
{{
minval = 0.;
}}
// Color is associated to voxel intensity
if (u_color_method == 0) {
gl_FragColor = $cmap(minval);
//gl_FragColor.a = minval;
}
else{
// Color is associated to redshift/velocity
if (u_color_method == 1) {
gl_FragColor = $cmap(loc.y);
//if (minval == 0)
gl_FragColor.a = 1-minval;
}
// Color is associated to RGB cube
else {
if (u_color_method == 2) {
gl_FragColor.r = loc.y;
gl_FragColor.g = loc.z;
gl_FragColor.b = loc.x;
gl_FragColor.a = minval;
}
// Color by sigma values
else if (u_color_method == 3) {
if ( (1-minval < (u_volume_mean + (3.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 0., 1., 1-minval);
}
// < 3 sigmas
if ( (1-minval >= (u_volume_mean + (3.0 * u_volume_std))) &&
(1-minval < (u_volume_mean + (4.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 1., 0., 1-minval);
}
if ( (1-minval >= (u_volume_mean + (4.0 * u_volume_std))) &&
(1-minval < (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 0., 0., 1-minval);
}
if ( (1-minval >= (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 1., 1., 1-minval);
}
}
// Case 4: Mom2
// TODO: verify implementation of MIP-mom2.
else {
gl_FragColor = $cmap((minval * ((minval - loc.y) * (minval - loc.y))) / minval);
}
}
}
""",
)
MINIP_FRAG_SHADER = FRAG_SHADER.format(**MINIP_SNIPPETS)
# Render-method name -> fragment shader source.  Consumed by the
# RenderVolumeVisual.method setter; its keys also appear verbatim in the
# ValueError message raised for an unknown method name.
frag_dict = {
    'mip': MIP_FRAG_SHADER,
    'lmip': LMIP_FRAG_SHADER,
    'iso': ISO_FRAG_SHADER,
    'avip': TRANSLUCENT_FRAG_SHADER,
    'minip': MINIP_FRAG_SHADER,
    'translucent2': TRANSLUCENT2_FRAG_SHADER,
    'additive': ADDITIVE_FRAG_SHADER,
}
# _interpolation_template = """
# #include "misc/spatial-filters.frag"
# vec4 texture_lookup_filtered(vec2 texcoord) {
# if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
# texcoord.y < 0.0 || texcoord.y > 1.0) {
# discard;
# }
# return %s($texture, $shape, texcoord);
# }"""
#
# _texture_lookup = """
# vec4 texture_lookup(vec2 texcoord) {
# if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
# texcoord.y < 0.0 || texcoord.y > 1.0) {
# discard;
# }
# return texture2D($texture, texcoord);
# }"""
class RenderVolumeVisual(Visual):
    """ Displays a 3D Volume
    Parameters
    ----------
    vol : ndarray
        The volume to display. Must be ndim==3.
    clim : tuple of two floats | None
        The contrast limits. The values in the volume are mapped to
        black and white corresponding to these values. Default maps
        between min and max.
    method : {'mip', 'avip', 'additive', 'iso'}
        The render method to use. See corresponding docs for details.
        Default 'mip'.  (``frag_dict`` also accepts 'lmip', 'minip' and
        'translucent2'.)
    threshold : float
        The threshold to use for the isosurface render method. By default
        the mean of the given volume is used.
    relative_step_size : float
        The relative step size to step through the volume. Default 0.8.
        Increase to e.g. 1.5 to increase performance, at the cost of
        quality.
    cmap : str
        Colormap to use.
    emulate_texture : bool
        Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
        but has lower performance on desktop platforms.
    """
    def __init__(self, vol, clim=None, method='mip', threshold=None,
                 relative_step_size=0.8, cmap='grays',
                 emulate_texture=False, color_scale='linear',
                 filter_type = 0, filter_size = 1,
                 use_gaussian_filter = False, gaussian_filter_size=9,
                 density_factor=0.01, color_method='Moment 0', log_scale=0,
                 interpolation='linear'):
        # Pick the 3D texture class (or the 2D-emulated one for GLES 2.0).
        tex_cls = TextureEmulated3D if emulate_texture else Texture3D
        # Storage of information of volume
        self._vol_shape = ()
        self._clim = None
        self._need_vertex_update = True
        # Set the colormap
        self._cmap = get_colormap(cmap)
        # Create gloo objects
        self._vertices = VertexBuffer()
        # Texture coordinates for the 8 corners of the unit cube; the
        # positions themselves are set later in _create_vertex_data().
        self._texcoord = VertexBuffer(
            np.array([
                [0, 0, 0],
                [1, 0, 0],
                [0, 1, 0],
                [1, 1, 0],
                [0, 0, 1],
                [1, 0, 1],
                [0, 1, 1],
                [1, 1, 1],
            ], dtype=np.float32))
        # # load 'float packed rgba8' interpolation kernel
        # # to load float interpolation kernel use
        # # `load_spatial_filters(packed=False)`
        # kernel, self._interpolation_names = load_spatial_filters()
        #
        # fun = [Function(_interpolation_template % n)
        #        for n in self._interpolation_names]
        #
        # self._interpolation_names = [n.lower()
        #                              for n in self._interpolation_names]
        #
        # self._interpolation_fun = dict(zip(self._interpolation_names, fun))
        # self._interpolation_names.sort()
        # self._interpolation_names = tuple(self._interpolation_names)
        #
        # print self._interpolation_fun
        #
        # # overwrite "nearest" and "bilinear" spatial-filters
        # # with "hardware" interpolation _data_lookup_fn
        # self._interpolation_fun['nearest'] = Function(_texture_lookup)
        # self._interpolation_fun['bilinear'] = Function(_texture_lookup)
        #
        # if interpolation not in self._interpolation_names:
        #     raise ValueError("interpolation must be one of %s" %
        #                      ', '.join(self._interpolation_names))
        #
        # self._interpolation = interpolation
        # check texture interpolation
        # if self._interpolation == 'bilinear':
        #     self._interpolation = 'linear'
        # else:
        #     self._interpolation = 'nearest'
        # Placeholder-sized texture; set_data() below uploads the real volume.
        self._tex = tex_cls((10, 10, 10), interpolation=interpolation,
                            wrapping='clamp_to_edge')
        # self._tex = tex_cls((10, 10, 10), interpolation='linear',
        #                     wrapping='clamp_to_edge')
        # Create program
        Visual.__init__(self, vcode=VERT_SHADER, fcode="")
        self.shared_program['u_volumetex'] = self._tex
        self.shared_program['a_position'] = self._vertices
        self.shared_program['a_texcoord'] = self._texcoord
        self._draw_mode = 'triangle_strip'
        self._index_buffer = IndexBuffer()
        # Only show back faces of cuboid. This is required because if we are
        # inside the volume, then the front faces are outside of the clipping
        # box and will not be drawn.
        self.set_gl_state('translucent', cull_face=False)
        # Set data
        self.set_data(vol, clim)
        # Set params (each assignment below goes through a property setter
        # that forwards the value to a shader uniform).
        self.method = method
        self.relative_step_size = relative_step_size
        #self.color_scale = color_scale
        # self.data_min = self._clim[0]
        # self.data_max = self._clim[1]
        # moving_box_filter (=1 means no filter)
        self.filter_type = filter_type
        self.filter_size = filter_size
        # 3D gaussian filter
        self.use_gaussian_filter = use_gaussian_filter
        self.gaussian_filter_size = gaussian_filter_size
        self.log_scale = log_scale
        self.density_factor = density_factor
        self.color_method = color_method
        self.threshold = threshold if (threshold is not None) else vol.mean()
        # print ("threshold", self.threshold)
        # No new attributes may be created after this point (vispy Frozen).
        self.freeze()
    def set_data(self, vol, clim=None):
        """ Set the volume data.
        Parameters
        ----------
        vol : ndarray
            The 3D volume.
        clim : tuple | None
            Colormap limits to use. None will use the min and max values.
        """
        # Check volume
        if not isinstance(vol, np.ndarray):
            raise ValueError('Volume visual needs a numpy array.')
        if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)):
            raise ValueError('Volume visual needs a 3D image.')
        # Handle clim
        if clim is not None:
            clim = np.array(clim, float)
            if not (clim.ndim == 1 and clim.size == 2):
                raise ValueError('clim must be a 2-element array-like')
            self._clim = tuple(clim)
        if self._clim is None:
            self._clim = np.nanmin(vol), np.nanmax(vol)
        # Apply clim (normalise the copy to [0, 1]; flipud matches the
        # texture-coordinate orientation used by the shader).
        vol = np.flipud(np.array(vol, dtype='float32', copy=False))
        if self._clim[1] == self._clim[0]:
            # Degenerate range: scale by the single value (if non-zero).
            if self._clim[0] != 0.:
                vol *= 1.0 / self._clim[0]
        else:
            vol -= self._clim[0]
            vol /= self._clim[1] - self._clim[0]
        # Deal with nan
        if np.isnan(vol).any():
            vol = np.nan_to_num(vol)
        # These setters re-normalise by clim, so after this the high/low
        # discard filters are effectively 1.0 and 0.0 respectively.
        self.high_discard_filter_value = self._clim[1]
        self.low_discard_filter_value = self._clim[0]
        self.volume_mean = np.mean(vol)
        self.volume_std = np.std(vol)
        #self.volume_madfm = self.madfm(vol)
        # Apply to texture
        print ("min:", np.min(vol), "max:", np.max(vol))
        self._tex.set_data(vol)  # will be efficient if vol is same shape
        self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1], vol.shape[0])
        # NOTE(review): under Python 2 without `from __future__ import
        # division`, 1/vol.shape[i] is integer division and yields 0 —
        # confirm this module runs on Python 3 only.
        self.shared_program['u_resolution'] = (1/vol.shape[2], 1/vol.shape[1], 1/vol.shape[0])
        shape = vol.shape[:3]
        if self._vol_shape != shape:
            self._vol_shape = shape
            self._need_vertex_update = True
        # NOTE(review): this assignment is redundant — _vol_shape was already
        # set in the branch above when the shape changed.
        self._vol_shape = shape
        # Get some stats
        # NOTE(review): counts elements, not bytes (dtype size ignored), so
        # "kb" is only accurate for 1-byte texels — verify intent.
        self._kb_for_texture = np.prod(self._vol_shape) / 1024
    @property
    def interpolation(self):
        """ Current interpolation function.
        """
        return self._tex.interpolation
    @interpolation.setter
    def interpolation(self, interpolation):
        # set interpolation technique
        self._tex.interpolation = interpolation
    @property
    def clim(self):
        """ The contrast limits that were applied to the volume data.
        Settable via set_data().
        """
        return self._clim
    @property
    def cmap(self):
        # Active vispy Colormap object (set via the `cmap` name string).
        return self._cmap
    @cmap.setter
    def cmap(self, cmap):
        self._cmap = get_colormap(cmap)
        self.shared_program.frag['cmap'] = Function(self._cmap.glsl_map)
        self.update()
    @property
    def method(self):
        """The render method to use
        Current options are:
        * avip: voxel colors are blended along the view ray until
          the result is opaque.
        * mip: maxiumum intensity projection. Cast a ray and display the
          maximum value that was encountered.
        * additive: voxel colors are added along the view ray until
          the result is saturated.
        * iso: isosurface. Cast a ray until a certain threshold is
          encountered. At that location, lighting calculations are
          performed to give the visual appearance of a surface.
        """
        return self._method
    @method.setter
    def method(self, method):
        # Check and save
        known_methods = list(frag_dict.keys())
        if method not in known_methods:
            raise ValueError('Volume render method should be in %r, not %r' %
                             (known_methods, method))
        self._method = method
        # Get rid of specific variables - they may become invalid
        if 'u_threshold' in self.shared_program:
            self.shared_program['u_threshold'] = None
        # Swap in the fragment shader for the chosen method and re-bind the
        # template hooks (sampler, sample function, colormap).
        self.shared_program.frag = frag_dict[method]
        self.shared_program.frag['sampler_type'] = self._tex.glsl_sampler_type
        self.shared_program.frag['sample'] = self._tex.glsl_sample
        self.shared_program.frag['cmap'] = Function(self._cmap.glsl_map)
        self.update()
    @property
    def color_method(self):
        """The way color is associated with voxel
        Current options are:
        * regular: Color is associated to voxel intensity (defined by the VR method)
        * velocity/redshit: Color is associated to depth coordinate
          and alpha to voxel intensity (defined by the VR method)
        """
        return self._color_method
    @color_method.setter
    def color_method(self, color_method):
        # Map the UI label to the integer the shaders branch on
        # (see the u_color_method cases in the *_SNIPPETS above).
        if color_method == 'Moment 0':
            self._color_method = 0
        elif color_method == 'Moment 1':
            self._color_method = 1
        elif color_method == 'rgb_cube':
            self._color_method = 2
        elif color_method == 'Sigmas':
            self._color_method = 3
        else:
            self._color_method = 4
        # print ("color_method", self._color_method)
        self.shared_program['u_color_method'] = int(self._color_method)
        self.update()
    @property
    def threshold(self):
        """ The threshold value to apply for the isosurface render method.
        Also used for the lmip transfer function.
        """
        return self._threshold
    @threshold.setter
    def threshold(self, value):
        self._threshold = float(value)
        # Only some fragment shaders declare u_threshold (e.g. iso/lmip).
        if 'u_threshold' in self.shared_program:
            self.shared_program['u_threshold'] = self._threshold
        self.update()
    @property
    def color_scale(self):
        # 0 = linear, 1 = anything else (see setter).
        return self._color_scale
    @color_scale.setter
    def color_scale(self, color_scale):
        if (color_scale == 'linear'):
            self._color_scale = 0
        else:
            self._color_scale = 1
        self.shared_program['u_color_scale'] = int(self._color_scale)
        self.update()
    @property
    def log_scale(self):
        return self._log_scale
    @log_scale.setter
    def log_scale(self, log_scale):
        self._log_scale = int(log_scale)
        # NOTE(review): the uniform upload is commented out, so this flag is
        # currently stored but never reaches the shader.
        #self.shared_program['u_log_scale'] = int(self._log_scale)
        self.update()
    @property
    def data_min(self):
        return self._data_min
    @data_min.setter
    def data_min(self, data_min):
        # NOTE(review): the argument is ignored and the value forced to 0.0
        # — looks like a leftover stub; confirm whether this should be
        # float(data_min).
        self._data_min = 0.
        self.shared_program['u_data_min'] = float(self._data_min)
        self.update()
    @property
    def data_max(self):
        return self._data_max
    @data_max.setter
    def data_max(self, data_max):
        # NOTE(review): same as data_min — argument ignored, forced to 0.0.
        self._data_max = 0.
        self.shared_program['u_data_max'] = float(self._data_max)
        self.update()
    @property
    def moving_box_filter(self):
        return self._moving_box_filter
    @moving_box_filter.setter
    def moving_box_filter(self, moving_box_filter):
        # NOTE(review): the `moving_box_filter` argument is never stored;
        # self._moving_box_filter is read before ever being assigned, so
        # this setter raises AttributeError on first use — confirm and fix.
        self.shared_program['u_moving_box_filter'] = int(self._moving_box_filter)
        self.update()
    @property
    def volume_mean(self):
        return self._volume_mean
    @volume_mean.setter
    def volume_mean(self, volume_mean):
        # Mean of the *normalised* volume; used by the 'Sigmas' color method.
        self._volume_mean = float(volume_mean)
        self.shared_program['u_volume_mean'] = self._volume_mean
        print ("self._volume_mean", self._volume_mean)
        self.update()
    @property
    def volume_std(self):
        return self._volume_std
    @volume_std.setter
    def volume_std(self, volume_std):
        # Standard deviation of the normalised volume (sigma-band coloring).
        self._volume_std = float(volume_std)
        self.shared_program['u_volume_std'] = self._volume_std
        print("self._volume_std", self._volume_std)
        self.update()
    @property
    def volume_madfm(self):
        return self._volume_madfm
    @volume_madfm.setter
    def volume_madfm(self, volume_madfm):
        # Normalise the MADFM into clim units before uploading.
        self._volume_madfm = float(volume_madfm)
        self._volume_madfm -= self._clim[0]
        self._volume_madfm /= self._clim[1] - self._clim[0]
        self.shared_program['u_volume_madfm'] = self._volume_madfm
        self.update()
    @property
    def filter_size(self):
        return self._filter_size
    @filter_size.setter
    def filter_size(self, filter_size):
        # Box-filter width; arm = half-width, coeff = 1/size for averaging.
        # NOTE(review): 1/self._filter_size is integer division on Python 2
        # — confirm Python 3 only.
        self._filter_size = int(filter_size)
        self.shared_program['u_filter_size'] = int(self._filter_size)
        self.shared_program['u_filter_arm'] = int(np.floor(self._filter_size/2))
        self.shared_program['u_filter_coeff'] = float(1/self._filter_size)
        self.update()
    @property
    def filter_type(self):
        return self._filter_type
    @filter_type.setter
    def filter_type(self, filter_type):
        # 'Rescale' -> 1; any other value (including the default 0) -> 0.
        if filter_type == 'Rescale':
            self._filter_type = 1
        else:
            self._filter_type = 0
        self.shared_program['u_filter_type'] = int(self._filter_type)
        self.update()
    @property
    def use_gaussian_filter(self):
        return self._use_gaussian_filter
    @use_gaussian_filter.setter
    def use_gaussian_filter(self, use_gaussian_filter):
        # print ("use_gaussian_filter", use_gaussian_filter)
        self._use_gaussian_filter = int(use_gaussian_filter)
        self.shared_program['u_use_gaussian_filter'] = int(self._use_gaussian_filter)
        self.update()
    @property
    def gaussian_filter_size(self):
        return self._gaussian_filter_size
    @gaussian_filter_size.setter
    def gaussian_filter_size(self, gaussian_filter_size):
        self._gaussian_filter_size = int(gaussian_filter_size)
        self.shared_program['u_gaussian_filter_size'] = int(self._gaussian_filter_size)
        self.update()
    @property
    def high_discard_filter_value(self):
        return self._high_discard_filter_value
    @high_discard_filter_value.setter
    def high_discard_filter_value(self, high_discard_filter_value):
        # Convert from data units to the shader's normalised [0, 1] range.
        self._high_discard_filter_value = float(high_discard_filter_value)
        self._high_discard_filter_value -= self._clim[0]
        self._high_discard_filter_value /= self._clim[1] - self._clim[0]
        self.shared_program['u_high_discard_filter_value'] = self._high_discard_filter_value
        self.update()
    @property
    def low_discard_filter_value(self):
        return self._low_discard_filter_value
    @low_discard_filter_value.setter
    def low_discard_filter_value(self, low_discard_filter_value):
        # Convert from data units to the shader's normalised [0, 1] range.
        self._low_discard_filter_value = float(low_discard_filter_value)
        self._low_discard_filter_value -= self._clim[0]
        self._low_discard_filter_value /= self._clim[1] - self._clim[0]
        self.shared_program['u_low_discard_filter_value'] = self._low_discard_filter_value
        self.update()
    @property
    def density_factor(self):
        return self._density_factor
    @density_factor.setter
    def density_factor(self, density_factor):
        # Opacity contribution per sample in the translucent methods.
        self._density_factor = float(density_factor)
        self.shared_program['u_density_factor'] = self._density_factor
        self.update()
    @property
    def relative_step_size(self):
        """ The relative step size used during raycasting.
        Larger values yield higher performance at reduced quality. If
        set > 2.0 the ray skips entire voxels. Recommended values are
        between 0.5 and 1.5. The amount of quality degradation depends
        on the render method.
        """
        return self._relative_step_size
    @relative_step_size.setter
    def relative_step_size(self, value):
        value = float(value)
        if value < 0.1:
            raise ValueError('relative_step_size cannot be smaller than 0.1')
        self._relative_step_size = value
        self.shared_program['u_relative_step_size'] = value
    def _create_vertex_data(self):
        """ Create and set positions and texture coords from the given shape
        We have six faces with 1 quad (2 triangles) each, resulting in
        6*2*3 = 36 vertices in total.
        """
        shape = self._vol_shape
        # Get corner coordinates. The -0.5 offset is to center
        # pixels/voxels. This works correctly for anisotropic data.
        x0, x1 = -0.5, shape[2] - 0.5
        y0, y1 = -0.5, shape[1] - 0.5
        z0, z1 = -0.5, shape[0] - 0.5
        pos = np.array([
            [x0, y0, z0],
            [x1, y0, z0],
            [x0, y1, z0],
            [x1, y1, z0],
            [x0, y0, z1],
            [x1, y0, z1],
            [x0, y1, z1],
            [x1, y1, z1],
        ], dtype=np.float32)
        """
          6-------7
         /|      /|
        4-------5 |
        | |     | |
        | 2-----|-3
        |/      |/
        0-------1
        """
        # Order is chosen such that normals face outward; front faces will be
        # culled.
        indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7],
                           dtype=np.uint32)
        # Apply
        self._vertices.set_data(pos)
        self._index_buffer.set_data(indices)
    def _compute_bounds(self, axis, view):
        # Axis extent of the volume cuboid (vispy BaseVisual hook).
        return 0, self._vol_shape[axis]
    def _prepare_transforms(self, view):
        # Upload the full scene transform plus the visual<->document pair
        # the vertex shader uses to compute per-fragment ray directions.
        trs = view.transforms
        view.view_program.vert['transform'] = trs.get_transform()
        view_tr_f = trs.get_transform('visual', 'document')
        view_tr_i = view_tr_f.inverse
        view.view_program.vert['viewtransformf'] = view_tr_f
        view.view_program.vert['viewtransformi'] = view_tr_i
    def _prepare_draw(self, view):
        # NOTE(review): _need_vertex_update is never reset to False here, so
        # the vertex data is rebuilt on every draw — confirm whether the flag
        # should be cleared after _create_vertex_data().
        if self._need_vertex_update:
            self._create_vertex_data()
    def madfm(self, volume):
        # As defined in Whiting, M. T. "DUCHAMP: a 3D source finder for spectral-lines data", MNRAS, 2012.
        # NOTE(review): the MADFM is median(|x - median(x)|) * 1.4826; the
        # absolute value is missing here, so the median of *signed*
        # deviations is taken (typically ~0) — confirm and add np.abs.
        return np.median(volume - np.median(volume)) * 1.4826042
# Scenegraph-node counterpart of the visual (vispy's standard wrapper).
RenderVolume = create_visual_node(RenderVolumeVisual)
def get_interpolation_fun():
    """Return the texture-interpolation lookup machinery.

    The original implementation was ``return get_interpolation_fun()`` —
    an unconditional self-call, so every caller got a ``RecursionError``.
    The interpolation-function table it was presumably meant to expose
    (``_interpolation_fun``) is commented out in ``RenderVolumeVisual``,
    so there is currently nothing meaningful to return; fail fast with a
    clear error instead of recursing until the stack overflows.

    Raises
    ------
    NotImplementedError
        Always, until the interpolation lookup table is reinstated.
    """
    raise NotImplementedError(
        "get_interpolation_fun is not implemented: the interpolation "
        "lookup machinery in RenderVolumeVisual is currently disabled"
    )
| 33.252964 | 110 | 0.567297 |
7b27764a19c2b8ce27db21198338af1db77ae1c8 | 14,153 | py | Python | tests/test_ec2/test_spot_fleet.py | stj/moto | 502957f1f9560cb6dff75271e6812498f4ff7bba | [
"Apache-2.0"
] | 1 | 2021-08-14T05:58:23.000Z | 2021-08-14T05:58:23.000Z | tests/test_ec2/test_spot_fleet.py | stj/moto | 502957f1f9560cb6dff75271e6812498f4ff7bba | [
"Apache-2.0"
] | null | null | null | tests/test_ec2/test_spot_fleet.py | stj/moto | 502957f1f9560cb6dff75271e6812498f4ff7bba | [
"Apache-2.0"
] | 1 | 2021-03-01T08:48:09.000Z | 2021-03-01T08:48:09.000Z | from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_ec2
def get_subnet_id(conn):
    """Create a fresh VPC with one 10.0.0.0/16 subnet in us-east-1a and
    return that subnet's id."""
    new_vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
    created = conn.create_subnet(
        VpcId=new_vpc["VpcId"],
        CidrBlock="10.0.0.0/16",
        AvailabilityZone="us-east-1a",
    )
    return created["Subnet"]["SubnetId"]
def spot_config(subnet_id, allocation_strategy="lowestPrice"):
    """Build a SpotFleetRequestConfig fixture for the given subnet.

    Two launch specifications are returned (t2.small weight 2.0 and
    t2.large weight 4.0) with an overall target capacity of 6.

    The original fixture contained ``True | False`` for the Ebs
    ``DeleteOnTermination`` and ``Encrypted`` fields — a bitwise-or of
    booleans copied from the AWS API type notation, which simply
    evaluates to ``True``.  The literal ``True`` is used here instead;
    the value is identical but no longer misleading.
    """
    return {
        "ClientToken": "string",
        "SpotPrice": "0.12",
        "TargetCapacity": 6,
        "IamFleetRole": "arn:aws:iam::123456789012:role/fleet",
        "LaunchSpecifications": [
            {
                "ImageId": "ami-123",
                "KeyName": "my-key",
                "SecurityGroups": [{"GroupId": "sg-123"}],
                "UserData": "some user data",
                "InstanceType": "t2.small",
                "BlockDeviceMappings": [
                    {
                        "VirtualName": "string",
                        "DeviceName": "string",
                        "Ebs": {
                            "SnapshotId": "string",
                            "VolumeSize": 123,
                            "DeleteOnTermination": True,
                            "VolumeType": "standard",
                            "Iops": 123,
                            "Encrypted": True,
                        },
                        "NoDevice": "string",
                    }
                ],
                "Monitoring": {"Enabled": True},
                "SubnetId": subnet_id,
                "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"},
                "EbsOptimized": False,
                "WeightedCapacity": 2.0,
                "SpotPrice": "0.13",
            },
            {
                "ImageId": "ami-123",
                "KeyName": "my-key",
                "SecurityGroups": [{"GroupId": "sg-123"}],
                "UserData": "some user data",
                "InstanceType": "t2.large",
                "Monitoring": {"Enabled": True},
                "SubnetId": subnet_id,
                "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"},
                "EbsOptimized": False,
                "WeightedCapacity": 4.0,
                "SpotPrice": "10.00",
            },
        ],
        "AllocationStrategy": allocation_strategy,
        "FulfilledCapacity": 6,
    }
@mock_ec2
def test_create_spot_fleet_with_lowest_price():
    """A lowestPrice fleet echoes its config back and fills capacity 6 with
    three weight-2 (t2.small) instances."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"]
    len(spot_fleet_requests).should.equal(1)
    spot_fleet_request = spot_fleet_requests[0]
    spot_fleet_request["SpotFleetRequestState"].should.equal("active")
    spot_fleet_config = spot_fleet_request["SpotFleetRequestConfig"]
    spot_fleet_config["SpotPrice"].should.equal("0.12")
    spot_fleet_config["TargetCapacity"].should.equal(6)
    spot_fleet_config["IamFleetRole"].should.equal(
        "arn:aws:iam::123456789012:role/fleet"
    )
    spot_fleet_config["AllocationStrategy"].should.equal("lowestPrice")
    spot_fleet_config["FulfilledCapacity"].should.equal(6.0)
    len(spot_fleet_config["LaunchSpecifications"]).should.equal(2)
    # Spot-check every field of the first (t2.small) launch specification.
    launch_spec = spot_fleet_config["LaunchSpecifications"][0]
    launch_spec["EbsOptimized"].should.equal(False)
    launch_spec["SecurityGroups"].should.equal([{"GroupId": "sg-123"}])
    launch_spec["IamInstanceProfile"].should.equal(
        {"Arn": "arn:aws:iam::123456789012:role/fleet"}
    )
    launch_spec["ImageId"].should.equal("ami-123")
    launch_spec["InstanceType"].should.equal("t2.small")
    launch_spec["KeyName"].should.equal("my-key")
    launch_spec["Monitoring"].should.equal({"Enabled": True})
    launch_spec["SpotPrice"].should.equal("0.13")
    launch_spec["SubnetId"].should.equal(subnet_id)
    launch_spec["UserData"].should.equal("some user data")
    launch_spec["WeightedCapacity"].should.equal(2.0)
    # Capacity 6 / weight 2.0 => 3 active instances.
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(3)
@mock_ec2
def test_create_diversified_spot_fleet():
    """A 'diversified' fleet spreads instances across both launch specs
    (one t2.small and one t2.large)."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    diversified_config = spot_config(subnet_id, allocation_strategy="diversified")
    spot_fleet_res = conn.request_spot_fleet(SpotFleetRequestConfig=diversified_config)
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(2)
    instance_types = set([instance["InstanceType"] for instance in instances])
    instance_types.should.equal(set(["t2.small", "t2.large"]))
    instances[0]["InstanceId"].should.contain("i-")
@mock_ec2
def test_create_spot_fleet_request_with_tag_spec():
    """Instance-level TagSpecifications are echoed in the fleet config and
    propagated onto every launched instance."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    tag_spec = [
        {
            "ResourceType": "instance",
            "Tags": [
                {"Key": "tag-1", "Value": "foo"},
                {"Key": "tag-2", "Value": "bar"},
            ],
        }
    ]
    config = spot_config(subnet_id)
    config["LaunchSpecifications"][0]["TagSpecifications"] = tag_spec
    spot_fleet_res = conn.request_spot_fleet(SpotFleetRequestConfig=config)
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"]
    spot_fleet_config = spot_fleet_requests[0]["SpotFleetRequestConfig"]
    spot_fleet_config["LaunchSpecifications"][0]["TagSpecifications"][0][
        "ResourceType"
    ].should.equal("instance")
    for tag in tag_spec[0]["Tags"]:
        spot_fleet_config["LaunchSpecifications"][0]["TagSpecifications"][0][
            "Tags"
        ].should.contain(tag)
    # Every launched instance must carry the requested tags.
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = conn.describe_instances(
        InstanceIds=[i["InstanceId"] for i in instance_res["ActiveInstances"]]
    )
    for instance in instances["Reservations"][0]["Instances"]:
        for tag in tag_spec[0]["Tags"]:
            instance["Tags"].should.contain(tag)
@mock_ec2
def test_cancel_spot_fleet_request():
    """Cancelling a fleet with TerminateInstances removes it entirely from
    describe_spot_fleet_requests."""
    client = boto3.client("ec2", region_name="us-west-2")
    fleet_id = client.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(get_subnet_id(client))
    )["SpotFleetRequestId"]
    client.cancel_spot_fleet_requests(
        SpotFleetRequestIds=[fleet_id], TerminateInstances=True
    )
    remaining = client.describe_spot_fleet_requests(
        SpotFleetRequestIds=[fleet_id]
    )["SpotFleetRequestConfigs"]
    len(remaining).should.equal(0)
@mock_ec2
def test_modify_spot_fleet_request_up():
    """Raising TargetCapacity to 20 launches weight-2 instances to match
    (10 instances, fulfilled capacity 20)."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    conn.modify_spot_fleet_request(SpotFleetRequestId=spot_fleet_id, TargetCapacity=20)
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(10)
    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"][0]["SpotFleetRequestConfig"]
    spot_fleet_config["TargetCapacity"].should.equal(20)
    spot_fleet_config["FulfilledCapacity"].should.equal(20.0)
@mock_ec2
def test_modify_spot_fleet_request_up_diversified():
    """Raising capacity on a diversified fleet overshoots to 20 (7 instances
    of mixed weights) when 19 is requested."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id, allocation_strategy="diversified")
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    conn.modify_spot_fleet_request(SpotFleetRequestId=spot_fleet_id, TargetCapacity=19)
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(7)
    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"][0]["SpotFleetRequestConfig"]
    spot_fleet_config["TargetCapacity"].should.equal(19)
    spot_fleet_config["FulfilledCapacity"].should.equal(20.0)
@mock_ec2
def test_modify_spot_fleet_request_down_no_terminate():
    """With ExcessCapacityTerminationPolicy=noTermination, lowering capacity
    keeps all running instances (fulfilled capacity stays at 6)."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id,
        TargetCapacity=1,
        ExcessCapacityTerminationPolicy="noTermination",
    )
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(3)
    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"][0]["SpotFleetRequestConfig"]
    spot_fleet_config["TargetCapacity"].should.equal(1)
    spot_fleet_config["FulfilledCapacity"].should.equal(6.0)
@mock_ec2
def test_modify_spot_fleet_request_down_odd():
    """Scaling 6 -> 7 -> 5 with weight-2 instances leaves 3 instances
    (fulfilled capacity 6, target 5)."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    conn.modify_spot_fleet_request(SpotFleetRequestId=spot_fleet_id, TargetCapacity=7)
    conn.modify_spot_fleet_request(SpotFleetRequestId=spot_fleet_id, TargetCapacity=5)
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(3)
    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"][0]["SpotFleetRequestConfig"]
    spot_fleet_config["TargetCapacity"].should.equal(5)
    spot_fleet_config["FulfilledCapacity"].should.equal(6.0)
@mock_ec2
def test_modify_spot_fleet_request_down():
    """Default termination policy: lowering capacity to 1 terminates down to
    a single weight-2 instance."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    conn.modify_spot_fleet_request(SpotFleetRequestId=spot_fleet_id, TargetCapacity=1)
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(1)
    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"][0]["SpotFleetRequestConfig"]
    spot_fleet_config["TargetCapacity"].should.equal(1)
    spot_fleet_config["FulfilledCapacity"].should.equal(2.0)
@mock_ec2
def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate():
    """After manually terminating all but one instance, a noTermination
    downscale keeps the surviving instance."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res["SpotFleetRequestId"]
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    # Terminate every instance except the first one out-of-band.
    conn.terminate_instances(InstanceIds=[i["InstanceId"] for i in instances[1:]])
    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id,
        TargetCapacity=1,
        ExcessCapacityTerminationPolicy="noTermination",
    )
    instance_res = conn.describe_spot_fleet_instances(SpotFleetRequestId=spot_fleet_id)
    instances = instance_res["ActiveInstances"]
    len(instances).should.equal(1)
    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"][0]["SpotFleetRequestConfig"]
    spot_fleet_config["TargetCapacity"].should.equal(1)
    spot_fleet_config["FulfilledCapacity"].should.equal(2.0)
@mock_ec2
def test_create_spot_fleet_without_spot_price():
    """Fleets may omit SpotPrice everywhere; AWS then falls back to the
    on-demand price and the echoed launch specs carry no SpotPrice key."""
    conn = boto3.client("ec2", region_name="us-west-2")
    subnet_id = get_subnet_id(conn)
    # remove prices to force a fallback to ondemand price
    spot_config_without_price = spot_config(subnet_id)
    del spot_config_without_price["SpotPrice"]
    for spec in spot_config_without_price["LaunchSpecifications"]:
        del spec["SpotPrice"]
    spot_fleet_id = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config_without_price
    )["SpotFleetRequestId"]
    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id]
    )["SpotFleetRequestConfigs"]
    len(spot_fleet_requests).should.equal(1)
    spot_fleet_request = spot_fleet_requests[0]
    spot_fleet_config = spot_fleet_request["SpotFleetRequestConfig"]
    len(spot_fleet_config["LaunchSpecifications"]).should.equal(2)
    launch_spec1 = spot_fleet_config["LaunchSpecifications"][0]
    launch_spec2 = spot_fleet_config["LaunchSpecifications"][1]
    # AWS will figure out the price
    assert "SpotPrice" not in launch_spec1
    assert "SpotPrice" not in launch_spec2
| 37.741333 | 88 | 0.695895 |
570b79a958d99031d5708797b54646954bf80a6f | 17,669 | py | Python | tests/test_corosync_config.py | AlexTalker/iml-agent | 5ebcfe96be670912d9a9b7fbb23431af0d54f768 | [
"MIT"
] | 1 | 2021-02-08T16:51:57.000Z | 2021-02-08T16:51:57.000Z | tests/test_corosync_config.py | AlexTalker/iml-agent | 5ebcfe96be670912d9a9b7fbb23431af0d54f768 | [
"MIT"
] | null | null | null | tests/test_corosync_config.py | AlexTalker/iml-agent | 5ebcfe96be670912d9a9b7fbb23431af0d54f768 | [
"MIT"
] | null | null | null | import sys
import mock
from iml_common.test.command_capture_testcase import (
CommandCaptureTestCase,
CommandCaptureCommand,
)
from iml_common.lib.firewall_control import FirewallControlEL7
from iml_common.lib.service_control import ServiceControlEL7
from iml_common.lib.agent_rpc import agent_result_ok
class FakeEtherInfo(object):
    """Minimal stand-in for an ethtool etherinfo record.

    Every key of *attrs* becomes an instance attribute.  ``__getattr__``
    (consulted only for names not already present) falls back to a plain
    ``__dict__`` lookup, so unknown names raise ``KeyError``.
    """

    def __init__(self, attrs):
        for key, value in attrs.items():
            setattr(self, key, value)

    def __getattr__(self, attr):
        return self.__dict__[attr]
class fake_ethtool(object):
    """In-memory replacement for the ``python-ethtool`` module, so the tests
    run without the package (and on non-Linux platforms).

    *interfaces* maps an interface name to a dict of etherinfo attributes
    (mac_address, ipv4_address, ...).

    Fix: the original signature used a mutable default argument
    (``interfaces={}``), which is shared across all instances created
    without an argument — state set on one fake leaked into the next.
    """

    IFF_SLAVE = 2048

    def __init__(self, interfaces=None):
        # Fresh dict per instance instead of the shared {} default.
        self.interfaces = {} if interfaces is None else interfaces

    def get_interfaces_info(self, name):
        """Return a one-element list wrapping the named interface's attrs."""
        return [FakeEtherInfo(self.interfaces[name])]

    def get_devices(self):
        """Return the known interface names (dict keys view/list)."""
        return self.interfaces.keys()

    def get_hwaddr(self, name):
        """Return the MAC address recorded for *name*."""
        return self.interfaces[name]["mac_address"]

    def get_flags(self, name):
        # just hard-code this for now
        return 4163
class TestConfigureCorosync(CommandCaptureTestCase):
    """Tests for corosync / corosync2 (pcs) cluster configuration plugins.

    Uses CommandCaptureTestCase to assert the exact shell commands the
    plugins run, and patches out ethtool, firewall control, and service
    control so the tests run without real network interfaces or root
    privileges.
    """

    def setUp(self):
        """Patch the environment: fake ethtool, mocked firewall/services."""
        super(TestConfigureCorosync, self).setUp()
        from chroma_agent.lib.corosync import CorosyncRingInterface
        from chroma_agent.lib.corosync import env

        def get_ring0():
            # The odd interface name exercises special-character handling.
            return CorosyncRingInterface("eth0.1.1?1b34*430")

        mock.patch("chroma_agent.lib.corosync.get_ring0", get_ring0).start()

        # Two interfaces: ring0 already addressed, ring1 unconfigured.
        self.interfaces = {
            "eth0.1.1?1b34*430": {
                "device": "eth0.1.1?1b34*430",
                "mac_address": "de:ad:be:ef:ca:fe",
                "ipv4_address": "192.168.1.1",
                "ipv4_netmask": "255.255.255.0",
                "link_up": True,
            },
            "eth1": {
                "device": "eth1",
                "mac_address": "ba:db:ee:fb:aa:af",
                "ipv4_address": None,
                "ipv4_netmask": 0,
                "link_up": True,
            },
        }

        # Just mock out the entire module ... This will make the tests
        # run on OS X or on Linux without the python-ethtool package.
        self.old_ethtool = sys.modules.get("ethtool", None)
        ethtool = fake_ethtool(self.interfaces)
        sys.modules["ethtool"] = ethtool

        self.write_ifcfg = mock.patch("chroma_agent.lib.node_admin.write_ifcfg").start()
        self.unmanage_network = mock.patch(
            "chroma_agent.lib.node_admin.unmanage_network"
        ).start()
        self.write_config_to_file = mock.patch(
            "chroma_agent.action_plugins.manage_corosync.write_config_to_file"
        ).start()
        mock.patch(
            "chroma_agent.action_plugins.manage_pacemaker.unconfigure_pacemaker"
        ).start()

        old_set_address = CorosyncRingInterface.set_address

        def set_address(obj, address, prefix):
            # Record the address assigned to ring1 back into our fake
            # ethtool data before delegating to the real implementation.
            if self.interfaces[obj.name]["ipv4_address"] is None:
                self.interfaces[obj.name]["ipv4_address"] = address
                self.interfaces[obj.name]["ipv4_netmask"] = prefix
            old_set_address(obj, address, prefix)

        mock.patch(
            "chroma_agent.lib.corosync.CorosyncRingInterface.set_address", set_address
        ).start()

        @property
        def has_link(obj):
            return self.interfaces[obj.name]["link_up"]

        # Kept as an attribute so test_link_state_unknown can stop it.
        self.link_patcher = mock.patch(
            "chroma_agent.lib.corosync.CorosyncRingInterface.has_link", has_link
        )
        self.link_patcher.start()

        mock.patch(
            "chroma_agent.lib.corosync.find_unused_port", return_value=4242
        ).start()
        mock.patch("chroma_agent.lib.corosync.discover_existing_mcastport").start()

        self.conf_template = env.get_template("corosync.conf")

        # mock out firewall control calls and check with assert_has_calls in tests
        self.mock_add_port = mock.patch.object(
            FirewallControlEL7, "_add_port", return_value=None
        ).start()
        self.mock_remove_port = mock.patch.object(
            FirewallControlEL7, "_remove_port", return_value=None
        ).start()

        # mock out service control objects with ServiceControlEL7 spec and check with assert_has_calls in tests
        # this assumes, quite rightly, that manage_corosync and manage_corosync2 will not both be used in the same test
        self.mock_corosync_service = mock.create_autospec(ServiceControlEL7)
        self.mock_corosync_service.enable.return_value = None
        self.mock_corosync_service.disable.return_value = None
        mock.patch(
            "chroma_agent.action_plugins.manage_corosync.corosync_service",
            self.mock_corosync_service,
        ).start()
        mock.patch(
            "chroma_agent.action_plugins.manage_corosync2.corosync_service",
            self.mock_corosync_service,
        ).start()

        self.mock_pcsd_service = mock.create_autospec(ServiceControlEL7)
        self.mock_pcsd_service.enable.return_value = None
        self.mock_pcsd_service.start.return_value = None
        mock.patch(
            "chroma_agent.action_plugins.manage_corosync2.pcsd_service",
            self.mock_pcsd_service,
        ).start()

        mock.patch(
            "chroma_agent.action_plugins.manage_corosync.firewall_control",
            FirewallControlEL7(),
        ).start()
        mock.patch(
            "chroma_agent.action_plugins.manage_corosync2.firewall_control",
            FirewallControlEL7(),
        ).start()

        # Guaranteed cleanup with unittest2
        self.addCleanup(mock.patch.stopall)

    def tearDown(self):
        """Restore the real ethtool module if one was present."""
        if self.old_ethtool:
            sys.modules["ethtool"] = self.old_ethtool

    def _ring_iface_info(self, mcast_port):
        """Build FakeEtherInfo ring records for all fake interfaces.

        The ring number is derived from the last character of the
        interface name; *mcast_port* is applied to every ring.
        """
        from netaddr import IPNetwork

        interfaces = []
        for name in sorted(self.interfaces.keys()):
            interface = self.interfaces[name]
            bindnetaddr = IPNetwork(
                "%s/%s" % (interface["ipv4_address"], interface["ipv4_netmask"])
            ).network
            ringnumber = name[-1]
            interfaces.append(
                FakeEtherInfo(
                    {
                        "ringnumber": ringnumber,
                        "bindnetaddr": bindnetaddr,
                        "mcastaddr": "226.94.%s.1" % ringnumber,
                        "mcastport": mcast_port,
                    }
                )
            )
        return interfaces

    def _render_test_config(self, mcast_port):
        """Render the expected corosync.conf for *mcast_port*."""
        return self.conf_template.render(interfaces=self._ring_iface_info(mcast_port))

    def test_manual_ring1_config(self):
        """configure_network + configure_corosync (corosync 1 path)."""
        from chroma_agent.action_plugins.manage_corosync_common import configure_network
        from chroma_agent.action_plugins.manage_corosync import configure_corosync

        ring0_name = "eth0.1.1?1b34*430"
        ring1_name = "eth1"
        ring1_ipaddr = "10.42.42.42"
        ring1_netmask = "255.255.255.0"
        old_mcast_port = None
        new_mcast_port = "4242"

        # add shell commands to be expected
        self.add_commands(
            CommandCaptureCommand(("/sbin/ip", "link", "set", "dev", ring1_name, "up")),
            CommandCaptureCommand(
                (
                    "/sbin/ip",
                    "addr",
                    "add",
                    "%s/%s" % (ring1_ipaddr, ring1_netmask),
                    "dev",
                    ring1_name,
                )
            ),
        )

        # now a two-step process! first network...
        self.assertEqual(
            agent_result_ok,
            configure_network(
                ring0_name,
                ring1_name=ring1_name,
                ring1_ipaddr=ring1_ipaddr,
                ring1_prefix=ring1_netmask,
            ),
        )
        self.write_ifcfg.assert_called_with(
            ring1_name, "ba:db:ee:fb:aa:af", "10.42.42.42", "255.255.255.0"
        )
        self.unmanage_network.assert_called_with(ring1_name, "ba:db:ee:fb:aa:af")

        # ...then corosync
        self.assertEqual(
            agent_result_ok,
            configure_corosync(ring0_name, ring1_name, old_mcast_port, new_mcast_port),
        )
        test_config = self._render_test_config(new_mcast_port)
        self.write_config_to_file.assert_called_with(
            "/etc/corosync/corosync.conf", test_config
        )

        # check correct firewall and service calls were made
        self.mock_add_port.assert_has_calls([mock.call(new_mcast_port, "udp")])
        self.mock_remove_port.assert_not_called()
        self.mock_corosync_service.enable.assert_called_once_with()
        self.assertRanAllCommandsInOrder()

        self.mock_remove_port.reset_mock()
        self.mock_add_port.reset_mock()
        self.mock_corosync_service.reset_mock()

        # ...now change corosync mcast port
        old_mcast_port = "4242"
        new_mcast_port = "4246"
        self.assertEqual(
            agent_result_ok,
            configure_corosync(ring0_name, ring1_name, old_mcast_port, new_mcast_port),
        )
        test_config = self._render_test_config(new_mcast_port)
        # check we try to write template with new_mcast_port value
        self.write_config_to_file.assert_called_with(
            "/etc/corosync/corosync.conf", test_config
        )
        # check correct firewall and service calls were made
        self.mock_remove_port.assert_has_calls([mock.call(old_mcast_port, "udp")])
        self.mock_add_port.assert_has_calls([mock.call(new_mcast_port, "udp")])
        self.mock_corosync_service.enable.assert_called_once_with()

    def _test_manual_ring1_config_corosync2(self, fqdn=False):
        """Shared body for the corosync2 (pcs) configuration tests.

        When *fqdn* is True the stage-1 step also sets the hostname.
        """
        import socket
        from chroma_agent.action_plugins.manage_corosync2 import (
            configure_corosync2_stage_1,
        )
        from chroma_agent.action_plugins.manage_corosync2 import (
            configure_corosync2_stage_2,
        )
        from chroma_agent.action_plugins.manage_corosync2 import change_mcast_port
        from chroma_agent.action_plugins.manage_corosync2 import PCS_TCP_PORT
        from chroma_agent.action_plugins.manage_corosync_common import configure_network

        ring0_name = "eth0.1.1?1b34*430"
        ring1_name = "eth1"
        ring1_ipaddr = "10.42.42.42"
        ring1_netmask = "255.255.255.0"
        mcast_port = "4242"
        new_node_fqdn = "servera.somewhere.org"
        pcs_password = "bondJAMESbond"

        # add shell commands to be expected
        self.add_commands(
            CommandCaptureCommand(("/sbin/ip", "link", "set", "dev", ring1_name, "up")),
            CommandCaptureCommand(
                (
                    "/sbin/ip",
                    "addr",
                    "add",
                    "/".join([ring1_ipaddr, ring1_netmask]),
                    "dev",
                    ring1_name,
                )
            ),
        )
        if fqdn:
            self.add_commands(
                CommandCaptureCommand(("hostnamectl", "set-hostname", new_node_fqdn))
            )
        self.add_commands(
            CommandCaptureCommand(
                ("bash", "-c", "echo bondJAMESbond | passwd --stdin hacluster")
            ),
            CommandCaptureCommand(
                tuple(
                    ["pcs", "cluster", "auth"]
                    + [new_node_fqdn]
                    + ["-u", "hacluster", "-p", "bondJAMESbond"]
                )
            ),
        )

        # now a two-step process! first network...
        self.assertEqual(
            agent_result_ok,
            configure_network(
                ring0_name,
                ring1_name=ring1_name,
                ring1_ipaddr=ring1_ipaddr,
                ring1_prefix=ring1_netmask,
            ),
        )
        self.write_ifcfg.assert_called_with(
            ring1_name, "ba:db:ee:fb:aa:af", "10.42.42.42", "255.255.255.0"
        )

        # fetch ring info
        r0, r1 = self._ring_iface_info(mcast_port)
        # add shell commands to be expected populated with ring interface info
        self.add_command(
            (
                "pcs",
                "cluster",
                "setup",
                "--name",
                "lustre-ha-cluster",
                "--force",
                new_node_fqdn,
                "--transport",
                "udp",
                "--rrpmode",
                "passive",
                "--addr0",
                str(r0.bindnetaddr),
                "--mcast0",
                str(r0.mcastaddr),
                "--mcastport0",
                str(r0.mcastport),
                "--addr1",
                str(r1.bindnetaddr),
                "--mcast1",
                str(r1.mcastaddr),
                "--mcastport1",
                str(r1.mcastport),
                "--token",
                "17000",
                "--fail_recv_const",
                "10",
            )
        )

        # ...then corosync / pcsd
        if fqdn:
            self.assertEqual(
                agent_result_ok,
                configure_corosync2_stage_1(mcast_port, pcs_password, new_node_fqdn),
            )
        else:
            self.assertEqual(
                agent_result_ok, configure_corosync2_stage_1(mcast_port, pcs_password)
            )
        self.assertEqual(
            agent_result_ok,
            configure_corosync2_stage_2(
                ring0_name, ring1_name, new_node_fqdn, mcast_port, pcs_password, True
            ),
        )

        # check correct firewall and service calls were made
        self.mock_add_port.assert_has_calls(
            [mock.call(mcast_port, "udp"), mock.call(PCS_TCP_PORT, "tcp")]
        )
        self.mock_remove_port.assert_not_called()
        self.mock_pcsd_service.start.assert_called_once_with()
        self.mock_corosync_service.enable.assert_called_once_with()
        self.mock_pcsd_service.enable.assert_called_once_with()

        self.mock_remove_port.reset_mock()
        self.mock_add_port.reset_mock()
        self.mock_corosync_service.reset_mock()

        # ...now change corosync mcast port
        old_mcast_port = "4242"
        new_mcast_port = "4246"
        # add shell command to be expected when updating corosync.conf file with new mcast port value
        self.add_command(
            (
                "sed",
                "-i.bak",
                "s/mcastport:.*/mcastport: %s/g" % new_mcast_port,
                "/etc/corosync/corosync.conf",
            )
        )
        self.assertEqual(
            agent_result_ok, change_mcast_port(old_mcast_port, new_mcast_port)
        )

        # check correct firewall and service calls were made
        self.mock_add_port.assert_has_calls([mock.call(new_mcast_port, "udp")])
        self.mock_remove_port.assert_has_calls([mock.call(old_mcast_port, "udp")])
        with self.assertRaises(AssertionError):
            self.mock_corosync_service.enable.assert_any_call()
        self.assertRanAllCommandsInOrder()

    def test_manual_ring1_config_corosync2(self):
        """corosync2 configuration without a hostname change."""
        self._test_manual_ring1_config_corosync2(False)

    def test_manual_ring1_config_corosync2_fqdn(self):
        """corosync2 configuration including a hostnamectl call."""
        self._test_manual_ring1_config_corosync2(True)

    def test_unconfigure_corosync2(self):
        """Removing a node disables corosync and closes both ports."""
        from chroma_agent.action_plugins.manage_corosync2 import unconfigure_corosync2
        from chroma_agent.action_plugins.manage_corosync2 import PCS_TCP_PORT

        host_fqdn = "serverb.somewhere.org"
        mcast_port = "4242"

        # add shell commands to be expected
        self.add_commands(
            CommandCaptureCommand(("pcs", "status", "nodes", "corosync")),
            CommandCaptureCommand(
                ("pcs", "--force", "cluster", "node", "remove", host_fqdn)
            ),
        )

        self.assertEqual(agent_result_ok, unconfigure_corosync2(host_fqdn, mcast_port))

        self.mock_corosync_service.disable.assert_called_once_with()
        self.mock_remove_port.assert_has_calls(
            [mock.call(PCS_TCP_PORT, "tcp"), mock.call(mcast_port, "udp")]
        )
        self.assertRanAllCommandsInOrder()

    def test_find_subnet(self):
        """find_subnet picks an unused private subnet for a given network."""
        from chroma_agent.lib.corosync import find_subnet
        from netaddr import IPNetwork

        # (input address, prefix) -> expected chosen subnet
        test_map = {
            ("192.168.1.0", "24"): IPNetwork("10.0.0.0/24"),
            ("10.0.1.0", "24"): IPNetwork("10.128.0.0/24"),
            ("10.128.0.0", "9"): IPNetwork("10.0.0.0/9"),
            ("10.127.255.254", "9"): IPNetwork("10.128.0.0/9"),
            ("10.255.255.255", "32"): IPNetwork("10.0.0.0/32"),
        }
        for args, output in test_map.items():
            self.assertEqual(output, find_subnet(*args))

    def test_link_state_unknown(self):
        """has_link is False while sysfs reports an 'unknown' link state.

        NOTE: patches ``__builtin__.open`` — this test module targets
        Python 2.
        """
        with mock.patch("__builtin__.open", mock.mock_open(read_data="unknown")):
            with mock.patch(
                "chroma_agent.lib.corosync.CorosyncRingInterface.__getattr__",
                return_value=False,
            ):
                with mock.patch("os.path.exists", return_value=True):
                    # Stop the class-wide has_link patch so the real
                    # property (reading sysfs) is exercised.
                    self.link_patcher.stop()

                    from chroma_agent.lib.corosync import get_ring0

                    iface = get_ring0()

                    # add shell commands to be expected
                    self.add_commands(
                        CommandCaptureCommand(
                            ("/sbin/ip", "link", "set", "dev", iface.name, "up")
                        ),
                        CommandCaptureCommand(
                            ("/sbin/ip", "link", "set", "dev", iface.name, "down")
                        ),
                    )

                    self.assertFalse(iface.has_link)
                    self.assertRanAllCommandsInOrder()
| 35.05754 | 119 | 0.583281 |
1001c3299b5237bd9b5cb95d03248b2d10c884a6 | 184 | py | Python | app.py | PI2-Windris/processing_service | 95e09ce4d39baf91e88be4394b22b4bb8cc107ea | [
"MIT"
] | null | null | null | app.py | PI2-Windris/processing_service | 95e09ce4d39baf91e88be4394b22b4bb8cc107ea | [
"MIT"
] | null | null | null | app.py | PI2-Windris/processing_service | 95e09ce4d39baf91e88be4394b22b4bb8cc107ea | [
"MIT"
] | null | null | null | from flask import Flask
from apis import api
app = Flask(__name__)
api.init_app(app)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=os.getenv('PORT', 8004))
| 20.444444 | 69 | 0.695652 |
276de3711a37ef1de427b0ad14740bc4bde69fc4 | 4,934 | py | Python | mayan/apps/storage/tests/test_utils.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 336 | 2019-05-09T07:05:19.000Z | 2022-03-25T09:50:22.000Z | mayan/apps/storage/tests/test_utils.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 86 | 2021-09-01T23:53:02.000Z | 2021-09-20T02:25:10.000Z | mayan/apps/storage/tests/test_utils.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | from pathlib import Path
import shutil
from django.utils.encoding import force_text
from mayan.apps.documents.storages import storage_document_files
from mayan.apps.documents.tests.base import GenericDocumentTestCase
from mayan.apps.mimetype.api import get_mimetype
from mayan.apps.testing.tests.base import BaseTestCase
from ..utils import PassthroughStorageProcessor, mkdtemp, patch_files
from .mixins import StorageProcessorTestMixin
class PatchFilesTestCase(BaseTestCase):
    """Exercise ``patch_files`` search/replace behavior on a scratch file."""

    test_replace_text = 'replaced_text'

    def setUp(self):
        super().setUp()
        self.temporary_directory = mkdtemp()
        self.path_temporary_directory = Path(self.temporary_directory)
        self.path_test_file = self.path_temporary_directory / 'test_file.txt'
        # Three lines, the second with a leading space to exercise
        # whitespace-sensitive matching.
        initial_lines = ('line 1\n', ' line 2\n', 'line 3\n')
        with self.path_test_file.open(mode='w') as file_object:
            file_object.write(''.join(initial_lines))

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(path=self.temporary_directory, ignore_errors=True)

    def _patch_test_file(self):
        """Run patch_files with the configured search text; capture output."""
        content_pattern = {
            'search': self.test_search_text,
            'replace': self.test_replace_text,
        }
        replace_list = [
            {
                'filename_pattern': '*',
                'content_patterns': [content_pattern],
            }
        ]
        patch_files(
            path=self.path_temporary_directory, replace_list=replace_list
        )
        with self.path_test_file.open(mode='r') as file_object:
            self.final_text = file_object.read()

    def test_file_patching_single_line(self):
        self.test_search_text = 'line 1'
        self._patch_test_file()
        self.assertEqual(self.final_text, 'replaced_text\n line 2\nline 3\n')

    def test_file_patching_multi_line(self):
        self.test_search_text = 'line 2\nline 3\n'
        self._patch_test_file()
        self.assertEqual(self.final_text, 'line 1\n replaced_text')

    def test_file_patching_spaces(self):
        self.test_search_text = ' line 2'
        self._patch_test_file()
        self.assertEqual(self.final_text, 'line 1\nreplaced_text\nline 3\n')

    def test_file_patching_no_matches(self):
        self.test_search_text = 'line 4'
        self._patch_test_file()
        self.assertEqual(self.final_text, 'line 1\n line 2\nline 3\n')
class StorageProcessorTestCase(
    StorageProcessorTestMixin, GenericDocumentTestCase
):
    """Verify PassthroughStorageProcessor converts stored document files
    between a plain filesystem backend and the zip-compressed passthrough
    backend, forwards and in reverse, without corrupting checksums.
    """

    # The test document is uploaded explicitly after the storage backend
    # has been switched, not during fixture setup.
    auto_upload_test_document = False

    def _execute_storage_procesor(self, reverse=None):
        """Run the processor over the document-file storage.

        NOTE(review): name keeps the original's "procesor" spelling.
        """
        storage_processor = PassthroughStorageProcessor(
            app_label='documents',
            defined_storage_name=storage_document_files.name,
            log_file=force_text(s=self.path_test_file),
            model_name='DocumentFile'
        )
        storage_processor.execute(reverse=reverse)

    def _upload_and_process(self):
        """Upload under plain FileSystemStorage, then switch the defined
        storage to the zip-compressed passthrough backend and process."""
        self.defined_storage.dotted_path = 'django.core.files.storage.FileSystemStorage'
        self.defined_storage.kwargs = {
            'location': self.document_storage_kwargs['location']
        }
        self._upload_test_document()
        self.defined_storage.dotted_path = 'mayan.apps.storage.backends.compressedstorage.ZipCompressedPassthroughStorage'
        self.defined_storage.kwargs = {
            'next_storage_backend': 'django.core.files.storage.FileSystemStorage',
            'next_storage_backend_arguments': {
                'location': self.document_storage_kwargs['location']
            }
        }
        self._execute_storage_procesor()

    def test_processor_forwards(self):
        """Forward processing leaves a zip on disk, checksum unchanged."""
        self._upload_and_process()
        with open(file=self.test_document.file_latest.file.path, mode='rb') as file_object:
            self.assertEqual(
                get_mimetype(file_object=file_object),
                ('application/zip', 'binary')
            )
        # Stored checksum must still match a fresh recomputation.
        self.assertEqual(
            self.test_document.file_latest.checksum,
            self.test_document.file_latest.checksum_update(save=False)
        )

    def test_processor_forwards_and_reverse(self):
        """Reverse processing restores the uncompressed on-disk file."""
        self._upload_and_process()
        self._execute_storage_procesor(reverse=True)
        self.defined_storage.dotted_path = 'django.core.files.storage.FileSystemStorage'
        self.defined_storage.kwargs = {
            'location': self.document_storage_kwargs['location']
        }
        with open(file=self.test_document.file_latest.file.path, mode='rb') as file_object:
            self.assertNotEqual(
                get_mimetype(file_object=file_object),
                ('application/zip', 'binary')
            )
        self.assertEqual(
            self.test_document.file_latest.checksum,
            self.test_document.file_latest.checksum_update(save=False)
        )
a97d0d1b87293a9c688d18016403153f170f590f | 4,076 | py | Python | main/model.py | hashtagChameleon/3DMPPE_ROOTNET_RELEASE | 58422f05030ad38501447b226a63c4151bd63b7d | [
"MIT"
] | 380 | 2019-07-22T18:30:12.000Z | 2022-03-31T08:45:36.000Z | main/model.py | juyongjiang/3DMPPE_ROOTNET_RELEASE | 8bef0cd332c3423050a6f3b382d2a574623e1ffa | [
"MIT"
] | 41 | 2019-08-06T03:12:02.000Z | 2022-03-28T07:54:36.000Z | main/model.py | juyongjiang/3DMPPE_ROOTNET_RELEASE | 8bef0cd332c3423050a6f3b382d2a574623e1ffa | [
"MIT"
] | 58 | 2019-08-05T02:55:53.000Z | 2022-03-31T08:45:18.000Z | import torch
import torch.nn as nn
from torch.nn import functional as F
from nets.resnet import ResNetBackbone
from config import cfg
class RootNet(nn.Module):
    """Predicts the 3D root-joint location: (x, y) in heatmap coordinates
    via a 2D soft-argmax, plus an absolute depth z obtained by scaling the
    camera-derived ``k_value`` with a learned factor gamma.
    """

    def __init__(self):
        # Call nn.Module.__init__ before assigning any attributes
        # (the original assigned plain ints first, which works but is
        # fragile if those ever become Parameters/Modules).
        super(RootNet, self).__init__()
        self.inplanes = 2048   # backbone feature channels
        self.outplanes = 256   # deconv output channels
        self.deconv_layers = self._make_deconv_layer(3)
        # 1x1 conv producing the single-channel x/y localization heatmap.
        self.xy_layer = nn.Conv2d(
            in_channels=self.outplanes,
            out_channels=1,
            kernel_size=1,
            stride=1,
            padding=0
        )
        # 1x1 conv on globally pooled features producing gamma.
        self.depth_layer = nn.Conv2d(
            in_channels=self.inplanes,
            out_channels=1,
            kernel_size=1,
            stride=1,
            padding=0
        )

    def _make_deconv_layer(self, num_layers):
        """Stack ``num_layers`` (deconv -> BN -> ReLU) upsampling blocks."""
        layers = []
        inplanes = self.inplanes
        outplanes = self.outplanes
        for i in range(num_layers):
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=inplanes,
                    out_channels=outplanes,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                    output_padding=0,
                    bias=False))
            layers.append(nn.BatchNorm2d(outplanes))
            layers.append(nn.ReLU(inplace=True))
            inplanes = outplanes
        return nn.Sequential(*layers)

    def forward(self, x, k_value):
        """Return per-sample (coord_x, coord_y, depth), shape (N, 3)."""
        # x,y: soft-argmax over a softmax-normalized heatmap.
        xy = self.deconv_layers(x)
        xy = self.xy_layer(xy)
        xy = xy.view(-1, 1, cfg.output_shape[0] * cfg.output_shape[1])
        xy = F.softmax(xy, 2)
        xy = xy.view(-1, 1, cfg.output_shape[0], cfg.output_shape[1])
        hm_x = xy.sum(dim=(2))
        hm_y = xy.sum(dim=(3))
        # Use the input's device so the model also runs on CPU
        # (the original hard-coded .cuda() here).
        coord_x = hm_x * torch.arange(
            cfg.output_shape[1], dtype=torch.float32, device=x.device
        )
        coord_y = hm_y * torch.arange(
            cfg.output_shape[0], dtype=torch.float32, device=x.device
        )
        coord_x = coord_x.sum(dim=2)
        coord_y = coord_y.sum(dim=2)

        # z: global average pooling, then scale k_value by predicted gamma.
        img_feat = torch.mean(x.view(x.size(0), x.size(1), x.size(2) * x.size(3)), dim=2)
        img_feat = torch.unsqueeze(img_feat, 2)
        img_feat = torch.unsqueeze(img_feat, 3)
        gamma = self.depth_layer(img_feat)
        gamma = gamma.view(-1, 1)
        depth = gamma * k_value.view(-1, 1)

        coord = torch.cat((coord_x, coord_y, depth), dim=1)
        return coord

    def init_weights(self):
        """Initialize deconv/conv weights (normal, std 0.001) and BN."""
        for name, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        for m in self.xy_layer.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
                nn.init.constant_(m.bias, 0)
        for m in self.depth_layer.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
                nn.init.constant_(m.bias, 0)
class ResPoseNet(nn.Module):
    """Composite network: a feature backbone followed by the root head.

    In inference mode (``target is None``) ``forward`` returns the
    predicted root coordinate; in training mode it returns the per-sample
    coordinate loss instead.
    """

    def __init__(self, backbone, root):
        super(ResPoseNet, self).__init__()
        self.backbone = backbone
        self.root = root

    def forward(self, input_img, k_value, target=None):
        feature_map = self.backbone(input_img)
        coord = self.root(feature_map, k_value)
        if target is None:
            return coord
        # Training path: L1 coordinate error masked by joint visibility;
        # the z term only contributes for samples that carry depth labels.
        abs_error = torch.abs(coord - target['coord']) * target['vis']
        has_depth = target['have_depth'].view(-1)
        loss_coord = (
            abs_error[:, 0] + abs_error[:, 1] + abs_error[:, 2] * has_depth
        ) / 3.
        return loss_coord
def get_pose_net(cfg, is_train):
    """Build the full pose network.

    Constructs the ResNet backbone selected by ``cfg.resnet_type`` and a
    fresh RootNet head, (re)initializing both for training when
    ``is_train`` is true, and wraps them in a ResPoseNet.
    """
    backbone = ResNetBackbone(cfg.resnet_type)
    head = RootNet()
    if is_train:
        for module in (backbone, head):
            module.init_weights()
    return ResPoseNet(backbone, head)
| 32.094488 | 112 | 0.562807 |
61698f5cf2929f25d734cc0d2b94828901c1caa7 | 7,849 | py | Python | binder/iris.py | draves15/final | ae8e8e8d35b10bd63e1f0a6d034cf30a98b4ac76 | [
"Apache-2.0"
] | null | null | null | binder/iris.py | draves15/final | ae8e8e8d35b10bd63e1f0a6d034cf30a98b4ac76 | [
"Apache-2.0"
] | null | null | null | binder/iris.py | draves15/final | ae8e8e8d35b10bd63e1f0a6d034cf30a98b4ac76 | [
"Apache-2.0"
] | null | null | null | # Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
# Here is a BiBTeX citation as well:
# @misc{Dua:2019 ,
# author = "Dua, Dheeru and Graff, Casey",
# year = "2017",
# title = "{UCI} Machine Learning Repository",
# url = "http://archive.ics.uci.edu/ml",
# institution = "University of California, Irvine, School of Information and Computer Sciences" }#
# https://osu.instructure.com/courses/103775/assignments/2302426?module_item_id=6434076
# 1.1 Install SciPy Libraries
# see enviroment.yml
# 1.2 Start Binder and Create a Jupyter Notebook to check Python and Versions
# Check the versions of libraries
# Python version
import sys
print('Python: {}'.format(sys.version))
# scipy
import scipy
print('scipy: {}'.format(scipy.__version__))
# numpy
import numpy
print('numpy: {}'.format(numpy.__version__))
# matplotlib
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
# pandas
import pandas
print('pandas: {}'.format(pandas.__version__))
# scikit-learn
import sklearn
print('sklearn: {}'.format(sklearn.__version__))
# 2.1 Load libraries
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# 2.2 Load dataset
url = "iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# 3.1 Dimensions of Dataset
# shape
print(dataset.shape)
# 3.2 Peek at the Data
# head
print(dataset.head(20))
# 3.3 Statistical Summary
# descriptions
print(dataset.describe())
# 3.4 Class Distribution
# class distribution
print(dataset.groupby('class').size())
# 3.5 Complete Example
# summarize the data
from pandas import read_csv
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# shape
print(dataset.shape)
# head
print(dataset.head(20))
# descriptions
print(dataset.describe())
# class distribution
print(dataset.groupby('class').size())
# 4.1 Univariate Plots
# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pyplot.show()
# 4.2 Multivariate Plots
# scatter plot matrix
scatter_matrix(dataset)
pyplot.show()
# histograms
dataset.hist()
pyplot.show()
# 4.3 Complete Example
# visualize the data
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pyplot.show()
# histograms
dataset.hist()
pyplot.show()
# scatter plot matrix
scatter_matrix(dataset)
pyplot.show()
# 5.1 Create a Validation Dataset
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
y = array[:,4]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)
# 5.2 Test Harness
# 5.3 Build Models
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# 5.4 Select Best Model
# Compare Algorithms
pyplot.boxplot(results, labels=names)
pyplot.title('Algorithm Comparison')
pyplot.show()
# 5.5 Complete Example
# compare algorithms
from pandas import read_csv
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
y = array[:,4]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1, shuffle=True)
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# Compare Algorithms
pyplot.boxplot(results, labels=names)
pyplot.title('Algorithm Comparison')
pyplot.show()
# 6.1 Make Predictions
# Make predictions on validation dataset
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
# 6.2 Evaluate Predictions
# Evaluate predictions
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# 6.3 Complete Example
# make predictions
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
# Load dataset
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
y = array[:,4]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)
# Make predictions on validation dataset
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
# Evaluate predictions
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| 32.704167 | 178 | 0.771181 |
d4ad137147a9932bfefca09ea2e75c42d32e0ef6 | 8,930 | py | Python | common/Rtree.py | 2019ly/CSD | c4ba1ea08b2893e1d61a4be41c1b257de8be5336 | [
"MIT"
] | 39 | 2020-06-21T05:14:03.000Z | 2021-11-15T01:20:25.000Z | common/Rtree.py | 2019ly/CSD | c4ba1ea08b2893e1d61a4be41c1b257de8be5336 | [
"MIT"
] | null | null | null | common/Rtree.py | 2019ly/CSD | c4ba1ea08b2893e1d61a4be41c1b257de8be5336 | [
"MIT"
] | 5 | 2020-06-26T10:33:27.000Z | 2020-10-19T06:54:28.000Z | # -*- coding:utf-8 -*-
import random
from heapq import heappop, heappush
from shapely.geometry import box, Point, Polygon
from shapely.ops import unary_union
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import common
def bounding_box(geoms):
    """Return the axis-aligned envelope covering every geometry in *geoms*."""
    merged = unary_union(geoms)
    return merged.envelope
class RtreeIndex(object):
    """R-tree spatial index over (object, shapely geometry) pairs.

    Supports insertion, full traversal, range queries and best-first
    k-nearest-neighbor search.  Node fan-out is bounded by
    MAX_CHILDREN_NUM; MIN_CHILDREN_NUM / CLUSTER_NUM drive node splits.
    """

    def __init__(self, minx, miny, maxx, maxy, data):
        """Build the index over *data*, an iterable of (obj, geometry)
        pairs, within the bounding space (minx, miny, maxx, maxy)."""
        self.count = 0
        self.leaf_count = 0
        self.MAX_CHILDREN_NUM = 10
        self.MIN_CHILDREN_NUM = int(self.MAX_CHILDREN_NUM / 2)
        self.CLUSTER_NUM = 2
        # Root starts as an empty leaf with a degenerate (empty) geometry.
        self.root = Node.create(self, None, Polygon(), [])
        self.space = box(minx, miny, maxx, maxy)
        self.geometries = dict()
        # Alias kept for callers that use point-based vocabulary.
        self.points = self.geometries
        self.size = len(data)
        for e in data:
            self.insert(e[0], e[1])

    def insert(self, o, geometry):
        """Insert object *o* with *geometry*; grow a new root on split."""
        root = self.root.insert(o, geometry)
        if len(root) > 1:
            # The root split: wrap the resulting nodes in a new root.
            self.root = Node.create_with_children(self, root)
        self.geometries[o] = geometry

    def get_all(self, node=None):
        """Yield (obj, geom) for every node in the subtree (default: root)."""
        if node is None:
            node = self.root
        yield node.obj, node.geom
        if not node.is_data_node:
            for c in node.children:
                for n in self.get_all(c):
                    yield n

    def range(self, r):
        """Yield (obj, geom) of all entries whose geometry intersects *r*."""
        for x in self.root.search(lambda x: r.intersects(x.geom), lambda x: r.intersects(x.geom)):
            yield x.obj, x.geom

    def nearest(self, q, k=1):
        """Return the k entries nearest to geometry *q*.

        Best-first search: a min-heap orders nodes by distance to *q*,
        while a bounded max-heap keeps the current k best candidates.
        Returns a list of (obj, geom, distance) tuples.
        """
        min_heap = common.MinHeap()
        knn = common.MaxHeap()
        min_heap.push((0, self.root))
        while len(min_heap) > 0:
            node_min_dist, node = min_heap.pop()
            # Prune: no remaining node can beat the current k-th best.
            if len(knn) >= k and node_min_dist > knn.first()[0]:
                break
            if node.is_leaf_node:
                for c in node.children:
                    c_min_dist = c.geom.distance(q)
                    if len(knn) < k or c_min_dist < knn.first()[0]:
                        knn.push((c_min_dist, c))
                        if len(knn) > k:
                            knn.pop()
            else:
                for c in node.children:
                    c_min_dist = c.geom.distance(q)
                    if len(knn) < k or c_min_dist < knn.first()[0]:
                        min_heap.push((c_min_dist, c))
        return [(e[1].obj, e[1].geom, e[0]) for e in knn.values]
class Node(object):
    """A node of the R-tree.

    Three kinds of node share this class:

    * data node: carries a user object (``obj``) and its geometry and has
      no children (``children is None``);
    * leaf node: its children are data nodes (or it is still empty);
    * intermediate node: its children are leaf or intermediate nodes.

    ``geom`` is the bounding geometry of the whole subtree.
    """

    @classmethod
    def create(cls, tree, obj, geom, children):
        """Generic factory for a node of any kind."""
        # Use cls (not a hard-coded class name) so subclasses construct
        # instances of themselves.
        return cls(tree, obj, geom, children)

    @classmethod
    def create_with_children(cls, tree, children):
        """Create an internal (leaf or intermediate) node covering all *children*."""
        geom = bounding_box([c.geom for c in children])
        node = cls.create(tree, None, geom, children)
        assert (not node.is_data_node)
        return node

    @classmethod
    def create_data_node(cls, tree, obj, geom):
        """Create a data node storing *obj* with bounding geometry *geom*."""
        node = cls.create(tree, obj, geom, None)
        assert node.is_data_node
        return node

    def __init__(self, tree, obj, geom, children):
        self.tree = tree          # owning index; supplies fan-out limits
        self.obj = obj            # user payload (data nodes only)
        self.geom = geom          # bounding geometry of this subtree
        self.children = children  # child Nodes, or None for data nodes

    def search(self, intermediate_node_predicate, data_node_predicate):
        """Yield data nodes satisfying *data_node_predicate*, descending only
        into subtrees whose node satisfies *intermediate_node_predicate*."""
        if self.is_data_node:
            if data_node_predicate(self):
                yield self
        else:
            if intermediate_node_predicate(self):
                for c in self.children:
                    for cr in c.search(intermediate_node_predicate, data_node_predicate):
                        yield cr

    @property
    def is_data_node(self):
        """True when this node stores a user object and has no child list."""
        if self.children is not None:
            return False
        if self.geom is not None and self.obj is not None:
            return True
        return False

    @property
    def is_leaf_node(self):
        """True when this node's children are data nodes (or it is empty)."""
        if self.is_data_node:
            return False
        if len(self.children) == 0 or self.children[0].is_data_node:
            return True
        return False

    @property
    def is_intermediate_node(self):
        """True when this node is neither a data node nor a leaf node.

        BUG FIX: the original implementation only returned ``True`` and fell
        through (implicitly returning ``None``) otherwise; this version
        always returns a proper bool.
        """
        return (not self.is_data_node) and (not self.is_leaf_node)

    def insert(self, obj, geom):
        """Insert (*obj*, *geom*) into this subtree.

        Returns a one-element list containing this node, or a two-element
        list with the halves produced by an overflow split; the caller is
        expected to replace this node with the returned nodes.
        """
        # Grow this node's bounding box to cover the new geometry.
        self.geom = bounding_box([self.geom, geom])
        if self.is_leaf_node:
            self.children.append(Node.create_data_node(self.tree, obj, geom))
            return self.adjust()
        else:
            inserting_child = self.find_inserting_child(geom)
            new_children = inserting_child.insert(obj, geom)
            if len(new_children) > 1:
                # The child split: swap it for the two split halves.
                self.children.remove(inserting_child)
                self.children += new_children
            return self.adjust()

    def find_inserting_child(self, geom):
        """Pick the child whose bounding box needs the least enlargement to
        cover *geom*, breaking ties by the smaller resulting area."""
        min_enlargement = float('inf')
        min_area = float('inf')
        inserting_child = None
        for e in self.children:
            area = bounding_box([e.geom, geom]).area
            enlargement = area - e.geom.area
            if enlargement < min_enlargement:
                inserting_child = e
                min_enlargement = enlargement
                min_area = area
            elif enlargement == min_enlargement:
                if area < min_area:
                    inserting_child = e
                    min_area = area
        return inserting_child

    def adjust(self):
        """Split this node when it exceeds the maximum fan-out; otherwise keep it."""
        if len(self.children) <= self.tree.MAX_CHILDREN_NUM:
            return [self]
        return self.split()

    def quadratic_split(self):
        """Guttman's quadratic split: distribute children into two new nodes."""
        seeds, remain_entries = self.pick_seeds()
        groups = [[seeds[0]], [seeds[1]]]
        group_bounding_boxes = [seeds[0].geom, seeds[1].geom]
        while len(remain_entries) > 0:
            # Once one group is past the minimum fill, hand the rest to the
            # other group so it also reaches minimum fill.
            if len(groups[0]) > self.tree.MIN_CHILDREN_NUM:
                groups[1] += remain_entries
                break
            if len(groups[1]) > self.tree.MIN_CHILDREN_NUM:
                groups[0] += remain_entries
                break
            e, bounding_boxes = self.pick_next(remain_entries, group_bounding_boxes)
            areas = [bounding_boxes[0].area, bounding_boxes[1].area]
            area_differences = [areas[0] - group_bounding_boxes[0].area,
                                areas[1] - group_bounding_boxes[1].area]
            # Prefer the group whose box grows least; break ties by smaller
            # resulting area, then by smaller group size.
            if area_differences[0] < area_differences[1]:
                target_group_id = 0
            elif area_differences[0] > area_differences[1]:
                target_group_id = 1
            elif areas[0] < areas[1]:
                target_group_id = 0
            elif areas[0] > areas[1]:
                target_group_id = 1
            elif len(groups[0]) <= len(groups[1]):
                target_group_id = 0
            else:
                target_group_id = 1
            groups[target_group_id].append(e)
            group_bounding_boxes[target_group_id] = bounding_boxes[target_group_id]
        return [Node.create_with_children(self.tree, group) for group in groups]

    def pick_seeds(self):
        """Choose the pair of children that together waste the most area."""
        seeds = []
        area_difference = -float('inf')
        for i in range(0, len(self.children) - 1):
            for j in range(i + 1, len(self.children)):
                area = bounding_box([self.children[i].geom, self.children[j].geom]).area - \
                    self.children[i].geom.area - self.children[j].geom.area
                if area > area_difference:
                    seeds = (self.children[i], self.children[j])
                    area_difference = area
        remain_entries = [e for e in self.children if e not in seeds]
        return seeds, remain_entries

    @staticmethod
    def pick_next(remain_entries, group_bounding_boxes):
        """Pop the entry with the strongest preference between the two groups.

        Returns the chosen entry and the two candidate group boxes enlarged
        by it. Assumes *remain_entries* is non-empty (guaranteed by
        ``quadratic_split``).
        """
        difference = -1
        for i in range(len(remain_entries)):
            bbox1 = bounding_box([group_bounding_boxes[0], remain_entries[i].geom])
            bbox2 = bounding_box([group_bounding_boxes[1], remain_entries[i].geom])
            d1 = bbox1.area - group_bounding_boxes[0].area
            d2 = bbox2.area - group_bounding_boxes[1].area
            if abs(d1 - d2) > difference:
                difference = abs(d1 - d2)
                next_entry_id = i
                next_bounding_boxes = [bbox1, bbox2]
        next_entry = remain_entries.pop(next_entry_id)
        return next_entry, next_bounding_boxes

    # Pluggable split strategy; quadratic split is the default.
    split = quadratic_split
def k_means_cluster(k, nodes):
    """Partition *nodes* into *k* groups by k-means on their geometry centroids.

    When there are no more than *k* nodes, each node simply forms its own
    singleton group and no clustering is run.
    """
    if len(nodes) <= k:
        return [[n] for n in nodes]
    centroids = [n.geom.centroid.coords[0] for n in nodes]
    model = KMeans(n_clusters=k)
    model.fit(centroids)
    assignment = model.labels_
    groups = []
    for cluster_id in range(k):
        groups.append([nodes[idx] for idx in range(len(nodes)) if assignment[idx] == cluster_id])
    return groups
def plot_geometry(ax, geometry, color='gray'):
    """Draw *geometry* on the matplotlib axes *ax*.

    A Point is rendered as a red dot (the *color* argument only applies to
    non-point geometries); anything else is drawn as the line of its
    exterior ring.
    """
    if isinstance(geometry, Point):
        ax.plot(geometry.x, geometry.y, '.', color='red')
        return
    ring = list(geometry.exterior.coords)
    xs = [pt[0] for pt in ring]
    ys = [pt[1] for pt in ring]
    ax.plot(xs, ys, '-', color=color)
| 36.153846 | 121 | 0.569877 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.