hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f4e926cc31185b94f26605c013ccd31d4f9b0f | 7,462 | py | Python | numba_dppy/tests/njit_tests/dpnp/test_numpy_rng.py | vlad-perevezentsev/numba-dppy | 9c8dabf929368db96c3a2abf42072178b6cd9634 | [
"Apache-2.0"
] | null | null | null | numba_dppy/tests/njit_tests/dpnp/test_numpy_rng.py | vlad-perevezentsev/numba-dppy | 9c8dabf929368db96c3a2abf42072178b6cd9634 | [
"Apache-2.0"
] | null | null | null | numba_dppy/tests/njit_tests/dpnp/test_numpy_rng.py | vlad-perevezentsev/numba-dppy | 9c8dabf929368db96c3a2abf42072178b6cd9634 | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Numba-DPPY
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import dpctl
import numpy as np
from numba import njit
import pytest
from numba_dppy.testing import dpnp_debug
from .dpnp_skip_test import dpnp_skip_test as skip_test
# dpnp throws -30 (CL_INVALID_VALUE) when invoked with multiple kinds of
# devices at runtime, so testing for level0 only
list_of_filter_strs = [
# "opencl:gpu:0",
"level0:gpu:0",
# "opencl:cpu:0",
]
@pytest.fixture(params=list_of_filter_strs)
def filter_str(request):
    """Parametrized fixture yielding one SYCL device filter string per run."""
    return request.param
list_of_size = [
9,
(2, 5),
(3, 2, 4),
]
none_size = [None]
@pytest.fixture(params=list_of_size)
def unary_size(request):
    """Parametrized fixture yielding an output-size argument (int or shape tuple)."""
    return request.param
@pytest.fixture(params=list_of_size + none_size)
def three_arg_size(request):
    """Like unary_size, but also yields None (scalar draw) as a size value."""
    return request.param
list_of_one_arg = [
("random_sample", 0.0, 1.0),
("ranf", 0.0, 1.0),
("sample", 0.0, 1.0),
("random", 0.0, 1.0),
("standard_exponential", 0.0, None),
("standard_normal", None, None),
("standard_cauchy", None, None),
]
@pytest.fixture(params=list_of_one_arg)
def one_arg_fn(request):
    """Build a wrapper ``fn(size)`` around np.random.<name>(size) for jitting.

    Returns a (function, (name, low, high)) pair; low/high are the expected
    support bounds of the distribution (None when unbounded on that side).
    """
    dist_name = request.param[0]
    source = "def fn(size):\n return np.random.{}(size)".format(dist_name)
    namespace = {}
    exec(source, globals(), namespace)
    return namespace["fn"], request.param
def test_one_arg_fn(filter_str, one_arg_fn, unary_size, capfd):
    """Run a one-argument RNG function under njit via dpnp and check its support.

    Asserts that the dpnp implementation was actually dispatched (via the
    debug output) and that the samples honor the distribution's bounds.
    """
    if skip_test(filter_str):
        pytest.skip()
    op, params = one_arg_fn
    name, low, high = params
    f = njit(op)
    with dpctl.device_context(filter_str), dpnp_debug():
        actual = f(unary_size)
        captured = capfd.readouterr()
        assert "dpnp implementation" in captured.out
        # Use identity comparison with None (PEP 8); a bound of None means
        # the distribution is unbounded on that side.
        if low is not None:
            assert np.all(actual >= low)
        if high is not None:
            assert np.all(actual < high)
list_of_two_arg_fn = [
("chisquare", 3, 0, None),
("exponential", 3.0, 0, None),
("gamma", 2.0, 0, None),
("geometric", 0.35, 0, None),
("poisson", 5.0, 0, None),
("rayleigh", 2.0, 0, None),
("standard_gamma", 2.0, 0, None),
("weibull", 5.0, 0, None),
]
@pytest.fixture(params=list_of_two_arg_fn)
def two_arg_fn(request):
    """Parametrized fixture yielding (name, first_arg, low, high) tuples."""
    return request.param
def get_two_arg_fn(op_name):
    """Return a generated wrapper ``fn(first_arg, second_arg)`` that calls
    ``np.random.<op_name>(first_arg, second_arg)``.

    The wrapper is built from source text so that numba's njit sees a plain
    function body containing the np.random call.
    """
    template = "def fn(first_arg, second_arg):\n\treturn np.random.{}(first_arg, second_arg)"
    namespace = {}
    exec(template.format(op_name), globals(), namespace)
    return namespace["fn"]
def test_two_arg_fn(filter_str, two_arg_fn, unary_size, capfd):
    """Run a two-argument RNG function under njit via dpnp and check its support.

    Only lower-bounded distributions (low set, high None) are range-checked here.
    """
    if skip_test(filter_str):
        pytest.skip()
    op_name, first_arg, low, high = two_arg_fn
    if op_name == "gamma":
        pytest.skip("AttributeError: 'NoneType' object has no attribute 'ravel'")
    op = get_two_arg_fn(op_name)
    f = njit(op)
    with dpctl.device_context(filter_str), dpnp_debug():
        actual = f(first_arg, unary_size)
        captured = capfd.readouterr()
        assert "dpnp implementation" in captured.out
        # Identity comparison with None (PEP 8) instead of ==/!=.
        if low is not None and high is None:
            if np.isscalar(actual):
                assert actual >= low
            else:
                actual = actual.ravel()
                assert np.all(actual >= low)
list_of_three_arg_fn = [
("randint", 2, 23, 0, None),
("random_integers", 2, 23, 1, None),
("beta", 2.56, 0.8, 0, 1.0),
("binomial", 5, 0.0, 0, 1.0),
("gumbel", 0.5, 0.1, None, None),
("laplace", 0.0, 1.0, None, None),
("lognormal", 3.0, 1.0, None, None),
("multinomial", 100, np.array([1 / 7.0] * 5), 0, 100),
("multivariate_normal", (1, 2), [[1, 0], [0, 1]], None, None),
("negative_binomial", 1, 0.1, 0, None),
("normal", 0.0, 0.1, None, None),
("uniform", -1.0, 0.0, -1.0, 0.0),
]
@pytest.fixture(params=list_of_three_arg_fn)
def three_arg_fn(request):
    """Parametrized fixture yielding (name, first_arg, second_arg, low, high)."""
    return request.param
def get_three_arg_fn(op_name):
    """Return a generated wrapper ``fn(first_arg, second_arg, third_arg)`` that
    calls ``np.random.<op_name>(first_arg, second_arg, third_arg)``.

    Built from source text so njit compiles a plain np.random call site.
    """
    template = (
        "def fn(first_arg, second_arg, third_arg):"
        "\n\treturn np.random.{}(first_arg, second_arg, third_arg)"
    )
    namespace = {}
    exec(template.format(op_name), globals(), namespace)
    return namespace["fn"]
def test_three_arg_fn(filter_str, three_arg_fn, three_arg_size, capfd):
    """Run a three-argument RNG function under njit via dpnp and check its support.

    Several distributions are skipped due to known dpnp/numba-dppy failures.
    """
    if skip_test(filter_str):
        pytest.skip()
    op_name, first_arg, second_arg, low, high = three_arg_fn
    if op_name == "multinomial":
        pytest.skip("DPNP RNG Error: dpnp_rng_multinomial_c() failed")
    elif op_name == "multivariate_normal":
        pytest.skip(
            "No implementation of function Function(<class "
            "'numba_dppy.dpnp_glue.stubs.dpnp.multivariate_normal'>) found for signature"
        )
    elif op_name == "negative_binomial":
        pytest.skip("DPNP RNG Error: dpnp_rng_negative_binomial_c() failed.")
    elif op_name == "gumbel":
        pytest.skip("DPNP error")
    op = get_three_arg_fn(op_name)
    f = njit(op)
    with dpctl.device_context(filter_str), dpnp_debug():
        actual = f(first_arg, second_arg, three_arg_size)
        captured = capfd.readouterr()
        assert "dpnp implementation" in captured.out
        # Identity comparison with None (PEP 8) instead of ==/!=.
        if low is not None and high is None:
            # NOTE(review): this truthiness test treats second_arg == 0.0 the
            # same as "no second bound" — confirm that is intended for the
            # randint/random_integers parameter sets this branch serves.
            if second_arg:
                low = first_arg
                high = second_arg
                assert np.all(actual >= low)
                assert np.all(actual <= high)
            else:
                high = first_arg
                assert np.all(actual >= low)
                assert np.all(actual <= high)
        elif low is not None and high is not None:
            if np.isscalar(actual):
                assert actual >= low
                assert actual <= high
            else:
                actual = actual.ravel()
                assert np.all(actual >= low)
                assert np.all(actual <= high)
def test_rand(filter_str):
    """np.random.rand under njit must return samples drawn from [0, 1)."""
    if skip_test(filter_str):
        pytest.skip()

    @njit
    def f():
        result = np.random.rand(3, 2)
        return result

    with dpctl.device_context(filter_str), dpnp_debug():
        samples = f().ravel()
        assert np.all(samples >= 0.0)
        assert np.all(samples < 1.0)
def test_hypergeometric(filter_str, three_arg_size):
    """Hypergeometric draws must lie in [0, min(nsamp, ngood + nbad)]."""
    if skip_test(filter_str):
        pytest.skip()

    @njit
    def f(ngood, nbad, nsamp, size):
        res = np.random.hypergeometric(ngood, nbad, nsamp, size)
        return res

    ngood, nbad, nsamp = 100, 2, 10
    with dpctl.device_context(filter_str), dpnp_debug():
        actual = f(ngood, nbad, nsamp, three_arg_size)
        # size=None yields a scalar; otherwise flatten and bound-check every draw.
        if np.isscalar(actual):
            assert actual >= 0
            assert actual <= min(nsamp, ngood + nbad)
        else:
            actual = actual.ravel()
            assert np.all(actual >= 0)
            assert np.all(actual <= min(nsamp, ngood + nbad))
| 28.265152 | 89 | 0.592469 | >= 0
assert actual <= min(nsamp, ngood + nbad)
else:
actual = actual.ravel()
assert np.all(actual >= 0)
assert np.all(actual <= min(nsamp, ngood + nbad))
| true | true |
f7f4ea801fc453d75b58ead0003b616af6e45c42 | 1,417 | py | Python | isochrones/extinction.py | Sam-2727/isochrones | 11f49c6c693e91bf275bb6a20af41b5f42e233da | [
"MIT"
] | 100 | 2015-03-12T12:51:03.000Z | 2022-01-07T23:16:01.000Z | isochrones/extinction.py | Sam-2727/isochrones | 11f49c6c693e91bf275bb6a20af41b5f42e233da | [
"MIT"
] | 154 | 2015-02-26T20:47:57.000Z | 2022-03-29T09:51:50.000Z | isochrones/extinction.py | Sam-2727/isochrones | 11f49c6c693e91bf275bb6a20af41b5f42e233da | [
"MIT"
] | 62 | 2015-02-03T17:58:43.000Z | 2021-12-04T22:31:20.000Z | import re
from .config import on_rtd
if not on_rtd:
from astropy.coordinates import SkyCoord
from six.moves import urllib
def get_AV_infinity(ra, dec, frame="icrs"):
    """
    Gets the A_V extinction at infinity for a given line of sight.

    Queries the NED database.

    :param ra,dec:
        Desired coordinates, in degrees.

    :param frame: (optional)
        Frame of input coordinates (e.g., ``'icrs', 'galactic'``)

    :raises RuntimeError: if no extinction value can be parsed from the reply.
    """
    coords = SkyCoord(ra, dec, unit="deg", frame=frame).transform_to("icrs")

    rah, ram, ras = coords.ra.hms
    decd, decm, decs = coords.dec.dms
    # Determine the sign from the full declination: the previous test
    # ``decd > 0`` mislabeled declinations in [0, 1) deg (where the degree
    # component is 0) as negative.
    if coords.dec.deg >= 0:
        decsign = "%2B"
    else:
        decsign = "%2D"
    # Build the NED extinction-calculator URL (sexagesimal, %3A = ':').
    url = (
        "http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon="
        + "%i" % rah
        + "%3A"
        + "%i" % ram
        + "%3A"
        + "%05.2f" % ras
        + "&lat=%s" % decsign
        + "%i" % abs(decd)
        + "%3A"
        + "%i" % abs(decm)
        + "%3A"
        + "%05.2f" % abs(decs)
        + "&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0"
    )

    AV = None
    # Scan the (bytes) response for the Landolt V-band extinction line.
    # Raw bytes pattern avoids the invalid-escape DeprecationWarning for "\(".
    for line in urllib.request.urlopen(url).readlines():
        m = re.search(rb"^Landolt V \(0.54\)\s+(\d+\.\d+)", line)
        if m:
            AV = float(m.group(1))
            break

    if AV is None:
        raise RuntimeError("AV query fails! URL is {}".format(url))

    return AV
| 26.240741 | 112 | 0.540579 | import re
from .config import on_rtd
if not on_rtd:
from astropy.coordinates import SkyCoord
from six.moves import urllib
def get_AV_infinity(ra, dec, frame="icrs"):
coords = SkyCoord(ra, dec, unit="deg", frame=frame).transform_to("icrs")
rah, ram, ras = coords.ra.hms
decd, decm, decs = coords.dec.dms
if decd > 0:
decsign = "%2B"
else:
decsign = "%2D"
url = (
"http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon="
+ "%i" % rah
+ "%3A"
+ "%i" % ram
+ "%3A"
+ "%05.2f" % ras
+ "&lat=%s" % decsign
+ "%i" % abs(decd)
+ "%3A"
+ "%i" % abs(decm)
+ "%3A"
+ "%05.2f" % abs(decs)
+ "&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0"
)
AV = None
for line in urllib.request.urlopen(url).readlines():
m = re.search(b"^Landolt V \(0.54\)\s+(\d+\.\d+)", line)
if m:
AV = float(m.group(1))
break
if AV is None:
raise RuntimeError("AV query fails! URL is {}".format(url))
return AV
| true | true |
f7f4eaa731734a5f16108db5ffda3da7a3611b1d | 235 | py | Python | ThreefoldLoginPkg/utils/parse.py | threefoldtech/threefold-login-python-sdk | f9a179ff9f1d5930e1f01f86909688857b66c768 | [
"Apache-2.0"
] | null | null | null | ThreefoldLoginPkg/utils/parse.py | threefoldtech/threefold-login-python-sdk | f9a179ff9f1d5930e1f01f86909688857b66c768 | [
"Apache-2.0"
] | null | null | null | ThreefoldLoginPkg/utils/parse.py | threefoldtech/threefold-login-python-sdk | f9a179ff9f1d5930e1f01f86909688857b66c768 | [
"Apache-2.0"
] | 1 | 2021-09-07T23:03:52.000Z | 2021-09-07T23:03:52.000Z | from urllib.parse import urlparse, parse_qs
import json
def parse_signed_attempt_from_url(url):
    """Extract and JSON-decode the ``signedAttempt`` query parameter of *url*.

    Raises KeyError if the parameter is absent, ValueError on invalid JSON.
    """
    query = urlparse(url).query
    raw_values = parse_qs(query)["signedAttempt"]
    return json.loads(raw_values[0])
| 18.076923 | 62 | 0.761702 | from urllib.parse import urlparse, parse_qs
import json
def parse_signed_attempt_from_url(url):
urlObject = urlparse(url)
signedAttempt = parse_qs(urlObject.query)['signedAttempt']
return json.loads(signedAttempt[0])
| true | true |
f7f4eb019eb2bce03522a08513c756ff6dce7325 | 2,294 | py | Python | tests/test_oembed_cache.py | Krankdud/speedtech | 6b2581b52653e93f3f62673a0938c9331abef7c7 | [
"MIT"
] | null | null | null | tests/test_oembed_cache.py | Krankdud/speedtech | 6b2581b52653e93f3f62673a0938c9331abef7c7 | [
"MIT"
] | 2 | 2019-10-21T15:07:08.000Z | 2021-06-01T21:51:08.000Z | tests/test_oembed_cache.py | Krankdud/speedtech | 6b2581b52653e93f3f62673a0938c9331abef7c7 | [
"MIT"
] | null | null | null | import shutil
import tempfile
import time
import unittest
import unittest.mock as mock
from werkzeug.contrib.cache import SimpleCache, FileSystemCache
from speeddb import oembed_cache
from tests.constants import *
class OembedCacheTestCase(unittest.TestCase):
    """Tests for speeddb.oembed_cache: backend selection, hit/miss, timeout."""

    def test_init_simple_cache(self):
        """Default init_cache() should install an in-memory SimpleCache."""
        oembed_cache.init_cache()
        self.assertTrue(isinstance(oembed_cache.cache, SimpleCache))

    def test_init_file_cache(self):
        """cache_type='file' should install a FileSystemCache in cache_dir."""
        temp_dir = tempfile.mkdtemp()
        oembed_cache.init_cache(cache_type='file', cache_dir=temp_dir)
        self.assertTrue(isinstance(oembed_cache.cache, FileSystemCache))
        shutil.rmtree(temp_dir)

    @mock.patch('speeddb.oembed_cache.PyEmbed', autospec=True)
    def test_get_not_in_cache(self, mock_pyembed):
        """A cache miss should invoke PyEmbed.embed and return its markup."""
        mock_pyembed.return_value.embed.return_value = OEMBED_MARKUP
        oembed_cache.init_cache()
        markup = oembed_cache.get(CLIP_URL_TWITTER)
        mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
        self.assertEqual(markup.striptags(), OEMBED_MARKUP)

    @mock.patch('speeddb.oembed_cache.PyEmbed', autospec=True)
    def test_get_in_cache(self, mock_pyembed):
        """A second get() for the same URL must be served from the cache."""
        mock_pyembed.return_value.embed.return_value = OEMBED_MARKUP
        oembed_cache.init_cache()
        oembed_cache.get(CLIP_URL_TWITTER)
        mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
        # Reset call history so the next assertion only sees the second get().
        mock_pyembed.return_value.embed.reset_mock()
        markup = oembed_cache.get(CLIP_URL_TWITTER)
        mock_pyembed.return_value.embed.assert_not_called()
        self.assertEqual(markup.striptags(), OEMBED_MARKUP)

    @mock.patch('speeddb.oembed_cache.PyEmbed', autospec=True)
    def test_get_cache_timeout(self, mock_pyembed):
        """After the timeout expires, get() must re-invoke PyEmbed.embed."""
        mock_pyembed.return_value.embed.return_value = OEMBED_MARKUP
        oembed_cache.init_cache(timeout=1)
        oembed_cache.get(CLIP_URL_TWITTER)
        mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
        # Sleep past the 1-second cache timeout so the entry is evicted.
        time.sleep(2)
        markup = oembed_cache.get(CLIP_URL_TWITTER)
        mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
        self.assertEqual(markup.striptags(), OEMBED_MARKUP)
if __name__ == '__main__':
unittest.main()
| 37.606557 | 77 | 0.731473 | import shutil
import tempfile
import time
import unittest
import unittest.mock as mock
from werkzeug.contrib.cache import SimpleCache, FileSystemCache
from speeddb import oembed_cache
from tests.constants import *
class OembedCacheTestCase(unittest.TestCase):
def test_init_simple_cache(self):
oembed_cache.init_cache()
self.assertTrue(isinstance(oembed_cache.cache, SimpleCache))
def test_init_file_cache(self):
temp_dir = tempfile.mkdtemp()
oembed_cache.init_cache(cache_type='file', cache_dir=temp_dir)
self.assertTrue(isinstance(oembed_cache.cache, FileSystemCache))
shutil.rmtree(temp_dir)
@mock.patch('speeddb.oembed_cache.PyEmbed', autospec=True)
def test_get_not_in_cache(self, mock_pyembed):
mock_pyembed.return_value.embed.return_value = OEMBED_MARKUP
oembed_cache.init_cache()
markup = oembed_cache.get(CLIP_URL_TWITTER)
mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
self.assertEqual(markup.striptags(), OEMBED_MARKUP)
@mock.patch('speeddb.oembed_cache.PyEmbed', autospec=True)
def test_get_in_cache(self, mock_pyembed):
mock_pyembed.return_value.embed.return_value = OEMBED_MARKUP
oembed_cache.init_cache()
oembed_cache.get(CLIP_URL_TWITTER)
mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
mock_pyembed.return_value.embed.reset_mock()
markup = oembed_cache.get(CLIP_URL_TWITTER)
mock_pyembed.return_value.embed.assert_not_called()
self.assertEqual(markup.striptags(), OEMBED_MARKUP)
@mock.patch('speeddb.oembed_cache.PyEmbed', autospec=True)
def test_get_cache_timeout(self, mock_pyembed):
mock_pyembed.return_value.embed.return_value = OEMBED_MARKUP
oembed_cache.init_cache(timeout=1)
oembed_cache.get(CLIP_URL_TWITTER)
mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
time.sleep(2)
markup = oembed_cache.get(CLIP_URL_TWITTER)
mock_pyembed.return_value.embed.assert_called_with(CLIP_URL_TWITTER)
self.assertEqual(markup.striptags(), OEMBED_MARKUP)
if __name__ == '__main__':
unittest.main()
| true | true |
f7f4eb63d8aab5b7166773945c75731403ae2a92 | 42,065 | py | Python | vn.trader/uiBasicWidget.py | yuzhucu/vnpy | c7049a4ae910b74e1ebd89bdcafc38076951cee5 | [
"MIT"
] | 2 | 2021-01-01T06:05:56.000Z | 2021-03-14T09:43:14.000Z | vn.trader/uiBasicWidget.py | CraigLuo/vnpy | c7049a4ae910b74e1ebd89bdcafc38076951cee5 | [
"MIT"
] | null | null | null | vn.trader/uiBasicWidget.py | CraigLuo/vnpy | c7049a4ae910b74e1ebd89bdcafc38076951cee5 | [
"MIT"
] | 2 | 2018-06-28T13:37:50.000Z | 2019-09-16T13:57:20.000Z | # encoding: UTF-8
import json
import csv
import os
from collections import OrderedDict
from PyQt4 import QtGui, QtCore
from eventEngine import *
from vtFunction import *
from vtGateway import *
#----------------------------------------------------------------------
def loadFont():
    """Load the GUI font configuration from VT_setting.json.

    Falls back to the default font (Microsoft YaHei, 12pt) if the settings
    file is missing or malformed.
    """
    fileName = 'VT_setting.json'
    path = os.path.abspath(os.path.dirname(__file__))
    fileName = os.path.join(path, fileName)

    try:
        # open() + with replaces the Python 2-only file() call, which also
        # leaked the file handle; the handle is now always closed.
        with open(fileName) as f:
            setting = json.load(f)
        family = setting['fontFamily']
        size = setting['fontSize']
        font = QtGui.QFont(family, size)
    except Exception:
        # Narrowed from a bare except: still covers IOError/ValueError/KeyError
        # without swallowing KeyboardInterrupt/SystemExit.
        font = QtGui.QFont(u'微软雅黑', 12)

    return font
BASIC_FONT = loadFont()
########################################################################
class BasicCell(QtGui.QTableWidgetItem):
    """Basic table cell; zero values are rendered as an empty string."""

    #----------------------------------------------------------------------
    def __init__(self, text=None, mainEngine=None):
        """Constructor"""
        super(BasicCell, self).__init__()
        self.data = None
        if text:
            self.setContent(text)

    #----------------------------------------------------------------------
    def setContent(self, text):
        """Set the displayed text, blanking out '0' and '0.0'."""
        if text in ('0', '0.0'):
            self.setText('')
        else:
            self.setText(text)
########################################################################
class NumCell(QtGui.QTableWidgetItem):
    """Table cell for numeric values, stored so that column sorting is numeric."""

    #----------------------------------------------------------------------
    def __init__(self, text=None, mainEngine=None):
        """Constructor"""
        super(NumCell, self).__init__()
        self.data = None
        if text:
            self.setContent(text)

    #----------------------------------------------------------------------
    def setContent(self, text):
        """Set the cell content."""
        # NumCell is mainly used for integer fields such as OrderID and
        # TradeID, so the value is converted with int(). Some gateways use
        # non-numeric order/trade IDs, hence the try...except fallback to
        # storing the raw text.
        try:
            num = int(text)
            self.setData(QtCore.Qt.DisplayRole, num)
        except ValueError:
            self.setText(text)
########################################################################
class DirectionCell(QtGui.QTableWidgetItem):
    """Table cell showing trade direction: long/net in red, short in green."""

    #----------------------------------------------------------------------
    def __init__(self, text=None, mainEngine=None):
        """Constructor"""
        super(DirectionCell, self).__init__()
        self.data = None
        if text:
            self.setContent(text)

    #----------------------------------------------------------------------
    def setContent(self, text):
        """Set content and the direction-dependent foreground color."""
        if text in (DIRECTION_LONG, DIRECTION_NET):
            self.setForeground(QtGui.QColor('red'))
        elif text == DIRECTION_SHORT:
            self.setForeground(QtGui.QColor('green'))
        self.setText(text)
########################################################################
class NameCell(QtGui.QTableWidgetItem):
    """Table cell that displays the contract's Chinese name for a vtSymbol."""

    #----------------------------------------------------------------------
    def __init__(self, text=None, mainEngine=None):
        """Constructor"""
        super(NameCell, self).__init__()
        self.mainEngine = mainEngine
        self.data = None
        if text:
            self.setContent(text)

    #----------------------------------------------------------------------
    def setContent(self, text):
        """Look up the contract for *text* and display its name if found."""
        if self.mainEngine:
            # Try to resolve the contract object from the main engine.
            contract = self.mainEngine.getContract(text)
            # Only display a name when the contract info is available;
            # otherwise the cell is left unchanged.
            if contract:
                self.setText(contract.name)
########################################################################
class BidCell(QtGui.QTableWidgetItem):
    """Bid price/volume cell (black text on a pink background)."""

    #----------------------------------------------------------------------
    def __init__(self, text=None, mainEngine=None):
        """Constructor"""
        super(BidCell, self).__init__()
        self.data = None

        self.setForeground(QtGui.QColor('black'))
        self.setBackground(QtGui.QColor(255,174,201))

        if text:
            self.setContent(text)

    #----------------------------------------------------------------------
    def setContent(self, text):
        """Set the cell content."""
        self.setText(text)
########################################################################
class AskCell(QtGui.QTableWidgetItem):
    """Ask price/volume cell (black text on a green background).

    Note: the original docstring said "bid price cell", which was a copy-paste
    error — this class styles the ask side.
    """

    #----------------------------------------------------------------------
    def __init__(self, text=None, mainEngine=None):
        """Constructor"""
        super(AskCell, self).__init__()
        self.data = None

        self.setForeground(QtGui.QColor('black'))
        self.setBackground(QtGui.QColor(160,255,160))

        if text:
            self.setContent(text)

    #----------------------------------------------------------------------
    def setContent(self, text):
        """Set the cell content."""
        self.setText(text)
########################################################################
class BasicMonitor(QtGui.QTableWidget):
    """
    Base monitor table widget.

    Values in headerDict are configuration dicts of the form
    {'chinese': u'column label', 'cellType': BasicCell}
    """
    # Qt signal used to marshal engine events onto the GUI thread.
    signal = QtCore.pyqtSignal(type(Event()))

    #----------------------------------------------------------------------
    def __init__(self, mainEngine=None, eventEngine=None, parent=None):
        """Constructor"""
        super(BasicMonitor, self).__init__(parent)

        self.mainEngine = mainEngine
        self.eventEngine = eventEngine

        # Header configuration.
        self.headerDict = OrderedDict()  # ordered dict: key is the field name, value its config dict
        self.headerList = []             # mirrors self.headerDict.keys()

        # Row bookkeeping.
        self.dataDict = {}  # dict: key is the data-key value, value maps header -> cell
        self.dataKey = ''   # attribute name used as the row key

        # Event type this monitor subscribes to.
        self.eventType = ''

        # Font applied to new cells (None = widget default).
        self.font = None

        # Whether to attach the data object to each cell.
        self.saveData = False

        # Header-click sorting is off by default; subclasses may enable it.
        self.sorting = False

        # Build the right-click context menu.
        self.initMenu()

    #----------------------------------------------------------------------
    def setHeaderDict(self, headerDict):
        """Set the ordered header-configuration dict."""
        self.headerDict = headerDict
        self.headerList = headerDict.keys()

    #----------------------------------------------------------------------
    def setDataKey(self, dataKey):
        """Set the attribute name used as the row key (enables in-place updates)."""
        self.dataKey = dataKey

    #----------------------------------------------------------------------
    def setEventType(self, eventType):
        """Set the event type this monitor listens for."""
        self.eventType = eventType

    #----------------------------------------------------------------------
    def setFont(self, font):
        """Set the font applied to newly created cells."""
        self.font = font

    #----------------------------------------------------------------------
    def setSaveData(self, saveData):
        """Set whether the source data object is stored on each cell."""
        self.saveData = saveData

    #----------------------------------------------------------------------
    def initTable(self):
        """Initialize the table layout from the header configuration."""
        # Number of columns.
        col = len(self.headerDict)
        self.setColumnCount(col)

        # Column labels.
        labels = [d['chinese'] for d in self.headerDict.values()]
        self.setHorizontalHeaderLabels(labels)

        # Hide the vertical (row-number) header on the left.
        self.verticalHeader().setVisible(False)

        # Make cells read-only.
        self.setEditTriggers(self.NoEditTriggers)

        # Alternate row background colors.
        self.setAlternatingRowColors(True)

        # Apply the sorting preference.
        self.setSortingEnabled(self.sorting)

    #----------------------------------------------------------------------
    def registerEvent(self):
        """Register the GUI-update event listener."""
        self.signal.connect(self.updateEvent)
        self.eventEngine.register(self.eventType, self.signal.emit)

    #----------------------------------------------------------------------
    def updateEvent(self, event):
        """Handle an incoming event by updating the table with its data."""
        data = event.dict_['data']
        self.updateData(data)

    #----------------------------------------------------------------------
    def updateData(self, data):
        """Write a data object into the table."""
        # If sorting is enabled it must be turned off before inserting,
        # otherwise newly inserted rows get scrambled.
        if self.sorting:
            self.setSortingEnabled(False)

        # With a dataKey configured, update the existing row in place.
        if self.dataKey:
            key = data.__getattribute__(self.dataKey)
            # Unknown key: insert a new row at the top and create its cells.
            if key not in self.dataDict:
                self.insertRow(0)
                d = {}
                for n, header in enumerate(self.headerList):
                    content = safeUnicode(data.__getattribute__(header))
                    cellType = self.headerDict[header]['cellType']
                    cell = cellType(content, self.mainEngine)

                    if self.font:
                        cell.setFont(self.font)  # apply the configured font to the cell

                    if self.saveData:            # attach the data object when configured
                        cell.data = data

                    self.setItem(0, n, cell)
                    d[header] = cell
                self.dataDict[key] = d
            # Known key: update the existing cells directly.
            else:
                d = self.dataDict[key]
                for header in self.headerList:
                    content = safeUnicode(data.__getattribute__(header))
                    cell = d[header]
                    cell.setContent(content)

                    if self.saveData:            # attach the data object when configured
                        cell.data = data
        # No dataKey: append-only mode, every update inserts a new row.
        else:
            self.insertRow(0)
            for n, header in enumerate(self.headerList):
                content = safeUnicode(data.__getattribute__(header))
                cellType = self.headerDict[header]['cellType']
                cell = cellType(content, self.mainEngine)

                if self.font:
                    cell.setFont(self.font)

                if self.saveData:
                    cell.data = data

                self.setItem(0, n, cell)

        # Resize columns to fit contents.
        self.resizeColumns()

        # Re-enable sorting.
        if self.sorting:
            self.setSortingEnabled(True)

    #----------------------------------------------------------------------
    def resizeColumns(self):
        """Resize every column to fit its contents."""
        self.horizontalHeader().resizeSections(QtGui.QHeaderView.ResizeToContents)

    #----------------------------------------------------------------------
    def setSorting(self, sorting):
        """Set whether header-click sorting is allowed."""
        self.sorting = sorting

    #----------------------------------------------------------------------
    def saveToCsv(self):
        """Save the table contents to a CSV file."""
        # Close the context menu first.
        self.menu.close()

        # Ask the user for the destination file name.
        path = QtGui.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')

        try:
            if not path.isEmpty():
                with open(unicode(path), 'wb') as f:
                    writer = csv.writer(f)

                    # Write the header row (GBK-encoded for Excel compatibility).
                    headers = [header.encode('gbk') for header in self.headerList]
                    writer.writerow(headers)

                    # Write each data row.
                    for row in range(self.rowCount()):
                        rowdata = []
                        for column in range(self.columnCount()):
                            item = self.item(row, column)
                            if item is not None:
                                rowdata.append(
                                    unicode(item.text()).encode('gbk'))
                            else:
                                rowdata.append('')
                        writer.writerow(rowdata)
        except IOError:
            pass

    #----------------------------------------------------------------------
    def initMenu(self):
        """Initialize the right-click context menu."""
        self.menu = QtGui.QMenu(self)

        saveAction = QtGui.QAction(u'保存内容', self)
        saveAction.triggered.connect(self.saveToCsv)

        self.menu.addAction(saveAction)

    #----------------------------------------------------------------------
    def contextMenuEvent(self, event):
        """Show the context menu on right-click."""
        self.menu.popup(QtGui.QCursor.pos())
########################################################################
class MarketMonitor(BasicMonitor):
    """Market data (tick) monitor."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(MarketMonitor, self).__init__(mainEngine, eventEngine, parent)

        # Ordered header configuration: data attribute -> column label / cell class.
        d = OrderedDict()
        d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
        d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
        d['lastPrice'] = {'chinese':u'最新价', 'cellType':BasicCell}
        d['preClosePrice'] = {'chinese':u'昨收盘价', 'cellType':BasicCell}
        d['volume'] = {'chinese':u'成交量', 'cellType':BasicCell}
        d['openInterest'] = {'chinese':u'持仓量', 'cellType':BasicCell}
        d['openPrice'] = {'chinese':u'开盘价', 'cellType':BasicCell}
        d['highPrice'] = {'chinese':u'最高价', 'cellType':BasicCell}
        d['lowPrice'] = {'chinese':u'最低价', 'cellType':BasicCell}
        d['bidPrice1'] = {'chinese':u'买一价', 'cellType':BidCell}
        d['bidVolume1'] = {'chinese':u'买一量', 'cellType':BidCell}
        d['askPrice1'] = {'chinese':u'卖一价', 'cellType':AskCell}
        d['askVolume1'] = {'chinese':u'卖一量', 'cellType':AskCell}
        d['time'] = {'chinese':u'时间', 'cellType':BasicCell}
        d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
        self.setHeaderDict(d)

        # Rows are keyed by vtSymbol so ticks update in place.
        self.setDataKey('vtSymbol')

        # Subscribe to tick events.
        self.setEventType(EVENT_TICK)

        # Font.
        self.setFont(BASIC_FONT)

        # Allow header-click sorting.
        self.setSorting(True)

        # Build the table and register for events.
        self.initTable()
        self.registerEvent()
########################################################################
class LogMonitor(BasicMonitor):
    """Log message monitor (append-only, no data key)."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(LogMonitor, self).__init__(mainEngine, eventEngine, parent)

        d = OrderedDict()
        d['logTime'] = {'chinese':u'时间', 'cellType':BasicCell}
        d['logContent'] = {'chinese':u'内容', 'cellType':BasicCell}
        d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
        self.setHeaderDict(d)

        self.setEventType(EVENT_LOG)
        self.setFont(BASIC_FONT)
        self.initTable()
        self.registerEvent()
########################################################################
class ErrorMonitor(BasicMonitor):
    """Error message monitor (append-only, no data key)."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(ErrorMonitor, self).__init__(mainEngine, eventEngine, parent)

        d = OrderedDict()
        d['errorTime'] = {'chinese':u'错误时间', 'cellType':BasicCell}
        d['errorID'] = {'chinese':u'错误代码', 'cellType':BasicCell}
        d['errorMsg'] = {'chinese':u'错误信息', 'cellType':BasicCell}
        d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
        self.setHeaderDict(d)

        self.setEventType(EVENT_ERROR)
        self.setFont(BASIC_FONT)
        self.initTable()
        self.registerEvent()
########################################################################
class TradeMonitor(BasicMonitor):
    """Trade (fill) monitor (append-only; sortable)."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(TradeMonitor, self).__init__(mainEngine, eventEngine, parent)

        d = OrderedDict()
        d['tradeID'] = {'chinese':u'成交编号', 'cellType':NumCell}
        d['orderID'] = {'chinese':u'委托编号', 'cellType':NumCell}
        d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
        d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
        d['direction'] = {'chinese':u'方向', 'cellType':DirectionCell}
        d['offset'] = {'chinese':u'开平', 'cellType':BasicCell}
        d['price'] = {'chinese':u'价格', 'cellType':BasicCell}
        d['volume'] = {'chinese':u'数量', 'cellType':BasicCell}
        d['tradeTime'] = {'chinese':u'成交时间', 'cellType':BasicCell}
        d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
        self.setHeaderDict(d)

        self.setEventType(EVENT_TRADE)
        self.setFont(BASIC_FONT)
        self.setSorting(True)

        self.initTable()
        self.registerEvent()
########################################################################
class OrderMonitor(BasicMonitor):
    """Working-order monitor; double-clicking a row cancels the order."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(OrderMonitor, self).__init__(mainEngine, eventEngine, parent)

        self.mainEngine = mainEngine

        d = OrderedDict()
        d['orderID'] = {'chinese':u'委托编号', 'cellType':NumCell}
        d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
        d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
        d['direction'] = {'chinese':u'方向', 'cellType':DirectionCell}
        d['offset'] = {'chinese':u'开平', 'cellType':BasicCell}
        d['price'] = {'chinese':u'价格', 'cellType':BasicCell}
        d['totalVolume'] = {'chinese':u'委托数量', 'cellType':BasicCell}
        d['tradedVolume'] = {'chinese':u'成交数量', 'cellType':BasicCell}
        d['status'] = {'chinese':u'状态', 'cellType':BasicCell}
        d['orderTime'] = {'chinese':u'委托时间', 'cellType':BasicCell}
        d['cancelTime'] = {'chinese':u'撤销时间', 'cellType':BasicCell}
        d['frontID'] = {'chinese':u'前置编号', 'cellType':BasicCell}
        d['sessionID'] = {'chinese':u'会话编号', 'cellType':BasicCell}
        d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
        self.setHeaderDict(d)

        # Rows are keyed by vtOrderID so order updates happen in place;
        # the order object is saved on the cells for cancel-on-double-click.
        self.setDataKey('vtOrderID')
        self.setEventType(EVENT_ORDER)
        self.setFont(BASIC_FONT)
        self.setSaveData(True)
        self.setSorting(True)

        self.initTable()
        self.registerEvent()
        self.connectSignal()

    #----------------------------------------------------------------------
    def connectSignal(self):
        """Connect widget signals."""
        # Double-clicking a cell cancels the corresponding order.
        self.itemDoubleClicked.connect(self.cancelOrder)

    #----------------------------------------------------------------------
    def cancelOrder(self, cell):
        """Send a cancel request built from the order object stored on the cell."""
        order = cell.data

        req = VtCancelOrderReq()
        req.symbol = order.symbol
        req.exchange = order.exchange
        req.frontID = order.frontID
        req.sessionID = order.sessionID
        req.orderID = order.orderID
        self.mainEngine.cancelOrder(req, order.gatewayName)
########################################################################
class PositionMonitor(BasicMonitor):
    """Position monitor: live table of open positions per contract and direction."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(PositionMonitor, self).__init__(mainEngine, eventEngine, parent)

        # Column definitions: field name -> header label and cell renderer.
        headerDict = OrderedDict([
            ('symbol',         {'chinese': u'合约代码', 'cellType': BasicCell}),
            ('vtSymbol',       {'chinese': u'名称', 'cellType': NameCell}),
            ('direction',      {'chinese': u'方向', 'cellType': DirectionCell}),
            ('position',       {'chinese': u'持仓量', 'cellType': BasicCell}),
            ('ydPosition',     {'chinese': u'昨持仓', 'cellType': BasicCell}),
            ('frozen',         {'chinese': u'冻结量', 'cellType': BasicCell}),
            ('price',          {'chinese': u'价格', 'cellType': BasicCell}),
            ('positionProfit', {'chinese': u'持仓盈亏', 'cellType': BasicCell}),
            ('gatewayName',    {'chinese': u'接口', 'cellType': BasicCell}),
        ])
        self.setHeaderDict(headerDict)

        self.setDataKey('vtPositionName')
        self.setEventType(EVENT_POSITION)
        self.setFont(BASIC_FONT)
        self.setSaveData(True)

        self.initTable()
        self.registerEvent()
########################################################################
class AccountMonitor(BasicMonitor):
    """Account monitor: live table of account balances, margin and P&L figures."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(AccountMonitor, self).__init__(mainEngine, eventEngine, parent)

        # Column definitions: field name -> header label and cell renderer.
        headerDict = OrderedDict([
            ('accountID',      {'chinese': u'账户', 'cellType': BasicCell}),
            ('preBalance',     {'chinese': u'昨结', 'cellType': BasicCell}),
            ('balance',        {'chinese': u'净值', 'cellType': BasicCell}),
            ('available',      {'chinese': u'可用', 'cellType': BasicCell}),
            ('commission',     {'chinese': u'手续费', 'cellType': BasicCell}),
            ('margin',         {'chinese': u'保证金', 'cellType': BasicCell}),
            ('closeProfit',    {'chinese': u'平仓盈亏', 'cellType': BasicCell}),
            ('positionProfit', {'chinese': u'持仓盈亏', 'cellType': BasicCell}),
            ('gatewayName',    {'chinese': u'接口', 'cellType': BasicCell}),
        ])
        self.setHeaderDict(headerDict)

        self.setDataKey('vtAccountID')
        self.setEventType(EVENT_ACCOUNT)
        self.setFont(BASIC_FONT)

        self.initTable()
        self.registerEvent()
########################################################################
class TradingWidget(QtGui.QFrame):
    """Simple manual-trading widget: order-entry form plus 5-level market depth."""
    # Qt signal used to marshal tick events from the event engine onto the GUI thread.
    signal = QtCore.pyqtSignal(type(Event()))

    # Fixed option lists backing the combo boxes created in initUi().
    directionList = [DIRECTION_LONG,
                     DIRECTION_SHORT]

    offsetList = [OFFSET_OPEN,
                  OFFSET_CLOSE,
                  OFFSET_CLOSEYESTERDAY,
                  OFFSET_CLOSETODAY]

    priceTypeList = [PRICETYPE_LIMITPRICE,
                     PRICETYPE_MARKETPRICE,
                     PRICETYPE_FAK,
                     PRICETYPE_FOK]

    exchangeList = [EXCHANGE_NONE,
                    EXCHANGE_CFFEX,
                    EXCHANGE_SHFE,
                    EXCHANGE_DCE,
                    EXCHANGE_CZCE,
                    EXCHANGE_SSE,
                    EXCHANGE_SZSE,
                    EXCHANGE_SGE,
                    EXCHANGE_HKEX,
                    EXCHANGE_HKFE,
                    EXCHANGE_SMART,
                    EXCHANGE_ICE,
                    EXCHANGE_CME,
                    EXCHANGE_NYMEX,
                    EXCHANGE_GLOBEX,
                    EXCHANGE_IDEALPRO]

    currencyList = [CURRENCY_NONE,
                    CURRENCY_CNY,
                    CURRENCY_HKD,
                    CURRENCY_USD]

    productClassList = [PRODUCT_NONE,
                        PRODUCT_EQUITY,
                        PRODUCT_FUTURES,
                        PRODUCT_OPTION,
                        PRODUCT_FOREX]

    # Gateway names shown in the gateway combo box.
    # NOTE(review): this is a class-level mutable list that __init__ extends,
    # so it is shared across instances and would accumulate duplicates if the
    # widget were instantiated more than once -- confirm single-instance use.
    gatewayList = ['']

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, parent=None):
        """Constructor"""
        super(TradingWidget, self).__init__(parent)
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine

        # vtSymbol of the contract currently displayed/traded by this widget.
        self.symbol = ''

        # Add the available trading gateways.
        self.gatewayList.extend(mainEngine.getAllGatewayNames())

        self.initUi()
        self.connectSignal()

    #----------------------------------------------------------------------
    def initUi(self):
        """Build the layout: order-entry form on the left, depth display on the right."""
        self.setWindowTitle(u'交易')
        self.setMaximumWidth(400)
        self.setFrameShape(self.Box)    # draw a frame border
        self.setLineWidth(1)

        # Left side: order-entry form.
        labelSymbol = QtGui.QLabel(u'代码')
        labelName = QtGui.QLabel(u'名称')
        labelDirection = QtGui.QLabel(u'方向类型')
        labelOffset = QtGui.QLabel(u'开平')
        labelPrice = QtGui.QLabel(u'价格')
        self.checkFixed = QtGui.QCheckBox(u'')  # checkbox: pin the price (do not follow last price)
        labelVolume = QtGui.QLabel(u'数量')
        labelPriceType = QtGui.QLabel(u'价格类型')
        labelExchange = QtGui.QLabel(u'交易所')
        labelCurrency = QtGui.QLabel(u'货币')
        labelProductClass = QtGui.QLabel(u'产品类型')
        labelGateway = QtGui.QLabel(u'交易接口')

        self.lineSymbol = QtGui.QLineEdit()
        self.lineName = QtGui.QLineEdit()

        self.comboDirection = QtGui.QComboBox()
        self.comboDirection.addItems(self.directionList)

        self.comboOffset = QtGui.QComboBox()
        self.comboOffset.addItems(self.offsetList)

        self.spinPrice = QtGui.QDoubleSpinBox()
        self.spinPrice.setDecimals(4)
        self.spinPrice.setMinimum(0)
        self.spinPrice.setMaximum(100000)

        self.spinVolume = QtGui.QSpinBox()
        self.spinVolume.setMinimum(0)
        self.spinVolume.setMaximum(1000000)

        self.comboPriceType = QtGui.QComboBox()
        self.comboPriceType.addItems(self.priceTypeList)

        self.comboExchange = QtGui.QComboBox()
        self.comboExchange.addItems(self.exchangeList)

        self.comboCurrency = QtGui.QComboBox()
        self.comboCurrency.addItems(self.currencyList)

        self.comboProductClass = QtGui.QComboBox()
        self.comboProductClass.addItems(self.productClassList)

        self.comboGateway = QtGui.QComboBox()
        self.comboGateway.addItems(self.gatewayList)

        gridleft = QtGui.QGridLayout()
        gridleft.addWidget(labelSymbol, 0, 0)
        gridleft.addWidget(labelName, 1, 0)
        gridleft.addWidget(labelDirection, 2, 0)
        gridleft.addWidget(labelOffset, 3, 0)
        gridleft.addWidget(labelPrice, 4, 0)
        gridleft.addWidget(labelVolume, 5, 0)
        gridleft.addWidget(labelPriceType, 6, 0)
        gridleft.addWidget(labelExchange, 7, 0)
        gridleft.addWidget(labelCurrency, 8, 0)
        gridleft.addWidget(labelProductClass, 9, 0)
        gridleft.addWidget(labelGateway, 10, 0)
        gridleft.addWidget(self.lineSymbol, 0, 1, 1, -1)
        gridleft.addWidget(self.lineName, 1, 1, 1, -1)
        gridleft.addWidget(self.comboDirection, 2, 1, 1, -1)
        gridleft.addWidget(self.comboOffset, 3, 1, 1, -1)
        gridleft.addWidget(self.checkFixed, 4, 1)
        gridleft.addWidget(self.spinPrice, 4, 2)
        gridleft.addWidget(self.spinVolume, 5, 1, 1, -1)
        gridleft.addWidget(self.comboPriceType, 6, 1, 1, -1)
        gridleft.addWidget(self.comboExchange, 7, 1, 1, -1)
        gridleft.addWidget(self.comboCurrency, 8, 1, 1, -1)
        gridleft.addWidget(self.comboProductClass, 9, 1, 1, -1)
        gridleft.addWidget(self.comboGateway, 10, 1, 1, -1)

        # Right side: 5-level market-depth display.
        labelBid1 = QtGui.QLabel(u'买一')
        labelBid2 = QtGui.QLabel(u'买二')
        labelBid3 = QtGui.QLabel(u'买三')
        labelBid4 = QtGui.QLabel(u'买四')
        labelBid5 = QtGui.QLabel(u'买五')

        labelAsk1 = QtGui.QLabel(u'卖一')
        labelAsk2 = QtGui.QLabel(u'卖二')
        labelAsk3 = QtGui.QLabel(u'卖三')
        labelAsk4 = QtGui.QLabel(u'卖四')
        labelAsk5 = QtGui.QLabel(u'卖五')

        self.labelBidPrice1 = QtGui.QLabel()
        self.labelBidPrice2 = QtGui.QLabel()
        self.labelBidPrice3 = QtGui.QLabel()
        self.labelBidPrice4 = QtGui.QLabel()
        self.labelBidPrice5 = QtGui.QLabel()
        self.labelBidVolume1 = QtGui.QLabel()
        self.labelBidVolume2 = QtGui.QLabel()
        self.labelBidVolume3 = QtGui.QLabel()
        self.labelBidVolume4 = QtGui.QLabel()
        self.labelBidVolume5 = QtGui.QLabel()

        self.labelAskPrice1 = QtGui.QLabel()
        self.labelAskPrice2 = QtGui.QLabel()
        self.labelAskPrice3 = QtGui.QLabel()
        self.labelAskPrice4 = QtGui.QLabel()
        self.labelAskPrice5 = QtGui.QLabel()
        self.labelAskVolume1 = QtGui.QLabel()
        self.labelAskVolume2 = QtGui.QLabel()
        self.labelAskVolume3 = QtGui.QLabel()
        self.labelAskVolume4 = QtGui.QLabel()
        self.labelAskVolume5 = QtGui.QLabel()

        labelLast = QtGui.QLabel(u'最新')
        self.labelLastPrice = QtGui.QLabel()
        self.labelReturn = QtGui.QLabel()

        self.labelLastPrice.setMinimumWidth(60)
        self.labelReturn.setMinimumWidth(60)

        gridRight = QtGui.QGridLayout()
        gridRight.addWidget(labelAsk5, 0, 0)
        gridRight.addWidget(labelAsk4, 1, 0)
        gridRight.addWidget(labelAsk3, 2, 0)
        gridRight.addWidget(labelAsk2, 3, 0)
        gridRight.addWidget(labelAsk1, 4, 0)
        gridRight.addWidget(labelLast, 5, 0)
        gridRight.addWidget(labelBid1, 6, 0)
        gridRight.addWidget(labelBid2, 7, 0)
        gridRight.addWidget(labelBid3, 8, 0)
        gridRight.addWidget(labelBid4, 9, 0)
        gridRight.addWidget(labelBid5, 10, 0)

        gridRight.addWidget(self.labelAskPrice5, 0, 1)
        gridRight.addWidget(self.labelAskPrice4, 1, 1)
        gridRight.addWidget(self.labelAskPrice3, 2, 1)
        gridRight.addWidget(self.labelAskPrice2, 3, 1)
        gridRight.addWidget(self.labelAskPrice1, 4, 1)
        gridRight.addWidget(self.labelLastPrice, 5, 1)
        gridRight.addWidget(self.labelBidPrice1, 6, 1)
        gridRight.addWidget(self.labelBidPrice2, 7, 1)
        gridRight.addWidget(self.labelBidPrice3, 8, 1)
        gridRight.addWidget(self.labelBidPrice4, 9, 1)
        gridRight.addWidget(self.labelBidPrice5, 10, 1)

        gridRight.addWidget(self.labelAskVolume5, 0, 2)
        gridRight.addWidget(self.labelAskVolume4, 1, 2)
        gridRight.addWidget(self.labelAskVolume3, 2, 2)
        gridRight.addWidget(self.labelAskVolume2, 3, 2)
        gridRight.addWidget(self.labelAskVolume1, 4, 2)
        gridRight.addWidget(self.labelReturn, 5, 2)
        gridRight.addWidget(self.labelBidVolume1, 6, 2)
        gridRight.addWidget(self.labelBidVolume2, 7, 2)
        gridRight.addWidget(self.labelBidVolume3, 8, 2)
        gridRight.addWidget(self.labelBidVolume4, 9, 2)
        gridRight.addWidget(self.labelBidVolume5, 10, 2)

        # Order buttons.
        buttonSendOrder = QtGui.QPushButton(u'发单')
        buttonCancelAll = QtGui.QPushButton(u'全撤')

        size = buttonSendOrder.sizeHint()
        buttonSendOrder.setMinimumHeight(size.height()*2)   # double the default button height
        buttonCancelAll.setMinimumHeight(size.height()*2)

        # Assemble the overall layout.
        hbox = QtGui.QHBoxLayout()
        hbox.addLayout(gridleft)
        hbox.addLayout(gridRight)

        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox)
        vbox.addWidget(buttonSendOrder)
        vbox.addWidget(buttonCancelAll)
        vbox.addStretch()

        self.setLayout(vbox)

        # Wire user actions.
        buttonSendOrder.clicked.connect(self.sendOrder)
        buttonCancelAll.clicked.connect(self.cancelAll)
        self.lineSymbol.returnPressed.connect(self.updateSymbol)

    #----------------------------------------------------------------------
    def updateSymbol(self):
        """Handle a change of the traded contract (Enter pressed in the symbol box)."""
        # Read the form data.
        symbol = str(self.lineSymbol.text())
        exchange = unicode(self.comboExchange.currentText())
        currency = unicode(self.comboCurrency.currentText())
        productClass = unicode(self.comboProductClass.currentText())
        gatewayName = unicode(self.comboGateway.currentText())

        # Look up the contract.
        if exchange:
            vtSymbol = '.'.join([symbol, exchange])
            contract = self.mainEngine.getContract(vtSymbol)
        else:
            vtSymbol = symbol
            contract = self.mainEngine.getContract(symbol)

        if contract:
            vtSymbol = contract.vtSymbol
            gatewayName = contract.gatewayName
            self.lineName.setText(contract.name)
            exchange = contract.exchange    # make sure we have an exchange code

        # Reset price and volume.
        self.spinPrice.setValue(0)
        self.spinVolume.setValue(0)

        # Clear the depth display.
        self.labelBidPrice1.setText('')
        self.labelBidPrice2.setText('')
        self.labelBidPrice3.setText('')
        self.labelBidPrice4.setText('')
        self.labelBidPrice5.setText('')
        self.labelBidVolume1.setText('')
        self.labelBidVolume2.setText('')
        self.labelBidVolume3.setText('')
        self.labelBidVolume4.setText('')
        self.labelBidVolume5.setText('')
        self.labelAskPrice1.setText('')
        self.labelAskPrice2.setText('')
        self.labelAskPrice3.setText('')
        self.labelAskPrice4.setText('')
        self.labelAskPrice5.setText('')
        self.labelAskVolume1.setText('')
        self.labelAskVolume2.setText('')
        self.labelAskVolume3.setText('')
        self.labelAskVolume4.setText('')
        self.labelAskVolume5.setText('')
        self.labelLastPrice.setText('')
        self.labelReturn.setText('')

        # Re-register the tick listener for the new symbol.
        self.eventEngine.unregister(EVENT_TICK + self.symbol, self.signal.emit)
        self.eventEngine.register(EVENT_TICK + vtSymbol, self.signal.emit)

        # Subscribe to the contract.
        req = VtSubscribeReq()
        req.symbol = symbol
        req.exchange = exchange
        req.currency = currency
        req.productClass = productClass

        # Default to following the market price (price not pinned).
        self.checkFixed.setChecked(False)

        self.mainEngine.subscribe(req, gatewayName)

        # Remember the contract this widget now trades.
        self.symbol = vtSymbol

    #----------------------------------------------------------------------
    def updateTick(self, event):
        """Refresh the depth display from a tick event (runs on the GUI thread via signal)."""
        tick = event.dict_['data']

        if tick.vtSymbol == self.symbol:
            # Unless the price is pinned, track the last traded price.
            if not self.checkFixed.isChecked():
                self.spinPrice.setValue(tick.lastPrice)

            self.labelBidPrice1.setText(str(tick.bidPrice1))
            self.labelAskPrice1.setText(str(tick.askPrice1))
            self.labelBidVolume1.setText(str(tick.bidVolume1))
            self.labelAskVolume1.setText(str(tick.askVolume1))

            # Depth levels 2-5 are only shown when the feed provides them.
            if tick.bidPrice2:
                self.labelBidPrice2.setText(str(tick.bidPrice2))
                self.labelBidPrice3.setText(str(tick.bidPrice3))
                self.labelBidPrice4.setText(str(tick.bidPrice4))
                self.labelBidPrice5.setText(str(tick.bidPrice5))

                self.labelAskPrice2.setText(str(tick.askPrice2))
                self.labelAskPrice3.setText(str(tick.askPrice3))
                self.labelAskPrice4.setText(str(tick.askPrice4))
                self.labelAskPrice5.setText(str(tick.askPrice5))

                self.labelBidVolume2.setText(str(tick.bidVolume2))
                self.labelBidVolume3.setText(str(tick.bidVolume3))
                self.labelBidVolume4.setText(str(tick.bidVolume4))
                self.labelBidVolume5.setText(str(tick.bidVolume5))

                self.labelAskVolume2.setText(str(tick.askVolume2))
                self.labelAskVolume3.setText(str(tick.askVolume3))
                self.labelAskVolume4.setText(str(tick.askVolume4))
                self.labelAskVolume5.setText(str(tick.askVolume5))

            self.labelLastPrice.setText(str(tick.lastPrice))

            # Percentage return versus the previous close, e.g. "1.23%".
            if tick.preClosePrice:
                rt = (tick.lastPrice/tick.preClosePrice)-1
                self.labelReturn.setText(('%.2f' %(rt*100))+'%')
            else:
                self.labelReturn.setText('')

    #----------------------------------------------------------------------
    def connectSignal(self):
        """Connect the tick signal to the GUI update slot."""
        self.signal.connect(self.updateTick)

    #----------------------------------------------------------------------
    def sendOrder(self):
        """Submit an order built from the current form contents."""
        symbol = str(self.lineSymbol.text())
        exchange = unicode(self.comboExchange.currentText())
        currency = unicode(self.comboCurrency.currentText())
        productClass = unicode(self.comboProductClass.currentText())
        gatewayName = unicode(self.comboGateway.currentText())

        # Look up the contract.
        if exchange:
            vtSymbol = '.'.join([symbol, exchange])
            contract = self.mainEngine.getContract(vtSymbol)
        else:
            vtSymbol = symbol
            contract = self.mainEngine.getContract(symbol)

        if contract:
            gatewayName = contract.gatewayName
            exchange = contract.exchange    # make sure we have an exchange code

        req = VtOrderReq()
        req.symbol = symbol
        req.exchange = exchange
        req.price = self.spinPrice.value()
        req.volume = self.spinVolume.value()
        req.direction = unicode(self.comboDirection.currentText())
        req.priceType = unicode(self.comboPriceType.currentText())
        req.offset = unicode(self.comboOffset.currentText())
        req.currency = currency
        req.productClass = productClass

        self.mainEngine.sendOrder(req, gatewayName)

    #----------------------------------------------------------------------
    def cancelAll(self):
        """Cancel every working order known to the main engine with one click."""
        l = self.mainEngine.getAllWorkingOrders()
        for order in l:
            req = VtCancelOrderReq()
            req.symbol = order.symbol
            req.exchange = order.exchange
            req.frontID = order.frontID
            req.sessionID = order.sessionID
            req.orderID = order.orderID
            self.mainEngine.cancelOrder(req, order.gatewayName)

    #----------------------------------------------------------------------
    def closePosition(self, cell):
        """Pre-fill the trading form with a closing order for the clicked position row."""
        # cell is a table cell whose .data attribute holds the position object.
        pos = cell.data
        symbol = pos.symbol

        # Show the position's contract in the form.
        self.lineSymbol.setText(symbol)
        self.updateSymbol()

        # Pre-fill order details: limit order, close offset, full position size.
        self.comboPriceType.setCurrentIndex(self.priceTypeList.index(PRICETYPE_LIMITPRICE))
        self.comboOffset.setCurrentIndex(self.offsetList.index(OFFSET_CLOSE))
        self.spinVolume.setValue(pos.position)

        # Closing trades go in the opposite direction of the position.
        if pos.direction == DIRECTION_LONG or pos.direction == DIRECTION_NET:
            self.comboDirection.setCurrentIndex(self.directionList.index(DIRECTION_SHORT))
        else:
            self.comboDirection.setCurrentIndex(self.directionList.index(DIRECTION_LONG))

        # The price is deliberately left for the user to enter, to avoid mistakes.
########################################################################
class ContractMonitor(BasicMonitor):
    """Contract browser: table listing every contract known to the main engine."""

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, parent=None):
        """Constructor"""
        super(ContractMonitor, self).__init__(parent=parent)
        self.mainEngine = mainEngine

        # Column definitions: field name -> header label and cell renderer.
        d = OrderedDict()
        d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
        d['exchange'] = {'chinese':u'交易所', 'cellType':BasicCell}
        d['vtSymbol'] = {'chinese':u'vt系统代码', 'cellType':BasicCell}
        d['name'] = {'chinese':u'名称', 'cellType':BasicCell}
        d['productClass'] = {'chinese':u'合约类型', 'cellType':BasicCell}
        d['size'] = {'chinese':u'大小', 'cellType':BasicCell}
        d['priceTick'] = {'chinese':u'最小价格变动', 'cellType':BasicCell}
        # Optional option-specific columns, currently disabled:
        #d['strikePrice'] = {'chinese':u'期权行权价', 'cellType':BasicCell}
        #d['underlyingSymbol'] = {'chinese':u'期权标的物', 'cellType':BasicCell}
        #d['optionType'] = {'chinese':u'期权类型', 'cellType':BasicCell}
        self.setHeaderDict(d)

        self.initUi()

    #----------------------------------------------------------------------
    def initUi(self):
        """Initialise window title, size, font, table and the context menu."""
        self.setWindowTitle(u'合约查询')
        self.setMinimumSize(800, 800)
        self.setFont(BASIC_FONT)
        self.initTable()
        self.addMenuAction()

    #----------------------------------------------------------------------
    def showAllContracts(self):
        """Fill the table with every contract, sorted by 'exchange.symbol' descending."""
        contracts = self.mainEngine.getAllContracts()
        d = {'.'.join([contract.exchange, contract.symbol]): contract
             for contract in contracts}
        # sorted() replaces the Python-2-only keys()/list.sort() combination
        # and behaves identically on both Python 2 and 3.
        sortedKeys = sorted(d, reverse=True)

        self.setRowCount(len(sortedKeys))
        for row, key in enumerate(sortedKeys):
            contract = d[key]
            for n, header in enumerate(self.headerList):
                # getattr is the idiomatic spelling of __getattribute__(header).
                content = safeUnicode(getattr(contract, header))
                cellType = self.headerDict[header]['cellType']
                cell = cellType(content)

                if self.font:
                    cell.setFont(self.font)  # apply the configured table font

                self.setItem(row, n, cell)

    #----------------------------------------------------------------------
    def refresh(self):
        """Clear the table and reload all contracts."""
        self.menu.close()   # close the context menu first
        self.clearContents()
        self.setRowCount(0)
        self.showAllContracts()

    #----------------------------------------------------------------------
    def addMenuAction(self):
        """Add the 'refresh' entry to the right-click context menu."""
        refreshAction = QtGui.QAction(u'刷新', self)
        refreshAction.triggered.connect(self.refresh)
        self.menu.addAction(refreshAction)

    #----------------------------------------------------------------------
    def show(self):
        """Show the window and refresh its contents."""
        super(ContractMonitor, self).show()
        self.refresh()
| 36.483088 | 91 | 0.501961 |
import json
import csv
import os
from collections import OrderedDict
from PyQt4 import QtGui, QtCore
from eventEngine import *
from vtFunction import *
from vtGateway import *
def loadFont():
    """Load the UI font from VT_setting.json, falling back to a default.

    Reads the 'fontFamily' and 'fontSize' keys of VT_setting.json located
    next to this module and builds a QFont from them.  Any failure
    (missing file, invalid JSON, missing keys) falls back to
    Microsoft YaHei at size 12 so the UI can always start.

    Returns
    -------
    QtGui.QFont
        The configured or fallback font.
    """
    fileName = 'VT_setting.json'
    path = os.path.abspath(os.path.dirname(__file__))
    fileName = os.path.join(path, fileName)

    try:
        # 'with' guarantees the handle is closed; the original used the
        # Python 2 'file()' builtin and never closed the file.
        with open(fileName) as f:
            setting = json.load(f)
        family = setting['fontFamily']
        size = setting['fontSize']
        font = QtGui.QFont(family, size)
    except Exception:
        # Deliberately broad best-effort fallback: a broken settings file
        # must never prevent the application from starting.
        font = QtGui.QFont(u'微软雅黑', 12)
    return font

BASIC_FONT = loadFont()
| true | true |
f7f4eb9ecb6b9d5fa5c5268d10294a74d06faa43 | 4,966 | py | Python | questions/q347_thesaurus/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | null | null | null | questions/q347_thesaurus/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | 1 | 2021-05-15T07:56:51.000Z | 2021-05-15T07:56:51.000Z | questions/q347_thesaurus/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | null | null | null |
def populateCounts(end):
    """Build the DP table of fill counts for runs of missing letters.

    table[i] is a pair (same, diff) for a run of i consecutive blanks:
      same -- number of ways to fill the run when the two letters bounding
              it are equal,
      diff -- number of ways when the bounding letters differ,
    under the rule that adjacent letters must always differ (26 letters).

    Parameters
    ----------
    end : int
        Largest run length needed; the table covers 1..end.

    Returns
    -------
    list of tuple
        The table; index 0 is a (None, None) placeholder.
    """
    table = [(None, None), (25, 24)]
    for i in range(2, end + 1):
        same = 25 * table[i - 1][1]
        diff = 24 * table[i - 1][1] + 1 * table[i - 1][0]
        table.append((same, diff))
    return table


def getMissingConfiguration(s):
    """Return the runs of '?' strictly inside the string.

    Assumes the first and last characters of *s* are already filled.

    Returns
    -------
    list of (int, bool)
        One entry per run of consecutive '?': the run length and True
        when the two letters bounding the run are equal.
    """
    missing_counts = []
    n = len(s)
    i = 1
    while i < n - 1:
        if s[i] == '?':
            c = 0
            left_edge = s[i - 1]
            while i < n - 1 and s[i] == '?':
                c += 1
                i += 1
            right_edge = s[i]
            missing_counts.append((c, left_edge == right_edge))
        else:
            i += 1
    return missing_counts


def solveInterrior(s):
    """Count the fillings of a string whose first and last letters are fixed.

    Assumption: s[0] and s[-1] are concrete letters (not '?'); adjacent
    fixed letters have already been validated by the caller.

    Returns
    -------
    int
        Product over all '?' runs of the per-run counts from the DP table.
    """
    missing_count_configuration = getMissingConfiguration(s)
    total_count = 1
    if missing_count_configuration:
        max_missing_count = max(c for c, _ in missing_count_configuration)
        table = populateCounts(max_missing_count)
        for num_missing, same_edge in missing_count_configuration:
            total_count *= table[num_missing][0 if same_edge else 1]
    return total_count


def solve(s):
    """Count ways to replace every '?' in *s* with a lowercase letter so that
    adjacent letters differ and the first letter equals the last.

    Returns
    -------
    int
        Number of valid fillings; 0 when no valid filling exists.
    """
    if len(s) == 0:
        return 0
    if len(s) == 1:
        return 26 if s == '?' else 1

    n = len(s)
    # Two equal adjacent fixed letters can never become valid.
    for i in range(1, n):
        if s[i] != '?' and s[i - 1] != '?' and s[i] == s[i - 1]:
            return 0

    # Both edge letters already fixed.
    if s[0] != '?' and s[-1] != '?':
        if s[0] != s[-1]:
            return 0
        return solveInterrior(s)

    # Only the first letter fixed: force the last letter to match it.
    if s[0] != '?':
        filled = s[:-1] + s[0]
        # Bug fix: re-check adjacency at the patched boundary
        # (e.g. "a?" would otherwise be counted as the invalid "aa").
        if filled[-2] != '?' and filled[-2] == filled[-1]:
            return 0
        return solveInterrior(filled)

    # Only the last letter fixed: force the first letter to match it.
    if s[-1] != '?':
        filled = s[-1] + s[1:]
        if filled[1] != '?' and filled[1] == filled[0]:
            return 0
        return solveInterrior(filled)

    # Both edge letters missing.
    if s == '??':
        return 0

    # The shared edge letter may not equal its fixed neighbours.
    avoid = set()
    if s[1] != '?':
        avoid.add(s[1])
    if s[-2] != '?':
        avoid.add(s[-2])

    total_count = 0
    for code in range(ord('a'), ord('z') + 1):
        # Bug fix: the original called ord() on an int (TypeError);
        # chr() converts the code point into the candidate edge letter.
        ch = chr(code)
        if ch not in avoid:
            total_count += solveInterrior(ch + s[1:-1] + ch)
    return total_count


print(solve("abcd"))
print(solve("abc?"))
print(solve("a?za"))
print(solve("abca"))
print(solve("a??ba"))
print(solve("a???c?b?"))
print(solve("a????cb?"))
print(solve("a???c??b?"))
| 27.436464 | 117 | 0.575513 |
def populateCounts(end):
    """Build the DP table of fill counts for runs of missing letters.

    table[i] = (same, diff): ways to fill i consecutive blanks when the
    two bounding letters are equal / different, with adjacent letters
    always distinct (26-letter alphabet).  Index 0 is a placeholder.
    """
    table = [(None, None), (25, 24)]
    for i in range(2, end + 1):
        same = 25 * table[i - 1][1]
        diff = 24 * table[i - 1][1] + 1 * table[i - 1][0]
        table.append((same, diff))
    return table


def getMissingConfiguration(s):
    """Return (run length, bounding letters equal?) for each interior '?' run.

    Assumes s[0] and s[-1] are concrete letters.
    """
    missing_counts = []
    n = len(s)
    i = 1
    while i < n - 1:
        if s[i] == '?':
            c = 0
            left_edge = s[i - 1]
            while i < n - 1 and s[i] == '?':
                c += 1
                i += 1
            right_edge = s[i]
            missing_counts.append((c, left_edge == right_edge))
        else:
            i += 1
    return missing_counts


def solveInterrior(s):
    """Count fillings of a string whose first and last letters are fixed."""
    missing_count_configuration = getMissingConfiguration(s)
    total_count = 1
    if missing_count_configuration:
        max_missing_count = max(c for c, _ in missing_count_configuration)
        table = populateCounts(max_missing_count)
        for num_missing, same_edge in missing_count_configuration:
            total_count *= table[num_missing][0 if same_edge else 1]
    return total_count


def solve(s):
    """Count fillings of the '?' in *s* (lowercase letters) such that
    adjacent letters differ and the first letter equals the last;
    0 when impossible."""
    if len(s) == 0:
        return 0
    if len(s) == 1:
        return 26 if s == '?' else 1

    n = len(s)
    # Equal adjacent fixed letters are immediately invalid.
    for i in range(1, n):
        if s[i] != '?' and s[i - 1] != '?' and s[i] == s[i - 1]:
            return 0

    if s[0] != '?' and s[-1] != '?':
        if s[0] != s[-1]:
            return 0
        return solveInterrior(s)

    if s[0] != '?':
        # Force the last letter to match the first; bug fix: re-check
        # adjacency at the patched boundary (e.g. "a?" must yield 0).
        filled = s[:-1] + s[0]
        if filled[-2] != '?' and filled[-2] == filled[-1]:
            return 0
        return solveInterrior(filled)

    if s[-1] != '?':
        filled = s[-1] + s[1:]
        if filled[1] != '?' and filled[1] == filled[0]:
            return 0
        return solveInterrior(filled)

    if s == '??':
        return 0

    # Shared edge letter must differ from its fixed neighbours.
    avoid = set()
    if s[1] != '?':
        avoid.add(s[1])
    if s[-2] != '?':
        avoid.add(s[-2])

    total_count = 0
    for code in range(ord('a'), ord('z') + 1):
        # Bug fix: the original called ord() on an int (TypeError);
        # chr() is the correct conversion from code point to letter.
        ch = chr(code)
        if ch not in avoid:
            total_count += solveInterrior(ch + s[1:-1] + ch)
    return total_count


print(solve("abcd"))
print(solve("abc?"))
print(solve("a?za"))
print(solve("abca"))
print(solve("a??ba"))
print(solve("a???c?b?"))
print(solve("a????cb?"))
print(solve("a???c??b?"))
| true | true |
f7f4ebc73c4bf003f26189a1ff7c80c24f3d7202 | 9,049 | py | Python | tests/test_engine_base_matrix.py | Nachtfeuer/engine | c7d86877b84f648b229c8c958078b899ad9eeeaf | [
"MIT"
] | null | null | null | tests/test_engine_base_matrix.py | Nachtfeuer/engine | c7d86877b84f648b229c8c958078b899ad9eeeaf | [
"MIT"
] | 6 | 2020-01-11T10:50:48.000Z | 2020-01-30T06:41:38.000Z | tests/test_engine_base_matrix.py | Nachtfeuer/engine | c7d86877b84f648b229c8c958078b899ad9eeeaf | [
"MIT"
] | null | null | null | """Testing of class Matrix."""
# pylint: disable=no-self-use
import math
from unittest import TestCase
from hamcrest import assert_that, equal_to, calling, raises, less_than_or_equal_to
from engine.base.matrix import Matrix
from engine.threed.vector import Vector
from engine.tools.options import Options
from engine.tools.conversions import to_radian
class TestMatrix(TestCase):
    """Testing of class Matrix."""

    def test_default(self):
        """Test default constructor."""
        # A freshly constructed Matrix is a 2x2 zero matrix.
        matrix = Matrix()
        assert_that(len(list(matrix.rows())), equal_to(2))
        assert_that(len(list(matrix.columns())), equal_to(2))
        assert_that(list(matrix.rows()), equal_to([[0, 0], [0, 0]]))
        assert_that(list(matrix.columns()), equal_to([[0, 0], [0, 0]]))
        assert_that(matrix.shape(), equal_to((2, 2)))

    def test_setitem(self):
        """Test changing a value in the matrix."""
        matrix = Matrix()
        matrix[0, 0] = 1
        matrix[0, 1] = 2
        matrix[1, 0] = 3
        matrix[1, 1] = 4
        assert_that(list(matrix.rows()), equal_to([[1, 2], [3, 4]]))
        assert_that(list(matrix.columns()), equal_to([[1, 3], [2, 4]]))
        # Error cases; the expected message strings mirror the production
        # code verbatim (including the 'equired' spelling).
        assert_that(calling(matrix.__setitem__).with_args(1, 5),
                    raises(TypeError, "Index is equired to be a tuple"))
        assert_that(calling(matrix.__setitem__).with_args((1, 2, 3), 5),
                    raises(TypeError, "Index is equired to be a tuple with two ints"))
        assert_that(calling(matrix.__setitem__).with_args((1.0, 2.0), 5),
                    raises(TypeError, "Index is equired to be a tuple with two ints"))
        assert_that(calling(matrix.__setitem__).with_args((1, 1), "hello"),
                    raises(TypeError, "Value is expected to be a float or an int"))

    def test_getitem(self):
        """Test retrieving a value in the matrix."""
        matrix = Matrix.from_sequence([[1, 2], [3, 4]])
        assert_that(matrix[0, 0], equal_to(1))
        assert_that(matrix[0, 1], equal_to(2))
        assert_that(matrix[1, 0], equal_to(3))
        assert_that(matrix[1, 1], equal_to(4))
        assert_that(calling(matrix.__getitem__).with_args(1),
                    raises(TypeError, "Index is equired to be a tuple"))
        assert_that(calling(matrix.__getitem__).with_args((1.0, 2.0)),
                    raises(TypeError, "Index is equired to be a tuple with two ints"))

    def test_from_sequence(self):
        """Testing of from_sequence static function."""
        matrix = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
        assert_that(list(matrix.rows()), equal_to([[1, 2, 3], [4, 5, 6]]))

        message = "Not a list or tuple of rows"
        assert_that(calling(Matrix.from_sequence).with_args(1234),
                    raises(TypeError, message))

        message = "Not all rows are lists or tuples"
        assert_that(calling(Matrix.from_sequence).with_args([1, 2, 3, 4]),
                    raises(TypeError, message))

        message = "Either rows, columns or both are 0 entries"
        assert_that(calling(Matrix.from_sequence).with_args([]),
                    raises(ValueError, message))

    def test_mul(self):
        """Testing of __mul__ method."""
        matrix_a = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
        matrix_b = Matrix.from_sequence([[7, 8], [9, 10], [11, 12]])
        matrix_c = matrix_a * matrix_b
        # (2x3) * (3x2) yields a 2x2 result.
        assert_that(matrix_c.shape(), equal_to((2, 2)))
        assert_that(list(matrix_c.rows()), equal_to([[58, 64], [139, 154]]))

        message = "You cannot multiply a value of type %s with a matrix" % type(0)
        assert_that(calling(matrix_a.__mul__).with_args(1234),
                    raises(TypeError, message))

    def test_translation(self):
        """Testing translation via matrix multiplication."""
        # representation of the vector or point (ignore fourth value, is always 1)
        matrix_a = Matrix.from_sequence([[1, 1, 1, 1]])
        # representation of the translation matrix (row-vector convention:
        # the translation components sit in the last row)
        matrix_b = Matrix.from_sequence(
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 2, 3, 1]])
        # translating
        matrix_c = matrix_a * matrix_b
        # result vector or point (ignore fourth value is always 1)
        assert_that(list(matrix_c.columns()), equal_to([[2], [3], [4], [1]]))

    def test_scale(self):
        """Testing scaling via matrix multiplication."""
        # representation of the vector or point (ignore fourth value, is always 1)
        matrix_a = Matrix.from_sequence([[1, 2, 3, 1]])
        # representation of the scale matrix (uniform factor 2 on the diagonal)
        matrix_b = Matrix.from_sequence(
            [[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 2, 0], [0, 0, 0, 1]])
        # scaling
        matrix_c = matrix_a * matrix_b
        # result vector or point (ignore fourth value is always 1)
        assert_that(list(matrix_c.columns()), equal_to([[2], [4], [6], [1]]))

    def test_rotation_z(self):
        """Testing rotation around z axis via matrix multiplication."""
        # representation of the vector or point (ignore fourth value, is always 1)
        vector = Vector(1, 1, 0).normalized()
        matrix_a = Matrix.from_sequence([[vector.x, vector.y, vector.z]])
        # representation of the rotation matrix around the z axis
        # (comment fixed: the original said "along y axis")
        angle = to_radian(-90.0)
        matrix_b = Matrix.from_sequence(
            [[math.cos(angle), -math.sin(angle), 0.0],
             [math.sin(angle), math.cos(angle), 0.0],
             [0.0, 0.0, 1.0]])
        # rotating around z axis
        matrix_final = matrix_a * matrix_b
        assert_that(abs(matrix_final[0, 0] - -vector.x), less_than_or_equal_to(Options.PRECISION),
                    "x value was %g instead of %s" % (matrix_final[0, 0], -vector.x))
        assert_that(abs(matrix_final[0, 1] - vector.y), less_than_or_equal_to(Options.PRECISION),
                    "y value was %g instead of %s" % (matrix_final[0, 1], vector.y))
        assert_that(abs(matrix_final[0, 2] - vector.z), less_than_or_equal_to(Options.PRECISION),
                    "z value was %g instead of %s" % (matrix_final[0, 2], vector.z))

    def test_rotation_y(self):
        """Testing rotation around y axis via matrix multiplication."""
        # representation of the vector or point (ignore fourth value, is always 1)
        vector = Vector(1, 1, 0).normalized()
        matrix_a = Matrix.from_sequence([[vector.x, vector.y, vector.z]])
        # representation of the rotation matrix along y axis
        angle = to_radian(90.0)
        matrix_b = Matrix.from_sequence(
            [[math.cos(angle), 0.0, math.sin(angle)],
             [0.0, 1.0, 0.0],
             [-math.sin(angle), 0.0, math.cos(angle)]])
        # rotating around the y axis (comment fixed: the original said z)
        matrix_final = matrix_a * matrix_b
        assert_that(abs(matrix_final[0, 0] - vector.z), less_than_or_equal_to(Options.PRECISION),
                    "x value was %g instead of %s" % (matrix_final[0, 0], vector.z))
        assert_that(abs(matrix_final[0, 1] - vector.y), less_than_or_equal_to(Options.PRECISION),
                    "y value was %g instead of %s" % (matrix_final[0, 1], vector.y))
        assert_that(abs(matrix_final[0, 2] - vector.x), less_than_or_equal_to(Options.PRECISION),
                    "z value was %g instead of %s" % (matrix_final[0, 2], vector.x))

    def test_rotation_x(self):
        """Testing rotation around x axis via matrix multiplication."""
        # representation of the vector or point (ignore fourth value, is always 1)
        vector = Vector(1, 1, 0).normalized()
        matrix_a = Matrix.from_sequence([[vector.x, vector.y, vector.z]])
        # representation of the rotation matrix around the x axis
        # (comment fixed: the original said "along y axis")
        angle = to_radian(-90.0)
        matrix_b = Matrix.from_sequence(
            [[1.0, 0.0, 0.0],
             [0.0, math.cos(angle), -math.sin(angle)],
             [0.0, math.sin(angle), math.cos(angle)]])
        # rotating around the x axis (comment fixed: the original said z)
        matrix_final = matrix_a * matrix_b
        # NOTE(review): the first failure message prints vector.z although the
        # comparison uses vector.x -- message-only mismatch, assertions are fine.
        assert_that(abs(matrix_final[0, 0] - vector.x), less_than_or_equal_to(Options.PRECISION),
                    "x value was %g instead of %s" % (matrix_final[0, 0], vector.z))
        assert_that(abs(matrix_final[0, 1] - vector.z), less_than_or_equal_to(Options.PRECISION),
                    "y value was %g instead of %s" % (matrix_final[0, 1], vector.z))
        assert_that(abs(matrix_final[0, 2] - vector.y), less_than_or_equal_to(Options.PRECISION),
                    "z value was %g instead of %s" % (matrix_final[0, 2], vector.y))

    def test_repr(self):
        """Test __repr__ method."""
        matrix = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
        expected_string = "Matrix([[1, 2, 3], [4, 5, 6]])"
        assert_that(str(matrix), equal_to(expected_string))
def test_matrix_mult_perf(benchmark):
    """Testing performance of matrix multiplication."""
    # 'benchmark' is the pytest-benchmark fixture: it times repeated calls of
    # Matrix.__mul__ on a 2x3 * 3x2 product.
    matrix_a = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
    matrix_b = Matrix.from_sequence([[7, 8], [9, 10], [11, 12]])
    benchmark(matrix_a.__mul__, matrix_b)
| 47.130208 | 98 | 0.604597 |
import math
from unittest import TestCase
from hamcrest import assert_that, equal_to, calling, raises, less_than_or_equal_to
from engine.base.matrix import Matrix
from engine.threed.vector import Vector
from engine.tools.options import Options
from engine.tools.conversions import to_radian
class TestMatrix(TestCase):
    """Unit tests for the Matrix class and its 3d-transform use cases."""

    def test_default(self):
        """A freshly constructed Matrix is a 2x2 zero matrix."""
        matrix = Matrix()
        assert_that(len(list(matrix.rows())), equal_to(2))
        assert_that(len(list(matrix.columns())), equal_to(2))
        assert_that(list(matrix.rows()), equal_to([[0, 0], [0, 0]]))
        assert_that(list(matrix.columns()), equal_to([[0, 0], [0, 0]]))
        assert_that(matrix.shape(), equal_to((2, 2)))

    def test_setitem(self):
        """__setitem__ stores values and rejects bad indices and values."""
        matrix = Matrix()
        matrix[0, 0] = 1
        matrix[0, 1] = 2
        matrix[1, 0] = 3
        matrix[1, 1] = 4
        assert_that(list(matrix.rows()), equal_to([[1, 2], [3, 4]]))
        assert_that(list(matrix.columns()), equal_to([[1, 3], [2, 4]]))
        # The expected messages intentionally mirror the implementation's
        # wording (including the "equired" typo) -- hamcrest matches on them.
        assert_that(calling(matrix.__setitem__).with_args(1, 5),
                    raises(TypeError, "Index is equired to be a tuple"))
        assert_that(calling(matrix.__setitem__).with_args((1, 2, 3), 5),
                    raises(TypeError, "Index is equired to be a tuple with two ints"))
        assert_that(calling(matrix.__setitem__).with_args((1.0, 2.0), 5),
                    raises(TypeError, "Index is equired to be a tuple with two ints"))
        assert_that(calling(matrix.__setitem__).with_args((1, 1), "hello"),
                    raises(TypeError, "Value is expected to be a float or an int"))

    def test_getitem(self):
        """__getitem__ returns stored values and rejects bad indices."""
        matrix = Matrix.from_sequence([[1, 2], [3, 4]])
        assert_that(matrix[0, 0], equal_to(1))
        assert_that(matrix[0, 1], equal_to(2))
        assert_that(matrix[1, 0], equal_to(3))
        assert_that(matrix[1, 1], equal_to(4))
        assert_that(calling(matrix.__getitem__).with_args(1),
                    raises(TypeError, "Index is equired to be a tuple"))
        assert_that(calling(matrix.__getitem__).with_args((1.0, 2.0)),
                    raises(TypeError, "Index is equired to be a tuple with two ints"))

    def test_from_sequence(self):
        """from_sequence builds a matrix from nested rows and validates input."""
        matrix = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
        assert_that(list(matrix.rows()), equal_to([[1, 2, 3], [4, 5, 6]]))
        message = "Not a list or tuple of rows"
        assert_that(calling(Matrix.from_sequence).with_args(1234),
                    raises(TypeError, message))
        message = "Not all rows are lists or tuples"
        assert_that(calling(Matrix.from_sequence).with_args([1, 2, 3, 4]),
                    raises(TypeError, message))
        message = "Either rows, columns or both are 0 entries"
        assert_that(calling(Matrix.from_sequence).with_args([]),
                    raises(ValueError, message))

    def test_mul(self):
        """2x3 * 3x2 multiplication yields the expected 2x2 product."""
        matrix_a = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
        matrix_b = Matrix.from_sequence([[7, 8], [9, 10], [11, 12]])
        matrix_c = matrix_a * matrix_b
        assert_that(matrix_c.shape(), equal_to((2, 2)))
        assert_that(list(matrix_c.rows()), equal_to([[58, 64], [139, 154]]))
        message = "You cannot multiply a value of type %s with a matrix" % type(0)
        assert_that(calling(matrix_a.__mul__).with_args(1234),
                    raises(TypeError, message))

    def test_translation(self):
        """Homogeneous 1x4 vector times a translation matrix shifts by (1, 2, 3)."""
        matrix_a = Matrix.from_sequence([[1, 1, 1, 1]])
        matrix_b = Matrix.from_sequence(
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 2, 3, 1]])
        matrix_c = matrix_a * matrix_b
        assert_that(list(matrix_c.columns()), equal_to([[2], [3], [4], [1]]))

    def test_scale(self):
        """Homogeneous 1x4 vector times a scale matrix doubles x, y and z."""
        matrix_a = Matrix.from_sequence([[1, 2, 3, 1]])
        matrix_b = Matrix.from_sequence(
            [[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 2, 0], [0, 0, 0, 1]])
        matrix_c = matrix_a * matrix_b
        assert_that(list(matrix_c.columns()), equal_to([[2], [4], [6], [1]]))

    def test_rotation_z(self):
        """Rotating a unit vector by -90 degrees around the z axis."""
        vector = Vector(1, 1, 0).normalized()
        matrix_a = Matrix.from_sequence([[vector.x, vector.y, vector.z]])
        angle = to_radian(-90.0)
        matrix_b = Matrix.from_sequence(
            [[math.cos(angle), -math.sin(angle), 0.0],
             [math.sin(angle), math.cos(angle), 0.0],
             [0.0, 0.0, 1.0]])
        matrix_final = matrix_a * matrix_b
        assert_that(abs(matrix_final[0, 0] - -vector.x), less_than_or_equal_to(Options.PRECISION),
                    "x value was %g instead of %s" % (matrix_final[0, 0], -vector.x))
        assert_that(abs(matrix_final[0, 1] - vector.y), less_than_or_equal_to(Options.PRECISION),
                    "y value was %g instead of %s" % (matrix_final[0, 1], vector.y))
        assert_that(abs(matrix_final[0, 2] - vector.z), less_than_or_equal_to(Options.PRECISION),
                    "z value was %g instead of %s" % (matrix_final[0, 2], vector.z))

    def test_rotation_y(self):
        """Rotating a unit vector by +90 degrees around the y axis."""
        vector = Vector(1, 1, 0).normalized()
        matrix_a = Matrix.from_sequence([[vector.x, vector.y, vector.z]])
        angle = to_radian(90.0)
        matrix_b = Matrix.from_sequence(
            [[math.cos(angle), 0.0, math.sin(angle)],
             [0.0, 1.0, 0.0],
             [-math.sin(angle), 0.0, math.cos(angle)]])
        matrix_final = matrix_a * matrix_b
        assert_that(abs(matrix_final[0, 0] - vector.z), less_than_or_equal_to(Options.PRECISION),
                    "x value was %g instead of %s" % (matrix_final[0, 0], vector.z))
        assert_that(abs(matrix_final[0, 1] - vector.y), less_than_or_equal_to(Options.PRECISION),
                    "y value was %g instead of %s" % (matrix_final[0, 1], vector.y))
        assert_that(abs(matrix_final[0, 2] - vector.x), less_than_or_equal_to(Options.PRECISION),
                    "z value was %g instead of %s" % (matrix_final[0, 2], vector.x))

    def test_rotation_x(self):
        """Rotating a unit vector by -90 degrees around the x axis."""
        vector = Vector(1, 1, 0).normalized()
        matrix_a = Matrix.from_sequence([[vector.x, vector.y, vector.z]])
        angle = to_radian(-90.0)
        matrix_b = Matrix.from_sequence(
            [[1.0, 0.0, 0.0],
             [0.0, math.cos(angle), -math.sin(angle)],
             [0.0, math.sin(angle), math.cos(angle)]])
        matrix_final = matrix_a * matrix_b
        # Fixed: the failure message previously reported vector.z although the
        # assertion compares matrix_final[0, 0] against vector.x.
        assert_that(abs(matrix_final[0, 0] - vector.x), less_than_or_equal_to(Options.PRECISION),
                    "x value was %g instead of %s" % (matrix_final[0, 0], vector.x))
        assert_that(abs(matrix_final[0, 1] - vector.z), less_than_or_equal_to(Options.PRECISION),
                    "y value was %g instead of %s" % (matrix_final[0, 1], vector.z))
        assert_that(abs(matrix_final[0, 2] - vector.y), less_than_or_equal_to(Options.PRECISION),
                    "z value was %g instead of %s" % (matrix_final[0, 2], vector.y))

    def test_repr(self):
        """__repr__ renders the canonical 'Matrix([[...]])' literal."""
        matrix = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
        expected_string = "Matrix([[1, 2, 3], [4, 5, 6]])"
        assert_that(str(matrix), equal_to(expected_string))
def test_matrix_mult_perf(benchmark):
    """Benchmark a 2x3 * 3x2 matrix multiplication."""
    # 'benchmark' is the pytest-benchmark fixture; it times Matrix.__mul__.
    matrix_a = Matrix.from_sequence([[1, 2, 3], [4, 5, 6]])
    matrix_b = Matrix.from_sequence([[7, 8], [9, 10], [11, 12]])
    benchmark(matrix_a.__mul__, matrix_b)
| true | true |
f7f4ec39fb9ddc0c3b058c539016113f296f07cf | 630 | py | Python | tests/test_getObjectRoutes.py | benjaminguinaudeau/TikTok-Api | 3ca25c05bc758e628ab5c35a4c4cf3d1810184a2 | [
"MIT"
] | null | null | null | tests/test_getObjectRoutes.py | benjaminguinaudeau/TikTok-Api | 3ca25c05bc758e628ab5c35a4c4cf3d1810184a2 | [
"MIT"
] | null | null | null | tests/test_getObjectRoutes.py | benjaminguinaudeau/TikTok-Api | 3ca25c05bc758e628ab5c35a4c4cf3d1810184a2 | [
"MIT"
] | null | null | null | from TikTokApi import TikTokApi
import os
# Shared TikTokApi client for all tests. The "verifyFp" environment variable
# presumably supplies a browser verify_fp cookie value -- confirm against the
# TikTokApi documentation.
api = TikTokApi.get_instance(custom_verifyFp=os.environ.get("verifyFp", None))
def test_tiktok_object():
    """Fetching a post by id and by URL returns non-empty data (live network call)."""
    assert len(api.getTikTokById("6829267836783971589")) > 0
    assert (
        len(
            api.getTikTokByUrl(
                "https://www.tiktok.com/@therock/video/6829267836783971589"
            )
        )
        > 0
    )
def test_user_object():
    """User lookup by handle returns non-empty data (live network call)."""
    assert len(api.getUserObject("therock")) > 0
def test_music_object():
    """Music/sound lookup by id returns non-empty data (live network call)."""
    assert len(api.getMusicObject("6820695018429253633")) > 0
def test_hashtag_object():
    """Hashtag lookup returns non-empty data (live network call)."""
    assert len(api.getHashtagObject("funny")) > 0
| 21.724138 | 78 | 0.657143 | from TikTokApi import TikTokApi
import os
# Shared TikTokApi client for all tests. The "verifyFp" environment variable
# presumably supplies a browser verify_fp cookie value -- confirm against the
# TikTokApi documentation.
api = TikTokApi.get_instance(custom_verifyFp=os.environ.get("verifyFp", None))
def test_tiktok_object():
    """Fetching a post by id and by URL returns non-empty data (live network call)."""
    assert len(api.getTikTokById("6829267836783971589")) > 0
    assert (
        len(
            api.getTikTokByUrl(
                "https://www.tiktok.com/@therock/video/6829267836783971589"
            )
        )
        > 0
    )
def test_user_object():
    """User lookup by handle returns non-empty data (live network call)."""
    assert len(api.getUserObject("therock")) > 0
def test_music_object():
    """Music/sound lookup by id returns non-empty data (live network call)."""
    assert len(api.getMusicObject("6820695018429253633")) > 0
def test_hashtag_object():
    """Hashtag lookup returns non-empty data (live network call)."""
    assert len(api.getHashtagObject("funny")) > 0
| true | true |
f7f4ec604a62b269cb5052c5cf795d02f6aa9f41 | 746 | py | Python | src/restful/rest/rest/urls.py | zgarciboogie/workout_tracker | ead429d9fac2e25eb8b49b1d1238133821b00cf1 | [
"MIT"
] | null | null | null | src/restful/rest/rest/urls.py | zgarciboogie/workout_tracker | ead429d9fac2e25eb8b49b1d1238133821b00cf1 | [
"MIT"
] | null | null | null | src/restful/rest/rest/urls.py | zgarciboogie/workout_tracker | ead429d9fac2e25eb8b49b1d1238133821b00cf1 | [
"MIT"
] | null | null | null | """rest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Route table: only the Django admin site is exposed here.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 33.909091 | 77 | 0.707775 | from django.contrib import admin
from django.urls import path
# Route table: only the Django admin site is exposed here.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| true | true |
f7f4ed5ad462b25a556d52ea00c4c87cbd1d1aff | 62 | py | Python | maiornum.py | ruancs/Python | 8987cd4a31af903782f67afd611e6b8893cc770b | [
"MIT"
] | 2 | 2020-02-04T05:04:59.000Z | 2020-02-22T07:55:16.000Z | maiornum.py | ruancs/Python | 8987cd4a31af903782f67afd611e6b8893cc770b | [
"MIT"
] | null | null | null | maiornum.py | ruancs/Python | 8987cd4a31af903782f67afd611e6b8893cc770b | [
"MIT"
] | null | null | null | def maximo (x, y):
if x > y:
return(x)
else:
return(y)
| 8.857143 | 18 | 0.532258 | def maximo (x, y):
if x > y:
return(x)
else:
return(y)
| true | true |
f7f4edae6acb4fc2b9d1e9a60f362ea6a9af3141 | 2,615 | py | Python | tensorflow_datasets/translate/wmt19.py | turgunyusuf/datasets | 50af6bfdf2dc96c7500883d0aaa83c2315aa87dc | [
"Apache-2.0"
] | 2 | 2019-05-10T16:22:31.000Z | 2019-09-16T11:07:46.000Z | tensorflow_datasets/translate/wmt19.py | turgunyusuf/datasets | 50af6bfdf2dc96c7500883d0aaa83c2315aa87dc | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/translate/wmt19.py | turgunyusuf/datasets | 50af6bfdf2dc96c7500883d0aaa83c2315aa87dc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT19: Translate dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt19/translation-task.html"
# TODO(adarob): Update with citation of overview paper once it is published.
_CITATION = """
@ONLINE {wmt19translate,
author = "Wikimedia Foundation",
title = "ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News",
url = "http://www.statmt.org/wmt19/translation-task.html"
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]
] + [("fr", "de")]
class Wmt19Translate(wmt.WmtTranslate):
  """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""

  # One BuilderConfig per language pair in _LANGUAGE_PAIRS; all share the
  # same dataset version.
  BUILDER_CONFIGS = [
      wmt.WmtConfig(  # pylint:disable=g-complex-comprehension
          description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
          url=_URL,
          citation=_CITATION,
          language_pair=(l1, l2),
          version="0.0.2")
      for l1, l2 in _LANGUAGE_PAIRS
  ]

  @property
  def _subsets(self):
    """Maps each tfds split to the WMT subset names it is built from.

    NOTE(review): which subsets actually apply to a given language pair is
    presumably resolved by the WmtTranslate base class -- not visible here.
    """
    return {
        tfds.Split.TRAIN: [
            "europarl_v9", "europarl_v7_frde", "paracrawl_v3",
            "paracrawl_v1_ru", "paracrawl_v3_frde", "commoncrawl",
            "commoncrawl_frde", "newscommentary_v14", "newscommentary_v14_frde",
            "czeng_17", "yandexcorpus", "wikititles_v1", "uncorpus_v1",
            "rapid_2016_ltfi", "rapid_2019"] + wmt.CWMT_SUBSET_NAMES,
        tfds.Split.VALIDATION: [
            "euelections_dev2019", "newsdev2014", "newsdev2015", "newsdev2016",
            "newsdev2017", "newsdev2018", "newsdev2019", "newsdiscussdev2015",
            "newsdiscusstest2015", "newssyscomb2009", "newstest2008",
            "newstest2009", "newstest2010", "newstest2011", "newstest2012",
            "newstest2013", "newstest2014", "newstest2015", "newstest2016",
            "newstestB2016", "newstest2017", "newstestB2017", "newstest2018"]
    }
| 39.621212 | 115 | 0.668834 |
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.translate import wmt
_URL = "http://www.statmt.org/wmt19/translation-task.html"
_CITATION = """
@ONLINE {wmt19translate,
author = "Wikimedia Foundation",
title = "ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News",
url = "http://www.statmt.org/wmt19/translation-task.html"
}
"""
_LANGUAGE_PAIRS = [
(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]
] + [("fr", "de")]
class Wmt19Translate(wmt.WmtTranslate):
  """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""

  # One BuilderConfig per language pair in _LANGUAGE_PAIRS; all share the
  # same dataset version.
  BUILDER_CONFIGS = [
      wmt.WmtConfig(
          description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
          url=_URL,
          citation=_CITATION,
          language_pair=(l1, l2),
          version="0.0.2")
      for l1, l2 in _LANGUAGE_PAIRS
  ]

  @property
  def _subsets(self):
    """Maps each tfds split to the WMT subset names it is built from.

    NOTE(review): which subsets actually apply to a given language pair is
    presumably resolved by the WmtTranslate base class -- not visible here.
    """
    return {
        tfds.Split.TRAIN: [
            "europarl_v9", "europarl_v7_frde", "paracrawl_v3",
            "paracrawl_v1_ru", "paracrawl_v3_frde", "commoncrawl",
            "commoncrawl_frde", "newscommentary_v14", "newscommentary_v14_frde",
            "czeng_17", "yandexcorpus", "wikititles_v1", "uncorpus_v1",
            "rapid_2016_ltfi", "rapid_2019"] + wmt.CWMT_SUBSET_NAMES,
        tfds.Split.VALIDATION: [
            "euelections_dev2019", "newsdev2014", "newsdev2015", "newsdev2016",
            "newsdev2017", "newsdev2018", "newsdev2019", "newsdiscussdev2015",
            "newsdiscusstest2015", "newssyscomb2009", "newstest2008",
            "newstest2009", "newstest2010", "newstest2011", "newstest2012",
            "newstest2013", "newstest2014", "newstest2015", "newstest2016",
            "newstestB2016", "newstest2017", "newstestB2017", "newstest2018"]
    }
| true | true |
f7f4edc650e7799840030f911471f1e3f2b19b0a | 5,011 | py | Python | venv/Lib/site-packages/PySide2/examples/widgets/graphicsview/anchorlayout.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 41 | 2021-06-19T13:57:18.000Z | 2021-12-02T17:08:53.000Z | venv/Lib/site-packages/PySide2/examples/widgets/graphicsview/anchorlayout.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | venvWIN/Lib/site-packages/PySide2/examples/widgets/graphicsview/anchorlayout.py | NeroNekro/PortableController | a8bbfc1b6c8cb2c919e48eb0104e42f436059b18 | [
"BSD-3-Clause"
] | 4 | 2021-07-02T03:09:51.000Z | 2021-11-25T13:00:10.000Z |
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
from PySide2 import QtCore, QtWidgets
def createItem(minimum, preferred, maximum, name):
    """Return a QGraphicsProxyWidget hosting a push button with the given size hints."""
    proxy = QtWidgets.QGraphicsProxyWidget()
    proxy.setWidget(QtWidgets.QPushButton(name))
    # Apply the three size hints (minimum, preferred, maximum) in order.
    for setter, size in ((proxy.setMinimumSize, minimum),
                         (proxy.setPreferredSize, preferred),
                         (proxy.setMaximumSize, maximum)):
        setter(size)
    proxy.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                        QtWidgets.QSizePolicy.Preferred)
    return proxy
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    scene = QtWidgets.QGraphicsScene()
    scene.setSceneRect(0, 0, 800, 480)
    # Shared size hints for most of the buttons.
    minSize = QtCore.QSizeF(30, 100)
    prefSize = QtCore.QSizeF(210, 100)
    maxSize = QtCore.QSizeF(300, 100)
    # Seven proxy-widget buttons arranged by the anchor layout below.
    a = createItem(minSize, prefSize, maxSize, "A")
    b = createItem(minSize, prefSize, maxSize, "B")
    c = createItem(minSize, prefSize, maxSize, "C")
    d = createItem(minSize, prefSize, maxSize, "D")
    e = createItem(minSize, prefSize, maxSize, "E")
    f = createItem(QtCore.QSizeF(30, 50), QtCore.QSizeF(150, 50), maxSize, "F")
    g = createItem(QtCore.QSizeF(30, 50), QtCore.QSizeF(30, 100), maxSize, "G")
    # Top-level window widget driven by a QGraphicsAnchorLayout.
    l = QtWidgets.QGraphicsAnchorLayout()
    l.setSpacing(0)
    w = QtWidgets.QGraphicsWidget(None, QtCore.Qt.Window)
    w.setPos(20, 20)
    w.setLayout(l)
    # Vertical.
    l.addAnchor(a, QtCore.Qt.AnchorTop, l, QtCore.Qt.AnchorTop)
    l.addAnchor(b, QtCore.Qt.AnchorTop, l, QtCore.Qt.AnchorTop)
    l.addAnchor(c, QtCore.Qt.AnchorTop, a, QtCore.Qt.AnchorBottom)
    l.addAnchor(c, QtCore.Qt.AnchorTop, b, QtCore.Qt.AnchorBottom)
    l.addAnchor(c, QtCore.Qt.AnchorBottom, d, QtCore.Qt.AnchorTop)
    l.addAnchor(c, QtCore.Qt.AnchorBottom, e, QtCore.Qt.AnchorTop)
    l.addAnchor(d, QtCore.Qt.AnchorBottom, l, QtCore.Qt.AnchorBottom)
    l.addAnchor(e, QtCore.Qt.AnchorBottom, l, QtCore.Qt.AnchorBottom)
    l.addAnchor(c, QtCore.Qt.AnchorTop, f, QtCore.Qt.AnchorTop)
    l.addAnchor(c, QtCore.Qt.AnchorVerticalCenter, f, QtCore.Qt.AnchorBottom)
    l.addAnchor(f, QtCore.Qt.AnchorBottom, g, QtCore.Qt.AnchorTop)
    l.addAnchor(c, QtCore.Qt.AnchorBottom, g, QtCore.Qt.AnchorBottom)
    # Horizontal.
    l.addAnchor(l, QtCore.Qt.AnchorLeft, a, QtCore.Qt.AnchorLeft)
    l.addAnchor(l, QtCore.Qt.AnchorLeft, d, QtCore.Qt.AnchorLeft)
    l.addAnchor(a, QtCore.Qt.AnchorRight, b, QtCore.Qt.AnchorLeft)
    l.addAnchor(a, QtCore.Qt.AnchorRight, c, QtCore.Qt.AnchorLeft)
    l.addAnchor(c, QtCore.Qt.AnchorRight, e, QtCore.Qt.AnchorLeft)
    l.addAnchor(b, QtCore.Qt.AnchorRight, l, QtCore.Qt.AnchorRight)
    l.addAnchor(e, QtCore.Qt.AnchorRight, l, QtCore.Qt.AnchorRight)
    l.addAnchor(d, QtCore.Qt.AnchorRight, e, QtCore.Qt.AnchorLeft)
    l.addAnchor(l, QtCore.Qt.AnchorLeft, f, QtCore.Qt.AnchorLeft)
    l.addAnchor(l, QtCore.Qt.AnchorLeft, g, QtCore.Qt.AnchorLeft)
    l.addAnchor(f, QtCore.Qt.AnchorRight, g, QtCore.Qt.AnchorRight)
    # Show the anchored widget in a view and enter the Qt event loop.
    scene.addItem(w)
    scene.setBackgroundBrush(QtCore.Qt.darkGreen)
    view = QtWidgets.QGraphicsView(scene)
    view.show()
    sys.exit(app.exec_())
| 39.769841 | 85 | 0.700858 | true | true | |
f7f4ee2d824ef7d2ab32b897a286ee699744e5ec | 19,608 | py | Python | official/transformer/v2/transformer_main.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 5 | 2020-11-16T06:26:19.000Z | 2022-03-27T02:01:40.000Z | official/transformer/v2/transformer_main.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 5 | 2020-11-13T18:50:30.000Z | 2022-02-10T01:42:36.000Z | official/transformer/v2/transformer_main.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 3 | 2017-08-15T11:29:03.000Z | 2020-12-07T18:06:12.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and evaluate the Transformer model.
See README for description of setting the training schedule and evaluating the
BLEU score.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: disable=g-bad-import-order
from official.transformer import compute_bleu
from official.transformer.utils import tokenizer
from official.transformer.v2 import data_pipeline
from official.transformer.v2 import metrics
from official.transformer.v2 import misc
from official.transformer.v2 import optimizer
from official.transformer.v2 import transformer
from official.transformer.v2 import translate
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import keras_utils
from official.utils.misc import distribution_utils
# Large sentinel value; used as an effectively-infinite bound elsewhere in the
# module -- usage is outside this chunk, verify before relying on it.
INF = int(1e9)
# Sub-directory name for BLEU artifacts (referenced outside this chunk).
BLEU_DIR = "bleu"
# Number of batches taken from the eval dataset in TransformerTask.predict().
_SINGLE_SAMPLE = 1
def translate_and_compute_bleu(model,
                               params,
                               subtokenizer,
                               bleu_source,
                               bleu_ref,
                               distribution_strategy=None):
  """Translate file and report the cased and uncased bleu scores.

  Args:
    model: A Keras model, used to generate the translations.
    params: A dictionary, containing the translation related parameters.
    subtokenizer: A subtokenizer object, used for encoding and decoding source
      and translated lines.
    bleu_source: A file containing source sentences for translation.
    bleu_ref: A file containing the reference for the translated sentences.
    distribution_strategy: A platform distribution strategy, used for TPU based
      translation.

  Returns:
    uncased_score: A float, the case insensitive BLEU score.
    cased_score: A float, the case sensitive BLEU score.
  """
  # Create temporary file to store translation.
  tmp = tempfile.NamedTemporaryFile(delete=False)
  tmp_filename = tmp.name
  # Close the handle immediately: translate_file reopens the path itself, and
  # keeping the handle open would leak a file descriptor (and on Windows block
  # reopening the file).
  tmp.close()

  try:
    translate.translate_file(
        model,
        params,
        subtokenizer,
        bleu_source,
        output_file=tmp_filename,
        print_all_translations=False,
        distribution_strategy=distribution_strategy)

    # Compute uncased and cased bleu scores.
    uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)
    cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)
  finally:
    # Always remove the scratch file, even if translation or scoring raised.
    os.remove(tmp_filename)
  return uncased_score, cased_score
def evaluate_and_log_bleu(model,
                          params,
                          bleu_source,
                          bleu_ref,
                          vocab_file,
                          distribution_strategy=None):
  """Computes the BLEU score for a model and logs both variants.

  Args:
    model: A Keras model, used to generate the translations.
    params: A dictionary, containing the translation related parameters.
    bleu_source: A file containing source sentences for translation.
    bleu_ref: A file containing the reference for the translated sentences.
    vocab_file: A file containing the vocabulary for translation.
    distribution_strategy: A platform distribution strategy, used for TPU based
      translation.

  Returns:
    A (uncased_score, cased_score) pair of floats.
  """
  scores = translate_and_compute_bleu(
      model, params, tokenizer.Subtokenizer(vocab_file), bleu_source, bleu_ref,
      distribution_strategy)
  uncased_score, cased_score = scores
  logging.info("Bleu score (uncased): %s", uncased_score)
  logging.info("Bleu score (cased): %s", cased_score)
  return uncased_score, cased_score
class TransformerTask(object):
"""Main entry of Transformer model."""
  def __init__(self, flags_obj):
    """Init function of TransformerMain.

    Builds the params dict from flags, constructs the distribution strategy,
    and (for fp16/bfloat16) installs the global Keras mixed-precision policy.

    Args:
      flags_obj: Object containing parsed flag values, i.e., FLAGS.

    Raises:
      ValueError: if not using static batch for input data on TPU.
    """
    self.flags_obj = flags_obj
    # Inference model is built lazily by eval()/predict().
    self.predict_model = None

    # Add flag-defined parameters to params object
    num_gpus = flags_core.get_num_gpus(flags_obj)
    self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)

    params["num_gpus"] = num_gpus
    params["use_ctl"] = flags_obj.use_ctl
    params["data_dir"] = flags_obj.data_dir
    params["model_dir"] = flags_obj.model_dir
    params["static_batch"] = flags_obj.static_batch
    params["max_length"] = flags_obj.max_length
    params["decode_batch_size"] = flags_obj.decode_batch_size
    params["decode_max_length"] = flags_obj.decode_max_length
    params["padded_decode"] = flags_obj.padded_decode
    params["num_parallel_calls"] = (
        flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE)

    params["use_synthetic_data"] = flags_obj.use_synthetic_data
    # Fall back to the param-set default when no batch size flag is given.
    params["batch_size"] = flags_obj.batch_size or params["default_batch_size"]
    params["repeat_dataset"] = None
    params["dtype"] = flags_core.get_tf_dtype(flags_obj)
    params["enable_tensorboard"] = flags_obj.enable_tensorboard
    params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training
    params["steps_between_evals"] = flags_obj.steps_between_evals

    # Build the device distribution strategy (CPU/GPU/TPU) from flags; may be
    # None when no strategy is requested.
    self.distribution_strategy = distribution_utils.get_distribution_strategy(
        distribution_strategy=flags_obj.distribution_strategy,
        num_gpus=num_gpus,
        all_reduce_alg=flags_obj.all_reduce_alg,
        num_packs=flags_obj.num_packs,
        tpu_address=flags_obj.tpu or "")
    if self.use_tpu:
      params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync
      if not params["static_batch"]:
        raise ValueError("TPU requires static batch for input data.")
    else:
      logging.info("Running transformer with num_gpus = %d", num_gpus)

    if self.distribution_strategy:
      logging.info("For training, using distribution strategy: %s",
                   self.distribution_strategy)
    else:
      logging.info("Not using any distribution strategy.")

    if params["dtype"] == tf.float16:
      # TODO(reedwm): It's pretty ugly to set the global policy in a constructor
      # like this. What if multiple instances of TransformerTask are created?
      # We should have a better way in the tf.keras.mixed_precision API of doing
      # this.
      loss_scale = flags_core.get_loss_scale(
          flags_obj, default_for_fp16="dynamic")
      policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
          "mixed_float16", loss_scale=loss_scale)
      tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
    elif params["dtype"] == tf.bfloat16:
      # bfloat16 needs no loss scaling.
      policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
          "mixed_bfloat16")
      tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
@property
def use_tpu(self):
if self.distribution_strategy:
return isinstance(self.distribution_strategy,
tf.distribute.experimental.TPUStrategy)
return False
  def train(self):
    """Trains the model.

    Runs either a custom training loop (CTL, TPU-only) or Keras model.fit
    (GPU/CPU), evaluating BLEU every `steps_between_evals` steps when BLEU
    files are provided.

    Returns:
      A dict of training stats (loss / history, plus BLEU scores and their
      per-iteration history when BLEU evaluation ran).
    """
    params = self.params
    flags_obj = self.flags_obj
    # Sets config options.
    keras_utils.set_session_config(enable_xla=flags_obj.enable_xla)

    _ensure_dir(flags_obj.model_dir)
    with distribution_utils.get_strategy_scope(self.distribution_strategy):
      model = transformer.create_model(params, is_train=True)
      opt = self._create_optimizer()

      # Resume from the latest checkpoint if one exists; the optimizer's
      # iteration counter doubles as the global step.
      current_step = 0
      checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)
      latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)
      if latest_checkpoint:
        checkpoint.restore(latest_checkpoint)
        logging.info("Loaded checkpoint %s", latest_checkpoint)
        current_step = opt.iterations.numpy()

      if params["use_ctl"]:
        # Custom-training-loop setup: loss metric + optional TensorBoard
        # summary writer.
        train_loss_metric = tf.keras.metrics.Mean(
            "training_loss", dtype=tf.float32)
        if params["enable_tensorboard"]:
          summary_writer = tf.compat.v2.summary.create_file_writer(
              flags_obj.model_dir)
        else:
          summary_writer = tf.compat.v2.summary.create_noop_writer()
        train_metrics = [train_loss_metric]
        if params["enable_metrics_in_training"]:
          train_metrics = train_metrics + model.metrics
      else:
        model.compile(opt)

    model.summary()

    if self.use_tpu:
      # Different from experimental_distribute_dataset,
      # experimental_distribute_datasets_from_function requires
      # per-replica/local batch size.
      params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync
      train_ds = (
          self.distribution_strategy
          .experimental_distribute_datasets_from_function(
              lambda ctx: data_pipeline.train_input_fn(params, ctx)))
    else:
      train_ds = data_pipeline.train_input_fn(params)
      map_data_fn = data_pipeline.map_data_for_transformer_fn
      train_ds = train_ds.map(
          map_data_fn, num_parallel_calls=params["num_parallel_calls"])
    if params["use_ctl"]:
      train_ds_iterator = iter(train_ds)

    callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)

    # TODO(b/139418525): Refactor the custom training loop logic.
    @tf.function
    def train_steps(iterator, steps):
      """Runs `steps` training steps; updates train_loss_metric in place.

      Args:
        iterator: The input iterator of the training dataset.
        steps: An integer, the number of training steps.
      """

      def _step_fn(inputs):
        """Per-replica step function."""
        inputs, targets = inputs
        with tf.GradientTape() as tape:
          logits = model([inputs, targets], training=True)
          loss = metrics.transformer_loss(logits, targets,
                                          params["label_smoothing"],
                                          params["vocab_size"])
          # Scales the loss, which results in using the average loss across all
          # of the replicas for backprop.
          scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync

        # De-dupes variables due to keras tracking issues.
        tvars = list({id(v): v for v in model.trainable_variables}.values())
        grads = tape.gradient(scaled_loss, tvars)
        opt.apply_gradients(zip(grads, tvars))
        # For reporting, the metric takes the mean of losses.
        train_loss_metric.update_state(loss)

      for _ in tf.range(steps):
        # The metric is reset each step, so its result is the last step's loss.
        train_loss_metric.reset_states()
        self.distribution_strategy.experimental_run_v2(
            _step_fn, args=(next(iterator),))

    cased_score, uncased_score = None, None
    cased_score_history, uncased_score_history = [], []
    # Outer loop: train for up to steps_between_evals steps, then (optionally)
    # evaluate BLEU, until train_steps is reached.
    while current_step < flags_obj.train_steps:
      remaining_steps = flags_obj.train_steps - current_step
      train_steps_per_eval = (
          remaining_steps if remaining_steps < flags_obj.steps_between_evals
          else flags_obj.steps_between_evals)
      current_iteration = current_step // flags_obj.steps_between_evals

      logging.info(
          "Start train iteration at global step:{}".format(current_step))
      history = None
      if params["use_ctl"]:
        if not self.use_tpu:
          raise NotImplementedError(
              "Custom training loop on GPUs is not implemented.")
        # Runs training steps.
        with summary_writer.as_default():
          train_steps(
              train_ds_iterator,
              tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32))
          current_step += train_steps_per_eval
          train_loss = train_loss_metric.result().numpy().astype(float)
          logging.info("Train Step: %d/%d / loss = %s", current_step,
                       flags_obj.train_steps, train_loss)

          if params["enable_tensorboard"]:
            for metric_obj in train_metrics:
              tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(),
                                          current_step)

        # Checkpoint after every CTL iteration.
        checkpoint_name = checkpoint.save(
            os.path.join(flags_obj.model_dir,
                         "ctl_step_{}.ckpt".format(current_step)))
        logging.info("Saved checkpoint to %s", checkpoint_name)
      else:
        if self.use_tpu:
          raise NotImplementedError(
              "Keras model.fit on TPUs is not implemented.")
        history = model.fit(
            train_ds,
            initial_epoch=current_iteration,
            epochs=current_iteration + 1,
            steps_per_epoch=train_steps_per_eval,
            callbacks=callbacks,
            # If TimeHistory is enabled, progress bar would be messy. Increase
            # the verbose level to get rid of it.
            verbose=(2 if flags_obj.enable_time_history else 1))
        current_step += train_steps_per_eval
        logging.info("Train history: {}".format(history.history))

      logging.info("End train iteration at global step:{}".format(current_step))

      if (flags_obj.bleu_source and flags_obj.bleu_ref):
        uncased_score, cased_score = self.eval()
        cased_score_history.append([current_iteration + 1, cased_score])
        uncased_score_history.append([current_iteration + 1, uncased_score])

    # CTL reports the last loss; model.fit reports stats built from history.
    stats = ({
        "loss": train_loss
    } if history is None else misc.build_stats(history, callbacks))
    if uncased_score and cased_score:
      stats["bleu_uncased"] = uncased_score
      stats["bleu_cased"] = cased_score
      stats["bleu_uncased_history"] = uncased_score_history
      stats["bleu_cased_history"] = cased_score_history
    return stats
  def eval(self):
    """Evaluates the model and reports BLEU scores.

    Restores the latest checkpoint from ``flags_obj.model_dir`` into a
    cached inference model (built once, reused on subsequent calls) and
    runs BLEU evaluation against the configured source/reference files.

    Returns:
      Tuple of (uncased_bleu_score, cased_bleu_score), as produced by
      `evaluate_and_log_bleu`.
    """
    # Only TPU evaluation needs a distribution strategy scope; on GPU/CPU a
    # None strategy below yields a no-op context manager.
    distribution_strategy = self.distribution_strategy if self.use_tpu else None

    # We only want to create the model under DS scope for TPU case.
    # When 'distribution_strategy' is None, a no-op DummyContextManager will
    # be used.
    with distribution_utils.get_strategy_scope(distribution_strategy):
      if not self.predict_model:
        self.predict_model = transformer.create_model(self.params, False)
      self._load_weights_if_possible(
          self.predict_model,
          tf.train.latest_checkpoint(self.flags_obj.model_dir))
      self.predict_model.summary()
    return evaluate_and_log_bleu(
        self.predict_model, self.params, self.flags_obj.bleu_source,
        self.flags_obj.bleu_ref, self.flags_obj.vocab_file,
        distribution_strategy)
  def predict(self):
    """Predicts result from the model.

    Restores the latest checkpoint, runs inference on a single sample
    (``_SINGLE_SAMPLE``) from the eval input pipeline and prints the decoded
    translations via `translate.translate_from_input`.
    """
    params = self.params
    flags_obj = self.flags_obj

    with tf.name_scope("model"):
      model = transformer.create_model(params, is_train=False)
      self._load_weights_if_possible(
          model, tf.train.latest_checkpoint(self.flags_obj.model_dir))
      model.summary()
    subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)

    ds = data_pipeline.eval_input_fn(params)
    # Keep only the features (drop the labels) and take a single batch.
    ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)
    ret = model.predict(ds)
    val_outputs, _ = ret
    length = len(val_outputs)
    for i in range(length):
      translate.translate_from_input(val_outputs[i], subtokenizer)
  def _create_callbacks(self, cur_log_dir, init_steps, params):
    """Creates a list of callbacks.

    Args:
      cur_log_dir: Directory where per-epoch checkpoints are written.
      init_steps: Global step used to initialise the learning-rate scheduler.
      params: Model hyperparameter dict; reads the learning-rate settings and
        "steps_between_evals".

    Returns:
      List of Keras callbacks: the misc defaults, a learning-rate scheduler
      and a weights-only ModelCheckpoint.
    """
    sfunc = optimizer.LearningRateFn(params["learning_rate"],
                                     params["hidden_size"],
                                     params["learning_rate_warmup_steps"])
    scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)
    callbacks = misc.get_callbacks(params["steps_between_evals"])
    callbacks.append(scheduler_callback)
    ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt")
    callbacks.append(
        tf.keras.callbacks.ModelCheckpoint(
            ckpt_full_path, save_weights_only=True))
    return callbacks
  def _load_weights_if_possible(self, model, init_weight_path=None):
    """Loads model weights when it is provided.

    Args:
      model: Keras model to restore into.
      init_weight_path: Checkpoint path; when None (e.g. no checkpoint in
        model_dir yet) the model keeps its fresh initialisation and the
        skip is logged.
    """
    if init_weight_path:
      logging.info("Load weights: {}".format(init_weight_path))
      # TODO(b/139414977): Having the same variable restoring method for both
      # TPU and GPU.
      if self.use_tpu:
        # TPU restore goes through tf.train.Checkpoint so that the optimizer
        # slot variables are restored together with the model variables.
        checkpoint = tf.train.Checkpoint(
            model=model, optimizer=self._create_optimizer())
        checkpoint.restore(init_weight_path)
      else:
        model.load_weights(init_weight_path)
    else:
      logging.info("Weights not loaded from path:{}".format(init_weight_path))
  def _create_optimizer(self):
    """Creates optimizer.

    Returns:
      An Adam optimizer.  On TPU it consumes the LearningRateSchedule
      directly; elsewhere the scalar learning rate is used and the schedule
      is applied by the LR callback instead.  For float16 the optimizer is
      wrapped in a LossScaleOptimizer, and for the "graph_rewrite" fp16
      implementation the mixed-precision graph rewrite is additionally
      enabled.
    """
    params = self.params
    # TODO(b/139414679): Explore the difference between using
    # LearningRateSchedule and callback for GPU runs, and try to merge them.
    lr_schedule = optimizer.LearningRateSchedule(
        params["learning_rate"], params["hidden_size"],
        params["learning_rate_warmup_steps"])
    opt = tf.keras.optimizers.Adam(
        lr_schedule if self.use_tpu else params["learning_rate"],
        params["optimizer_adam_beta1"],
        params["optimizer_adam_beta2"],
        epsilon=params["optimizer_adam_epsilon"])
    if params["dtype"] == tf.float16:
      opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
          opt,
          loss_scale=flags_core.get_loss_scale(
              self.flags_obj, default_for_fp16="dynamic"))
    if self.flags_obj.fp16_implementation == "graph_rewrite":
      # Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
      # determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
      # which will ensure tf.compat.v2.keras.mixed_precision and
      # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double
      # up.
      opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
    return opt
def _ensure_dir(log_dir):
  """Create `log_dir` (via tf.io.gfile) when it does not already exist."""
  if tf.io.gfile.exists(log_dir):
    return
  tf.io.gfile.makedirs(log_dir)
def main(_):
  """Entry point: builds a TransformerTask and dispatches on --mode.

  Raises:
    ValueError: If flags_obj.mode is not one of "train", "predict", "eval".
  """
  flags_obj = flags.FLAGS
  with logger.benchmark_context(flags_obj):
    task = TransformerTask(flags_obj)

    # Execute flag override logic for better model performance
    if flags_obj.tf_gpu_thread_mode:
      keras_utils.set_gpu_thread_mode_and_count(
          per_gpu_thread_count=flags_obj.per_gpu_thread_count,
          gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
          num_gpus=flags_obj.num_gpus,
          datasets_num_private_threads=flags_obj.datasets_num_private_threads)

    if flags_obj.mode == "train":
      task.train()
    elif flags_obj.mode == "predict":
      task.predict()
    elif flags_obj.mode == "eval":
      task.eval()
    else:
      raise ValueError("Invalid mode {}".format(flags_obj.mode))
if __name__ == "__main__":
  # Enable TF2 behaviour on a TF1 runtime, register the transformer flags,
  # then hand control to absl's app runner.
  tf.compat.v1.enable_v2_behavior()
  logging.set_verbosity(logging.INFO)
  misc.define_transformer_flags()
  app.run(main)
| 39.373494 | 80 | 0.693135 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.transformer import compute_bleu
from official.transformer.utils import tokenizer
from official.transformer.v2 import data_pipeline
from official.transformer.v2 import metrics
from official.transformer.v2 import misc
from official.transformer.v2 import optimizer
from official.transformer.v2 import transformer
from official.transformer.v2 import translate
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import keras_utils
from official.utils.misc import distribution_utils
INF = int(1e9)
BLEU_DIR = "bleu"
_SINGLE_SAMPLE = 1
def translate_and_compute_bleu(model,
params,
subtokenizer,
bleu_source,
bleu_ref,
distribution_strategy=None):
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp_filename = tmp.name
translate.translate_file(
model,
params,
subtokenizer,
bleu_source,
output_file=tmp_filename,
print_all_translations=False,
distribution_strategy=distribution_strategy)
uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)
cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)
os.remove(tmp_filename)
return uncased_score, cased_score
def evaluate_and_log_bleu(model,
params,
bleu_source,
bleu_ref,
vocab_file,
distribution_strategy=None):
subtokenizer = tokenizer.Subtokenizer(vocab_file)
uncased_score, cased_score = translate_and_compute_bleu(
model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy)
logging.info("Bleu score (uncased): %s", uncased_score)
logging.info("Bleu score (cased): %s", cased_score)
return uncased_score, cased_score
class TransformerTask(object):
def __init__(self, flags_obj):
self.flags_obj = flags_obj
self.predict_model = None
num_gpus = flags_core.get_num_gpus(flags_obj)
self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)
params["num_gpus"] = num_gpus
params["use_ctl"] = flags_obj.use_ctl
params["data_dir"] = flags_obj.data_dir
params["model_dir"] = flags_obj.model_dir
params["static_batch"] = flags_obj.static_batch
params["max_length"] = flags_obj.max_length
params["decode_batch_size"] = flags_obj.decode_batch_size
params["decode_max_length"] = flags_obj.decode_max_length
params["padded_decode"] = flags_obj.padded_decode
params["num_parallel_calls"] = (
flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE)
params["use_synthetic_data"] = flags_obj.use_synthetic_data
params["batch_size"] = flags_obj.batch_size or params["default_batch_size"]
params["repeat_dataset"] = None
params["dtype"] = flags_core.get_tf_dtype(flags_obj)
params["enable_tensorboard"] = flags_obj.enable_tensorboard
params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training
params["steps_between_evals"] = flags_obj.steps_between_evals
self.distribution_strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=num_gpus,
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs,
tpu_address=flags_obj.tpu or "")
if self.use_tpu:
params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync
if not params["static_batch"]:
raise ValueError("TPU requires static batch for input data.")
else:
logging.info("Running transformer with num_gpus = %d", num_gpus)
if self.distribution_strategy:
logging.info("For training, using distribution strategy: %s",
self.distribution_strategy)
else:
logging.info("Not using any distribution strategy.")
if params["dtype"] == tf.float16:
# like this. What if multiple instances of TransformerTask are created?
# We should have a better way in the tf.keras.mixed_precision API of doing
# this.
loss_scale = flags_core.get_loss_scale(
flags_obj, default_for_fp16="dynamic")
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
"mixed_float16", loss_scale=loss_scale)
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
elif params["dtype"] == tf.bfloat16:
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
"mixed_bfloat16")
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
@property
def use_tpu(self):
if self.distribution_strategy:
return isinstance(self.distribution_strategy,
tf.distribute.experimental.TPUStrategy)
return False
def train(self):
params = self.params
flags_obj = self.flags_obj
# Sets config options.
keras_utils.set_session_config(enable_xla=flags_obj.enable_xla)
_ensure_dir(flags_obj.model_dir)
with distribution_utils.get_strategy_scope(self.distribution_strategy):
model = transformer.create_model(params, is_train=True)
opt = self._create_optimizer()
current_step = 0
checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)
latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)
if latest_checkpoint:
checkpoint.restore(latest_checkpoint)
logging.info("Loaded checkpoint %s", latest_checkpoint)
current_step = opt.iterations.numpy()
if params["use_ctl"]:
train_loss_metric = tf.keras.metrics.Mean(
"training_loss", dtype=tf.float32)
if params["enable_tensorboard"]:
summary_writer = tf.compat.v2.summary.create_file_writer(
flags_obj.model_dir)
else:
summary_writer = tf.compat.v2.summary.create_noop_writer()
train_metrics = [train_loss_metric]
if params["enable_metrics_in_training"]:
train_metrics = train_metrics + model.metrics
else:
model.compile(opt)
model.summary()
if self.use_tpu:
# Different from experimental_distribute_dataset,
# experimental_distribute_datasets_from_function requires
# per-replica/local batch size.
params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync
train_ds = (
self.distribution_strategy
.experimental_distribute_datasets_from_function(
lambda ctx: data_pipeline.train_input_fn(params, ctx)))
else:
train_ds = data_pipeline.train_input_fn(params)
map_data_fn = data_pipeline.map_data_for_transformer_fn
train_ds = train_ds.map(
map_data_fn, num_parallel_calls=params["num_parallel_calls"])
if params["use_ctl"]:
train_ds_iterator = iter(train_ds)
callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)
# TODO(b/139418525): Refactor the custom training loop logic.
@tf.function
def train_steps(iterator, steps):
def _step_fn(inputs):
inputs, targets = inputs
with tf.GradientTape() as tape:
logits = model([inputs, targets], training=True)
loss = metrics.transformer_loss(logits, targets,
params["label_smoothing"],
params["vocab_size"])
# Scales the loss, which results in using the average loss across all
# of the replicas for backprop.
scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync
# De-dupes variables due to keras tracking issues.
tvars = list({id(v): v for v in model.trainable_variables}.values())
grads = tape.gradient(scaled_loss, tvars)
opt.apply_gradients(zip(grads, tvars))
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
for _ in tf.range(steps):
train_loss_metric.reset_states()
self.distribution_strategy.experimental_run_v2(
_step_fn, args=(next(iterator),))
cased_score, uncased_score = None, None
cased_score_history, uncased_score_history = [], []
while current_step < flags_obj.train_steps:
remaining_steps = flags_obj.train_steps - current_step
train_steps_per_eval = (
remaining_steps if remaining_steps < flags_obj.steps_between_evals
else flags_obj.steps_between_evals)
current_iteration = current_step // flags_obj.steps_between_evals
logging.info(
"Start train iteration at global step:{}".format(current_step))
history = None
if params["use_ctl"]:
if not self.use_tpu:
raise NotImplementedError(
"Custom training loop on GPUs is not implemented.")
# Runs training steps.
with summary_writer.as_default():
train_steps(
train_ds_iterator,
tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32))
current_step += train_steps_per_eval
train_loss = train_loss_metric.result().numpy().astype(float)
logging.info("Train Step: %d/%d / loss = %s", current_step,
flags_obj.train_steps, train_loss)
if params["enable_tensorboard"]:
for metric_obj in train_metrics:
tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(),
current_step)
checkpoint_name = checkpoint.save(
os.path.join(flags_obj.model_dir,
"ctl_step_{}.ckpt".format(current_step)))
logging.info("Saved checkpoint to %s", checkpoint_name)
else:
if self.use_tpu:
raise NotImplementedError(
"Keras model.fit on TPUs is not implemented.")
history = model.fit(
train_ds,
initial_epoch=current_iteration,
epochs=current_iteration + 1,
steps_per_epoch=train_steps_per_eval,
callbacks=callbacks,
# If TimeHistory is enabled, progress bar would be messy. Increase
# the verbose level to get rid of it.
verbose=(2 if flags_obj.enable_time_history else 1))
current_step += train_steps_per_eval
logging.info("Train history: {}".format(history.history))
logging.info("End train iteration at global step:{}".format(current_step))
if (flags_obj.bleu_source and flags_obj.bleu_ref):
uncased_score, cased_score = self.eval()
cased_score_history.append([current_iteration + 1, cased_score])
uncased_score_history.append([current_iteration + 1, uncased_score])
stats = ({
"loss": train_loss
} if history is None else misc.build_stats(history, callbacks))
if uncased_score and cased_score:
stats["bleu_uncased"] = uncased_score
stats["bleu_cased"] = cased_score
stats["bleu_uncased_history"] = uncased_score_history
stats["bleu_cased_history"] = cased_score_history
return stats
def eval(self):
distribution_strategy = self.distribution_strategy if self.use_tpu else None
# We only want to create the model under DS scope for TPU case.
# When 'distribution_strategy' is None, a no-op DummyContextManager will
# be used.
with distribution_utils.get_strategy_scope(distribution_strategy):
if not self.predict_model:
self.predict_model = transformer.create_model(self.params, False)
self._load_weights_if_possible(
self.predict_model,
tf.train.latest_checkpoint(self.flags_obj.model_dir))
self.predict_model.summary()
return evaluate_and_log_bleu(
self.predict_model, self.params, self.flags_obj.bleu_source,
self.flags_obj.bleu_ref, self.flags_obj.vocab_file,
distribution_strategy)
def predict(self):
params = self.params
flags_obj = self.flags_obj
with tf.name_scope("model"):
model = transformer.create_model(params, is_train=False)
self._load_weights_if_possible(
model, tf.train.latest_checkpoint(self.flags_obj.model_dir))
model.summary()
subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)
ds = data_pipeline.eval_input_fn(params)
ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)
ret = model.predict(ds)
val_outputs, _ = ret
length = len(val_outputs)
for i in range(length):
translate.translate_from_input(val_outputs[i], subtokenizer)
def _create_callbacks(self, cur_log_dir, init_steps, params):
sfunc = optimizer.LearningRateFn(params["learning_rate"],
params["hidden_size"],
params["learning_rate_warmup_steps"])
scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)
callbacks = misc.get_callbacks(params["steps_between_evals"])
callbacks.append(scheduler_callback)
ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt")
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
ckpt_full_path, save_weights_only=True))
return callbacks
def _load_weights_if_possible(self, model, init_weight_path=None):
if init_weight_path:
logging.info("Load weights: {}".format(init_weight_path))
# TODO(b/139414977): Having the same variable restoring method for both
# TPU and GPU.
if self.use_tpu:
checkpoint = tf.train.Checkpoint(
model=model, optimizer=self._create_optimizer())
checkpoint.restore(init_weight_path)
else:
model.load_weights(init_weight_path)
else:
logging.info("Weights not loaded from path:{}".format(init_weight_path))
def _create_optimizer(self):
params = self.params
# TODO(b/139414679): Explore the difference between using
# LearningRateSchedule and callback for GPU runs, and try to merge them.
lr_schedule = optimizer.LearningRateSchedule(
params["learning_rate"], params["hidden_size"],
params["learning_rate_warmup_steps"])
opt = tf.keras.optimizers.Adam(
lr_schedule if self.use_tpu else params["learning_rate"],
params["optimizer_adam_beta1"],
params["optimizer_adam_beta2"],
epsilon=params["optimizer_adam_epsilon"])
if params["dtype"] == tf.float16:
opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
opt,
loss_scale=flags_core.get_loss_scale(
self.flags_obj, default_for_fp16="dynamic"))
if self.flags_obj.fp16_implementation == "graph_rewrite":
# Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
# determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
# which will ensure tf.compat.v2.keras.mixed_precision and
# tf.train.experimental.enable_mixed_precision_graph_rewrite do not double
# up.
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
return opt
def _ensure_dir(log_dir):
if not tf.io.gfile.exists(log_dir):
tf.io.gfile.makedirs(log_dir)
def main(_):
flags_obj = flags.FLAGS
with logger.benchmark_context(flags_obj):
task = TransformerTask(flags_obj)
# Execute flag override logic for better model performance
if flags_obj.tf_gpu_thread_mode:
keras_utils.set_gpu_thread_mode_and_count(
per_gpu_thread_count=flags_obj.per_gpu_thread_count,
gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
num_gpus=flags_obj.num_gpus,
datasets_num_private_threads=flags_obj.datasets_num_private_threads)
if flags_obj.mode == "train":
task.train()
elif flags_obj.mode == "predict":
task.predict()
elif flags_obj.mode == "eval":
task.eval()
else:
raise ValueError("Invalid mode {}".format(flags_obj.mode))
if __name__ == "__main__":
tf.compat.v1.enable_v2_behavior()
logging.set_verbosity(logging.INFO)
misc.define_transformer_flags()
app.run(main)
| true | true |
f7f4ee40dfa5458e0e2073e838b3dde810443002 | 3,590 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/hsa/tests/hsadrv/test_hlc.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/hsa/tests/hsadrv/test_hlc.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1 | 2018-04-03T22:37:40.000Z | 2018-04-03T23:53:43.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/hsa/tests/hsadrv/test_hlc.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | from __future__ import print_function, absolute_import
import numba.unittest_support as unittest
from numba.hsa.hlc import hlc
SPIR_SAMPLE = """
; ModuleID = 'kernel.out.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-n32"
target triple = "hsail64-pc-unknown-amdopencl"
define spir_kernel void @copy(float addrspace(1)* nocapture %input,
float addrspace(1)* nocapture %output) {
%1 = load float addrspace(1)* %input, align 4, !tbaa !8
store float %1, float addrspace(1)* %output, align 4, !tbaa !8
ret void
}
!opencl.kernels = !{!0}
!opencl.enable.FP_CONTRACT = !{}
!opencl.spir.version = !{!6}
!opencl.ocl.version = !{!6}
!opencl.used.extensions = !{!7}
!opencl.used.optional.core.features = !{!7}
!opencl.compiler.options = !{!7}
!0 = metadata !{void (float addrspace(1)*, float addrspace(1)*)* @copy, metadata !1, metadata !2, metadata !3, metadata !4, metadata !5}
!1 = metadata !{metadata !"kernel_arg_addr_space", i32 1, i32 1}
!2 = metadata !{metadata !"kernel_arg_access_qual", metadata !"none", metadata !"none"}
!3 = metadata !{metadata !"kernel_arg_type", metadata !"float*", metadata !"float*"}
!4 = metadata !{metadata !"kernel_arg_type_qual", metadata !"", metadata !""}
!5 = metadata !{metadata !"kernel_arg_base_type", metadata !"float*", metadata !"float*"}
!6 = metadata !{i32 1, i32 2}
!7 = metadata !{}
!8 = metadata !{metadata !"float", metadata !9}
!9 = metadata !{metadata !"omnipotent char", metadata !10}
!10 = metadata !{metadata !"Simple C/C++ TBAA"}
"""
class TestHLC(unittest.TestCase):
    """Tests for the numba HSA high-level compiler (HLC) wrapper.

    Uses a hand-written SPIR module (SPIR_SAMPLE) whose kernel copies one
    float from an input buffer to an output buffer.  test_brig requires a
    working HSA agent (hardware + driver) to dispatch on.
    """

    def test_hsail(self):
        """Finalizing the SPIR sample should emit HSAIL text for the kernel."""
        hlcmod = hlc.Module()
        hlcmod.load_llvm(SPIR_SAMPLE)
        hsail = hlcmod.finalize().hsail
        self.assertIn("prog kernel ©", hsail)

    def test_brig(self):
        """Full pipeline: SPIR -> BRIG -> finalize -> dispatch on an HSA agent."""
        # Generate BRIG
        hlcmod = hlc.Module()
        hlcmod.load_llvm(SPIR_SAMPLE)
        brig = hlcmod.finalize().brig
        # Check the first 8 bytes for the magic string
        self.assertEqual(brig[:8].decode('latin1'), 'HSA BRIG')

        # Compile
        from numba.hsa.hsadrv.driver import BrigModule, Program, hsa, Executable
        agent = hsa.components[0]
        brigmod = BrigModule(brig)
        prog = Program()
        prog.add_module(brigmod)
        code = prog.finalize(agent.isa)
        ex = Executable()
        ex.load(agent, code)
        ex.freeze()
        sym = ex.get_symbol(agent, "©")
        self.assertNotEqual(sym.kernel_object, 0)
        self.assertGreater(sym.kernarg_segment_size, 0)

        # Execute
        import ctypes
        import numpy as np
        # NOTE(review): `sig` is never used below — confirm whether a
        # completion signal should be passed to the dispatch call.
        sig = hsa.create_signal(1)
        kernarg_region = [r for r in agent.regions if r.supports_kernargs][0]
        kernarg_types = (ctypes.c_void_p * 2)
        kernargs = kernarg_region.allocate(kernarg_types)

        src = np.random.random(1).astype(np.float32)
        dst = np.zeros_like(src)
        kernargs[0] = src.ctypes.data
        kernargs[1] = dst.ctypes.data

        # Register host memory with the HSA runtime so the agent may access it.
        hsa.hsa_memory_register(src.ctypes.data, src.nbytes)
        hsa.hsa_memory_register(dst.ctypes.data, dst.nbytes)
        hsa.hsa_memory_register(ctypes.byref(kernargs),
                                ctypes.sizeof(kernargs))

        queue = agent.create_queue_single(32)
        queue.dispatch(sym, kernargs, workgroup_size=(1,),
                       grid_size=(1,))
        # The kernel copies src -> dst, so the results must match exactly.
        np.testing.assert_equal(dst, src)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 35.544554 | 232 | 0.648468 | from __future__ import print_function, absolute_import
import numba.unittest_support as unittest
from numba.hsa.hlc import hlc
SPIR_SAMPLE = """
; ModuleID = 'kernel.out.bc'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-n32"
target triple = "hsail64-pc-unknown-amdopencl"
define spir_kernel void @copy(float addrspace(1)* nocapture %input,
float addrspace(1)* nocapture %output) {
%1 = load float addrspace(1)* %input, align 4, !tbaa !8
store float %1, float addrspace(1)* %output, align 4, !tbaa !8
ret void
}
!opencl.kernels = !{!0}
!opencl.enable.FP_CONTRACT = !{}
!opencl.spir.version = !{!6}
!opencl.ocl.version = !{!6}
!opencl.used.extensions = !{!7}
!opencl.used.optional.core.features = !{!7}
!opencl.compiler.options = !{!7}
!0 = metadata !{void (float addrspace(1)*, float addrspace(1)*)* @copy, metadata !1, metadata !2, metadata !3, metadata !4, metadata !5}
!1 = metadata !{metadata !"kernel_arg_addr_space", i32 1, i32 1}
!2 = metadata !{metadata !"kernel_arg_access_qual", metadata !"none", metadata !"none"}
!3 = metadata !{metadata !"kernel_arg_type", metadata !"float*", metadata !"float*"}
!4 = metadata !{metadata !"kernel_arg_type_qual", metadata !"", metadata !""}
!5 = metadata !{metadata !"kernel_arg_base_type", metadata !"float*", metadata !"float*"}
!6 = metadata !{i32 1, i32 2}
!7 = metadata !{}
!8 = metadata !{metadata !"float", metadata !9}
!9 = metadata !{metadata !"omnipotent char", metadata !10}
!10 = metadata !{metadata !"Simple C/C++ TBAA"}
"""
class TestHLC(unittest.TestCase):
def test_hsail(self):
hlcmod = hlc.Module()
hlcmod.load_llvm(SPIR_SAMPLE)
hsail = hlcmod.finalize().hsail
self.assertIn("prog kernel ©", hsail)
def test_brig(self):
hlcmod = hlc.Module()
hlcmod.load_llvm(SPIR_SAMPLE)
brig = hlcmod.finalize().brig
self.assertEqual(brig[:8].decode('latin1'), 'HSA BRIG')
from numba.hsa.hsadrv.driver import BrigModule, Program, hsa, Executable
agent = hsa.components[0]
brigmod = BrigModule(brig)
prog = Program()
prog.add_module(brigmod)
code = prog.finalize(agent.isa)
ex = Executable()
ex.load(agent, code)
ex.freeze()
sym = ex.get_symbol(agent, "©")
self.assertNotEqual(sym.kernel_object, 0)
self.assertGreater(sym.kernarg_segment_size, 0)
import ctypes
import numpy as np
sig = hsa.create_signal(1)
kernarg_region = [r for r in agent.regions if r.supports_kernargs][0]
kernarg_types = (ctypes.c_void_p * 2)
kernargs = kernarg_region.allocate(kernarg_types)
src = np.random.random(1).astype(np.float32)
dst = np.zeros_like(src)
kernargs[0] = src.ctypes.data
kernargs[1] = dst.ctypes.data
hsa.hsa_memory_register(src.ctypes.data, src.nbytes)
hsa.hsa_memory_register(dst.ctypes.data, dst.nbytes)
hsa.hsa_memory_register(ctypes.byref(kernargs),
ctypes.sizeof(kernargs))
queue = agent.create_queue_single(32)
queue.dispatch(sym, kernargs, workgroup_size=(1,),
grid_size=(1,))
np.testing.assert_equal(dst, src)
if __name__ == '__main__':
unittest.main()
| true | true |
f7f4ef57a41c9f8beb69ab2a810e66283279e842 | 173 | py | Python | answers/Nitish1702/Day3/Answer1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Nitish1702/Day3/Answer1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Nitish1702/Day3/Answer1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | n=int(input())
def series(n):
    """Print and return the sum of the series 1 + 12 + 123 + ... (n terms).

    The i-th term is the integer formed by concatenating the digits of
    1..i, e.g. n=3 gives 1 + 12 + 123 = 136.

    Args:
        n: Number of terms; n <= 0 yields an empty series with sum 0.

    Returns:
        The integer sum of the series (also printed, as before).
    """
    digits = ''
    terms = []
    for i in range(1, n + 1):
        digits += str(i)
        terms.append(int(digits))
    total = sum(terms)
    # Preserve the original output format exactly.
    print("Sum of the series is: ", total)
    return total
series(n)
| 17.3 | 43 | 0.479769 | n=int(input())
def series(n):
a=''
l=[]
for i in range (1,n+1):
a+=str(i)
l.append(int(a))
print("Sum of the series is: ", sum(l))
series(n)
| true | true |
f7f4efa733a03d05d92269a584f852b6b755e644 | 48,174 | py | Python | app/routes.py | deepmatters/impactflowjp | 0df100aaca42204434022bf5a18ca391d2b6f554 | [
"MIT"
] | null | null | null | app/routes.py | deepmatters/impactflowjp | 0df100aaca42204434022bf5a18ca391d2b6f554 | [
"MIT"
] | null | null | null | app/routes.py | deepmatters/impactflowjp | 0df100aaca42204434022bf5a18ca391d2b6f554 | [
"MIT"
] | null | null | null | from flask import render_template, redirect, url_for, flash, request, json, jsonify
from flask_login import current_user, login_user, logout_user, login_required
from flask_mail import Message
from app import app, db, mail
from app.models import User, Project, Stakeholder, Activity, Output, Outcome
from app.forms import SignupForm, LoginForm, ForgetForm, PasswordChangeForm, PasswordResetForm, ProjectForm, StakeholderForm, ActivityForm, OutputForm, OutcomeForm
import pymongo
import random
from datetime import datetime
from threading import Thread
import boto3
import pytz
import os
import ast
from sqlalchemy.orm.attributes import flag_modified
# File-upload helper: validate the extension of an uploaded filename.
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in app.config['ALLOWED_EXTENSIONS']
# Time conversion from UTC, to be used as filter in Jinja2
# E.g. {{ user.lastlogin_dt|datetimefilter }}
def datetimefilter(value, format="%d-%m-%Y %H:%M:%S"):
    """Jinja2 filter: render a naive UTC datetime in Asia/Tokyo local time.

    Args:
        value: datetime assumed to be naive and expressed in UTC (e.g.
            default SQLAlchemy timestamps).
            # NOTE(review): pytz.localize raises on tz-aware datetimes —
            # confirm callers never pass aware values.
        format: strftime format used for the rendered string.

    Returns:
        The formatted local-time string.
    """
    tz = pytz.timezone('Asia/Tokyo') # timezone you want to convert to from UTC
    utc = pytz.timezone('UTC')
    # Attach UTC tzinfo to the naive value, then convert to the local zone.
    value = utc.localize(value, is_dst=None).astimezone(pytz.utc)
    local_dt = value.astimezone(tz)
    return local_dt.strftime(format)

app.jinja_env.filters['datetimefilter'] = datetimefilter
# Define the img_convert function to eval the string saved in the DB as a list.
# In Jinja2, when testing whether an image exists, USE {% if db_object.img_url|length > 2 %}
def img_convert(img_url):
    """Jinja2 filter: extract the first image URL from a stringified list.

    ``img_url`` is stored in the DB as the string repr of a Python list,
    e.g. "['a.jpg', 'b.jpg']".  Returns the first entry, or None when the
    value is None, empty, or the empty-list string "[]" (len <= 2).
    """
    # Guard against None before calling len(): the previous implementation
    # raised TypeError for records whose image column was NULL, a case the
    # original comments themselves described as possible.
    if img_url and len(img_url) > 2:  # 2 means empty brackets "[]" (img deleted)
        img_list = ast.literal_eval(img_url)
        img = img_list[0]  # Select only the 1st img
    else:
        img = None
    return img
app.jinja_env.filters['img_convert'] = img_convert
"""
Error handling
Don't forget to output header page variable for each content type
"""
@app.errorhandler(404)
def page_not_found(e):
    """Render the 404 page.

    Templates under /project expect a `project` variable in the shared
    header, so an empty one is passed for those paths.
    """
    if request.path.split('/')[1] == 'project':
        return render_template('404.html', project=[]), 404
    else:
        return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the 500 page, logging the failure via the app logger.

    Templates under /project expect a `project` variable in the shared
    header, so an empty one is passed for those paths.
    """
    # Use the configured Flask logger instead of bare print() (leftover
    # debug output) so errors reach production logs with the failing path.
    if request.path.split('/')[1] == 'project':
        app.logger.error('500 error on %s: %s', request.path, e)
        return render_template('500.html', project=[]), 500
    else:
        app.logger.error('500 error on %s: %s', request.path, e)
        return render_template('500.html'), 500
@app.route('/')
def home():
    """Home page.

    Authenticated users see their own projects plus site-wide published and
    unpublished lists; anonymous visitors see only published projects.
    """
    if current_user.is_authenticated:
        projects = Project.query.filter(Project.user_id == current_user.id).order_by(Project.create_dt.desc()).all()
        projects_published = Project.query.filter(Project.published == True).order_by(Project.create_dt.desc()).all()
        # NOTE(review): this queries unpublished projects of *all* users, not
        # only the current user — confirm the template restricts what it shows.
        projects_unpublished = Project.query.filter(Project.published == False).order_by(Project.create_dt.desc()).all()
        return render_template('home.html', projects=projects, projects_published=projects_published, projects_unpublished=projects_unpublished)
    else:
        projects_published = Project.query.filter(Project.published == True).order_by(Project.create_dt.desc()).all()
        return render_template('home.html', projects_published=projects_published)
@app.route('/project-all')
def project_all():
    """List every published project, newest first."""
    published_query = (
        Project.query
        .filter(Project.published == True)
        .order_by(Project.create_dt.desc())
    )
    return render_template('project-all.html',
                           projects_published=published_query.all())
@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html')
"""
Login and user sub-system
"""
@app.route('/signup', methods=('GET', 'POST'))
def signup():
    """Register a new user, log them in, and redirect to their profile.

    GET renders the signup form; POST validates it, rejecting duplicate
    emails and mismatched password pairs (error messages are in Japanese,
    matching the rest of the UI).
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))

    form = SignupForm()
    if form.validate_on_submit():
        # Get data from form
        name = form.name.data
        email = form.email.data
        password = form.password.data
        password_check = form.password_check.data

        # Check if email already exist
        email_exist = User.query.filter_by(email=email).first()
        if email_exist:
            comment = f"{email} このメールアドレスはすでに登録されています。"
            return render_template('signup-error.html', comment=comment)

        # Check if passwords match
        if password != password_check:
            comment = "2つのパスワードが一致する必要があります。"
            return render_template('signup-error.html', comment=comment)

        # Create user with name, email, password
        new_user = User(name=name, email=email)
        new_user.set_password(password)
        db.session.add(new_user)
        db.session.commit()

        # Log the freshly created user in directly; re-querying the same row
        # by email (as the original code did) is redundant after the commit.
        login_user(new_user)
        flash("登録が完了し、すでにログインしています")
        return redirect('/profile')
    return render_template('signup.html', form=form)
# Function to send mail using thread
def send_async_email(app, msg):
    """Send *msg* via Flask-Mail inside *app*'s context (run from a Thread)."""
    with app.app_context():
        mail.send(msg)
@app.route('/forget', methods=('GET', 'POST'))
def forget():
    """Start the password-reset flow.

    GET renders the request form; POST looks the email up and, when it
    belongs to a user, stores a fresh reset token and emails a reset link
    (asynchronously, so the response is not blocked by SMTP).
    """
    form = ForgetForm()
    if form.validate_on_submit():
        # Get data from form
        email = form.email.data

        # Check if entered email is an existing user or not
        user = User.query.filter_by(email=email).first()
        if user is None:
            # Return comment and error type
            comment = "電子メールが見つかりません。"
            error_type = "wrong_email"
            return render_template('forget-result.html', comment=comment, error_type=error_type)
        # If email exists, proceed to password recovery process
        else:
            # Generate a 12-character reset token with the `secrets` module.
            # SECURITY: the previous implementation used random.sample (not
            # cryptographically secure, and sampling WITHOUT replacement from
            # a 23-symbol universe), making tokens predictable in principle.
            # Same token length is kept so the DB column and URL route are
            # unaffected.
            alphabet = string.ascii_letters + string.digits
            password_reset_id = ''.join(secrets.choice(alphabet) for _ in range(12))

            # Insert password_reset_id in db for this user
            user.password_reset_id = password_reset_id
            db.session.commit()

            # Send an email to user
            """
            !!! MUST CUSTOMISE MESSAGE BODY IN IMPLEMENTATION !!!
            """
            msg = Message(subject='[jp.ImpactFlow.org] パスワードの再設定',
                          sender = 'support@cfapp.org',
                          recipients = [email]) # <<< CONFIGURE WEBSITE URL
            msg.body = ("https://jp.impactflow.org/password-reset/" + password_reset_id + " 上記リンクより、パスワードの再設定をお願いします。") # <<< CONFIGURE EMAIL MESSAGE AND URL
            Thread(target=send_async_email, args=(app, msg)).start() # Send mail asynchronously

            # Return comment
            comment = "パスワード再設定のご案内をメールにてお送りしました。"
            return render_template('forget-result.html', comment=comment)
    return render_template('forget.html', form=form)
# Password recovery API endpoint
@app.route('/password-reset/<string:password_reset_id>')
def password_reset(password_reset_id):
    """Landing page for emailed reset links; validates the token first."""
    user = User.query.filter_by(password_reset_id=password_reset_id).first()
    # Unknown (or already-consumed) token: bounce to home with a message.
    if user is None:
        flash("パスワード再設定用リンクが無効、または使用されています。")
        return redirect('/')
    # Token is valid: show the reset form, carrying the token through.
    form = PasswordResetForm()
    return render_template('password-reset.html', password_reset_id=password_reset_id, form=form)
@app.route('/password-reset-result', methods=('GET', 'POST'))
def password_reset_result():
    """Handle submission of the reset form served by /password-reset/<id>."""
    form = PasswordResetForm()
    if form.validate_on_submit():
        # Get data from form
        password_reset_id = form.password_reset_id.data
        password_new = form.password_new.data
        password_new_check = form.password_new_check.data
        # Get the user who belong to this password_reset_id
        # NOTE(review): no None check here -- a forged or blank reset id would
        # make `user` None and raise at set_password below; confirm the form
        # always carries a valid id.
        user = User.query.filter_by(password_reset_id=password_reset_id).first()
        # Check if new passwords match each other
        if password_new != password_new_check:
            # Return comment and error type
            comment = "2つのパスワードが一致する必要があります。"
            error_type = "unmatched_password_check_reset"
            return render_template('password-change-result.html', comment=comment, error_type=error_type, password_reset_id=password_reset_id)
        # Proceed if passwords check passed
        else:
            # Generate new password hash
            user.set_password(password_new)
            # Update password_reset_id with blank string so the id can be used only this time only
            # and can't be used in API
            user.password_reset_id = ""
            db.session.commit()
            # Login user instantly
            login_user(user)
            flash("正常にログインしました")
            # Return comment
            comment = "次回から新しいパスワードでログインしてください。"
            return render_template('password-change-result.html', comment=comment)
    # GET or failed validation: render the result template without context.
    return render_template('password-change-result.html')
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Email/password login; records the last-login time on success."""
    if current_user.is_authenticated:
        # Already signed in -- go straight home.
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Unknown email or wrong password -> generic failure page.
        if user is None or not user.check_password(form.password.data):
            return render_template('fail.html')
        login_user(user)
        # Update lastlogin_dt to the current time
        user.lastlogin_dt = datetime.now()
        db.session.commit()
        flash("正常にログインしました")
        return redirect('/')
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    flash("正常にログアウトしました")
    return redirect(url_for('home'))
@app.route('/password-change', methods=('GET', 'POST'))
def password_change():
    """Let a logged-in user change their password (requires the current one)."""
    form = PasswordChangeForm()
    if form.validate_on_submit():
        # Get data from form
        pass_current = form.password_current.data
        pass_new = form.password_new.data
        pass_new_check = form.password_new_check.data
        # Connect to db
        user = User.query.filter_by(id=current_user.id).first()
        # Check if current pass matches pass in db
        if not user.check_password(pass_current):
            # Return comment and error type
            comment = "パスワードが正しくありません。"
            error_type = "wrong_pass_current"
            return render_template('password-change-result.html', comment=comment, error_type=error_type)
        # Check if new passwords match each other
        elif pass_new != pass_new_check:
            # Return comment and error type
            comment = "2つのパスワードが一致する必要があります。"
            error_type = "unmatched_password_check"
            return render_template('password-change-result.html', comment=comment, error_type=error_type)
        # Proceed if 2 above checks passed
        else:
            # Generate new password hash
            user.set_password(pass_new)
            db.session.commit()
            # Return comment
            comment = "次回から新しいパスワードでログインしてください。"
            return render_template('password-change-result.html', comment=comment)
    return render_template('password-change.html', form=form)
"""
Profile
"""
@app.route('/profile')
@login_required
def profile():
    """Current user's profile, listing their projects newest-first."""
    user = User.query.filter_by(id=current_user.id).first()
    owned_projects = (
        Project.query
        .filter(Project.user_id == user.id)
        .order_by(Project.create_dt.desc())
        .all()
    )
    return render_template('profile.html', user=user, projects=owned_projects)
"""
Project
"""
@app.route('/project-create', methods=('GET', 'POST'))
@login_required
def project_create():
    """GET: show the project form.  POST: create a project from a JSON body."""
    form = ProjectForm()
    if request.method == 'POST':
        json_data = request.get_json()  # Convert JSON to Python dict
        now = datetime.now()
        # The full client payload is stored verbatim in the `json` column;
        # `published` is duplicated into its own column for querying.
        project = Project(
            user_id=current_user.id,
            create_dt=now,
            published=json_data['published'],
            json=json_data
        )
        db.session.add(project)
        db.session.commit()
        flash("新規プロジェクトの作成が正常に行われました。")
        # Debug dump of the submitted payload.
        print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
        return redirect('/')
    else:
        return render_template('project-create.html', form=form)
@app.route('/project/<int:project_id>')
def project(project_id):
    """Project detail page.

    Loads the full stakeholder -> activity -> output -> outcome tree, computes
    which project-level impacts have not yet been checked by any outcome, and
    renders the project template.  Unpublished projects are visible only to
    their owner or an admin.
    """
    project = Project.query.filter(Project.id == project_id).first()
    stakeholders = Stakeholder.query.filter(Stakeholder.project_id == project_id).order_by(Stakeholder.id.asc()).all()
    # Get activities for each stakeholder (keep the parent id for the template)
    activities = []
    for stakeholder in stakeholders:
        activities_raw = Activity.query.filter(Activity.stakeholder_id == stakeholder.id).order_by(Activity.id.asc()).all()
        for activity in activities_raw:
            activities.append([activity.stakeholder_id, activity])
    # Get outputs for each activity
    outputs = []
    for activity in activities:
        outputs_raw = Output.query.filter(Output.activity_id == activity[1].id).order_by(Output.id.asc()).all()
        for output in outputs_raw:
            outputs.append([output.activity_id, output])
    # Get outcomes for each output
    outcomes = []
    for output in outputs:
        outcomes_raw = Outcome.query.filter(Outcome.output_id == output[1].id).order_by(Outcome.id.asc()).all()
        for outcome in outcomes_raw:
            outcomes.append([outcome.output_id, outcome])
    # Find a list of project's impact that haven't been checked in outcome stage
    # 1. Unique set of every impact referenced by the outcomes.  Reuse the
    #    `outcomes` list fetched above -- the original re-ran the identical
    #    per-output queries a second time just to rebuild the same objects.
    outcome_impact_check = []
    for _, outcome in outcomes:
        for impact in outcome.json['impactCheck']:
            outcome_impact_check.append(impact)
    outcome_impact_set = set(outcome_impact_check)
    # 2. Create a list of impact from project data
    project_impact = []
    for impact in project.json['impact']:
        project_impact.append(impact['objective'])
    project_impact_set = set(project_impact)
    # 3. Find symmetric difference between outcome_impact_set and project_impact_set
    impact_diff = project_impact_set.symmetric_difference(outcome_impact_set)
    # 4. Reorder impact diff by creating a new list against project_impact
    impact_diff_ordered = []
    for impact in project_impact:
        if impact in impact_diff:
            impact_diff_ordered.append(impact)
    # Editability check: owner or admin
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    # Limit the view of unpublished project to owner or admin only
    if project.published == False and editable == False:
        return render_template('owner-error.html', project=project)
    else:
        # img_url is stored as the repr of a Python list -> parse it back
        if project.img_url:
            img_url_raw = project.img_url
            imgs = ast.literal_eval(img_url_raw)
        else:
            imgs = None
        return render_template('project.html', project=project, imgs=imgs, stakeholders=stakeholders, activities=activities, outputs=outputs, outcomes=outcomes, impact_diff_ordered=impact_diff_ordered, editable=editable)
@app.route('/project/<int:project_id>/edit', methods=('GET', 'POST'))
@login_required
def project_edit(project_id):
    """GET: show the edit form with current JSON.  POST: overwrite from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    # Owner or admin may edit.
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = ProjectForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            # Record who edited and when.
            project.mod_user_id = current_user.id
            project.mod_dt = now
            project.published = json_data['published']
            project.json = json_data
            db.session.commit()
            flash("プロジェクトの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): this success response body is Thai ("project edited
            # successfully") while the rest of the UI is Japanese -- likely a
            # leftover from an earlier codebase; confirm whether the client
            # matches this exact string before changing it.
            return "แก้ไขโครงการสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(project.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('project-edit.html', project=project, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/delete')
@login_required
def project_delete(project_id):
    """Show the delete-confirmation page for a project (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    editable = current_user.is_authenticated and (
        project.user_id == current_user.id or current_user.role == 'admin'
    )
    if not editable:
        return render_template('owner-error.html', project=project)
    return render_template('project-delete.html', project=project, project_id=project_id)
@app.route('/project/<int:project_id>/delete/confirm')
@login_required
def project_delete_confirm(project_id):
    """Permanently delete a project (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    # Owner or admin may delete.
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(project)
        db.session.commit()
        flash("プロジェクトを正常に削除しました。")
        return redirect('/')
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/publish/<string:mode>')
@login_required
def project_share_toggle(project_id, mode):
    """Toggle a project's published flag.

    *mode* is 'on' or 'off'.  The flag is kept in sync in both the dedicated
    `published` column and the stored JSON payload.
    """
    project = Project.query.filter(Project.id == project_id).first()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if not editable:
        # Previously fell through and returned None (HTTP 500); show the
        # standard ownership error instead.
        return render_template('owner-error.html', project=project)
    if mode == 'on':
        published = True
    elif mode == 'off':
        published = False
    else:
        # Unknown mode also used to return None; treat it as a no-op.
        return redirect('/project/' + str(project_id))
    # Keep the JSON copy and the column in sync.
    project.json['published'] = published
    project.published = published
    # `json` is a mutable JSON column: mark it dirty so SQLAlchemy persists
    # the in-place change.
    flag_modified(project, "json")
    db.session.add(project)
    db.session.commit()
    return redirect('/project/' + str(project_id))
"""
Stakeholder
"""
@app.route('/project/<int:project_id>/stakeholder-create', methods=('GET', 'POST'))
@login_required
def stakeholder_create(project_id):
    """GET: show the stakeholder form.  POST: create a stakeholder from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    form = StakeholderForm()
    # Only the project owner or an admin may add stakeholders.
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            stakeholder = Stakeholder(
                project_id=project_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(stakeholder)
            db.session.commit()
            flash("ステークホルダーの新規作成が正常に行われました。")
            # Debug dump of the submitted payload.
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            return render_template('stakeholder-create.html', form=form, project=project)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>')
def stakeholder(project_id, stakeholder_id):
    """Stakeholder detail page with its activities, newest first."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activities = Activity.query.filter(Activity.stakeholder_id == stakeholder_id).order_by(Activity.create_dt.desc()).all()
    editable = current_user.is_authenticated and (
        stakeholder.user_id == current_user.id or current_user.role == 'admin'
    )
    # Unpublished projects are visible only to the owner or an admin.
    if project.published == False and not editable:
        return render_template('owner-error.html', project=project)
    return render_template('stakeholder.html', project=project, stakeholder=stakeholder, activities=activities, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/edit', methods=('GET', 'POST'))
@login_required
def stakeholder_edit(project_id, stakeholder_id):
    """GET: show the edit form with current JSON.  POST: overwrite from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    # Stakeholder owner or admin may edit.
    editable = False
    if current_user.is_authenticated:
        if stakeholder.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = StakeholderForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            # Record who edited and when.
            stakeholder.mod_user_id = current_user.id
            stakeholder.mod_dt = now
            stakeholder.published = json_data['published']
            stakeholder.json = json_data
            db.session.commit()
            flash("ステークホルダーの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai success string in a Japanese UI -- see note in
            # this file's other edit handlers; confirm client expectations.
            return "แก้ไขผู้มีส่วนได้เสียสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(stakeholder.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('stakeholder-edit.html', project=project, stakeholder=stakeholder, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/delete')
@login_required
def stakeholder_delete(project_id, stakeholder_id):
    """Show the delete-confirmation page for a stakeholder (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    editable = False
    if current_user.is_authenticated:
        if stakeholder.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('stakeholder-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/delete/confirm')
@login_required
def stakeholder_delete_confirm(project_id, stakeholder_id):
    """Actually delete the stakeholder, then return to the project page."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    editable = current_user.is_authenticated and (
        stakeholder.user_id == current_user.id or current_user.role == 'admin'
    )
    if not editable:
        return render_template('owner-error.html', project=project)
    db.session.delete(stakeholder)
    db.session.commit()
    flash("ステークホルダーを正常に削除しました。")
    return redirect('/project/' + str(project_id))
"""
Activity
"""
@app.route('/project/<int:project_id>/<int:stakeholder_id>/activity-create', methods=('GET', 'POST'))
@login_required
def activity_create(project_id, stakeholder_id):
    """GET: show the activity form.  POST: create an activity from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    form = ActivityForm()
    # Only the project owner or an admin may add activities.
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            activity = Activity(
                stakeholder_id=stakeholder_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(activity)
            db.session.commit()
            flash("新しい活動が正常に作成されました。")
            # Debug dump of the submitted payload.
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            return render_template('activity-create.html', form=form, project=project, stakeholder=stakeholder)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>')
def activity(project_id, stakeholder_id, activity_id):
    """Activity detail page."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = current_user.is_authenticated and (
        activity.user_id == current_user.id or current_user.role == 'admin'
    )
    # Unpublished projects are visible only to the owner or an admin.
    if project.published == False and not editable:
        return render_template('owner-error.html', project=project)
    return render_template('activity.html', project=project, stakeholder=stakeholder, activity=activity, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/edit', methods=('GET', 'POST'))
@login_required
def activity_edit(project_id, stakeholder_id, activity_id):
    """GET: show the edit form with current JSON.  POST: overwrite from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    # Activity owner or admin may edit.
    editable = False
    if current_user.is_authenticated:
        if activity.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = ActivityForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            # Record who edited and when.
            activity.mod_user_id = current_user.id
            activity.mod_dt = now
            activity.published = json_data['published']
            activity.json = json_data
            db.session.commit()
            flash("活動の編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai success string in a Japanese UI -- confirm
            # client expectations before changing it.
            return "แก้ไขกิจกรรมสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(activity.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('activity-edit.html', project=project, stakeholder=stakeholder, activity=activity, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/delete')
@login_required
def activity_delete(project_id, stakeholder_id, activity_id):
    """Show the delete-confirmation page for an activity (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = False
    if current_user.is_authenticated:
        if activity.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('activity-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id, activity=activity, activity_id=activity_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/delete/confirm')
@login_required
def activity_delete_confirm(project_id, stakeholder_id, activity_id):
    """Actually delete the activity, then return to the project page."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = current_user.is_authenticated and (
        activity.user_id == current_user.id or current_user.role == 'admin'
    )
    if not editable:
        return render_template('owner-error.html', project=project)
    db.session.delete(activity)
    db.session.commit()
    flash("活動を正常に削除しました。")
    return redirect('/project/' + str(project_id))
"""
Output
"""
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/output-create', methods=('GET', 'POST'))
@login_required
def output_create(project_id, stakeholder_id, activity_id):
    """GET: show the output form.  POST: create an output from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    form = OutputForm()
    # Only the project owner or an admin may add outputs.
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            output = Output(
                activity_id=activity_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(output)
            db.session.commit()
            flash("新規アウトプットが正常に作成されました。")
            # Debug dump of the submitted payload.
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            return render_template('output-create.html', form=form, project=project, stakeholder=stakeholder, activity=activity)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>')
def output(project_id, stakeholder_id, activity_id, output_id):
    """Output detail page."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    # Output owner or admin may edit.
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    # Limit the view of unpublished project to owner or admin only
    if project.published == False and editable == False:
        return render_template('owner-error.html', project=project)
    else:
        return render_template('output.html', project=project, stakeholder=stakeholder, activity=activity, output=output, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/edit', methods=('GET', 'POST'))
@login_required
def output_edit(project_id, stakeholder_id, activity_id, output_id):
    """GET: show the edit form with current JSON.  POST: overwrite from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    # Output owner or admin may edit.
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = OutputForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            # Record who edited and when.
            output.mod_user_id = current_user.id
            output.mod_dt = now
            output.published = json_data['published']
            output.json = json_data
            db.session.commit()
            flash("アウトプットの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai success string in a Japanese UI -- confirm
            # client expectations before changing it.
            return "แก้ไขผลผลิตสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(output.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('output-edit.html', project=project, stakeholder=stakeholder, activity=activity, output=output, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/delete')
@login_required
def output_delete(project_id, stakeholder_id, activity_id, output_id):
    """Show the delete-confirmation page for an output (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    editable = current_user.is_authenticated and (
        output.user_id == current_user.id or current_user.role == 'admin'
    )
    if not editable:
        return render_template('owner-error.html', project=project)
    return render_template('output-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id, activity=activity, activity_id=activity_id, output=output, output_id=output_id)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/delete/confirm')
@login_required
def output_delete_confirm(project_id, stakeholder_id, activity_id, output_id):
    """Permanently delete an output (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(output)
        db.session.commit()
        flash("アウトプットが正常に削除されました。")
        return redirect('/project/' + str(project_id))
    else:
        return render_template('owner-error.html', project=project)
"""
Outcome
"""
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/outcome-create', methods=('GET', 'POST'))
@login_required
def outcome_create(project_id, stakeholder_id, activity_id, output_id):
    """GET: show the outcome form.  POST: create an outcome from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    form = OutcomeForm()
    # Only the project owner or an admin may add outcomes.
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            outcome = Outcome(
                output_id=output_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(outcome)
            db.session.commit()
            flash("新しいアウトカムの作成が正常に行われました。")
            # Debug dump of the submitted payload.
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            # Dump project's json to be used for objectives check
            data_project = json.dumps(project.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('outcome-create.html', form=form, project=project, stakeholder=stakeholder, activity=activity, output=output, data_project=data_project)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>')
def outcome(project_id, stakeholder_id, activity_id, output_id, outcome_id):
    """Outcome detail page."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
    editable = current_user.is_authenticated and (
        outcome.user_id == current_user.id or current_user.role == 'admin'
    )
    # Unpublished projects are visible only to the owner or an admin.
    if project.published == False and not editable:
        return render_template('owner-error.html', project=project)
    return render_template('outcome.html', project=project, stakeholder=stakeholder, activity=activity, output=output, outcome=outcome, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>/edit', methods=('GET', 'POST'))
@login_required
def outcome_edit(project_id, stakeholder_id, activity_id, output_id, outcome_id):
    """GET: show the edit form with current JSON.  POST: overwrite from a JSON body."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
    # Outcome owner or admin may edit.
    editable = False
    if current_user.is_authenticated:
        if outcome.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = OutcomeForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json()  # Convert JSON to Python dict
            now = datetime.now()
            # Record who edited and when.
            outcome.mod_user_id = current_user.id
            outcome.mod_dt = now
            outcome.published = json_data['published']
            outcome.json = json_data
            db.session.commit()
            flash("アウトカムの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai success string in a Japanese UI -- confirm
            # client expectations before changing it.
            return "แก้ไขผลลัพธ์สำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(outcome.json, sort_keys=False, indent=4, ensure_ascii=False)
            # Dump project's json to be used for objectives check
            data_project = json.dumps(project.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('outcome-edit.html', project=project, stakeholder=stakeholder, activity=activity, output=output, outcome=outcome, form=form, data=data, data_project=data_project)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>/delete')
@login_required
def outcome_delete(project_id, stakeholder_id, activity_id, output_id, outcome_id):
    """Show the delete-confirmation page for an outcome (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
    editable = False
    if current_user.is_authenticated:
        if outcome.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('outcome-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id, activity=activity, activity_id=activity_id, output=output, output_id=output_id, outcome=outcome, outcome_id=outcome_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>/delete/confirm')
@login_required
def outcome_delete_confirm(project_id, stakeholder_id, activity_id, output_id, outcome_id):
    """Permanently delete an outcome (owner or admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
    editable = False
    if current_user.is_authenticated:
        if outcome.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(outcome)
        db.session.commit()
        flash("結果を正常に削除しました。")
        return redirect('/project/' + str(project_id))
    else:
        return render_template('owner-error.html', project=project)
"""
File uploader
"""
@app.route('/project/<int:project_id>/upload')
def project_upload(project_id):
    """Render the image-upload page for a project (owner/admin only).

    Unauthenticated visitors fall through to owner-error because
    `editable` stays False, so the missing @login_required is harmless here.
    """
    form = ProjectForm()
    project = Project.query.filter(Project.id == project_id).first()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('upload.html', form=form, project=project)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/file-post', methods=('GET', 'POST'))
def file_post(project_id):
    """Receive multi-file image uploads for a project and mirror them to S3.

    Each allowed file is saved under static/project-img/ with a standardised
    name ("project-<id>-<n><ext>"), uploaded to the S3 bucket, and reported
    in the JSON status list returned to the uploader widget.

    NOTE(review): this endpoint performs no authentication or ownership
    check — anyone who knows a project id can POST files. Confirm whether it
    should be restricted like the /upload page that hosts the widget.
    """
    if request.method == 'POST':
        files = request.files.getlist("file")
        if files:
            file_num = 0
            file_uploaded_num = 0
            file_list = []
            status = []
            for file in files:
                file_num += 1
                file_uploaded_name = file.filename
                print(f"To upload: {file_uploaded_name}")
                # Only process files whose extension is in ALLOWED_EXTENSIONS.
                if allowed_file(file_uploaded_name):
                    file_uploaded_num += 1
                    # Local and S3 folder, S3 bucket name
                    """CUSTOMISE LOCAL/S3 FOLDERS AND S3 BUCKET HERE"""
                    local_folder = "project-img"
                    s3_bucket_folder = "project-img"
                    s3_bucket = "impactflowjp"
                    # Standardise file filename
                    """CUSTOMISE FILE NAME TEMPLATE HERE"""
                    file_name = "project-" + str(project_id) + "-" + str(file_uploaded_num) + os.path.splitext(file_uploaded_name)[1]
                    # Upload to server. NEED app.route_path FOR os.path.join TO WORK
                    file.save(os.path.join(app.root_path, 'static/' + local_folder, file_name))
                    # Upload to S3.
                    # NOTE(review): ContentType is hard-coded to image/jpeg even
                    # for other allowed extensions — confirm intended.
                    s3_destination = (s3_bucket_folder + '/' + file_name)
                    s3_testmultiupload = boto3.resource('s3').Bucket(s3_bucket)
                    s3_testmultiupload.upload_file(
                        Filename=os.path.join(app.root_path, 'static/' + local_folder, file_name),
                        Key=s3_destination, ExtraArgs={'ContentType': 'image/jpeg', 'ACL': 'public-read'}
                    )
                    print(f"Uploaded: {file_uploaded_name}")
                    # Generate the public S3 URL of the uploaded object.
                    file_url = 'https://' + s3_bucket + '.s3.ap-northeast-1.amazonaws.com/' + s3_bucket_folder + '/' + file_name
                    # Append each file URL to file_list
                    file_list.append(file_url)
                    # Append info of each file to status, to be returned as JSON
                    status.append({
                        "uploadStatus": True,
                        "uploadedFileId": file_uploaded_num,
                        "uploadedOriginalName": file_uploaded_name,
                        "uploadedUpdatedName": file_name,
                        "fileUrl": file_url
                    })
                else:
                    # Disallowed extension: report a per-file failure entry.
                    status.append({
                        "uploadStatus": False,
                        "uploadedFileId": 0,
                        "uploadedOriginalName": file_uploaded_name,
                        "uploadedUpdatedName": 0
                    })
            print(f"DB file list: {file_list}")
            return jsonify(status)
        else:
            return jsonify({"status": "no file uploaded"})
@app.route('/project/<int:project_id>/file-save', methods=('GET', 'POST'))
def file_save(project_id):
    """Persist the uploaded image URL list (JSON request body) on the project.

    NOTE(review): like /file-post, no auth/ownership check is performed —
    confirm whether this should be restricted to the project owner.
    """
    if request.method == 'POST':
        # Get request JSON and parse as dict
        file_url_list = request.get_json()
        print(file_url_list)
        """
        SAVE TO DB HERE.
        Before saving, don't forget to convert file_url_list to string,
        then recover using ast function
        """
        # Stored as the str() repr of a Python list; read back elsewhere
        # via ast.literal_eval (see img_convert).
        project = Project.query.filter(Project.id == project_id).first()
        project.img_url = str(file_url_list)
        db.session.commit()
        return jsonify(file_url_list)
"""
Indicator bank management
"""
@app.route('/indicator/view')
@login_required
def indicator_view():
    """Render the indicator bank from MongoDB (admin only).

    Bug fix: the non-admin branch referenced an undefined name `project`
    (NameError → HTTP 500 for every non-admin visitor). The owner-error
    template already tolerates an empty project (see the 404/500 handlers,
    which pass project=[]), so we do the same here.
    """
    editable = False
    if current_user.is_authenticated:
        if current_user.role == 'admin': # Admin only
            editable = True
    if editable:
        # Connect and define the database
        client = pymongo.MongoClient(app.config['DB_URI'])
        mongodb = client.impactflowjp
        indicators = mongodb.indicator.find({})
        return render_template('indicator-view.html', indicators=indicators)
    else:
        return render_template('owner-error.html', project=[])
"""
Search
"""
@app.route('/indicator/api', methods=('GET', 'POST'))
def indicator_api():
    """Return the indicator bank as JSON for the search UI.

    NOTE(review): POST is listed in `methods` but never handled, so a POST
    request falls off the end and returns None (HTTP 500) — confirm whether
    POST support was intended or the method should be removed.
    """
    if request.method == 'GET':
        # Connect and define the database
        client = pymongo.MongoClient(app.config['DB_URI'])
        mongodb = client.impactflowjp
        indicators = []
        for result in mongodb.indicator.find({}):
            # Key names mirror the Mongo documents; note the capitalised
            # 'Sub-category' field in the source schema.
            indicators.append({
                "category": result['category'],
                "subcategory": result['Sub-category'],
                "indicator": result['indicator'],
                "source": result['source'],
                "content": result['Sub-category'] + " : " + result['indicator'] + " (" + result['source'] + ")"
            })
        return jsonify(indicators)
from flask_login import current_user, login_user, logout_user, login_required
from flask_mail import Message
from app import app, db, mail
from app.models import User, Project, Stakeholder, Activity, Output, Outcome
from app.forms import SignupForm, LoginForm, ForgetForm, PasswordChangeForm, PasswordResetForm, ProjectForm, StakeholderForm, ActivityForm, OutputForm, OutcomeForm
import pymongo
import random
from datetime import datetime
from threading import Thread
import boto3
import pytz
import os
import ast
from sqlalchemy.orm.attributes import flag_modified
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in app.config['ALLOWED_EXTENSIONS']
def datetimefilter(value, format="%d-%m-%Y %H:%M:%S"):
    """Jinja filter: format a naive UTC datetime as Asia/Tokyo local time."""
    # Attach UTC to the naive timestamp, then convert to Tokyo time.
    aware_utc = pytz.timezone('UTC').localize(value, is_dst=None).astimezone(pytz.utc)
    tokyo = pytz.timezone('Asia/Tokyo')
    return aware_utc.astimezone(tokyo).strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
def img_convert(img_url):
    """Jinja filter: return the first image URL stored in a project row.

    `Project.img_url` holds the str() repr of a Python list of URLs
    (written by file_save), or may be empty/None when nothing was uploaded.

    Bug fix: the original called len() on the value unconditionally and
    raised TypeError when the column was NULL (None); None now yields None
    like an empty string does.
    """
    # "[]" is 2 chars, so anything longer than 2 contains at least one URL.
    if img_url and len(img_url) > 2:
        img_list = ast.literal_eval(img_url)
        img = img_list[0]
    else:
        img = None
    return img
app.jinja_env.filters['img_convert'] = img_convert
@app.errorhandler(404)
def page_not_found(e):
    """Render the 404 page; project URLs get an empty project context."""
    first_segment = request.path.split('/')[1]
    if first_segment == 'project':
        return render_template('404.html', project=[]), 404
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the 500 page; project URLs get an empty project context."""
    if request.path.split('/')[1] == 'project':
        print('This is 500 error from /project')
        return render_template('500.html', project=[]), 500
    print('This is 500 error')
    return render_template('500.html'), 500
@app.route('/')
def home():
    """Home page.

    Logged-in users see their own projects plus site-wide published and
    unpublished lists; anonymous visitors only see published projects.
    All lists are ordered newest-first.
    """
    if current_user.is_authenticated:
        projects = Project.query.filter(Project.user_id == current_user.id).order_by(Project.create_dt.desc()).all()
        projects_published = Project.query.filter(Project.published == True).order_by(Project.create_dt.desc()).all()
        projects_unpublished = Project.query.filter(Project.published == False).order_by(Project.create_dt.desc()).all()
        return render_template('home.html', projects=projects, projects_published=projects_published, projects_unpublished=projects_unpublished)
    else:
        projects_published = Project.query.filter(Project.published == True).order_by(Project.create_dt.desc()).all()
        return render_template('home.html', projects_published=projects_published)
@app.route('/project-all')
def project_all():
    """List every published project, newest first."""
    published = (
        Project.query
        .filter(Project.published == True)
        .order_by(Project.create_dt.desc())
        .all()
    )
    return render_template('project-all.html', projects_published=published)
@app.route('/about')
def about():
    """Render the static "about" page."""
    return render_template('about.html')
@app.route('/signup', methods=('GET', 'POST'))
def signup():
    """Register a new account and log the user in immediately.

    Duplicate e-mail addresses and mismatched password pairs render
    signup-error.html with a Japanese explanation.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = SignupForm()
    if form.validate_on_submit():
        name = form.name.data
        email = form.email.data
        password = form.password.data
        password_check = form.password_check.data
        # Reject addresses that already have an account.
        email_exist = User.query.filter_by(email=email).first()
        if email_exist:
            comment = f"{email} このメールアドレスはすでに登録されています。"
            return render_template('signup-error.html', comment=comment)
        # The two password fields must match.
        if password == password_check:
            password_final = password
        else:
            comment = "2つのパスワードが一致する必要があります。"
            return render_template('signup-error.html', comment=comment)
        new_user = User(name=name, email=email)
        new_user.set_password(password_final)
        db.session.add(new_user)
        db.session.commit()
        # Log the freshly created user in right away.
        user = User.query.filter_by(email=form.email.data).first()
        login_user(user)
        flash("登録が完了し、すでにログインしています")
        return redirect('/profile')
    return render_template('signup.html', form=form)
def send_async_email(app, msg):
    """Send *msg* via Flask-Mail inside *app*'s context (run in a Thread)."""
    with app.app_context():
        mail.send(msg)
@app.route('/forget', methods=('GET', 'POST'))
def forget():
    """Start the password-reset flow: e-mail the user a single-use reset link.

    Security fix: the reset id was previously built with random.sample()
    over a small 23-character alphabet — the `random` module is not
    cryptographically secure, so reset tokens were predictable. We now use
    the stdlib `secrets` module (URL-safe token, fits the
    /password-reset/<string:...> route unchanged).
    """
    form = ForgetForm()
    if form.validate_on_submit():
        email = form.email.data
        user = User.query.filter_by(email=email).first()
        if user is None:
            comment = "電子メールが見つかりません。"
            error_type = "wrong_email"
            return render_template('forget-result.html', comment=comment, error_type=error_type)
        else:
            # Cryptographically secure, URL-safe one-time token.
            import secrets
            password_reset_id = secrets.token_urlsafe(12)
            user.password_reset_id = password_reset_id
            db.session.commit()
            """
            !!! MUST CUSTOMISE MESSAGE BODY IN IMPLEMENTATION !!!
            """
            msg = Message(subject='[jp.ImpactFlow.org] パスワードの再設定',
                sender = 'support@cfapp.org',
                recipients = [email])
            msg.body = ("https://jp.impactflow.org/password-reset/" + password_reset_id + " 上記リンクより、パスワードの再設定をお願いします。")
            # Deliver in a background thread so the request isn't blocked on SMTP.
            Thread(target=send_async_email, args=(app, msg)).start()
            comment = "パスワード再設定のご案内をメールにてお送りしました。"
            return render_template('forget-result.html', comment=comment)
    return render_template('forget.html', form=form)
@app.route('/password-reset/<string:password_reset_id>')
def password_reset(password_reset_id):
    """Landing page of an e-mailed reset link; validates the one-time id."""
    user = User.query.filter_by(password_reset_id=password_reset_id).first()
    if user is None:
        # Unknown or already-consumed reset id.
        flash("パスワード再設定用リンクが無効、または使用されています。")
        return redirect('/')
    else:
        form = PasswordResetForm()
        return render_template('password-reset.html', password_reset_id=password_reset_id, form=form)
@app.route('/password-reset-result', methods=('GET', 'POST'))
def password_reset_result():
    """Complete the password-reset flow started by /forget.

    Bug fix: the submitted reset id may no longer match any user (already
    consumed by password_reset, or forged); the original dereferenced
    `user` unconditionally and crashed with AttributeError on None. Such
    requests are now bounced home with the same invalid-link flash message
    that /password-reset/<id> uses.
    """
    form = PasswordResetForm()
    if form.validate_on_submit():
        password_reset_id = form.password_reset_id.data
        password_new = form.password_new.data
        password_new_check = form.password_new_check.data
        user = User.query.filter_by(password_reset_id=password_reset_id).first()
        if user is None:
            flash("パスワード再設定用リンクが無効、または使用されています。")
            return redirect('/')
        if password_new != password_new_check:
            comment = "2つのパスワードが一致する必要があります。"
            error_type = "unmatched_password_check_reset"
            return render_template('password-change-result.html', comment=comment, error_type=error_type, password_reset_id=password_reset_id)
        else:
            user.set_password(password_new)
            # Consume the one-time reset id so the link cannot be reused.
            user.password_reset_id = ""
            db.session.commit()
            # Login user instantly
            login_user(user)
            flash("正常にログインしました")
            # Return comment
            comment = "次回から新しいパスワードでログインしてください。"
            return render_template('password-change-result.html', comment=comment)
    return render_template('password-change-result.html')
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Log a user in (e-mail + password) and record the login timestamp."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Same failure page for unknown e-mail and wrong password.
        if user is None or not user.check_password(form.password.data):
            return render_template('fail.html')
        login_user(user)
        # Update lastlogin_dt to the current time
        user.lastlogin_dt = datetime.now()
        db.session.commit()
        flash("正常にログインしました")
        return redirect('/')
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the home page."""
    logout_user()
    flash("正常にログアウトしました")
    return redirect(url_for('home'))
@app.route('/password-change', methods=('GET', 'POST'))
@login_required
def password_change():
    """Let a logged-in user change their password after re-entering the old one."""
    form = PasswordChangeForm()
    if form.validate_on_submit():
        # Get data from form
        pass_current = form.password_current.data
        pass_new = form.password_new.data
        pass_new_check = form.password_new_check.data
        # Connect to db
        user = User.query.filter_by(id=current_user.id).first()
        # Check if current pass matches pass in db
        if not user.check_password(pass_current):
            # Return comment and error type
            comment = "パスワードが正しくありません。"
            error_type = "wrong_pass_current"
            return render_template('password-change-result.html', comment=comment, error_type=error_type)
        # Check if new passwords match each other
        elif pass_new != pass_new_check:
            # Return comment and error type
            comment = "2つのパスワードが一致する必要があります。"
            error_type = "unmatched_password_check"
            return render_template('password-change-result.html', comment=comment, error_type=error_type)
        # Proceed if 2 above checks passed
        else:
            # Generate new password hash
            user.set_password(pass_new)
            db.session.commit()
            # Return comment
            comment = "次回から新しいパスワードでログインしてください。"
            return render_template('password-change-result.html', comment=comment)
    return render_template('password-change.html', form=form)
@app.route('/profile')
@login_required
def profile():
    """Show the logged-in user's profile and their projects, newest first."""
    user = User.query.filter_by(id=current_user.id).first()
    projects = Project.query.filter(Project.user_id == user.id).order_by(Project.create_dt.desc()).all()
    return render_template('profile.html', user=user, projects=projects)
@app.route('/project-create', methods=('GET', 'POST'))
@login_required
def project_create():
    """GET: render the project form. POST: create a project from a JSON body.

    The whole form payload is stored in the row's `json` column; the
    `published` flag is mirrored into its own column for querying.
    """
    form = ProjectForm()
    if request.method == 'POST':
        json_data = request.get_json() # Convert JSON to Python dict
        now = datetime.now()
        project = Project(
            user_id=current_user.id,
            create_dt=now,
            published=json_data['published'],
            json=json_data
        )
        db.session.add(project)
        db.session.commit()
        flash("新規プロジェクトの作成が正常に行われました。")
        print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
        return redirect('/')
    else:
        return render_template('project-create.html', form=form)
@app.route('/project/<int:project_id>')
def project(project_id):
    """Project detail page: the full logic-model tree plus unchecked impacts.

    Builds [parent_id, child] pairs for stakeholders -> activities ->
    outputs -> outcomes so the template can nest them, then computes which
    of the project's impact objectives no outcome has checked yet.
    Unpublished projects are visible to their owner or an admin only.
    """
    project = Project.query.filter(Project.id == project_id).first()
    stakeholders = Stakeholder.query.filter(Stakeholder.project_id == project_id).order_by(Stakeholder.id.asc()).all()
    # Get activities for each stakeholder
    activities = []
    for stakeholder in stakeholders:
        activities_raw = Activity.query.filter(Activity.stakeholder_id == stakeholder.id).order_by(Activity.id.asc()).all()
        for activity in activities_raw:
            activities.append([activity.stakeholder_id, activity])
    # Get outputs for each activity
    outputs = []
    for activity in activities:
        outputs_raw = Output.query.filter(Output.activity_id == activity[1].id).order_by(Output.id.asc()).all()
        for output in outputs_raw:
            outputs.append([output.activity_id, output])
    # Get outcomes for each output
    outcomes = []
    for output in outputs:
        outcomes_raw = Outcome.query.filter(Outcome.output_id == output[1].id).order_by(Outcome.id.asc()).all()
        for outcome in outcomes_raw:
            outcomes.append([outcome.output_id, outcome])
    # Find a list of project's impact that haven't been checked in outcome stage
    # 1. Create a unique list of outcomes' impactCheck fields to check against project data's impact
    outcomes_forcheck = []
    for output in outputs:
        outcomes_raw = Outcome.query.filter(Outcome.output_id == output[1].id).order_by(Outcome.id.asc()).all()
        for outcome in outcomes_raw:
            outcomes_forcheck.append(outcome)
    outcome_impact_check = []
    for outcome in outcomes_forcheck:
        for impact in outcome.json['impactCheck']:
            outcome_impact_check.append(impact)
    outcome_impact_set = set(outcome_impact_check)
    # 2. Create a list of impact from project data
    project_impact = []
    for impact in project.json['impact']:
        project_impact.append(impact['objective'])
    project_impact_set = set(project_impact)
    # 3. Find symetric difference between outcome_impact_set and project_impact_set
    impact_diff = project_impact_set.symmetric_difference(outcome_impact_set)
    # 4. Reorder impact diff by creating a new list against project_impact
    # (sets are unordered, so restore the project's original impact order)
    impact_diff_ordered = []
    for impact in project_impact:
        if impact in impact_diff:
            impact_diff_ordered.append(impact)
    # Editability check
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    # Limit the view of unpublished project to owner or admin only
    if project.published == False and editable == False:
        return render_template('owner-error.html', project=project)
    else:
        # Converting record.img_url (str repr of a list) from db to list
        if project.img_url:
            img_url_raw = project.img_url
            imgs = ast.literal_eval(img_url_raw)
        else:
            imgs = None
        return render_template('project.html', project=project, imgs=imgs, stakeholders=stakeholders, activities=activities, outputs=outputs, outcomes=outcomes, impact_diff_ordered=impact_diff_ordered, editable=editable)
@app.route('/project/<int:project_id>/edit', methods=('GET', 'POST'))
@login_required
def project_edit(project_id):
    """GET: edit form pre-filled with the project JSON. POST: save edits (AJAX).

    Only the project owner or an admin may edit; others get owner-error.
    """
    project = Project.query.filter(Project.id == project_id).first()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = ProjectForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            project.mod_user_id = current_user.id
            project.mod_dt = now
            project.published = json_data['published']
            project.json = json_data
            db.session.commit()
            flash("プロジェクトの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): this AJAX success string is Thai ("project edited
            # successfully") in an otherwise Japanese UI — confirm intended.
            return "แก้ไขโครงการสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(project.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('project-edit.html', project=project, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/delete')
@login_required
def project_delete(project_id):
    """Confirmation page before deleting a project (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('project-delete.html', project=project, project_id=project_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/delete/confirm')
@login_required
def project_delete_confirm(project_id):
    """Delete the project and redirect home (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(project)
        db.session.commit()
        flash("プロジェクトを正常に削除しました。")
        return redirect('/')
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/publish/<string:mode>')
@login_required
def project_share_toggle(project_id, mode):
    """Publish ('on') or unpublish ('off') a project (owner/admin only).

    Bug fix: the original fell off the end of the function — returning None,
    which Flask turns into an HTTP 500 — both when the caller was not
    allowed to edit and when *mode* was neither 'on' nor 'off'. Both cases
    now get an explicit response.
    """
    project = Project.query.filter(Project.id == project_id).first()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if not editable:
        return render_template('owner-error.html', project=project)
    if mode == 'on':
        published = True
    elif mode == 'off':
        published = False
    else:
        # Unknown mode: change nothing, just go back to the project page.
        return redirect('/project/' + str(project_id))
    # Keep the JSON blob and the dedicated column in sync. flag_modified is
    # required so SQLAlchemy notices the in-place mutation of the JSON dict.
    project.json['published'] = published
    project.published = published
    flag_modified(project, "json")
    db.session.add(project)
    db.session.commit()
    return redirect('/project/' + str(project_id))
@app.route('/project/<int:project_id>/stakeholder-create', methods=('GET', 'POST'))
@login_required
def stakeholder_create(project_id):
    """GET: render the stakeholder form. POST: create one from a JSON body (project owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    form = StakeholderForm()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            stakeholder = Stakeholder(
                project_id=project_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(stakeholder)
            db.session.commit()
            flash("ステークホルダーの新規作成が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            return render_template('stakeholder-create.html', form=form, project=project)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>')
def stakeholder(project_id, stakeholder_id):
    """Stakeholder detail page with its activities; unpublished projects are owner/admin only."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activities = Activity.query.filter(Activity.stakeholder_id == stakeholder_id).order_by(Activity.create_dt.desc()).all()
    editable = False
    if current_user.is_authenticated:
        if stakeholder.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    # Limit the view of unpublished project to owner or admin only
    if project.published == False and editable == False:
        return render_template('owner-error.html', project=project)
    else:
        return render_template('stakeholder.html', project=project, stakeholder=stakeholder, activities=activities, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/edit', methods=('GET', 'POST'))
@login_required
def stakeholder_edit(project_id, stakeholder_id):
    """GET: edit form pre-filled with the stakeholder JSON. POST: save edits (AJAX, owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    editable = False
    if current_user.is_authenticated:
        if stakeholder.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = StakeholderForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            stakeholder.mod_user_id = current_user.id
            stakeholder.mod_dt = now
            stakeholder.published = json_data['published']
            stakeholder.json = json_data
            db.session.commit()
            flash("ステークホルダーの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai AJAX success string — confirm intended.
            return "แก้ไขผู้มีส่วนได้เสียสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(stakeholder.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('stakeholder-edit.html', project=project, stakeholder=stakeholder, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/delete')
@login_required
def stakeholder_delete(project_id, stakeholder_id):
    """Confirmation page before deleting a stakeholder (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    editable = False
    if current_user.is_authenticated:
        if stakeholder.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('stakeholder-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/delete/confirm')
@login_required
def stakeholder_delete_confirm(project_id, stakeholder_id):
    """Delete the stakeholder and return to its project page (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    editable = False
    if current_user.is_authenticated:
        if stakeholder.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(stakeholder)
        db.session.commit()
        flash("ステークホルダーを正常に削除しました。")
        return redirect('/project/' + str(project_id))
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/activity-create', methods=('GET', 'POST'))
@login_required
def activity_create(project_id, stakeholder_id):
    """GET: render the activity form. POST: create one from a JSON body (project owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    form = ActivityForm()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            activity = Activity(
                stakeholder_id=stakeholder_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(activity)
            db.session.commit()
            flash("新しい活動が正常に作成されました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            return render_template('activity-create.html', form=form, project=project, stakeholder=stakeholder)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>')
def activity(project_id, stakeholder_id, activity_id):
    """Activity detail page; unpublished projects are owner/admin only."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = False
    if current_user.is_authenticated:
        if activity.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    # Limit the view of unpublished project to owner or admin only
    if project.published == False and editable == False:
        return render_template('owner-error.html', project=project)
    else:
        return render_template('activity.html', project=project, stakeholder=stakeholder, activity=activity, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/edit', methods=('GET', 'POST'))
@login_required
def activity_edit(project_id, stakeholder_id, activity_id):
    """GET: edit form pre-filled with the activity JSON. POST: save edits (AJAX, owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = False
    if current_user.is_authenticated:
        if activity.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = ActivityForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            activity.mod_user_id = current_user.id
            activity.mod_dt = now
            activity.published = json_data['published']
            activity.json = json_data
            db.session.commit()
            flash("活動の編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai AJAX success string — confirm intended.
            return "แก้ไขกิจกรรมสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(activity.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('activity-edit.html', project=project, stakeholder=stakeholder, activity=activity, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/delete')
@login_required
def activity_delete(project_id, stakeholder_id, activity_id):
    """Confirmation page before deleting an activity (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = False
    if current_user.is_authenticated:
        if activity.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('activity-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id, activity=activity, activity_id=activity_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/delete/confirm')
@login_required
def activity_delete_confirm(project_id, stakeholder_id, activity_id):
    """Delete the activity and return to its project page (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    editable = False
    if current_user.is_authenticated:
        if activity.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(activity)
        db.session.commit()
        flash("活動を正常に削除しました。")
        return redirect('/project/' + str(project_id))
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/output-create', methods=('GET', 'POST'))
@login_required
def output_create(project_id, stakeholder_id, activity_id):
    """GET: render the output form. POST: create one from a JSON body (project owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    form = OutputForm()
    editable = False
    if current_user.is_authenticated:
        if project.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        if request.method == 'POST':
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            output = Output(
                activity_id=activity_id,
                user_id=current_user.id,
                create_dt=now,
                published=json_data['published'],
                json=json_data
            )
            db.session.add(output)
            db.session.commit()
            flash("新規アウトプットが正常に作成されました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            return redirect('/')
        else:
            return render_template('output-create.html', form=form, project=project, stakeholder=stakeholder, activity=activity)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>')
def output(project_id, stakeholder_id, activity_id, output_id):
    """Output detail page; unpublished projects are owner/admin only."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    # Limit the view of unpublished project to owner or admin only
    if project.published == False and editable == False:
        return render_template('owner-error.html', project=project)
    else:
        return render_template('output.html', project=project, stakeholder=stakeholder, activity=activity, output=output, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/edit', methods=('GET', 'POST'))
@login_required
def output_edit(project_id, stakeholder_id, activity_id, output_id):
    """GET: edit form pre-filled with the output JSON. POST: save edits (AJAX, owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    form = OutputForm()
    if request.method == 'POST':
        if editable:
            json_data = request.get_json() # Convert JSON to Python dict
            now = datetime.now()
            output.mod_user_id = current_user.id
            output.mod_dt = now
            output.published = json_data['published']
            output.json = json_data
            db.session.commit()
            flash("アウトプットの編集が正常に行われました。")
            print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
            # NOTE(review): Thai AJAX success string — confirm intended.
            return "แก้ไขผลผลิตสำเร็จแล้ว"
        else:
            return render_template('owner-error.html', project=project)
    else:
        if editable:
            data = json.dumps(output.json, sort_keys=False, indent=4, ensure_ascii=False)
            return render_template('output-edit.html', project=project, stakeholder=stakeholder, activity=activity, output=output, form=form, data=data)
        else:
            return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/delete')
@login_required
def output_delete(project_id, stakeholder_id, activity_id, output_id):
    """Confirmation page before deleting an output (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        return render_template('output-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id, activity=activity, activity_id=activity_id, output=output, output_id=output_id)
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/delete/confirm')
@login_required
def output_delete_confirm(project_id, stakeholder_id, activity_id, output_id):
    """Delete the output and return to its project page (owner/admin only)."""
    project = Project.query.filter(Project.id == project_id).first()
    stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
    activity = Activity.query.filter(Activity.id == activity_id).first()
    output = Output.query.filter(Output.id == output_id).first()
    editable = False
    if current_user.is_authenticated:
        if output.user_id == current_user.id or current_user.role == 'admin':
            editable = True
    if editable:
        db.session.delete(output)
        db.session.commit()
        flash("アウトプットが正常に削除されました。")
        return redirect('/project/' + str(project_id))
    else:
        return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/outcome-create', methods=('GET', 'POST'))
@login_required
def outcome_create(project_id, stakeholder_id, activity_id, output_id):
project = Project.query.filter(Project.id == project_id).first()
stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
activity = Activity.query.filter(Activity.id == activity_id).first()
output = Output.query.filter(Output.id == output_id).first()
form = OutcomeForm()
editable = False
if current_user.is_authenticated:
if project.user_id == current_user.id or current_user.role == 'admin':
editable = True
if editable:
if request.method == 'POST':
json_data = request.get_json() # Convert JSON to Python dict
now = datetime.now()
outcome = Outcome(
output_id=output_id,
user_id=current_user.id,
create_dt=now,
published=json_data['published'],
json=json_data
)
db.session.add(outcome)
db.session.commit()
flash("新しいアウトカムの作成が正常に行われました。")
print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
return redirect('/')
else:
# Dump project's json to be used for objectives check
data_project = json.dumps(project.json, sort_keys=False, indent=4, ensure_ascii=False)
return render_template('outcome-create.html', form=form, project=project, stakeholder=stakeholder, activity=activity, output=output, data_project=data_project)
else:
return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>')
def outcome(project_id, stakeholder_id, activity_id, output_id, outcome_id):
project = Project.query.filter(Project.id == project_id).first()
stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
activity = Activity.query.filter(Activity.id == activity_id).first()
output = Output.query.filter(Output.id == output_id).first()
outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
editable = False
if current_user.is_authenticated:
if outcome.user_id == current_user.id or current_user.role == 'admin':
editable = True
if project.published == False and editable == False:
return render_template('owner-error.html', project=project)
else:
return render_template('outcome.html', project=project, stakeholder=stakeholder, activity=activity, output=output, outcome=outcome, editable=editable)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>/edit', methods=('GET', 'POST'))
@login_required
def outcome_edit(project_id, stakeholder_id, activity_id, output_id, outcome_id):
project = Project.query.filter(Project.id == project_id).first()
stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
activity = Activity.query.filter(Activity.id == activity_id).first()
output = Output.query.filter(Output.id == output_id).first()
outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
editable = False
if current_user.is_authenticated:
if outcome.user_id == current_user.id or current_user.role == 'admin':
editable = True
form = OutcomeForm()
if request.method == 'POST':
if editable:
json_data = request.get_json()
now = datetime.now()
outcome.mod_user_id = current_user.id
outcome.mod_dt = now
outcome.published = json_data['published']
outcome.json = json_data
db.session.commit()
flash("アウトカムの編集が正常に行われました。")
print(json.dumps(json_data, sort_keys=False, indent=4, ensure_ascii=False))
return "แก้ไขผลลัพธ์สำเร็จแล้ว"
else:
return render_template('owner-error.html', project=project)
else:
if editable:
data = json.dumps(outcome.json, sort_keys=False, indent=4, ensure_ascii=False)
data_project = json.dumps(project.json, sort_keys=False, indent=4, ensure_ascii=False)
return render_template('outcome-edit.html', project=project, stakeholder=stakeholder, activity=activity, output=output, outcome=outcome, form=form, data=data, data_project=data_project)
else:
return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>/delete')
@login_required
def outcome_delete(project_id, stakeholder_id, activity_id, output_id, outcome_id):
project = Project.query.filter(Project.id == project_id).first()
stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
activity = Activity.query.filter(Activity.id == activity_id).first()
output = Output.query.filter(Output.id == output_id).first()
outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
editable = False
if current_user.is_authenticated:
if outcome.user_id == current_user.id or current_user.role == 'admin':
editable = True
if editable:
return render_template('outcome-delete.html', project=project, project_id=project_id, stakeholder=stakeholder, stakeholder_id=stakeholder_id, activity=activity, activity_id=activity_id, output=output, output_id=output_id, outcome=outcome, outcome_id=outcome_id)
else:
return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/<int:stakeholder_id>/<int:activity_id>/<int:output_id>/<int:outcome_id>/delete/confirm')
@login_required
def outcome_delete_confirm(project_id, stakeholder_id, activity_id, output_id, outcome_id):
project = Project.query.filter(Project.id == project_id).first()
stakeholder = Stakeholder.query.filter(Stakeholder.id == stakeholder_id).first()
activity = Activity.query.filter(Activity.id == activity_id).first()
output = Output.query.filter(Output.id == output_id).first()
outcome = Outcome.query.filter(Outcome.id == outcome_id).first()
editable = False
if current_user.is_authenticated:
if outcome.user_id == current_user.id or current_user.role == 'admin':
editable = True
if editable:
db.session.delete(outcome)
db.session.commit()
flash("結果を正常に削除しました。")
return redirect('/project/' + str(project_id))
else:
return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/upload')
def project_upload(project_id):
form = ProjectForm()
project = Project.query.filter(Project.id == project_id).first()
editable = False
if current_user.is_authenticated:
if project.user_id == current_user.id or current_user.role == 'admin':
editable = True
if editable:
return render_template('upload.html', form=form, project=project)
else:
return render_template('owner-error.html', project=project)
@app.route('/project/<int:project_id>/file-post', methods=('GET', 'POST'))
def file_post(project_id):
if request.method == 'POST':
files = request.files.getlist("file")
if files:
file_num = 0
file_uploaded_num = 0
file_list = []
status = []
for file in files:
file_num += 1
file_uploaded_name = file.filename
print(f"To upload: {file_uploaded_name}")
# Upload file filename
if allowed_file(file_uploaded_name):
file_uploaded_num += 1
# Local and S3 folder, S3 bucket name
local_folder = "project-img"
s3_bucket_folder = "project-img"
s3_bucket = "impactflowjp"
# Standardise file filename
file_name = "project-" + str(project_id) + "-" + str(file_uploaded_num) + os.path.splitext(file_uploaded_name)[1]
# Upload to server. NEED app.route_path FOR os.path.join TO WORK
file.save(os.path.join(app.root_path, 'static/' + local_folder, file_name))
# Upload to S3
s3_destination = (s3_bucket_folder + '/' + file_name)
s3_testmultiupload = boto3.resource('s3').Bucket(s3_bucket)
s3_testmultiupload.upload_file(
Filename=os.path.join(app.root_path, 'static/' + local_folder, file_name),
Key=s3_destination, ExtraArgs={'ContentType': 'image/jpeg', 'ACL': 'public-read'}
)
print(f"Uploaded: {file_uploaded_name}")
# Generate file URL
file_url = 'https://' + s3_bucket + '.s3.ap-northeast-1.amazonaws.com/' + s3_bucket_folder + '/' + file_name
# Append each file URL to file_list
file_list.append(file_url)
# Append info of each file to status, to be returned as JSON
status.append({
"uploadStatus": True,
"uploadedFileId": file_uploaded_num,
"uploadedOriginalName": file_uploaded_name,
"uploadedUpdatedName": file_name,
"fileUrl": file_url
})
else:
status.append({
"uploadStatus": False,
"uploadedFileId": 0,
"uploadedOriginalName": file_uploaded_name,
"uploadedUpdatedName": 0
})
print(f"DB file list: {file_list}")
return jsonify(status)
else:
return jsonify({"status": "no file uploaded"})
@app.route('/project/<int:project_id>/file-save', methods=('GET', 'POST'))
def file_save(project_id):
if request.method == 'POST':
# Get request JSON and parse as dict
file_url_list = request.get_json()
print(file_url_list)
project = Project.query.filter(Project.id == project_id).first()
project.img_url = str(file_url_list)
db.session.commit()
return jsonify(file_url_list)
@app.route('/indicator/view')
@login_required
def indicator_view():
editable = False
if current_user.is_authenticated:
if current_user.role == 'admin': # Admin only
editable = True
if editable:
# Connect and define the database
client = pymongo.MongoClient(app.config['DB_URI'])
mongodb = client.impactflowjp
indicators = mongodb.indicator.find({})
return render_template('indicator-view.html', indicators=indicators)
else:
return render_template('owner-error.html', project=project)
@app.route('/indicator/api', methods=('GET', 'POST'))
def indicator_api():
if request.method == 'GET':
# Connect and define the database
client = pymongo.MongoClient(app.config['DB_URI'])
mongodb = client.impactflowjp
indicators = []
for result in mongodb.indicator.find({}):
indicators.append({
"category": result['category'],
"subcategory": result['Sub-category'],
"indicator": result['indicator'],
"source": result['source'],
"content": result['Sub-category'] + " : " + result['indicator'] + " (" + result['source'] + ")"
})
return jsonify(indicators) | true | true |
f7f4efe85a0fc60243e9ad471ae14f11b921d19c | 705 | py | Python | network.py | internetlab-br/acompanhamento_candidatos | a3d0e9f38e9af6dadc2daada6d3778b11f94ffc3 | [
"MIT"
] | 13 | 2018-08-17T16:07:05.000Z | 2021-01-29T22:12:56.000Z | network.py | jvsouto/acompanhamento_candidatos | a3d0e9f38e9af6dadc2daada6d3778b11f94ffc3 | [
"MIT"
] | null | null | null | network.py | jvsouto/acompanhamento_candidatos | a3d0e9f38e9af6dadc2daada6d3778b11f94ffc3 | [
"MIT"
] | 3 | 2018-08-07T01:15:41.000Z | 2018-10-27T08:16:22.000Z | import database
import re
import csv
from collections import Counter
db = database.conecta_banco()
cursor = db.cursor()
sql = "SELECT * FROM bolso_tweets.Tweets where query = \" \";" # adicione qualquer query usada na captura
cursor.execute(sql)
lista = cursor.fetchall()
db.close()
with open('retweets.csv', 'w', newline="\n") as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["source", "target", "start", "end"])
for tweet in lista:
result = re.search('RT @(.*?):', tweet[1])
if result:
spamwriter.writerow([tweet[3], result.group(1), tweet[2], tweet[2]])
| 28.2 | 105 | 0.636879 | import database
import re
import csv
from collections import Counter
db = database.conecta_banco()
cursor = db.cursor()
sql = "SELECT * FROM bolso_tweets.Tweets where query = \" \";"
cursor.execute(sql)
lista = cursor.fetchall()
db.close()
with open('retweets.csv', 'w', newline="\n") as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["source", "target", "start", "end"])
for tweet in lista:
result = re.search('RT @(.*?):', tweet[1])
if result:
spamwriter.writerow([tweet[3], result.group(1), tweet[2], tweet[2]])
| true | true |
f7f4f1c975fd7fa80dd1cddb334975763cf94e79 | 962 | py | Python | ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 1,664 | 2015-01-03T09:35:21.000Z | 2022-03-31T04:55:24.000Z | ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 3,018 | 2015-02-19T20:16:10.000Z | 2021-11-13T20:47:48.000Z | ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 1,673 | 2015-01-06T14:14:42.000Z | 2022-03-31T07:22:30.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import Hook
class BeforeRestartHook(Hook):
def hook(self, env):
self.run_custom_hook('before-START')
if __name__ == "__main__":
BeforeRestartHook().execute()
| 31.032258 | 72 | 0.781705 | from resource_management import Hook
class BeforeRestartHook(Hook):
def hook(self, env):
self.run_custom_hook('before-START')
if __name__ == "__main__":
BeforeRestartHook().execute()
| true | true |
f7f4f430cb779958a5c1c44da764f6eccfdb9707 | 4,633 | py | Python | qa/rpc-tests/signrawtransactions.py | scalow2/gechcoin | 7ff9a68c7cb3d19a2c73ae1004fe78c6da0ea716 | [
"MIT"
] | null | null | null | qa/rpc-tests/signrawtransactions.py | scalow2/gechcoin | 7ff9a68c7cb3d19a2c73ae1004fe78c6da0ea716 | [
"MIT"
] | null | null | null | qa/rpc-tests/signrawtransactions.py | scalow2/gechcoin | 7ff9a68c7cb3d19a2c73ae1004fe78c6da0ea716 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The GECH Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BitcoinTestFramework):
"""Tests transaction signing via RPC command "signrawtransaction"."""
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def successful_signing_test(self):
"""Creates and signs a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| 42.118182 | 120 | 0.677531 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def successful_signing_test(self):
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| true | true |
f7f4f68f9502a5f8e328ff746bf39d85acb31396 | 1,936 | py | Python | recognition/arcface_paddle/deploy/pdserving/web_service.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 1 | 2022-01-12T04:15:51.000Z | 2022-01-12T04:15:51.000Z | recognition/arcface_paddle/deploy/pdserving/web_service.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 1 | 2021-12-02T05:22:27.000Z | 2021-12-02T05:23:11.000Z | recognition/arcface_paddle/deploy/pdserving/web_service.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 1 | 2022-02-10T04:19:05.000Z | 2022-02-10T04:19:05.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server.web_service import WebService, Op
import numpy as np
import cv2
import base64
class ArcFaceOp(Op):
def init_op(self):
pass
def preprocess(self, input_dicts, data_id, log_id):
(_, input_dict), = input_dicts.items()
data = base64.b64decode(input_dict["image"])
data = np.frombuffer(data, np.uint8)
# Note: class variables(self.var) can only be used in process op mode
img = cv2.imdecode(data, cv2.IMREAD_COLOR)
img = cv2.resize(img,(112,112))
# normalize to mean 0.5, std 0.5
img = (img - 127.5) * 0.00784313725
# BGR2RGB
img = img[:, :, ::-1]
img = img.transpose((2, 0, 1))
img = np.expand_dims(img, 0)
img = img.astype('float32')
return {"x":img.copy()}, False, None, ""
def postprocess(self, input_dicts, fetch_dict, log_id):
out = fetch_dict["save_infer_model/scale_0.tmp_1"]
out_dict = {"out": out}
return out_dict, None, ""
class ArcFaceService(WebService):
def get_pipeline_response(self, read_op):
arcface_op = ArcFaceOp(name="ArcFace", input_ops=[read_op])
return arcface_op
arcface_service = ArcFaceService(name="ArcFace")
arcface_service.prepare_pipeline_config("config.yml")
arcface_service.run_service() | 35.2 | 77 | 0.682335 |
from paddle_serving_server.web_service import WebService, Op
import numpy as np
import cv2
import base64
class ArcFaceOp(Op):
def init_op(self):
pass
def preprocess(self, input_dicts, data_id, log_id):
(_, input_dict), = input_dicts.items()
data = base64.b64decode(input_dict["image"])
data = np.frombuffer(data, np.uint8)
img = cv2.imdecode(data, cv2.IMREAD_COLOR)
img = cv2.resize(img,(112,112))
img = (img - 127.5) * 0.00784313725
img = img[:, :, ::-1]
img = img.transpose((2, 0, 1))
img = np.expand_dims(img, 0)
img = img.astype('float32')
return {"x":img.copy()}, False, None, ""
def postprocess(self, input_dicts, fetch_dict, log_id):
out = fetch_dict["save_infer_model/scale_0.tmp_1"]
out_dict = {"out": out}
return out_dict, None, ""
class ArcFaceService(WebService):
def get_pipeline_response(self, read_op):
arcface_op = ArcFaceOp(name="ArcFace", input_ops=[read_op])
return arcface_op
arcface_service = ArcFaceService(name="ArcFace")
arcface_service.prepare_pipeline_config("config.yml")
arcface_service.run_service() | true | true |
f7f4f6a5d626554e5319c3ee7f24cd379fb2c8fa | 22,595 | py | Python | pyscf/pbc/mp/kmp2.py | seunghoonlee89/pyscf-ecCC-TCC | 2091566fb83c1474e40bf74f271be2ce4611f60c | [
"Apache-2.0"
] | 2 | 2021-09-17T06:10:17.000Z | 2022-01-22T23:37:22.000Z | pyscf/pbc/mp/kmp2.py | r-peng/pyscf | 9a14f9bcc63bc75f5939cb4d00eb47861d8d8989 | [
"Apache-2.0"
] | null | null | null | pyscf/pbc/mp/kmp2.py | r-peng/pyscf | 9a14f9bcc63bc75f5939cb4d00eb47861d8d8989 | [
"Apache-2.0"
] | 2 | 2021-09-16T23:37:42.000Z | 2021-10-14T23:00:39.000Z | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Timothy Berkelbach <tim.berkelbach@gmail.com>
# James McClain <jdmcclain47@gmail.com>
#
'''
kpoint-adapted and spin-adapted MP2
t2[i,j,a,b] = <ij|ab> / D_ij^ab
t2 and eris are never stored in full, only a partial
eri of size (nkpts,nocc,nocc,nvir,nvir)
'''
import time
import numpy as np
from scipy.linalg import block_diag
from pyscf import lib
from pyscf.lib import logger, einsum
from pyscf.mp import mp2
from pyscf.pbc.lib import kpts_helper
from pyscf.lib.parameters import LARGE_DENOM
from pyscf import __config__
WITH_T2 = getattr(__config__, 'mp_mp2_with_t2', True)
def kernel(mp, mo_energy, mo_coeff, verbose=logger.NOTE, with_t2=WITH_T2):
    """Compute the spin-adapted k-point RMP2 correlation energy.

    Only a partial ERI block of shape (nkpts, nocc, nocc, nvir, nvir) is held
    in memory at a time (for fixed ki, kj); the full t2 amplitudes are
    materialized only when ``with_t2`` is True.

    Args:
        mp: KMP2 object providing ``nmo``, ``nocc``, ``nkpts``, ``kpts``,
            ``khelper`` and ``_scf`` (mean-field with density fitting).
        mo_energy: per-k-point orbital energies (padded convention).
        mo_coeff: per-k-point MO coefficient matrices (padded convention).
        verbose: unused here; kept for API compatibility with callers.
        with_t2: if True, also build and return the full t2 tensor.

    Returns:
        (emp2, t2): ``emp2`` is the real MP2 correlation energy; ``t2`` has
        shape (nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), or is None
        when ``with_t2`` is False.
    """
    nmo = mp.nmo
    nocc = mp.nocc
    nvir = nmo - nocc
    nkpts = mp.nkpts

    fao2mo = mp._scf.with_df.ao2mo
    kconserv = mp.khelper.kconserv
    emp2 = 0.
    oovv_ij = np.zeros((nkpts,nocc,nocc,nvir,nvir), dtype=mo_coeff[0].dtype)

    mo_e_o = [mo_energy[k][:nocc] for k in range(nkpts)]
    mo_e_v = [mo_energy[k][nocc:] for k in range(nkpts)]

    # Location of non-zero (i.e. non-padded) elements in the occupied and
    # virtual spaces; padded orbitals must be excluded from the denominator.
    nonzero_opadding, nonzero_vpadding = padding_k_idx(mp, kind="split")

    if with_t2:
        t2 = np.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=complex)
    else:
        t2 = None

    for ki in range(nkpts):
        for kj in range(nkpts):
            # Occupied blocks depend only on ki/kj; hoisted out of the ka loop.
            orbo_i = mo_coeff[ki][:, :nocc]
            orbo_j = mo_coeff[kj][:, :nocc]
            for ka in range(nkpts):
                # Momentum conservation fixes kb: ki + kj = ka + kb (mod G)
                kb = kconserv[ki, ka, kj]
                orbv_a = mo_coeff[ka][:, nocc:]
                orbv_b = mo_coeff[kb][:, nocc:]
                # (ia|jb) integrals reordered to <ij|ab>; the 1/nkpts factor
                # comes from the k-point sampling normalization.
                oovv_ij[ka] = fao2mo(
                    (orbo_i, orbv_a, orbo_j, orbv_b),
                    (mp.kpts[ki], mp.kpts[ka], mp.kpts[kj], mp.kpts[kb]),
                    compact=False).reshape(nocc, nvir, nocc, nvir).transpose(0, 2, 1, 3) / nkpts
            for ka in range(nkpts):
                kb = kconserv[ki, ka, kj]

                # Remove zero/padded elements from the denominator: padded
                # entries keep LARGE_DENOM so their amplitudes are ~0.
                eia = LARGE_DENOM * np.ones((nocc, nvir), dtype=mo_energy[0].dtype)
                n0_ovp_ia = np.ix_(nonzero_opadding[ki], nonzero_vpadding[ka])
                eia[n0_ovp_ia] = (mo_e_o[ki][:, None] - mo_e_v[ka])[n0_ovp_ia]

                ejb = LARGE_DENOM * np.ones((nocc, nvir), dtype=mo_energy[0].dtype)
                n0_ovp_jb = np.ix_(nonzero_opadding[kj], nonzero_vpadding[kb])
                ejb[n0_ovp_jb] = (mo_e_o[kj][:, None] - mo_e_v[kb])[n0_ovp_jb]

                eijab = lib.direct_sum('ia,jb->ijab', eia, ejb)
                t2_ijab = np.conj(oovv_ij[ka] / eijab)
                if with_t2:
                    t2[ki, kj, ka] = t2_ijab
                # Spin-adapted contraction: 2J - K
                woovv = 2 * oovv_ij[ka] - oovv_ij[kb].transpose(0, 1, 3, 2)
                emp2 += np.einsum('ijab,ijab', t2_ijab, woovv).real

    emp2 /= nkpts

    return emp2, t2
def _padding_k_idx(nmo, nocc, kind="split"):
"""A convention used for padding vectors, matrices and tensors in case when occupation numbers depend on the
k-point index.
Args:
nmo (Iterable): k-dependent orbital number;
nocc (Iterable): k-dependent occupation numbers;
kind (str): either "split" (occupied and virtual spaces are split) or "joint" (occupied and virtual spaces are
the joint;
Returns:
Two lists corresponding to the occupied and virtual spaces for kind="split". Each list contains integer arrays
with indexes pointing to actual non-zero entries in the padded vector/matrix/tensor. If kind="joint", a single
list of arrays is returned corresponding to the entire MO space.
"""
if kind not in ("split", "joint"):
raise ValueError("The 'kind' argument must be one of 'split', 'joint'")
if kind == "split":
indexes_o = []
indexes_v = []
else:
indexes = []
nocc = np.array(nocc)
nmo = np.array(nmo)
nvirt = nmo - nocc
dense_o = np.amax(nocc)
dense_v = np.amax(nvirt)
dense_nmo = dense_o + dense_v
for k_o, k_nmo in zip(nocc, nmo):
k_v = k_nmo - k_o
if kind == "split":
indexes_o.append(np.arange(k_o))
indexes_v.append(np.arange(dense_v - k_v, dense_v))
else:
indexes.append(np.concatenate((
np.arange(k_o),
np.arange(dense_nmo - k_v, dense_nmo),
)))
if kind == "split":
return indexes_o, indexes_v
else:
return indexes
def padding_k_idx(mp, kind="split"):
    """Non-padded index convention for k-dependent quantities of ``mp``.

    Fock and related matrices are stored densely with an extra k-point axis.
    When occupation numbers vary with k (e.g. a metal), or when some k-points
    carry more Bloch basis functions than others, the dense storage must be
    padded with unused fictitious occupied/virtual states. By convention the
    padding is placed at the Fermi level, as in this example:

    +----+--------+--------+--------+
    |    |  k=0   |  k=1   |  k=2   |
    |    +--------+--------+--------+
    |    | nocc=2 | nocc=3 | nocc=2 |
    |    | nvir=4 | nvir=3 | nvir=3 |
    +====+========+========+========+
    | v3 |  k0v3  |  k1v2  |  k2v2  |
    +----+--------+--------+--------+
    | v2 |  k0v2  |  k1v1  |  k2v1  |
    +----+--------+--------+--------+
    | v1 |  k0v1  |  k1v0  |  k2v0  |
    +----+--------+--------+--------+
    | v0 |  k0v0  |        |        |
    +====+========+========+========+
    |          Fermi level          |
    +====+========+========+========+
    | o2 |        |  k1o2  |        |
    +----+--------+--------+--------+
    | o1 |  k0o1  |  k1o1  |  k2o1  |
    +----+--------+--------+--------+
    | o0 |  k0o0  |  k1o0  |  k2o0  |
    +----+--------+--------+--------+

    Here `get_nmo(mp, per_kpoint=True) == (6, 6, 5)` and
    `get_nocc(mp, per_kpoint=True) == (2, 3, 2)`, while the dense
    `get_nmo(mp) == 7` and `get_nocc(mp) == 3` are the padded dimensions.
    The filled entries of the table are indexed as:

    >>> padding_k_idx(mp, kind="split")
    ([(0, 1), (0, 1, 2), (0, 1)], [(0, 1, 2, 3), (1, 2, 3), (1, 2, 3)])

    >>> padding_k_idx(mp, kind="joint")
    [(0, 1, 3, 4, 5, 6), (0, 1, 2, 4, 5, 6), (0, 1, 4, 5, 6)]

    Args:
        mp (:class:`MP2`): an SCF or post-Hartree-Fock object.
        kind (str): "split" for separate occupied/virtual index lists, or
            "joint" for a single list over the whole MO space.

    Returns:
        For kind="split", two lists (occupied, virtual) of per-k-point integer
        index arrays pointing at the non-padded entries; for kind="joint",
        one such list over the entire MO space.
    """
    nmo_per_kpt = mp.get_nmo(per_kpoint=True)
    nocc_per_kpt = mp.get_nocc(per_kpoint=True)
    return _padding_k_idx(nmo_per_kpt, nocc_per_kpt, kind=kind)
def padded_mo_energy(mp, mo_energy):
    """
    Pads energies of active MOs.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        mo_energy (ndarray): original non-padded molecular energies;

    Returns:
        Padded molecular energies.
    """
    active = get_frozen_mask(mp)
    dest_idx = padding_k_idx(mp, kind="joint")
    padded = np.zeros((mp.nkpts, mp.nmo), dtype=mo_energy[0].dtype)
    for k, (mask, idx) in enumerate(zip(active, dest_idx)):
        # Scatter the active (non-frozen) energies into their padded slots;
        # padded entries that receive no value stay at zero (the Fermi level).
        padded[k, idx] = mo_energy[k][mask]
    return padded
def padded_mo_coeff(mp, mo_coeff):
    """
    Pads coefficients of active MOs.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        mo_coeff (ndarray): original non-padded molecular coefficients;

    Returns:
        Padded molecular coefficients.
    """
    active = get_frozen_mask(mp)
    dest_idx = padding_k_idx(mp, kind="joint")
    nao = mo_coeff[0].shape[0]
    padded = np.zeros((mp.nkpts, nao, mp.nmo), dtype=mo_coeff[0].dtype)
    for k, (mask, idx) in enumerate(zip(active, dest_idx)):
        # Scatter the active (non-frozen) MO columns into their padded slots;
        # columns that receive no value stay zero.
        padded[k][:, idx] = mo_coeff[k][:, mask]
    return padded
def _frozen_sanity_check(frozen, mo_occ, kpt_idx):
'''Performs a few sanity checks on the frozen array and mo_occ.
Specific tests include checking for duplicates within the frozen array.
Args:
frozen (array_like of int): The orbital indices that will be frozen.
mo_occ (:obj:`ndarray` of int): The occupuation number for each orbital
resulting from a mean-field-like calculation.
kpt_idx (int): The k-point that `mo_occ` and `frozen` belong to.
'''
frozen = np.array(frozen)
nocc = np.count_nonzero(mo_occ > 0)
nvir = len(mo_occ) - nocc
assert nocc, 'No occupied orbitals?\n\nnocc = %s\nmo_occ = %s' % (nocc, mo_occ)
all_frozen_unique = (len(frozen) - len(np.unique(frozen))) == 0
if not all_frozen_unique:
raise RuntimeError('Frozen orbital list contains duplicates!\n\nkpt_idx %s\n'
'frozen %s' % (kpt_idx, frozen))
if len(frozen) > 0 and np.max(frozen) > len(mo_occ) - 1:
raise RuntimeError('Freezing orbital not in MO list!\n\nkpt_idx %s\n'
'frozen %s\nmax orbital idx %s' % (kpt_idx, frozen, len(mo_occ) - 1))
def get_nocc(mp, per_kpoint=False):
    '''Number of occupied orbitals for k-point calculations.

    Number of occupied orbitals for use in a calculation with k-points, taking into
    account frozen orbitals.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        per_kpoint (bool, optional): True returns the number of occupied
            orbitals at each k-point.  False gives the max of this list.

    Returns:
        nocc (int, list of int): Number of occupied orbitals. For return type, see description of arg
            `per_kpoint`.
    '''
    # Fractional occupations (e.g. from smearing) make the occupied/virtual
    # partition ambiguous, so refuse them outright.
    for i, moocc in enumerate(mp.mo_occ):
        if np.any(moocc % 1 != 0):
            raise RuntimeError("Fractional occupation numbers encountered @ kp={:d}: {}. This may have been caused by "
                               "smearing of occupation numbers in the mean-field calculation. If so, consider "
                               "executing mf.smearing_method = False; mf.mo_occ = mf.get_occ() prior to calling "
                               "this".format(i, moocc))
    if mp._nocc is not None:
        # A user-supplied value always wins.
        return mp._nocc
    elif mp.frozen is None:
        nocc = [np.count_nonzero(mp.mo_occ[ikpt]) for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen, (int, np.integer)):
        # A single integer freezes that many lowest orbitals at every k-point.
        nocc = [(np.count_nonzero(mp.mo_occ[ikpt]) - mp.frozen) for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen[0], (int, np.integer)):
        # One list of frozen orbital indices, shared by all k-points.
        [_frozen_sanity_check(mp.frozen, mp.mo_occ[ikpt], ikpt) for ikpt in range(mp.nkpts)]
        nocc = []
        for ikpt in range(mp.nkpts):
            max_occ_idx = np.max(np.where(mp.mo_occ[ikpt] > 0))
            # Only frozen indices at or below the highest occupied orbital
            # reduce the occupied count; the rest freeze virtuals.
            frozen_nocc = np.sum(np.array(mp.frozen) <= max_occ_idx)
            nocc.append(np.count_nonzero(mp.mo_occ[ikpt]) - frozen_nocc)
    elif isinstance(mp.frozen[0], (list, np.ndarray)):
        # Per-k-point lists of frozen orbital indices.
        nkpts = len(mp.frozen)
        if nkpts != mp.nkpts:
            raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
                               'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
                               '(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
        [_frozen_sanity_check(frozen, mo_occ, ikpt) for ikpt, frozen, mo_occ in zip(range(nkpts), mp.frozen, mp.mo_occ)]
        nocc = []
        for ikpt, frozen in enumerate(mp.frozen):
            max_occ_idx = np.max(np.where(mp.mo_occ[ikpt] > 0))
            frozen_nocc = np.sum(np.array(frozen) <= max_occ_idx)
            nocc.append(np.count_nonzero(mp.mo_occ[ikpt]) - frozen_nocc)
    else:
        raise NotImplementedError

    assert any(np.array(nocc) > 0), ('Must have occupied orbitals! \n\nnocc %s\nfrozen %s\nmo_occ %s' %
                                     (nocc, mp.frozen, mp.mo_occ))

    if not per_kpoint:
        # The dense (padded) occupied dimension is the maximum over k-points.
        nocc = np.amax(nocc)

    return nocc
def get_nmo(mp, per_kpoint=False):
    '''Number of orbitals for k-point calculations.

    Number of orbitals for use in a calculation with k-points, taking into account
    frozen orbitals.

    Note:
        If `per_kpoint` is False, then the number of orbitals here is equal to max(nocc) + max(nvir),
        where each max is done over all k-points.  Otherwise the number of orbitals is returned
        as a list of number of orbitals at each k-point.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        per_kpoint (bool, optional): True returns the number of orbitals at each k-point.
            For a description of False, see Note.

    Returns:
        nmo (int, list of int): Number of orbitals. For return type, see description of arg
            `per_kpoint`.
    '''
    if mp._nmo is not None:
        # A user-supplied value always wins.
        return mp._nmo

    if mp.frozen is None:
        nmo = [len(mp.mo_occ[k]) for k in range(mp.nkpts)]
    elif isinstance(mp.frozen, (int, np.integer)):
        # A single integer freezes that many orbitals at every k-point.
        nmo = [len(mp.mo_occ[k]) - mp.frozen for k in range(mp.nkpts)]
    elif isinstance(mp.frozen[0], (int, np.integer)):
        # One list of frozen orbital indices, shared by all k-points.
        for k in range(mp.nkpts):
            _frozen_sanity_check(mp.frozen, mp.mo_occ[k], k)
        nmo = [len(mp.mo_occ[k]) - len(mp.frozen) for k in range(mp.nkpts)]
    elif isinstance(mp.frozen, (list, np.ndarray)):
        # Per-k-point lists of frozen orbital indices.
        nkpts = len(mp.frozen)
        if nkpts != mp.nkpts:
            raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
                               'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
                               '(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
        for k in range(nkpts):
            _frozen_sanity_check(mp.frozen[k], mp.mo_occ[k], k)
        nmo = [len(mp.mo_occ[k]) - len(mp.frozen[k]) for k in range(nkpts)]
    else:
        raise NotImplementedError

    assert all(np.array(nmo) > 0), ('Must have a positive number of orbitals!\n\nnmo %s\nfrozen %s\nmo_occ %s' %
                                    (nmo, mp.frozen, mp.mo_occ))

    if not per_kpoint:
        # Reserve room for max(nocc) + max(nvir) so occupied and virtual
        # blocks from every k-point fit into one dense array.
        nocc = mp.get_nocc(per_kpoint=True)
        nmo = np.max(nocc) + np.max(np.array(nmo) - np.array(nocc))

    return nmo
def get_frozen_mask(mp):
    '''Boolean mask for orbitals in k-point post-HF method.

    Creates a boolean mask to remove frozen orbitals and keep other orbitals for post-HF
    calculations.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.

    Returns:
        moidx (list of :obj:`ndarray` of bool): Boolean mask of orbitals to include.
    '''
    # The deprecated ``np.bool`` alias was removed in NumPy 1.24; the builtin
    # ``bool`` is the supported equivalent.
    moidx = [np.ones(x.size, dtype=bool) for x in mp.mo_occ]
    if mp.frozen is None:
        pass
    elif isinstance(mp.frozen, (int, np.integer)):
        # A single integer freezes the lowest-lying orbitals at every k-point.
        for idx in moidx:
            idx[:mp.frozen] = False
    elif isinstance(mp.frozen[0], (int, np.integer)):
        # One list of orbital indices, applied identically at every k-point.
        frozen = list(mp.frozen)
        for idx in moidx:
            idx[frozen] = False
    elif isinstance(mp.frozen[0], (list, np.ndarray)):
        # Per-k-point lists of frozen orbital indices.
        nkpts = len(mp.frozen)
        if nkpts != mp.nkpts:
            raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
                               'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
                               '(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
        [_frozen_sanity_check(fro, mo_occ, ikpt) for ikpt, fro, mo_occ in zip(range(nkpts), mp.frozen, mp.mo_occ)]
        for ikpt, kpt_occ in enumerate(moidx):
            kpt_occ[mp.frozen[ikpt]] = False
    else:
        raise NotImplementedError

    return moidx
def _add_padding(mp, mo_coeff, mo_energy):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
nmo = mp.nmo
nocc = mp.nocc
nvir = nmo - nocc
nkpts = mp.nkpts
# Check if these are padded mo coefficients and energies
if not np.all([x.shape[0] == nmo for x in mo_coeff]):
mo_coeff = padded_mo_coeff(mp, mo_coeff)
if not np.all([x.shape[0] == nmo for x in mo_energy]):
mo_energy = padded_mo_energy(mp, mo_energy)
return mo_coeff, mo_energy
def make_rdm1(mp, t2=None, kind="compact"):
    r"""
    Spin-traced one-particle density matrix in the MO basis representation.
    The occupied-virtual orbital response is not included.

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)

    Args:
        mp (KMP2): a KMP2 kernel object;
        t2 (ndarray): a t2 MP2 tensor;
        kind (str): either 'compact' or 'padded' - defines behavior for k-dependent MO basis sizes;

    Returns:
        A k-dependent single-particle density matrix.
    """
    if kind not in ("compact", "padded"):
        raise ValueError("The 'kind' argument should be either 'compact' or 'padded'")
    occ_blocks, vir_blocks = _gamma1_intermediates(mp, t2=t2)
    nonzero_idx = padding_k_idx(mp, kind="joint")
    rdm1 = []
    for k, idx in enumerate(nonzero_idx):
        oo = occ_blocks[k]
        # Add the identity to the occupied block (mean-field occupations).
        oo += np.eye(*oo.shape)
        dm = block_diag(oo, vir_blocks[k])
        # Hermitize by adding the conjugate transpose.
        dm += dm.conj().T
        if kind == "padded":
            rdm1.append(dm)
        else:
            # Strip the fictitious padded rows/columns.
            rdm1.append(dm[np.ix_(idx, idx)])
    return rdm1
def _gamma1_intermediates(mp, t2=None):
# Memory optimization should be here
if t2 is None:
t2 = mp.t2
if t2 is None:
raise NotImplementedError("Run kmp2.kernel with `with_t2=True`")
nmo = mp.nmo
nocc = mp.nocc
nvir = nmo - nocc
nkpts = mp.nkpts
dtype = t2.dtype
dm1occ = np.zeros((nkpts, nocc, nocc), dtype=dtype)
dm1vir = np.zeros((nkpts, nvir, nvir), dtype=dtype)
for ki in range(nkpts):
for kj in range(nkpts):
for ka in range(nkpts):
kb = mp.khelper.kconserv[ki, ka, kj]
dm1vir[kb] += einsum('ijax,ijay->yx', t2[ki][kj][ka].conj(), t2[ki][kj][ka]) * 2 -\
einsum('ijax,ijya->yx', t2[ki][kj][ka].conj(), t2[ki][kj][kb])
dm1occ[kj] += einsum('ixab,iyab->xy', t2[ki][kj][ka].conj(), t2[ki][kj][ka]) * 2 -\
einsum('ixab,iyba->xy', t2[ki][kj][ka].conj(), t2[ki][kj][kb])
return -dm1occ, dm1vir
class KMP2(mp2.MP2):
    """Restricted MP2 with k-point sampling, built on the molecular MP2 driver."""

    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        # Fall back to the mean-field orbitals when none are supplied.
        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ

        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        self.frozen = frozen

##################################################
# don't modify the following attributes, they are not input options
        self.kpts = mf.kpts
        self.mo_energy = mf.mo_energy
        self.nkpts = len(self.kpts)
        self.khelper = kpts_helper.KptsHelper(mf.cell, mf.kpts)
        self.mo_coeff = mo_coeff
        self.mo_occ = mo_occ
        # _nocc/_nmo are caches/overrides consulted by get_nocc/get_nmo.
        self._nocc = None
        self._nmo = None
        self.e_corr = None
        self.e_hf = None
        self.t2 = None
        self._keys = set(self.__dict__.keys())

    # Module-level helpers double as methods (they take ``mp`` first).
    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask
    make_rdm1 = make_rdm1

    def kernel(self, mo_energy=None, mo_coeff=None, with_t2=WITH_T2):
        """Run the KMP2 calculation; returns ``(e_corr, t2)``."""
        if mo_energy is None:
            mo_energy = self.mo_energy
        if mo_coeff is None:
            mo_coeff = self.mo_coeff
        if mo_energy is None or mo_coeff is None:
            log = logger.Logger(self.stdout, self.verbose)
            log.warn('mo_coeff, mo_energy are not given.\n'
                     'You may need to call mf.kernel() to generate them.')
            raise RuntimeError

        # Pad orbitals so every k-point carries the same dense dimension.
        mo_coeff, mo_energy = _add_padding(self, mo_coeff, mo_energy)

        # TODO: compute e_hf for non-canonical SCF
        self.e_hf = self._scf.e_tot

        self.e_corr, self.t2 = \
            kernel(self, mo_energy, mo_coeff, verbose=self.verbose, with_t2=with_t2)
        logger.log(self, 'KMP2 energy = %.15g', self.e_corr)
        return self.e_corr, self.t2
# KRMP2 is the canonical name for restricted k-point MP2.
KRMP2 = KMP2

from pyscf.pbc import scf
# Attach the driver to KRHF; the generalized and restricted-open-shell
# k-point HF classes have no KMP2 implementation here.
scf.khf.KRHF.MP2 = lib.class_as_method(KRMP2)
scf.kghf.KGHF.MP2 = None
scf.krohf.KROHF.MP2 = None
if __name__ == '__main__':
    from pyscf.pbc import gto, scf, mp

    # Diamond: two carbon atoms per primitive cell, coordinates in Bohr.
    cell = gto.Cell()
    cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-pade'
    cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
    cell.unit = 'B'
    cell.verbose = 5
    cell.build()

    # Running HF and MP2 with 1x1x2 Monkhorst-Pack k-point mesh
    kmf = scf.KRHF(cell, kpts=cell.make_kpts([1,1,2]), exxdiv=None)
    ehf = kmf.kernel()

    mymp = mp.KMP2(kmf)
    emp2, t2 = mymp.kernel()
    # Deviation from the reference correlation energy (should be ~0).
    print(emp2 - -0.204721432828996)
| 37.784281 | 120 | 0.608365 |
import time
import numpy as np

from scipy.linalg import block_diag

from pyscf import lib
from pyscf.lib import logger, einsum
from pyscf.mp import mp2
from pyscf.pbc.lib import kpts_helper
from pyscf.lib.parameters import LARGE_DENOM
from pyscf import __config__

# Keep the t2 amplitudes by default; overridable via pyscf's __config__.
WITH_T2 = getattr(__config__, 'mp_mp2_with_t2', True)
def kernel(mp, mo_energy, mo_coeff, verbose=logger.NOTE, with_t2=WITH_T2):
    """Compute the k-point RMP2 correlation energy.

    Args:
        mp (KMP2): the KMP2 driver object;
        mo_energy (list of ndarray): padded MO energies per k-point;
        mo_coeff (list of ndarray): padded MO coefficients per k-point;
        verbose (int): verbosity level;
        with_t2 (bool): whether to build and return the t2 amplitudes.

    Returns:
        Tuple ``(emp2, t2)``; ``t2`` is None when ``with_t2`` is False.
    """
    nmo = mp.nmo
    nocc = mp.nocc
    nvir = nmo - nocc
    nkpts = mp.nkpts

    eia = np.zeros((nocc,nvir))
    eijab = np.zeros((nocc,nocc,nvir,nvir))

    fao2mo = mp._scf.with_df.ao2mo
    kconserv = mp.khelper.kconserv
    emp2 = 0.
    # Scratch buffer for the (oo|vv) integrals at fixed (ki, kj), all ka.
    oovv_ij = np.zeros((nkpts,nocc,nocc,nvir,nvir), dtype=mo_coeff[0].dtype)

    mo_e_o = [mo_energy[k][:nocc] for k in range(nkpts)]
    mo_e_v = [mo_energy[k][nocc:] for k in range(nkpts)]

    # Indices of the physical (non-padded) occupied/virtual orbitals.
    nonzero_opadding, nonzero_vpadding = padding_k_idx(mp, kind="split")

    if with_t2:
        t2 = np.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=complex)
    else:
        t2 = None

    for ki in range(nkpts):
        for kj in range(nkpts):
            # First pass: AO->MO transform the integrals for every ka.
            for ka in range(nkpts):
                # Momentum conservation fixes kb.
                kb = kconserv[ki,ka,kj]
                orbo_i = mo_coeff[ki][:,:nocc]
                orbo_j = mo_coeff[kj][:,:nocc]
                orbv_a = mo_coeff[ka][:,nocc:]
                orbv_b = mo_coeff[kb][:,nocc:]
                oovv_ij[ka] = fao2mo((orbo_i,orbv_a,orbo_j,orbv_b),
                                     (mp.kpts[ki],mp.kpts[ka],mp.kpts[kj],mp.kpts[kb]),
                                     compact=False).reshape(nocc,nvir,nocc,nvir).transpose(0,2,1,3) / nkpts
            # Second pass: assemble amplitudes and accumulate the energy.
            for ka in range(nkpts):
                kb = kconserv[ki,ka,kj]

                # Padded (fictitious) entries get a huge denominator so their
                # amplitudes vanish; physical entries get e_occ - e_vir.
                eia = LARGE_DENOM * np.ones((nocc, nvir), dtype=mo_energy[0].dtype)
                n0_ovp_ia = np.ix_(nonzero_opadding[ki], nonzero_vpadding[ka])
                eia[n0_ovp_ia] = (mo_e_o[ki][:,None] - mo_e_v[ka])[n0_ovp_ia]

                ejb = LARGE_DENOM * np.ones((nocc, nvir), dtype=mo_energy[0].dtype)
                n0_ovp_jb = np.ix_(nonzero_opadding[kj], nonzero_vpadding[kb])
                ejb[n0_ovp_jb] = (mo_e_o[kj][:,None] - mo_e_v[kb])[n0_ovp_jb]

                eijab = lib.direct_sum('ia,jb->ijab',eia,ejb)
                t2_ijab = np.conj(oovv_ij[ka]/eijab)
                if with_t2:
                    t2[ki, kj, ka] = t2_ijab
                # Direct (factor 2) minus exchange contraction.
                woovv = 2*oovv_ij[ka] - oovv_ij[kb].transpose(0,1,3,2)
                emp2 += np.einsum('ijab,ijab', t2_ijab, woovv).real

    emp2 /= nkpts

    return emp2, t2
def _padding_k_idx(nmo, nocc, kind="split"):
if kind not in ("split", "joint"):
raise ValueError("The 'kind' argument must be one of 'split', 'joint'")
if kind == "split":
indexes_o = []
indexes_v = []
else:
indexes = []
nocc = np.array(nocc)
nmo = np.array(nmo)
nvirt = nmo - nocc
dense_o = np.amax(nocc)
dense_v = np.amax(nvirt)
dense_nmo = dense_o + dense_v
for k_o, k_nmo in zip(nocc, nmo):
k_v = k_nmo - k_o
if kind == "split":
indexes_o.append(np.arange(k_o))
indexes_v.append(np.arange(dense_v - k_v, dense_v))
else:
indexes.append(np.concatenate((
np.arange(k_o),
np.arange(dense_nmo - k_v, dense_nmo),
)))
if kind == "split":
return indexes_o, indexes_v
else:
return indexes
def padding_k_idx(mp, kind="split"):
    """Non-padded orbital indices for each k-point of ``mp``; see ``_padding_k_idx``."""
    return _padding_k_idx(mp.get_nmo(per_kpoint=True), mp.get_nocc(per_kpoint=True), kind=kind)
def padded_mo_energy(mp, mo_energy):
    """
    Pads energies of active MOs.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        mo_energy (ndarray): original non-padded molecular energies;

    Returns:
        Padded molecular energies.
    """
    frozen_mask = get_frozen_mask(mp)
    padding_convention = padding_k_idx(mp, kind="joint")
    nkpts = mp.nkpts

    result = np.zeros((nkpts, mp.nmo), dtype=mo_energy[0].dtype)
    for k in range(nkpts):
        # Scatter the active (non-frozen) energies into their padded slots.
        result[np.ix_([k], padding_convention[k])] = mo_energy[k][frozen_mask[k]]

    return result
def padded_mo_coeff(mp, mo_coeff):
    """
    Pads coefficients of active MOs.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        mo_coeff (ndarray): original non-padded molecular coefficients;

    Returns:
        Padded molecular coefficients.
    """
    frozen_mask = get_frozen_mask(mp)
    padding_convention = padding_k_idx(mp, kind="joint")
    nkpts = mp.nkpts

    result = np.zeros((nkpts, mo_coeff[0].shape[0], mp.nmo), dtype=mo_coeff[0].dtype)
    for k in range(nkpts):
        # Scatter the active (non-frozen) MO columns into their padded slots.
        result[np.ix_([k], np.arange(result.shape[1]), padding_convention[k])] = mo_coeff[k][:, frozen_mask[k]]

    return result
def _frozen_sanity_check(frozen, mo_occ, kpt_idx):
frozen = np.array(frozen)
nocc = np.count_nonzero(mo_occ > 0)
nvir = len(mo_occ) - nocc
assert nocc, 'No occupied orbitals?\n\nnocc = %s\nmo_occ = %s' % (nocc, mo_occ)
all_frozen_unique = (len(frozen) - len(np.unique(frozen))) == 0
if not all_frozen_unique:
raise RuntimeError('Frozen orbital list contains duplicates!\n\nkpt_idx %s\n'
'frozen %s' % (kpt_idx, frozen))
if len(frozen) > 0 and np.max(frozen) > len(mo_occ) - 1:
raise RuntimeError('Freezing orbital not in MO list!\n\nkpt_idx %s\n'
'frozen %s\nmax orbital idx %s' % (kpt_idx, frozen, len(mo_occ) - 1))
def get_nocc(mp, per_kpoint=False):
    '''Number of occupied orbitals for k-point calculations, accounting for frozen orbitals.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        per_kpoint (bool, optional): True returns the number of occupied
            orbitals at each k-point; False gives the max of this list.

    Returns:
        nocc (int, list of int): Number of occupied orbitals.
    '''
    # Fractional occupations (e.g. from smearing) make the occupied/virtual
    # partition ambiguous, so refuse them outright.
    for i, moocc in enumerate(mp.mo_occ):
        if np.any(moocc % 1 != 0):
            raise RuntimeError("Fractional occupation numbers encountered @ kp={:d}: {}. This may have been caused by "
                               "smearing of occupation numbers in the mean-field calculation. If so, consider "
                               "executing mf.smearing_method = False; mf.mo_occ = mf.get_occ() prior to calling "
                               "this".format(i, moocc))
    if mp._nocc is not None:
        # A user-supplied value always wins.
        return mp._nocc
    elif mp.frozen is None:
        nocc = [np.count_nonzero(mp.mo_occ[ikpt]) for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen, (int, np.integer)):
        # A single integer freezes that many lowest orbitals at every k-point.
        nocc = [(np.count_nonzero(mp.mo_occ[ikpt]) - mp.frozen) for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen[0], (int, np.integer)):
        # One list of frozen orbital indices, shared by all k-points.
        [_frozen_sanity_check(mp.frozen, mp.mo_occ[ikpt], ikpt) for ikpt in range(mp.nkpts)]
        nocc = []
        for ikpt in range(mp.nkpts):
            max_occ_idx = np.max(np.where(mp.mo_occ[ikpt] > 0))
            # Only frozen indices at or below the highest occupied orbital
            # reduce the occupied count; the rest freeze virtuals.
            frozen_nocc = np.sum(np.array(mp.frozen) <= max_occ_idx)
            nocc.append(np.count_nonzero(mp.mo_occ[ikpt]) - frozen_nocc)
    elif isinstance(mp.frozen[0], (list, np.ndarray)):
        # Per-k-point lists of frozen orbital indices.
        nkpts = len(mp.frozen)
        if nkpts != mp.nkpts:
            raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
                               'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
                               '(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
        [_frozen_sanity_check(frozen, mo_occ, ikpt) for ikpt, frozen, mo_occ in zip(range(nkpts), mp.frozen, mp.mo_occ)]
        nocc = []
        for ikpt, frozen in enumerate(mp.frozen):
            max_occ_idx = np.max(np.where(mp.mo_occ[ikpt] > 0))
            frozen_nocc = np.sum(np.array(frozen) <= max_occ_idx)
            nocc.append(np.count_nonzero(mp.mo_occ[ikpt]) - frozen_nocc)
    else:
        raise NotImplementedError

    assert any(np.array(nocc) > 0), ('Must have occupied orbitals! \n\nnocc %s\nfrozen %s\nmo_occ %s' %
                                     (nocc, mp.frozen, mp.mo_occ))

    if not per_kpoint:
        # The dense (padded) occupied dimension is the maximum over k-points.
        nocc = np.amax(nocc)

    return nocc
def get_nmo(mp, per_kpoint=False):
    '''Number of orbitals for k-point calculations, accounting for frozen orbitals.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.
        per_kpoint (bool, optional): True returns the number of orbitals at each
            k-point; False returns max(nocc) + max(nvir) over all k-points.

    Returns:
        nmo (int, list of int): Number of orbitals.
    '''
    if mp._nmo is not None:
        # A user-supplied value always wins.
        return mp._nmo

    if mp.frozen is None:
        nmo = [len(mp.mo_occ[ikpt]) for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen, (int, np.integer)):
        # A single integer freezes that many orbitals at every k-point.
        nmo = [len(mp.mo_occ[ikpt]) - mp.frozen for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen[0], (int, np.integer)):
        # One list of frozen orbital indices, shared by all k-points.
        [_frozen_sanity_check(mp.frozen, mp.mo_occ[ikpt], ikpt) for ikpt in range(mp.nkpts)]
        nmo = [len(mp.mo_occ[ikpt]) - len(mp.frozen) for ikpt in range(mp.nkpts)]
    elif isinstance(mp.frozen, (list, np.ndarray)):
        # Per-k-point lists of frozen orbital indices.
        nkpts = len(mp.frozen)
        if nkpts != mp.nkpts:
            raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
                               'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
                               '(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
        [_frozen_sanity_check(fro, mo_occ, ikpt) for ikpt, fro, mo_occ in zip(range(nkpts), mp.frozen, mp.mo_occ)]
        nmo = [len(mp.mo_occ[ikpt]) - len(mp.frozen[ikpt]) for ikpt in range(nkpts)]
    else:
        raise NotImplementedError

    assert all(np.array(nmo) > 0), ('Must have a positive number of orbitals!\n\nnmo %s\nfrozen %s\nmo_occ %s' %
                                    (nmo, mp.frozen, mp.mo_occ))

    if not per_kpoint:
        # Depending on whether there are more occupied bands, we want to make sure that
        # nmo has enough room for max(nocc) + max(nvir) number of orbitals for occupied
        # and virtual space
        nocc = mp.get_nocc(per_kpoint=True)
        nmo = np.max(nocc) + np.max(np.array(nmo) - np.array(nocc))

    return nmo
def get_frozen_mask(mp):
    '''Boolean mask for orbitals in k-point post-HF method.

    Creates a boolean mask to remove frozen orbitals and keep other orbitals for post-HF
    calculations.

    Args:
        mp (:class:`MP2`): An instantiation of an SCF or post-Hartree-Fock object.

    Returns:
        moidx (list of :obj:`ndarray` of bool): Boolean mask of orbitals to include.
    '''
    # The deprecated ``np.bool`` alias was removed in NumPy 1.24; the builtin
    # ``bool`` is the supported equivalent.
    moidx = [np.ones(x.size, dtype=bool) for x in mp.mo_occ]
    if mp.frozen is None:
        pass
    elif isinstance(mp.frozen, (int, np.integer)):
        # A single integer freezes the lowest-lying orbitals at every k-point.
        for idx in moidx:
            idx[:mp.frozen] = False
    elif isinstance(mp.frozen[0], (int, np.integer)):
        # One list of orbital indices, applied identically at every k-point.
        frozen = list(mp.frozen)
        for idx in moidx:
            idx[frozen] = False
    elif isinstance(mp.frozen[0], (list, np.ndarray)):
        # Per-k-point lists of frozen orbital indices.
        nkpts = len(mp.frozen)
        if nkpts != mp.nkpts:
            raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
                               'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
                               '(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
        [_frozen_sanity_check(fro, mo_occ, ikpt) for ikpt, fro, mo_occ in zip(range(nkpts), mp.frozen, mp.mo_occ)]
        for ikpt, kpt_occ in enumerate(moidx):
            kpt_occ[mp.frozen[ikpt]] = False
    else:
        raise NotImplementedError

    return moidx
def _add_padding(mp, mo_coeff, mo_energy):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
nmo = mp.nmo
nocc = mp.nocc
nvir = nmo - nocc
nkpts = mp.nkpts
if not np.all([x.shape[0] == nmo for x in mo_coeff]):
mo_coeff = padded_mo_coeff(mp, mo_coeff)
if not np.all([x.shape[0] == nmo for x in mo_energy]):
mo_energy = padded_mo_energy(mp, mo_energy)
return mo_coeff, mo_energy
def make_rdm1(mp, t2=None, kind="compact"):
    r"""Spin-traced one-particle density matrix in the MO basis representation.

    The occupied-virtual orbital response is not included.

    Args:
        mp (KMP2): a KMP2 kernel object;
        t2 (ndarray): a t2 MP2 tensor;
        kind (str): either 'compact' or 'padded' - defines behavior for k-dependent MO basis sizes;

    Returns:
        A k-dependent single-particle density matrix.
    """
    if kind not in ("compact", "padded"):
        raise ValueError("The 'kind' argument should be either 'compact' or 'padded'")
    d_imds = _gamma1_intermediates(mp, t2=t2)
    result = []
    padding_idxs = padding_k_idx(mp, kind="joint")
    for (oo, vv), idxs in zip(zip(*d_imds), padding_idxs):
        # Add the identity to the occupied block (mean-field occupations).
        oo += np.eye(*oo.shape)
        d = block_diag(oo, vv)
        # Hermitize by adding the conjugate transpose.
        d += d.conj().T
        if kind == "padded":
            result.append(d)
        else:
            # Strip the fictitious padded rows/columns.
            result.append(d[np.ix_(idxs, idxs)])
    return result
def _gamma1_intermediates(mp, t2=None):
    """Occupied-occupied and virtual-virtual 1-RDM blocks from the t2 amplitudes.

    Args:
        mp (KMP2): a KMP2 kernel object;
        t2 (ndarray): MP2 amplitudes; taken from ``mp.t2`` when omitted.

    Returns:
        Tuple ``(-dm1occ, dm1vir)`` of per-k-point density-matrix blocks.
    """
    if t2 is None:
        t2 = mp.t2
    if t2 is None:
        raise NotImplementedError("Run kmp2.kernel with `with_t2=True`")
    nmo = mp.nmo
    nocc = mp.nocc
    nvir = nmo - nocc
    nkpts = mp.nkpts
    dtype = t2.dtype

    dm1occ = np.zeros((nkpts, nocc, nocc), dtype=dtype)
    dm1vir = np.zeros((nkpts, nvir, nvir), dtype=dtype)

    for ki in range(nkpts):
        for kj in range(nkpts):
            for ka in range(nkpts):
                # Momentum conservation fixes the fourth k-point index.
                kb = mp.khelper.kconserv[ki, ka, kj]
                # Direct (factor 2) minus exchange contractions.
                dm1vir[kb] += einsum('ijax,ijay->yx', t2[ki][kj][ka].conj(), t2[ki][kj][ka]) * 2 -\
                              einsum('ijax,ijya->yx', t2[ki][kj][ka].conj(), t2[ki][kj][kb])
                dm1occ[kj] += einsum('ixab,iyab->xy', t2[ki][kj][ka].conj(), t2[ki][kj][ka]) * 2 -\
                              einsum('ixab,iyba->xy', t2[ki][kj][ka].conj(), t2[ki][kj][kb])
    return -dm1occ, dm1vir
class KMP2(mp2.MP2):
    # NOTE(review): this duplicated class body appears truncated/corrupted --
    # __init__ breaks off after ``self.frozen = frozen`` and the stray
    # ``y, mo_coeff, ...)`` line below looks like the tail of a
    # ``kernel(...)`` call.  Restore from the complete implementation.
    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        # Fall back to the mean-field orbitals when none are supplied.
        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ

        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        self.frozen = frozen
        y, mo_coeff, verbose=self.verbose, with_t2=with_t2)
        logger.log(self, 'KMP2 energy = %.15g', self.e_corr)
        return self.e_corr, self.t2
# KRMP2 is the canonical name for restricted k-point MP2.
KRMP2 = KMP2

from pyscf.pbc import scf
# Attach the driver to KRHF; the generalized and restricted-open-shell
# k-point HF classes have no KMP2 implementation here.
scf.khf.KRHF.MP2 = lib.class_as_method(KRMP2)
scf.kghf.KGHF.MP2 = None
scf.krohf.KROHF.MP2 = None
if __name__ == '__main__':
    from pyscf.pbc import gto, scf, mp

    # Diamond: two carbon atoms per primitive cell, coordinates in Bohr.
    cell = gto.Cell()
    cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-pade'
    cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
    cell.unit = 'B'
    cell.verbose = 5
    cell.build()

    # Running HF and MP2 with 1x1x2 Monkhorst-Pack k-point mesh
    kmf = scf.KRHF(cell, kpts=cell.make_kpts([1,1,2]), exxdiv=None)
    ehf = kmf.kernel()

    mymp = mp.KMP2(kmf)
    emp2, t2 = mymp.kernel()
    # Deviation from the reference correlation energy (should be ~0).
    print(emp2 - -0.204721432828996)
| true | true |
f7f4f6a738168913062c259772d5a47a91133493 | 498 | py | Python | array/89 merge sorted array.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | array/89 merge sorted array.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | array/89 merge sorted array.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | class Solution(object):
def merge(self, nums1, m: int, nums2, n: int):
"""
Do not return anything, modify nums1 in-place instead.
"""
i = m - 1
j = n - 1
while i >= 0 and j >= 0:
if nums1[i] >= nums2[j]:
nums1[i + j + 1] = nums1[i]
i -= 1
else:
nums1[i + j + 1] = nums2[j]
j -= 1
while j >= 0:
nums1[j] = nums2[j]
j -= 1
| 21.652174 | 62 | 0.365462 | class Solution(object):
def merge(self, nums1, m: int, nums2, n: int):
i = m - 1
j = n - 1
while i >= 0 and j >= 0:
if nums1[i] >= nums2[j]:
nums1[i + j + 1] = nums1[i]
i -= 1
else:
nums1[i + j + 1] = nums2[j]
j -= 1
while j >= 0:
nums1[j] = nums2[j]
j -= 1
| true | true |
f7f4f6a9109ee9b63302fe2a0cad9efe27dbbf93 | 1,820 | py | Python | softlearning/environments/dm_control/suite/wrappers/action_scale_test.py | kpertsch/softlearning | 51dc7ccfea077aa7f8cca6fe3c70aff9ae740242 | [
"MIT"
] | 1 | 2020-02-20T21:12:34.000Z | 2020-02-20T21:12:34.000Z | softlearning/environments/dm_control/suite/wrappers/action_scale_test.py | AmiMem/softlearning | e437995b707771f745e1fe4ca464e076292756ca | [
"MIT"
] | 9 | 2020-09-26T00:34:00.000Z | 2022-03-12T00:10:52.000Z | softlearning/environments/dm_control/suite/wrappers/action_scale_test.py | AmiMem/softlearning | e437995b707771f745e1fe4ca464e076292756ca | [
"MIT"
] | null | null | null | import pytest
import numpy as np
from dm_control import suite
from action_scale import Wrapper as ActionScaleWrapper
def test_scale_action():
    """Wrapping rescales the action spec to [-1, 1] while reproducing the
    unwrapped environment's rollout for equivalent actions."""
    seed = 0
    unwrapped_env = suite.load(
        domain_name="quadruped", task_name="run",
        task_kwargs={"random": seed})

    # The raw quadruped action bounds are not +/-1.
    assert np.any(np.not_equal(unwrapped_env.action_spec().minimum, -1.0))
    assert np.any(np.not_equal(unwrapped_env.action_spec().maximum, 1.0))

    wrapped_env = ActionScaleWrapper(
        suite.load(
            domain_name="quadruped",
            task_name="run",
            task_kwargs={"random": seed}),
        minimum=-1,
        maximum=1)
    # After wrapping, the spec advertises exactly [-1, 1].
    assert np.all(np.equal(wrapped_env.action_spec().minimum, -1.0))
    assert np.all(np.equal(wrapped_env.action_spec().maximum, 1.0))

    timestep_unwrapped = unwrapped_env.reset()
    timestep_wrapped = wrapped_env.reset()

    # Same seed => identical initial observations.
    assert (set(timestep_unwrapped.observation.keys())
            == set(timestep_wrapped.observation.keys()))
    for key in timestep_unwrapped.observation.keys():
        np.testing.assert_allclose(
            timestep_unwrapped.observation[key],
            timestep_wrapped.observation[key])

    timestep_unwrapped = unwrapped_env.step(
        unwrapped_env.action_spec().maximum)

    assert np.any(
        wrapped_env.action_spec().maximum < unwrapped_env.action_spec().maximum)
    # The unscaled maximum is out of range for the wrapped env and is rejected.
    with pytest.raises(AssertionError):
        wrapped_env.step(unwrapped_env.action_spec().maximum)

    # The scaled maximum action (all ones) reproduces the unwrapped step.
    timestep_wrapped = wrapped_env.step(
        np.ones_like(unwrapped_env.action_spec().maximum))
    for key in timestep_unwrapped.observation.keys():
        np.testing.assert_allclose(
            timestep_unwrapped.observation[key],
            timestep_wrapped.observation[key])
    assert np.allclose(timestep_wrapped.reward, timestep_unwrapped.reward)
| 33.703704 | 80 | 0.693407 | import pytest
import numpy as np
from dm_control import suite
from action_scale import Wrapper as ActionScaleWrapper
def test_scale_action():
    """Wrapping rescales the action spec to [-1, 1] while reproducing the
    unwrapped environment's rollout for equivalent actions."""
    seed = 0
    unwrapped_env = suite.load(
        domain_name="quadruped", task_name="run",
        task_kwargs={"random": seed})

    # The raw quadruped action bounds are not +/-1.
    assert np.any(np.not_equal(unwrapped_env.action_spec().minimum, -1.0))
    assert np.any(np.not_equal(unwrapped_env.action_spec().maximum, 1.0))

    wrapped_env = ActionScaleWrapper(
        suite.load(
            domain_name="quadruped",
            task_name="run",
            task_kwargs={"random": seed}),
        minimum=-1,
        maximum=1)
    # After wrapping, the spec advertises exactly [-1, 1].
    assert np.all(np.equal(wrapped_env.action_spec().minimum, -1.0))
    assert np.all(np.equal(wrapped_env.action_spec().maximum, 1.0))

    timestep_unwrapped = unwrapped_env.reset()
    timestep_wrapped = wrapped_env.reset()

    # Same seed => identical initial observations.
    assert (set(timestep_unwrapped.observation.keys())
            == set(timestep_wrapped.observation.keys()))
    for key in timestep_unwrapped.observation.keys():
        np.testing.assert_allclose(
            timestep_unwrapped.observation[key],
            timestep_wrapped.observation[key])

    timestep_unwrapped = unwrapped_env.step(
        unwrapped_env.action_spec().maximum)

    assert np.any(
        wrapped_env.action_spec().maximum < unwrapped_env.action_spec().maximum)
    # The unscaled maximum is out of range for the wrapped env and is rejected.
    with pytest.raises(AssertionError):
        wrapped_env.step(unwrapped_env.action_spec().maximum)

    # The scaled maximum action (all ones) reproduces the unwrapped step.
    timestep_wrapped = wrapped_env.step(
        np.ones_like(unwrapped_env.action_spec().maximum))
    for key in timestep_unwrapped.observation.keys():
        np.testing.assert_allclose(
            timestep_unwrapped.observation[key],
            timestep_wrapped.observation[key])
    assert np.allclose(timestep_wrapped.reward, timestep_unwrapped.reward)
| true | true |
f7f4f8349884d051b5fc797ef4af297ac7fe74ca | 2,038 | py | Python | docs/conf.py | abourget/kombu | ed67ff19660d43cb91274286b03254cc0160b6c3 | [
"BSD-3-Clause"
] | 2 | 2017-06-02T10:18:09.000Z | 2019-06-14T08:48:02.000Z | docs/conf.py | abourget/kombu | ed67ff19660d43cb91274286b03254cc0160b6c3 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | abourget/kombu | ed67ff19660d43cb91274286b03254cc0160b6c3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.join(os.pardir, "tests"))
import kombu

# General configuration
# ---------------------

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Kombu'
copyright = u'2009-2010, Ask Solem'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(map(str, kombu.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
release = kombu.__version__

# Directories (relative to the source root) to skip when building.
exclude_trees = ['.build']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
html_use_smartypants = True

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
html_use_index = True

# (source start file, target name, title, author, document class).
latex_documents = [
    ('index', 'Kombu.tex', ur'Kombu Documentation',
     ur'Ask Solem', 'manual'),
]

html_theme = "celery"
html_theme_path = ["_theme"]
html_sidebars = {
    'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
    '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
           'sourcelink.html', 'searchbox.html'],
}
| 28.305556 | 78 | 0.715407 |
import sys
import os

# Make the sibling "tests" directory importable, then import the package so
# its version metadata is available below.
sys.path.append(os.path.join(os.pardir, "tests"))
import kombu

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']
templates_path = ['.templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Kombu'
copyright = u'2009-2010, Ask Solem'

# The version info for the project, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(map(str, kombu.VERSION[0:2]))
# The full version, including alpha/beta/rc tags.
release = kombu.__version__

exclude_trees = ['.build']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
html_use_smartypants = True

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
html_use_index = True

latex_documents = [
    ('index', 'Kombu.tex', ur'Kombu Documentation',
     ur'Ask Solem', 'manual'),
]

html_theme = "celery"
html_theme_path = ["_theme"]
html_sidebars = {
    'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
    '**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
           'sourcelink.html', 'searchbox.html'],
}
| false | true |
f7f4f92d1525b94014a4caa28e4fb516442ce3cb | 523 | py | Python | Models/SoundModel.py | xuhaoteoh/car-sound-classification-with-keras | 7c71c6e8b200aac24da78462b2820baceec9e087 | [
"MIT"
] | null | null | null | Models/SoundModel.py | xuhaoteoh/car-sound-classification-with-keras | 7c71c6e8b200aac24da78462b2820baceec9e087 | [
"MIT"
] | null | null | null | Models/SoundModel.py | xuhaoteoh/car-sound-classification-with-keras | 7c71c6e8b200aac24da78462b2820baceec9e087 | [
"MIT"
] | null | null | null | import librosa
import os
class SoundModel:
def split_file(self, file, output_file, duration=0.4):
if os.path.isfile(file):
os.system(
"ffmpeg -i " + file + " -f segment -segment_time " + str(
duration) + " -c copy " + output_file)
else:
print(" *** File", file, "does not exist. Skipping.")
return
def load_file(self, file):
# loading as mono
data, sr = librosa.load(file, sr=None)
return data, sr
| 26.15 | 73 | 0.529637 | import librosa
import os
class SoundModel:
def split_file(self, file, output_file, duration=0.4):
if os.path.isfile(file):
os.system(
"ffmpeg -i " + file + " -f segment -segment_time " + str(
duration) + " -c copy " + output_file)
else:
print(" *** File", file, "does not exist. Skipping.")
return
def load_file(self, file):
data, sr = librosa.load(file, sr=None)
return data, sr
| true | true |
f7f4f9a6cc861bdd92c5e8337e3919b013fcee67 | 295 | py | Python | source/likelihood/__init__.py | nayyarv/PyCudaIntro | 9bf445633d697a15d5fefd79a185724b3f32ce39 | [
"MIT"
] | 1 | 2018-07-26T11:30:55.000Z | 2018-07-26T11:30:55.000Z | source/likelihood/__init__.py | nayyarv/PyCudaIntro | 9bf445633d697a15d5fefd79a185724b3f32ce39 | [
"MIT"
] | null | null | null | source/likelihood/__init__.py | nayyarv/PyCudaIntro | 9bf445633d697a15d5fefd79a185724b3f32ce39 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Varun Nayyar <nayyarv@gmail.com>"
from .simple import SingleCoreLL, SingleCoreLLFast
try:
from .scikitLL import ScikitLL
except ImportError:
pass
try:
from .cudaLL import GPU_LL as GPULL
except ImportError:
pass
| 16.388889 | 50 | 0.708475 |
__author__ = "Varun Nayyar <nayyarv@gmail.com>"
from .simple import SingleCoreLL, SingleCoreLLFast
try:
from .scikitLL import ScikitLL
except ImportError:
pass
try:
from .cudaLL import GPU_LL as GPULL
except ImportError:
pass
| true | true |
f7f4f9c2c66e3048b4ff753a5631c61494cefd84 | 8,389 | py | Python | petridish/analysis/old/model_analysis.py | Bhaskers-Blu-Org2/petridishnn | bf800c695a7f0774106968a0fadc5150074269ad | [
"MIT"
] | 121 | 2019-06-04T08:30:53.000Z | 2021-12-17T13:27:54.000Z | petridish/analysis/old/model_analysis.py | arita37/petridishnn | bf800c695a7f0774106968a0fadc5150074269ad | [
"MIT"
] | 1 | 2019-11-21T04:29:09.000Z | 2019-11-21T04:29:09.000Z | petridish/analysis/old/model_analysis.py | arita37/petridishnn | bf800c695a7f0774106968a0fadc5150074269ad | [
"MIT"
] | 22 | 2019-10-10T15:35:47.000Z | 2021-09-13T12:46:09.000Z | import numpy as np
import re
import os
import bisect
from petridish.utils.geometry import _convex_hull_from_points
from functools import partial
import copy
import subprocess
from tensorpack.utils.serialize import loads, dumps
from petridish.analysis.old.common import (
img_dir, ann_models_logs, experiment_list_fn, exp_dir_to_eidx,
for_cust_exps, for_trial_stdouts, cust_exps_str_to_list,
ExperimentRecord, cache_dir, filter_xy)
INCLUDE_AUX_MA = False
REQUIRED_EPOCH = 500
FORCE_LOAD = False
def multi_add_from_log(log_fn):
multi_add = 0.0
n_params = -1.0
with open(log_fn, 'rt') as fin:
for line in fin:
reret = re.search(r'(.*)multi-add.* ([0-9\.]*)$', line.strip())
if reret:
try:
prefix_reret = re.search(r'aux_preprocess', reret.group(1))
if not prefix_reret or INCLUDE_AUX_MA:
multi_add += float(reret.group(2))
continue
except:
pass
reret = re.search(r'#params=([0-9]*),', line)
if reret:
n_params = float(reret.group(1))
break
return multi_add, n_params
def val_err_from_log(log_fn):
def tail(f, n):
cmd = "egrep \"Epoch ([0-9]*)|val_err: ([0-9\\.]*)$\" {}".format(f)
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE)
lines = proc.stdout.readlines()
return [line.decode('utf-8') for line in lines]
lines = tail(log_fn, 100)
min_ve_epoch = -1
epoch = -1
min_ve = 2.0
for line in lines:
reret = re.search(r'Epoch ([0-9]*)|val_err: ([0-9\.]*)', line)
if reret:
if reret.group(1) is not None:
try:
new_epoch = int(reret.group(1))
if min_ve_epoch == -1 and min_ve < 1.0:
min_ve_epoch = new_epoch - 1
epoch = new_epoch
except:
pass
elif reret.group(2) is not None:
try:
ve = float(reret.group(2))
if ve <= min_ve:
min_ve = ve
min_ve_epoch = epoch
except:
pass
return min_ve, min_ve_epoch
def perf_from_log(log_fn):
"""
Args:
log_fn : a stdout file xxx/stdout/triali/stdout.txt
"""
dn = os.path.dirname(log_fn)
cache_fn = dn.replace('/', '__')
cache_fn = os.path.join(cache_dir, cache_fn)
if os.path.exists(cache_fn):
with open(cache_fn, 'rb') as fin:
ss = fin.read()
try:
ret = loads(ss)
except:
pass
if ret and not FORCE_LOAD:
return ret
if os.path.exists(log_fn):
min_ve, min_ve_epoch = val_err_from_log(log_fn)
multi_add, n_params = multi_add_from_log(log_fn)
ret = (min_ve, multi_add * 2. * 1e-9, min_ve_epoch)
with open(cache_fn, 'wb') as fout:
fout.write(dumps(ret))
return ret
else:
return 2.0, -1.0, -1
def init_state_for_model_perf():
# min_ve, multi_add, epoch when min_ve
return []
def func_stdout_for_model_perf(log_fn, state, context):
if context is not None:
record = copy.deepcopy(context)
else:
context = ExperimentRecord()
ve, fp, ep = perf_from_log(log_fn)
record.set_new(ve=ve, fp=fp, ep=ep)
if ve < 1.0:
print(record)
state.append(record)
return state
def merge_state(state, required_epoch=REQUIRED_EPOCH):
state = [x for x in state if x.ep > required_epoch]
state.sort(key=lambda x : x.eidx)
cnt = 0.0
avg_ve = 0.0
min_ve = 2.0
ret = dict()
for idx, x in enumerate(state):
avg_ve += x.ve
min_ve = min(min_ve, x.ve)
cnt += 1.0
if idx == len(state) - 1 or x.eidx != state[idx+1].eidx:
avg_ve /= cnt + int(cnt < 1)
ret[x.eidx] = (x.fp, avg_ve, min_ve)
cnt = 0.0
avg_ve = 0.0
min_ve = 2.0
return ret
def amoeba_A_scatter_xys():
def xys_to_xs_ys(xys, name):
return xys[::2], xys[1::2], name
amoeba_a_xys = xys_to_xs_ys(
[
0.8243604811532328, 3.717231330127244,
0.8561252293632995, 3.576010697624216,
0.9252184498039169, 3.4887507045800703,
0.9434581389491863, 3.4586750296823094,
0.9624620126404664, 3.6313001451135123
],
name='Amoeba-A')
amoeba_rl_xys = xys_to_xs_ys(
[
1.092913063813967, 3.527370087427893,
1.1475961526929952, 3.474680690308576,
1.169610712015639, 3.5061571303503114,
1.2213824160800164, 3.491012556516316,
1.2270612714821967, 3.566073420241536,
],
name='Amoeba-RL')
amoeba_rand_xys = xys_to_xs_ys(
[
0.950659606874303, 3.9451434944772914,
0.9871001283235192, 3.925532782461653,
0.9842045740738521, 3.94656104961443,
0.9486779079668519, 4.012716021251334,
0.9352593454301237, 4.012749601237663,
],
name='Amoeba-Rand')
return [amoeba_a_xys, amoeba_rl_xys, amoeba_rand_xys]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--find_min', default=False, action='store_true')
parser.add_argument('--stdout_fn', type=str, default=None)
parser.add_argument('--log_root', type=str, default=None)
parser.add_argument('--app_dir', type=str, default=None)
parser.add_argument('--cust_exps', type=str, default=None)
parser.add_argument('--include_aux_ma', default=False, action='store_true')
parser.add_argument('--force_load', default=False, action='store_true')
parser.add_argument('--required_epochs', default=REQUIRED_EPOCH, type=int)
parser.add_argument('--plot_scatter', default=False, action='store_true')
parser.add_argument('--plot_x_min', default=None, type=float)
parser.add_argument('--plot_x_max', default=None, type=float)
parser.add_argument('--plot_y_min', default=None, type=float)
parser.add_argument('--plot_y_max', default=None, type=float)
args = parser.parse_args()
args.cust_exps = cust_exps_str_to_list(args.cust_exps)
FORCE_LOAD = args.force_load
REQUIRED_EPOCH = args.required_epochs
INCLUDE_AUX_MA = args.include_aux_ma
if args.cust_exps:
func_exp_dir_for_model_perf = partial(
for_trial_stdouts,
func_stdout=func_stdout_for_model_perf
)
state = for_cust_exps(
args.cust_exps,
func_exp_dir_for_model_perf,
init_state_for_model_perf())
ret_dict = merge_state(state)
#
if args.plot_scatter:
import matplotlib.pyplot as plt
plt.close('all')
fig, ax = plt.subplots()
eindices = list(ret_dict.keys())
fp_idx, ve_idx, min_ve_idx = 0, 1, 2
xs = [ret_dict[eidx][fp_idx] for eidx in eindices]
y_multiplier = 100.
ys = [ret_dict[eidx][ve_idx] * y_multiplier for eidx in eindices]
remain_indices = filter_xy(xs, ys, args)
xs = [xs[i] for i in remain_indices]
ys = [ys[i] for i in remain_indices]
eindices = [eindices[i] for i in remain_indices]
ax.scatter(xs, ys, label='Ours')
for i, eidx in enumerate(eindices):
ax.annotate(str(eidx), (xs[i], ys[i]))
baselines = amoeba_A_scatter_xys()
for baseline in baselines:
_xs, _ys, _name = baseline
ax.scatter(_xs, _ys, label=_name)
plt.grid()
plt.xlabel('GFLOPS')
plt.ylabel('Test Error')
plt.legend()
plt.savefig(
os.path.join(
img_dir,
'cust_exps_{}_scatter.png'.format(
'_'.join(args.cust_exps))
),
dpi=plt.gcf().dpi, bbox_inches='tight'
)
print(ret_dict)
with open('./temp/model_analysis_ret.bin', 'wb') as fout:
fout.write(dumps(ret_dict))
| 31.656604 | 79 | 0.573847 | import numpy as np
import re
import os
import bisect
from petridish.utils.geometry import _convex_hull_from_points
from functools import partial
import copy
import subprocess
from tensorpack.utils.serialize import loads, dumps
from petridish.analysis.old.common import (
img_dir, ann_models_logs, experiment_list_fn, exp_dir_to_eidx,
for_cust_exps, for_trial_stdouts, cust_exps_str_to_list,
ExperimentRecord, cache_dir, filter_xy)
INCLUDE_AUX_MA = False
REQUIRED_EPOCH = 500
FORCE_LOAD = False
def multi_add_from_log(log_fn):
multi_add = 0.0
n_params = -1.0
with open(log_fn, 'rt') as fin:
for line in fin:
reret = re.search(r'(.*)multi-add.* ([0-9\.]*)$', line.strip())
if reret:
try:
prefix_reret = re.search(r'aux_preprocess', reret.group(1))
if not prefix_reret or INCLUDE_AUX_MA:
multi_add += float(reret.group(2))
continue
except:
pass
reret = re.search(r'#params=([0-9]*),', line)
if reret:
n_params = float(reret.group(1))
break
return multi_add, n_params
def val_err_from_log(log_fn):
def tail(f, n):
cmd = "egrep \"Epoch ([0-9]*)|val_err: ([0-9\\.]*)$\" {}".format(f)
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE)
lines = proc.stdout.readlines()
return [line.decode('utf-8') for line in lines]
lines = tail(log_fn, 100)
min_ve_epoch = -1
epoch = -1
min_ve = 2.0
for line in lines:
reret = re.search(r'Epoch ([0-9]*)|val_err: ([0-9\.]*)', line)
if reret:
if reret.group(1) is not None:
try:
new_epoch = int(reret.group(1))
if min_ve_epoch == -1 and min_ve < 1.0:
min_ve_epoch = new_epoch - 1
epoch = new_epoch
except:
pass
elif reret.group(2) is not None:
try:
ve = float(reret.group(2))
if ve <= min_ve:
min_ve = ve
min_ve_epoch = epoch
except:
pass
return min_ve, min_ve_epoch
def perf_from_log(log_fn):
dn = os.path.dirname(log_fn)
cache_fn = dn.replace('/', '__')
cache_fn = os.path.join(cache_dir, cache_fn)
if os.path.exists(cache_fn):
with open(cache_fn, 'rb') as fin:
ss = fin.read()
try:
ret = loads(ss)
except:
pass
if ret and not FORCE_LOAD:
return ret
if os.path.exists(log_fn):
min_ve, min_ve_epoch = val_err_from_log(log_fn)
multi_add, n_params = multi_add_from_log(log_fn)
ret = (min_ve, multi_add * 2. * 1e-9, min_ve_epoch)
with open(cache_fn, 'wb') as fout:
fout.write(dumps(ret))
return ret
else:
return 2.0, -1.0, -1
def init_state_for_model_perf():
return []
def func_stdout_for_model_perf(log_fn, state, context):
if context is not None:
record = copy.deepcopy(context)
else:
context = ExperimentRecord()
ve, fp, ep = perf_from_log(log_fn)
record.set_new(ve=ve, fp=fp, ep=ep)
if ve < 1.0:
print(record)
state.append(record)
return state
def merge_state(state, required_epoch=REQUIRED_EPOCH):
state = [x for x in state if x.ep > required_epoch]
state.sort(key=lambda x : x.eidx)
cnt = 0.0
avg_ve = 0.0
min_ve = 2.0
ret = dict()
for idx, x in enumerate(state):
avg_ve += x.ve
min_ve = min(min_ve, x.ve)
cnt += 1.0
if idx == len(state) - 1 or x.eidx != state[idx+1].eidx:
avg_ve /= cnt + int(cnt < 1)
ret[x.eidx] = (x.fp, avg_ve, min_ve)
cnt = 0.0
avg_ve = 0.0
min_ve = 2.0
return ret
def amoeba_A_scatter_xys():
def xys_to_xs_ys(xys, name):
return xys[::2], xys[1::2], name
amoeba_a_xys = xys_to_xs_ys(
[
0.8243604811532328, 3.717231330127244,
0.8561252293632995, 3.576010697624216,
0.9252184498039169, 3.4887507045800703,
0.9434581389491863, 3.4586750296823094,
0.9624620126404664, 3.6313001451135123
],
name='Amoeba-A')
amoeba_rl_xys = xys_to_xs_ys(
[
1.092913063813967, 3.527370087427893,
1.1475961526929952, 3.474680690308576,
1.169610712015639, 3.5061571303503114,
1.2213824160800164, 3.491012556516316,
1.2270612714821967, 3.566073420241536,
],
name='Amoeba-RL')
amoeba_rand_xys = xys_to_xs_ys(
[
0.950659606874303, 3.9451434944772914,
0.9871001283235192, 3.925532782461653,
0.9842045740738521, 3.94656104961443,
0.9486779079668519, 4.012716021251334,
0.9352593454301237, 4.012749601237663,
],
name='Amoeba-Rand')
return [amoeba_a_xys, amoeba_rl_xys, amoeba_rand_xys]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--find_min', default=False, action='store_true')
parser.add_argument('--stdout_fn', type=str, default=None)
parser.add_argument('--log_root', type=str, default=None)
parser.add_argument('--app_dir', type=str, default=None)
parser.add_argument('--cust_exps', type=str, default=None)
parser.add_argument('--include_aux_ma', default=False, action='store_true')
parser.add_argument('--force_load', default=False, action='store_true')
parser.add_argument('--required_epochs', default=REQUIRED_EPOCH, type=int)
parser.add_argument('--plot_scatter', default=False, action='store_true')
parser.add_argument('--plot_x_min', default=None, type=float)
parser.add_argument('--plot_x_max', default=None, type=float)
parser.add_argument('--plot_y_min', default=None, type=float)
parser.add_argument('--plot_y_max', default=None, type=float)
args = parser.parse_args()
args.cust_exps = cust_exps_str_to_list(args.cust_exps)
FORCE_LOAD = args.force_load
REQUIRED_EPOCH = args.required_epochs
INCLUDE_AUX_MA = args.include_aux_ma
if args.cust_exps:
func_exp_dir_for_model_perf = partial(
for_trial_stdouts,
func_stdout=func_stdout_for_model_perf
)
state = for_cust_exps(
args.cust_exps,
func_exp_dir_for_model_perf,
init_state_for_model_perf())
ret_dict = merge_state(state)
if args.plot_scatter:
import matplotlib.pyplot as plt
plt.close('all')
fig, ax = plt.subplots()
eindices = list(ret_dict.keys())
fp_idx, ve_idx, min_ve_idx = 0, 1, 2
xs = [ret_dict[eidx][fp_idx] for eidx in eindices]
y_multiplier = 100.
ys = [ret_dict[eidx][ve_idx] * y_multiplier for eidx in eindices]
remain_indices = filter_xy(xs, ys, args)
xs = [xs[i] for i in remain_indices]
ys = [ys[i] for i in remain_indices]
eindices = [eindices[i] for i in remain_indices]
ax.scatter(xs, ys, label='Ours')
for i, eidx in enumerate(eindices):
ax.annotate(str(eidx), (xs[i], ys[i]))
baselines = amoeba_A_scatter_xys()
for baseline in baselines:
_xs, _ys, _name = baseline
ax.scatter(_xs, _ys, label=_name)
plt.grid()
plt.xlabel('GFLOPS')
plt.ylabel('Test Error')
plt.legend()
plt.savefig(
os.path.join(
img_dir,
'cust_exps_{}_scatter.png'.format(
'_'.join(args.cust_exps))
),
dpi=plt.gcf().dpi, bbox_inches='tight'
)
print(ret_dict)
with open('./temp/model_analysis_ret.bin', 'wb') as fout:
fout.write(dumps(ret_dict))
| true | true |
f7f4fb118a6e17906f547c5b09a169363941b7c3 | 442 | py | Python | test_fizzbuzz.py | simplymanas/python-learning | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | [
"Apache-2.0"
] | 4 | 2020-08-18T05:29:38.000Z | 2021-03-13T19:01:10.000Z | test_fizzbuzz.py | simplymanas/python-learning | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | [
"Apache-2.0"
] | null | null | null | test_fizzbuzz.py | simplymanas/python-learning | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | [
"Apache-2.0"
] | 1 | 2020-08-29T12:57:17.000Z | 2020-08-29T12:57:17.000Z | import unittest
from fizzbuzz import checkio
class TestFizzBuzz(unittest.TestCase):
def test_fizz(self):
for i in [3, 6, 9, 18]:
print('testing', i)
assert checkio(i) == 'Fizz'
def test_buzz(self):
for i in [5, 10, 50]:
print('testing', i)
assert checkio(i) == 'Buzz'
def test_fizzbuzz(self):
for i in [15, 30, 75]:
print('testing', i)
assert checkio(i) == 'FizzBuzz'
if __name__ == '__main__':
unittest.main()
| 21.047619 | 38 | 0.644796 | import unittest
from fizzbuzz import checkio
class TestFizzBuzz(unittest.TestCase):
def test_fizz(self):
for i in [3, 6, 9, 18]:
print('testing', i)
assert checkio(i) == 'Fizz'
def test_buzz(self):
for i in [5, 10, 50]:
print('testing', i)
assert checkio(i) == 'Buzz'
def test_fizzbuzz(self):
for i in [15, 30, 75]:
print('testing', i)
assert checkio(i) == 'FizzBuzz'
if __name__ == '__main__':
unittest.main()
| true | true |
f7f4fc795a2634de761a6b017cc1ad7ceeda7391 | 1,301 | py | Python | mandalka/__init__.py | squirrelinhell/mandalka | e731fa9d6e77898e8da780eeed02271f79b8577a | [
"Unlicense",
"MIT"
] | null | null | null | mandalka/__init__.py | squirrelinhell/mandalka | e731fa9d6e77898e8da780eeed02271f79b8577a | [
"Unlicense",
"MIT"
] | null | null | null | mandalka/__init__.py | squirrelinhell/mandalka | e731fa9d6e77898e8da780eeed02271f79b8577a | [
"Unlicense",
"MIT"
] | null | null | null |
# Copyright (c) 2017 SquirrelInHell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
version = (2, 16)
from .node import (
node,
is_node,
unique_id,
touch,
evaluate,
describe,
arguments,
inputs,
config,
lazy,
)
from .threads import (
threads,
)
| 32.525 | 80 | 0.744043 |
version = (2, 16)
from .node import (
node,
is_node,
unique_id,
touch,
evaluate,
describe,
arguments,
inputs,
config,
lazy,
)
from .threads import (
threads,
)
| true | true |
f7f4ff92537a9bbc306c181b9974b2e55f840907 | 100,621 | py | Python | certbot-apache/certbot_apache/configurator.py | Raklyon/certbot | f5b23361bd3691b5480bee648a3afdb43060dd92 | [
"Apache-2.0"
] | 4 | 2020-04-09T21:57:23.000Z | 2020-04-11T13:26:54.000Z | certbot-apache/certbot_apache/configurator.py | hanxiong123/certbot | cf29e89366c7eb57dd48a99a06fe05ceaa9057fa | [
"Apache-2.0"
] | 32 | 2019-02-20T14:51:48.000Z | 2019-02-27T10:11:34.000Z | certbot-apache/certbot_apache/configurator.py | hanxiong123/certbot | cf29e89366c7eb57dd48a99a06fe05ceaa9057fa | [
"Apache-2.0"
] | null | null | null | """Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import copy
import fnmatch
import logging
import os
import pkg_resources
import re
import six
import socket
import time
import zope.component
import zope.interface
from acme import challenges
from acme.magic_typing import Any, DefaultDict, Dict, List, Set, Union # pylint: disable=unused-import, no-name-in-module
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge # pylint: disable=unused-import
from certbot.plugins import common
from certbot.plugins.util import path_surgery
from certbot.plugins.enhancements import AutoHSTSEnhancement
from certbot_apache import apache_util
from certbot_apache import augeas_configurator
from certbot_apache import constants
from certbot_apache import display_ops
from certbot_apache import http_01
from certbot_apache import obj
from certbot_apache import parser
from certbot_apache import tls_sni_01
from collections import defaultdict
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
@zope.interface.implementer(interfaces.IAuthenticator, interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
State of Configurator: This code has been been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~certbot.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~certbot_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~certbot_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
description = "Apache Web Server plugin"
if os.environ.get("CERTBOT_DOCS") == "1":
description += ( # pragma: no cover
" (Please note that the default values of the Apache plugin options"
" change depending on the operating system Certbot is run on.)"
)
OS_DEFAULTS = dict(
server_root="/etc/apache2",
vhost_root="/etc/apache2/sites-available",
vhost_files="*",
logs_root="/var/log/apache2",
ctl="apache2ctl",
version_cmd=['apache2ctl', '-v'],
restart_cmd=['apache2ctl', 'graceful'],
conftest_cmd=['apache2ctl', 'configtest'],
enmod=None,
dismod=None,
le_vhost_ext="-le-ssl.conf",
handle_modules=False,
handle_sites=False,
challenge_location="/etc/apache2",
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"certbot_apache", "options-ssl-apache.conf")
)
def option(self, key):
"""Get a value from options"""
return self.options.get(key)
def _prepare_options(self):
"""
Set the values possibly changed by command line parameters to
OS_DEFAULTS constant dictionary
"""
opts = ["enmod", "dismod", "le_vhost_ext", "server_root", "vhost_root",
"logs_root", "challenge_location", "handle_modules", "handle_sites",
"ctl"]
for o in opts:
# Config options use dashes instead of underscores
if self.conf(o.replace("_", "-")) is not None:
self.options[o] = self.conf(o.replace("_", "-"))
else:
self.options[o] = self.OS_DEFAULTS[o]
# Special cases
self.options["version_cmd"][0] = self.option("ctl")
self.options["restart_cmd"][0] = self.option("ctl")
self.options["conftest_cmd"][0] = self.option("ctl")
@classmethod
def add_parser_arguments(cls, add):
# When adding, modifying or deleting command line arguments, be sure to
# include the changes in the list used in method _prepare_options() to
# ensure consistent behavior.
# Respect CERTBOT_DOCS environment variable and use default values from
# base class regardless of the underlying distribution (overrides).
if os.environ.get("CERTBOT_DOCS") == "1":
DEFAULTS = ApacheConfigurator.OS_DEFAULTS
else:
# cls.OS_DEFAULTS can be distribution specific, see override classes
DEFAULTS = cls.OS_DEFAULTS
add("enmod", default=DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary")
add("dismod", default=DEFAULTS["dismod"],
help="Path to the Apache 'a2dismod' binary")
add("le-vhost-ext", default=DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension")
add("server-root", default=DEFAULTS["server_root"],
help="Apache server root directory")
add("vhost-root", default=None,
help="Apache server VirtualHost configuration root")
add("logs-root", default=DEFAULTS["logs_root"],
help="Apache server logs directory")
add("challenge-location",
default=DEFAULTS["challenge_location"],
help="Directory path for challenge configuration")
add("handle-modules", default=DEFAULTS["handle_modules"],
help="Let installer handle enabling required modules for you " +
"(Only Ubuntu/Debian currently)")
add("handle-sites", default=DEFAULTS["handle_sites"],
help="Let installer handle enabling sites for you " +
"(Only Ubuntu/Debian currently)")
add("ctl", default=DEFAULTS["ctl"],
help="Full path to Apache control script")
util.add_deprecated_argument(
add, argument_name="init-script", nargs=1)
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict() # type: Dict[str, obj.VirtualHost]
# Outstanding challenges
self._chall_out = set() # type: Set[KeyAuthorizationAnnotatedChallenge]
# List of vhosts configured per wildcard domain on this run.
# used by deploy_cert() and enhance()
self._wildcard_vhosts = dict() # type: Dict[str, List[obj.VirtualHost]]
# Maps enhancements to vhosts we've enabled the enhancement for
self._enhanced_vhosts = defaultdict(set) # type: DefaultDict[str, Set[obj.VirtualHost]]
# Temporary state for AutoHSTS enhancement
self._autohsts = {} # type: Dict[str, Dict[str, Union[int, float]]]
# These will be set in the prepare function
self._prepared = False
self.parser = None
self.version = version
self.vhosts = None
self.options = copy.deepcopy(self.OS_DEFAULTS)
self._enhance_func = {"redirect": self._enable_redirect,
"ensure-http-header": self._set_http_header,
"staple-ocsp": self._enable_ocsp_stapling}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir,
constants.MOD_SSL_CONF_DEST)
@property
def updated_mod_ssl_conf_digest(self):
"""Full absolute path to digest of updated SSL configuration file."""
return os.path.join(self.config.config_dir, constants.UPDATED_MOD_SSL_CONF_DIGEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Perform the actual Augeas initialization to be able to react
try:
self.init_augeas()
except ImportError:
raise errors.NoInstallationError("Problem in Augeas installation")
self._prepare_options()
# Verify Apache is installed
self._verify_exe_availability(self.option("ctl"))
# Make sure configuration is valid
self.config_test()
# Set Version
if self.version is None:
self.version = self.get_version()
logger.debug('Apache version is %s',
'.'.join(str(i) for i in self.version))
if self.version < (2, 2):
raise errors.NotSupportedError(
"Apache Version %s not supported.", str(self.version))
if not self._check_aug_version():
raise errors.NotSupportedError(
"Apache plugin support requires libaugeas0 and augeas-lenses "
"version 1.2.0 or higher, please make sure you have you have "
"those installed.")
self.parser = self.get_parser()
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
self.install_ssl_options_conf(self.mod_ssl_conf,
self.updated_mod_ssl_conf_digest)
# Prevent two Apache plugins from modifying a config at once
try:
util.lock_dir_until_exit(self.option("server_root"))
except (OSError, errors.LockError):
logger.debug("Encountered error:", exc_info=True)
raise errors.PluginError(
"Unable to lock %s", self.option("server_root"))
self._prepared = True
def _verify_exe_availability(self, exe):
"""Checks availability of Apache executable"""
if not util.exe_exists(exe):
if not path_surgery(exe):
raise errors.NoInstallationError(
'Cannot find Apache executable {0}'.format(exe))
def _check_aug_version(self):
""" Checks that we have recent enough version of libaugeas.
If augeas version is recent enough, it will support case insensitive
regexp matching"""
self.aug.set("/test/path/testing/arg", "aRgUMeNT")
try:
matches = self.aug.match(
"/test//*[self::arg=~regexp('argument', 'i')]")
except RuntimeError:
self.aug.remove("/test/path")
return False
self.aug.remove("/test/path")
return matches
def get_parser(self):
"""Initializes the ApacheParser"""
# If user provided vhost_root value in command line, use it
return parser.ApacheParser(
self.aug, self.option("server_root"), self.conf("vhost-root"),
self.version, configurator=self)
def _wildcard_domain(self, domain):
"""
Checks if domain is a wildcard domain
:param str domain: Domain to check
:returns: If the domain is wildcard domain
:rtype: bool
"""
if isinstance(domain, six.text_type):
wildcard_marker = u"*."
else:
wildcard_marker = b"*."
return domain.startswith(wildcard_marker)
def deploy_cert(self, domain, cert_path, key_path,
                chain_path=None, fullchain_path=None):
    """Deploy the certificate to every virtual host matching *domain*.

    Vhost selection (including interactive prompting and creation of an
    SSL counterpart where needed) is delegated to :meth:`choose_vhosts`;
    the per-vhost directive rewriting is delegated to
    :meth:`_deploy_cert`.

    :param str domain: domain to deploy for
    :param str cert_path: certificate file path
    :param str key_path: private key file path
    :param str chain_path: optional chain file path
    :param str fullchain_path: optional fullchain file path
    :raises errors.PluginError: when deployment is not possible
    """
    for selected in self.choose_vhosts(domain):
        self._deploy_cert(selected, cert_path, key_path,
                          chain_path, fullchain_path)
def choose_vhosts(self, domain, create_if_no_ssl=True):
    """Find the VirtualHosts usable for *domain*.

    Wildcard domains may map to several vhosts (selected interactively
    and cached); plain domains map to exactly one.

    :param str domain: Domain name to match VirtualHosts to
    :param bool create_if_no_ssl: If found VirtualHost doesn't have a
        HTTPS counterpart, should one get created

    :returns: List of VirtualHosts or None
    :rtype: `list` of :class:`~certbot_apache.obj.VirtualHost`
    """
    if not self._wildcard_domain(domain):
        return [self.choose_vhost(domain, create_if_no_ssl)]
    # Reuse a previous wildcard selection when one exists; cached
    # entries are guaranteed to be ssl vhosts already.
    if domain in self._wildcard_vhosts:
        return self._wildcard_vhosts[domain]
    return self._choose_vhosts_wildcard(domain, create_if_no_ssl)
def _vhosts_for_wildcard(self, domain):
"""
Get VHost objects for every VirtualHost that the user wants to handle
with the wildcard certificate.
"""
# Collect all vhosts that match the name
matched = set()
for vhost in self.vhosts:
for name in vhost.get_names():
if self._in_wildcard_scope(name, domain):
matched.add(vhost)
return list(matched)
def _in_wildcard_scope(self, name, domain):
"""
Helper method for _vhosts_for_wildcard() that makes sure that the domain
is in the scope of wildcard domain.
eg. in scope: domain = *.wild.card, name = 1.wild.card
not in scope: domain = *.wild.card, name = 1.2.wild.card
"""
if len(name.split(".")) == len(domain.split(".")):
return fnmatch.fnmatch(name, domain)
def _choose_vhosts_wildcard(self, domain, create_ssl=True):
    """Prompts user to choose vhosts to install a wildcard certificate for.

    :param str domain: wildcard domain being installed
    :param bool create_ssl: whether HTTP-only selections should have an
        SSL counterpart created for them
    :returns: list of chosen (SSL) VirtualHosts
    :raises errors.PluginError: if the user selects nothing
    """
    # Get all vhosts that are covered by the wildcard domain
    vhosts = self._vhosts_for_wildcard(domain)

    # Go through the vhosts, making sure that we cover all the names
    # present, but preferring the SSL vhosts
    filtered_vhosts = dict()
    for vhost in vhosts:
        for name in vhost.get_names():
            if vhost.ssl:
                # Always prefer SSL vhosts
                filtered_vhosts[name] = vhost
            elif name not in filtered_vhosts and create_ssl:
                # Add if not in list previously
                filtered_vhosts[name] = vhost

    # Only unique VHost objects
    dialog_input = set([vhost for vhost in filtered_vhosts.values()])

    # Ask the user which of names to enable, expect list of names back
    dialog_output = display_ops.select_vhost_multiple(list(dialog_input))

    if not dialog_output:
        logger.error(
            "No vhost exists with servername or alias for domain %s. "
            "No vhost was selected. Please specify ServerName or ServerAlias "
            "in the Apache config.",
            domain)
        raise errors.PluginError("No vhost selected")

    # Make sure we create SSL vhosts for the ones that are HTTP only
    # if requested.
    return_vhosts = list()
    for vhost in dialog_output:
        if not vhost.ssl:
            return_vhosts.append(self.make_vhost_ssl(vhost))
        else:
            return_vhosts.append(vhost)

    # Cache the selection so later calls for the same wildcard reuse it
    # (see choose_vhosts).
    self._wildcard_vhosts[domain] = return_vhosts
    return return_vhosts
def _deploy_cert(self, vhost, cert_path, key_path, chain_path, fullchain_path):
    """
    Helper function for deploy_cert() that handles the actual deployment
    this exists because we might want to do multiple deployments per
    domain originally passed for deploy_cert(). This is especially true
    with wildcard certificates

    :param vhost: vhost to deploy the certificate into
    :param str cert_path: certificate file path
    :param str key_path: private key file path
    :param str chain_path: chain file path, or None
    :param str fullchain_path: fullchain file path, or None
    :raises errors.PluginError: when the SSL directives cannot be found
        or a required chain/fullchain path was not supplied
    """
    # This is done first so that ssl module is enabled and cert_path,
    # cert_key... can all be parsed appropriately
    self.prepare_server_https("443")

    # Add directives and remove duplicates
    self._add_dummy_ssl_directives(vhost.path)
    self._clean_vhost(vhost)

    path = {"cert_path": self.parser.find_dir("SSLCertificateFile",
                                              None, vhost.path),
            "cert_key": self.parser.find_dir("SSLCertificateKeyFile",
                                             None, vhost.path)}

    # Only include if a certificate chain is specified
    if chain_path is not None:
        path["chain_path"] = self.parser.find_dir(
            "SSLCertificateChainFile", None, vhost.path)

    # Handle errors when certificate/key directives cannot be found
    if not path["cert_path"]:
        logger.warning(
            "Cannot find an SSLCertificateFile directive in %s. "
            "VirtualHost was not modified", vhost.path)
        raise errors.PluginError(
            "Unable to find an SSLCertificateFile directive")
    elif not path["cert_key"]:
        logger.warning(
            "Cannot find an SSLCertificateKeyFile directive for "
            "certificate in %s. VirtualHost was not modified", vhost.path)
        raise errors.PluginError(
            "Unable to find an SSLCertificateKeyFile directive for "
            "certificate")

    logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)

    if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
        # Older Apache cannot consume a fullchain file:
        # install SSLCertificateFile, SSLCertificateKeyFile,
        # and SSLCertificateChainFile directives
        set_cert_path = cert_path
        self.aug.set(path["cert_path"][-1], cert_path)
        self.aug.set(path["cert_key"][-1], key_path)
        if chain_path is not None:
            self.parser.add_dir(vhost.path,
                                "SSLCertificateChainFile", chain_path)
        else:
            raise errors.PluginError("--chain-path is required for your "
                                     "version of Apache")
    else:
        # Apache >= 2.4.8 accepts the leaf+chain in one file.
        if not fullchain_path:
            raise errors.PluginError("Please provide the --fullchain-path "
                                     "option pointing to your full chain file")
        set_cert_path = fullchain_path
        self.aug.set(path["cert_path"][-1], fullchain_path)
        self.aug.set(path["cert_key"][-1], key_path)

    # Enable the new vhost if needed
    if not vhost.enabled:
        self.enable_site(vhost)

    # Save notes about the transaction that took place
    self.save_notes += ("Changed vhost at %s with addresses of %s\n"
                        "\tSSLCertificateFile %s\n"
                        "\tSSLCertificateKeyFile %s\n" %
                        (vhost.filep,
                         ", ".join(str(addr) for addr in vhost.addrs),
                         set_cert_path, key_path))
    if chain_path is not None:
        self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
def choose_vhost(self, target_name, create_if_no_ssl=True):
    """Chooses a virtual host based on the given domain name.

    If there is no clear virtual host to be selected, the user is prompted
    with all available choices.

    The returned vhost is guaranteed to have TLS enabled unless
    create_if_no_ssl is set to False, in which case there is no such guarantee
    and the result is not cached.

    :param str target_name: domain name

    :param bool create_if_no_ssl: If found VirtualHost doesn't have a HTTPS
        counterpart, should one get created

    :returns: vhost associated with name
    :rtype: :class:`~certbot_apache.obj.VirtualHost`

    :raises .errors.PluginError: If no vhost is available or chosen
    """
    # Allows for domain names to be associated with a virtual host
    if target_name in self.assoc:
        return self.assoc[target_name]

    # Try to find a reasonable vhost
    vhost = self._find_best_vhost(target_name)
    if vhost is not None:
        if not create_if_no_ssl:
            # Caller only wants a lookup; do not upgrade or cache.
            return vhost
        if not vhost.ssl:
            # Upgrade the HTTP-only match to an SSL vhost before caching.
            vhost = self.make_vhost_ssl(vhost)

        self._add_servername_alias(target_name, vhost)
        self.assoc[target_name] = vhost
        return vhost

    # Negate create_if_no_ssl value to indicate if we want a SSL vhost
    # to get created if a non-ssl vhost is selected.
    return self._choose_vhost_from_list(target_name, temp=not create_if_no_ssl)
def _choose_vhost_from_list(self, target_name, temp=False):
    """Interactively select a vhost for *target_name* from all vhosts.

    :param str target_name: domain the vhost should serve
    :param bool temp: when True, return the selection as-is without
        creating an SSL counterpart or caching the association
    :returns: the chosen (possibly newly created SSL) vhost
    :rtype: :class:`~certbot_apache.obj.VirtualHost`
    :raises .errors.PluginError: if nothing is selected or the selection
        conflicts with existing HTTPS vhosts
    """
    # Select a vhost from a list
    vhost = display_ops.select_vhost(target_name, self.vhosts)
    if vhost is None:
        logger.error(
            "No vhost exists with servername or alias of %s. "
            "No vhost was selected. Please specify ServerName or ServerAlias "
            "in the Apache config.",
            target_name)
        raise errors.PluginError("No vhost selected")
    elif temp:
        return vhost
    elif not vhost.ssl:
        addrs = self._get_proposed_addrs(vhost, "443")
        # TODO: Conflicts is too conservative
        # NOTE: the generator variable below shadows the selected
        # `vhost`; this is harmless because generator expressions have
        # their own scope.
        if not any(vhost.enabled and vhost.conflicts(addrs) for
                   vhost in self.vhosts):
            vhost = self.make_vhost_ssl(vhost)
        else:
            logger.error(
                "The selected vhost would conflict with other HTTPS "
                "VirtualHosts within Apache. Please select another "
                "vhost or add ServerNames to your configuration.")
            raise errors.PluginError(
                "VirtualHost not able to be selected.")

    self._add_servername_alias(target_name, vhost)
    self.assoc[target_name] = vhost
    return vhost
def domain_in_names(self, names, target_name):
    """Return True when any entry of *names* covers *target_name*.

    Entries are matched exactly or as shell-style wildcards, and the
    comparison is case-insensitive.

    :param names: server aliases
    :type names: `collections.Iterable` of `str`
    :param str target_name: name to compare with wildcards
    :rtype: bool
    """
    # fnmatch can be case sensitive, so normalize both sides.
    target = target_name.lower()
    for candidate in names:
        candidate = candidate.lower()
        # fnmatch gives "[seq]" special meaning; Apache tolerates stray
        # brackets in names, so skip such entries to avoid bogus matches.
        if "[" in candidate:
            continue
        if fnmatch.fnmatch(target, candidate):
            return True
    return False
def find_best_http_vhost(self, target, filter_defaults, port="80"):
    """Return the best non-HTTPS vhost for *target*.

    :param str target: Domain name of the desired VirtualHost
    :param bool filter_defaults: whether _default_ vhosts should be
        included if it is the best match
    :param str port: port number the vhost should be listening on

    :returns: VirtualHost object that's the best match for target name
    :rtype: `obj.VirtualHost` or None
    """
    # Keep only plain-HTTP vhosts that listen on the port (or on a
    # wildcard address).
    candidates = [
        vh for vh in self.vhosts
        if not vh.ssl and any(
            addr.is_wildcard() or addr.get_port() == port
            for addr in vh.addrs)
    ]
    return self._find_best_vhost(target, candidates, filter_defaults)
def _find_best_vhost(self, target_name, vhosts=None, filter_defaults=True):
    """Finds the best vhost for a target_name.

    This does not upgrade a vhost to HTTPS... it only finds the most
    appropriate vhost for the given target_name.

    :param str target_name: domain handled by the desired vhost
    :param vhosts: vhosts to consider
    :type vhosts: `collections.Iterable` of :class:`~certbot_apache.obj.VirtualHost`
    :param bool filter_defaults: whether a vhost with a _default_
        addr is acceptable

    :returns: VHost or None
    """
    # Scoring table for ranking candidate vhosts:
    # Points 6 - Servername SSL
    # Points 5 - Wildcard SSL
    # Points 4 - Address name with SSL
    # Points 3 - Servername no SSL
    # Points 2 - Wildcard no SSL
    # Points 1 - Address name with no SSL
    best_candidate = None
    best_points = 0

    if vhosts is None:
        vhosts = self.vhosts

    for vhost in vhosts:
        if vhost.modmacro is True:
            # mod_macro templates never count as real vhosts here.
            continue
        names = vhost.get_names()
        if target_name in names:
            points = 3
        elif self.domain_in_names(names, target_name):
            points = 2
        elif any(addr.get_addr() == target_name for addr in vhost.addrs):
            points = 1
        else:
            # No points given if names can't be found.
            # This gets hit but doesn't register
            continue  # pragma: no cover

        # An SSL match always outranks a non-SSL match of the same kind.
        if vhost.ssl:
            points += 3

        if points > best_points:
            best_points = points
            best_candidate = vhost

    # No winners here... is there only one reasonable vhost?
    if best_candidate is None:
        if filter_defaults:
            vhosts = self._non_default_vhosts(vhosts)
        # remove mod_macro hosts from reasonable vhosts
        reasonable_vhosts = [vh for vh
                             in vhosts if vh.modmacro is False]
        if len(reasonable_vhosts) == 1:
            best_candidate = reasonable_vhosts[0]

    return best_candidate
def _non_default_vhosts(self, vhosts):
"""Return all non _default_ only vhosts."""
return [vh for vh in vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
    """Returns all names found in the Apache Configuration.

    :returns: All ServerNames, ServerAliases, and reverse DNS entries for
              virtual host addresses
    :rtype: set
    """
    all_names = set()  # type: Set[str]

    vhost_macro = []

    for vhost in self.vhosts:
        all_names.update(vhost.get_names())
        if vhost.modmacro:
            # Remember mod_macro files so the user can be warned below.
            vhost_macro.append(vhost.filep)

        for addr in vhost.addrs:
            if common.hostname_regex.match(addr.get_addr()):
                all_names.add(addr.get_addr())
            else:
                # Not a hostname; try resolving the IP back to a name.
                name = self.get_name_from_ip(addr)
                if name:
                    all_names.add(name)

    if len(vhost_macro) > 0:
        zope.component.getUtility(interfaces.IDisplay).notification(
            "Apache mod_macro seems to be in use in file(s):\n{0}"
            "\n\nUnfortunately mod_macro is not yet supported".format(
                "\n ".join(vhost_macro)), force_interactive=True)

    return util.get_filtered_names(all_names)
def get_name_from_ip(self, addr):  # pylint: disable=no-self-use
    """Attempt a reverse DNS lookup for an address.

    :param addr: IP Address
    :type addr: ~.common.Addr

    :returns: resolved hostname, or "" when the name cannot be determined
    :rtype: str
    """
    raw = addr.get_addr()
    # Private addresses are never looked up.
    if common.private_ips_regex.match(raw):
        return ""
    try:
        # inet_aton raises for anything that is not an IPv4 literal.
        socket.inet_aton(raw)
        return socket.gethostbyaddr(raw)[0]
    except (socket.error, socket.herror, socket.timeout):
        return ""
def _get_vhost_names(self, path):
"""Helper method for getting the ServerName and
ServerAlias values from vhost in path
:param path: Path to read ServerName and ServerAliases from
:returns: Tuple including ServerName and `list` of ServerAlias strings
"""
servername_match = self.parser.find_dir(
"ServerName", None, start=path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=path, exclude=False)
serveraliases = []
for alias in serveralias_match:
serveralias = self.parser.get_arg(alias)
serveraliases.append(serveralias)
servername = None
if servername_match:
# Get last ServerName as each overwrites the previous
servername = self.parser.get_arg(servername_match[-1])
return (servername, serveraliases)
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~certbot_apache.obj.VirtualHost`
"""
servername, serveraliases = self._get_vhost_names(host.path)
for alias in serveraliases:
if not host.modmacro:
host.aliases.add(alias)
if not host.modmacro:
host.name = servername
def _create_vhost(self, path):
    """Used by get_virtual_hosts to create vhost objects

    :param str path: Augeas path to virtual host

    :returns: newly created vhost, or None when the vhost's arguments or
        backing file cannot be determined
    :rtype: :class:`~certbot_apache.obj.VirtualHost`
    """
    addrs = set()
    try:
        args = self.aug.match(path + "/arg")
    except RuntimeError:
        logger.warning("Encountered a problem while parsing file: %s, skipping", path)
        return None
    for arg in args:
        addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
    is_ssl = False

    if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
        is_ssl = True

    # "SSLEngine on" might be set outside of <VirtualHost>
    # Treat vhosts with port 443 as ssl vhosts
    for addr in addrs:
        if addr.get_port() == "443":
            is_ssl = True

    # Map the Augeas path back to the config file it was parsed from.
    filename = apache_util.get_file_path(
        self.aug.get("/augeas/files%s/path" % apache_util.get_file_path(path)))
    if filename is None:
        return None

    macro = False
    # Vhosts defined inside a mod_macro block are flagged so they are
    # handled specially (and skipped) elsewhere.
    if "/macro/" in path.lower():
        macro = True

    vhost_enabled = self.parser.parsed_in_original(filename)

    vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
                            vhost_enabled, modmacro=macro)
    self._add_servernames(vhost)
    return vhost
def get_virtual_hosts(self):
    """Returns list of virtual hosts found in the Apache configuration.

    :returns: List of :class:`~certbot_apache.obj.VirtualHost`
        objects found in configuration
    :rtype: list
    """
    # Search base config, and all included paths for VirtualHosts
    file_paths = {}  # type: Dict[str, str]
    internal_paths = defaultdict(set)  # type: DefaultDict[str, Set[str]]
    vhs = []
    # Make a list of parser paths because the parser_paths
    # dictionary may be modified during the loop.
    for vhost_path in list(self.parser.parser_paths):
        paths = self.aug.match(
            ("/files%s//*[label()=~regexp('%s')]" %
             (vhost_path, parser.case_i("VirtualHost"))))
        # The regexp above can match nested nodes too; keep only real
        # VirtualHost labels.
        paths = [path for path in paths if
                 "virtualhost" in os.path.basename(path).lower()]
        for path in paths:
            new_vhost = self._create_vhost(path)
            if not new_vhost:
                continue
            internal_path = apache_util.get_internal_aug_path(new_vhost.path)
            realpath = os.path.realpath(new_vhost.filep)
            if realpath not in file_paths:
                # First sighting of this file: record it and keep vhost.
                file_paths[realpath] = new_vhost.filep
                internal_paths[realpath].add(internal_path)
                vhs.append(new_vhost)
            elif (realpath == new_vhost.filep and
                  realpath != file_paths[realpath]):
                # Prefer "real" vhost paths instead of symlinked ones
                # ex: sites-enabled/vh.conf -> sites-available/vh.conf
                # remove old (most likely) symlinked one
                new_vhs = []
                for v in vhs:
                    if v.filep == file_paths[realpath]:
                        internal_paths[realpath].remove(
                            apache_util.get_internal_aug_path(v.path))
                    else:
                        new_vhs.append(v)
                vhs = new_vhs

                file_paths[realpath] = realpath
                internal_paths[realpath].add(internal_path)
                vhs.append(new_vhost)
            elif internal_path not in internal_paths[realpath]:
                # Same file reached through another include; keep the
                # vhost only if this Augeas subtree wasn't seen before.
                internal_paths[realpath].add(internal_path)
                vhs.append(new_vhost)
    return vhs
def is_name_vhost(self, target_addr):
    """Returns whether the vhost at *target_addr* is name based.

    NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
    now NameVirtualHosts. For earlier versions an explicit
    NameVirtualHost directive must exist for the address.

    :param certbot_apache.obj.Addr target_addr: vhost address

    :returns: Success
    :rtype: bool
    """
    if self.version >= (2, 4):
        return True
    # Mixed and matched wildcard NameVirtualHost with VirtualHost
    # behavior is undefined. Make sure that an exact match exists.
    # Note: target_addr can be an FQDN although Apache discourages it.
    return self.parser.find_dir("NameVirtualHost", str(target_addr))
def add_name_vhost(self, addr):
    """Adds NameVirtualHost directive for given address.

    Port 443 directives are wrapped in an <IfModule mod_ssl.c> block.

    :param addr: Address that will be added as NameVirtualHost directive
    :type addr: :class:`~certbot_apache.obj.Addr`
    """
    loc = parser.get_aug_path(self.parser.loc["name"])

    if addr.get_port() == "443":
        add_directive = self.parser.add_dir_to_ifmodssl
    else:
        add_directive = self.parser.add_dir
    path = add_directive(loc, "NameVirtualHost", [str(addr)])

    msg = ("Setting %s to be NameBasedVirtualHost\n"
           "\tDirective added to %s\n" % (addr, path))
    logger.debug(msg)
    self.save_notes += msg
def prepare_server_https(self, port, temp=False):
    """Prepare the server to speak HTTPS on *port*.

    Enables the required SSL modules and ensures a matching Listen
    directive exists.

    :param str port: Port to listen on
    :param bool temp: whether module enabling is temporary
    """
    self.prepare_https_modules(temp)
    self.ensure_listen(port, https=True)
def ensure_listen(self, port, https=False):
    """Make sure that Apache is listening on the port. Checks if the
    Listen statement for the port already exists, and adds it to the
    configuration if necessary.

    :param str port: Port number to check and add Listen for if not in
        place already
    :param bool https: If the port will be used for HTTPS
    """
    # If HTTPS requested for nonstandard port, add service definition
    # ("Listen 8443 https")
    if https and port != "443":
        port_service = "%s %s" % (port, "https")
    else:
        port_service = port

    # Check for Listen <port>
    # Note: This could be made to also look for ip:443 combo
    listens = [self.parser.get_arg(x).split()[0] for
               x in self.parser.find_dir("Listen")]

    # Listen already in place
    if self._has_port_already(listens, port):
        return

    listen_dirs = set(listens)

    if not listens:
        listen_dirs.add(port_service)

    for listen in listens:
        # For any listen statement, check if the machine also listens on
        # the given port. If not, add such a listen statement.
        if len(listen.split(":")) == 1:
            # It's listening to all interfaces
            if port not in listen_dirs and port_service not in listen_dirs:
                listen_dirs.add(port_service)
        else:
            # The Listen statement specifies an ip; split on the LAST
            # colon so bracketed IPv6 addresses stay intact.
            _, ip = listen[::-1].split(":", 1)
            ip = ip[::-1]
            # Bug fix: the second operand of this condition used to
            # repeat "ip:port_service", making the check redundant. It
            # must test the bare "ip:port" form as well, mirroring the
            # wildcard branch above.
            if ("%s:%s" % (ip, port) not in listen_dirs and
                    "%s:%s" % (ip, port_service) not in listen_dirs):
                listen_dirs.add("%s:%s" % (ip, port_service))
    if https:
        self._add_listens_https(listen_dirs, listens, port)
    else:
        self._add_listens_http(listen_dirs, listens, port)
def _add_listens_http(self, listens, listens_orig, port):
    """Add the Listen directives needed for plain HTTP on *port*.

    Helper method for ensure_listen.

    :param set listens: Set of all needed Listen statements
    :param list listens_orig: List of existing listen statements
    :param string port: Port number we're adding
    """
    aug_listen_path = parser.get_aug_path(self.parser.loc["listen"])
    to_add = listens.difference(listens_orig)

    if port in to_add:
        # A bare port listens on all interfaces; nothing else is needed.
        self.parser.add_dir(aug_listen_path, "Listen", port)
        self.save_notes += "Added Listen %s directive to %s\n" % (
            port, self.parser.loc["listen"])
        return

    for listen in to_add:
        self.parser.add_dir(aug_listen_path, "Listen", listen.split(" "))
        self.save_notes += ("Added Listen %s directive to "
                            "%s\n") % (listen,
                                       self.parser.loc["listen"])
def _add_listens_https(self, listens, listens_orig, port):
    """Add the Listen directives needed for HTTPS on *port*.

    Helper method for ensure_listen; directives are placed inside an
    <IfModule mod_ssl.c> block.

    :param set listens: Set of all needed Listen statements
    :param list listens_orig: List of existing listen statements
    :param string port: Port number we're adding
    """
    # Non-standard ports need the explicit "https" protocol argument.
    if port == "443":
        port_service = port
    else:
        port_service = "%s %s" % (port, "https")

    aug_listen_path = parser.get_aug_path(self.parser.loc["listen"])
    to_add = listens.difference(listens_orig)

    if port in to_add or port_service in to_add:
        # A bare port covers all interfaces; one directive suffices.
        self.parser.add_dir_to_ifmodssl(
            aug_listen_path, "Listen", port_service.split(" "))
        self.save_notes += "Added Listen %s directive to %s\n" % (
            port_service, self.parser.loc["listen"])
        return

    for listen in to_add:
        self.parser.add_dir_to_ifmodssl(
            aug_listen_path, "Listen", listen.split(" "))
        self.save_notes += ("Added Listen %s directive to "
                            "%s\n") % (listen,
                                       self.parser.loc["listen"])
def _has_port_already(self, listens, port):
"""Helper method for prepare_server_https to find out if user
already has an active Listen statement for the port we need
:param list listens: List of listen variables
:param string port: Port in question
"""
if port in listens:
return True
# Check if Apache is already listening on a specific IP
for listen in listens:
if len(listen.split(":")) > 1:
# Ugly but takes care of protocol def, eg: 1.1.1.1:443 https
if listen.split(":")[-1].split(" ")[0] == port:
return True
def prepare_https_modules(self, temp):
    """Enable the Apache modules needed for HTTPS, if module handling
    is enabled for this plugin.

    Helper method for prepare_server_https.

    :param boolean temp: If the change is temporary
    """
    if not self.option("handle_modules"):
        return
    loaded = self.parser.modules
    # Apache >= 2.4 additionally needs a session cache provider.
    if self.version >= (2, 4) and "socache_shmcb_module" not in loaded:
        self.enable_mod("socache_shmcb", temp=temp)
    if "ssl_module" not in loaded:
        self.enable_mod("ssl", temp=temp)
def make_addrs_sni_ready(self, addrs):
    """Ensure the server can answer SNI challenges on *addrs*.

    :param addrs: Addresses to check SNI compatibility
    :type addrs: :class:`~certbot_apache.obj.Addr`
    """
    # Version 2.4 and later are automatically SNI ready.
    if self.version >= (2, 4):
        return
    for addr in addrs:
        if self.is_name_vhost(addr):
            continue
        logger.debug("Setting VirtualHost at %s to be a name "
                     "based virtual host", addr)
        self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost):  # pylint: disable=too-many-locals
    """Makes an ssl_vhost version of a nonssl_vhost.

    Duplicates vhost and adds default ssl options
    New vhost will reside as (nonssl_vhost.path) +
    ``self.option("le_vhost_ext")``

    .. note:: This function saves the configuration

    :param nonssl_vhost: Valid VH that doesn't have SSLEngine on
    :type nonssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

    :returns: SSL vhost
    :rtype: :class:`~certbot_apache.obj.VirtualHost`

    :raises .errors.PluginError: If more than one virtual host is in
        the file or if plugin is unable to write/read vhost files.
    """
    avail_fp = nonssl_vhost.filep
    ssl_fp = self._get_ssl_vhost_path(avail_fp)

    # Snapshot the vhost paths already present in the target file so the
    # newly written block can be identified by set difference later.
    orig_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                                  (self._escape(ssl_fp),
                                   parser.case_i("VirtualHost")))

    self._copy_create_ssl_vhost_skeleton(nonssl_vhost, ssl_fp)

    # Reload augeas to take into account the new vhost
    self.aug.load()
    # Get Vhost augeas path for new vhost
    new_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                                 (self._escape(ssl_fp),
                                  parser.case_i("VirtualHost")))

    vh_p = self._get_new_vh_path(orig_matches, new_matches)

    if not vh_p:
        # The vhost was not found on the currently parsed paths
        # Make Augeas aware of the new vhost
        self.parser.parse_file(ssl_fp)
        # Try to search again
        new_matches = self.aug.match(
            "/files%s//* [label()=~regexp('%s')]" %
            (self._escape(ssl_fp),
             parser.case_i("VirtualHost")))
        vh_p = self._get_new_vh_path(orig_matches, new_matches)
        if not vh_p:
            raise errors.PluginError(
                "Could not reverse map the HTTPS VirtualHost to the original")

    # Update Addresses
    self._update_ssl_vhosts_addrs(vh_p)

    # Log actions and create save notes
    logger.info("Created an SSL vhost at %s", ssl_fp)
    self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
    self.save()

    # We know the length is one because of the assertion above
    # Create the Vhost object
    ssl_vhost = self._create_vhost(vh_p)
    ssl_vhost.ancestor = nonssl_vhost

    self.vhosts.append(ssl_vhost)

    # NOTE: Searches through Augeas seem to ruin changes to directives
    #       The configuration must also be saved before being searched
    #       for the new directives; For these reasons... this is tacked
    #       on after fully creating the new vhost

    # Now check if addresses need to be added as NameBasedVhost addrs
    # This is for compliance with versions of Apache < 2.4
    self._add_name_vhost_if_necessary(ssl_vhost)

    return ssl_vhost
def _get_new_vh_path(self, orig_matches, new_matches):
""" Helper method for make_vhost_ssl for matching augeas paths. Returns
VirtualHost path from new_matches that's not present in orig_matches.
Paths are normalized, because augeas leaves indices out for paths
with only single directive with a similar key """
orig_matches = [i.replace("[1]", "") for i in orig_matches]
for match in new_matches:
if match.replace("[1]", "") not in orig_matches:
# Return the unmodified path
return match
return None
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
""" Get a file path for SSL vhost, uses user defined path as priority,
but if the value is invalid or not defined, will fall back to non-ssl
vhost filepath.
:param str non_ssl_vh_fp: Filepath of non-SSL vhost
:returns: Filepath for SSL vhost
:rtype: str
"""
if self.conf("vhost-root") and os.path.exists(self.conf("vhost-root")):
fp = os.path.join(os.path.realpath(self.option("vhost_root")),
os.path.basename(non_ssl_vh_fp))
else:
# Use non-ssl filepath
fp = os.path.realpath(non_ssl_vh_fp)
if fp.endswith(".conf"):
return fp[:-(len(".conf"))] + self.option("le_vhost_ext")
else:
return fp + self.option("le_vhost_ext")
def _sift_rewrite_rule(self, line):
"""Decides whether a line should be copied to a SSL vhost.
A canonical example of when sifting a line is required:
When the http vhost contains a RewriteRule that unconditionally
redirects any request to the https version of the same site.
e.g:
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [L,QSA,R=permanent]
Copying the above line to the ssl vhost would cause a
redirection loop.
:param str line: a line extracted from the http vhost.
:returns: True - don't copy line from http vhost to SSL vhost.
:rtype: bool
"""
if not line.lower().lstrip().startswith("rewriterule"):
return False
# According to: http://httpd.apache.org/docs/2.4/rewrite/flags.html
# The syntax of a RewriteRule is:
# RewriteRule pattern target [Flag1,Flag2,Flag3]
# i.e. target is required, so it must exist.
target = line.split()[2].strip()
# target may be surrounded with quotes
if target[0] in ("'", '"') and target[0] == target[-1]:
target = target[1:-1]
# Sift line if it redirects the request to a HTTPS site
return target.startswith("https://")
def _copy_create_ssl_vhost_skeleton(self, vhost, ssl_fp):
    """Copies over existing Vhost with IfModule mod_ssl.c> skeleton.

    :param obj.VirtualHost vhost: Original VirtualHost object
    :param str ssl_fp: Full path where the new ssl_vhost will reside.

    A new file is created on the filesystem.

    :raises errors.PluginError: on read/write failure of either file
    """
    # First register the creation so that it is properly removed if
    # configuration is rolled back
    if os.path.exists(ssl_fp):
        notes = "Appended new VirtualHost directive to file %s" % ssl_fp
        files = set()
        files.add(ssl_fp)
        self.reverter.add_to_checkpoint(files, notes)
    else:
        self.reverter.register_file_creation(False, ssl_fp)
    sift = False

    try:
        orig_contents = self._get_vhost_block(vhost)
        # Disable any rewrite rules that would loop under HTTPS.
        ssl_vh_contents, sift = self._sift_rewrite_rules(orig_contents)

        with open(ssl_fp, "a") as new_file:
            new_file.write("<IfModule mod_ssl.c>\n")
            new_file.write("\n".join(ssl_vh_contents))
            # The content does not include the closing tag, so add it
            new_file.write("</VirtualHost>\n")
            new_file.write("</IfModule>\n")
        # Add new file to augeas paths if we're supposed to handle
        # activation (it's not included as default)
        if not self.parser.parsed_in_current(ssl_fp):
            self.parser.parse_file(ssl_fp)
    except IOError:
        logger.critical("Error writing/reading to file in make_vhost_ssl", exc_info=True)
        raise errors.PluginError("Unable to write/read in make_vhost_ssl")

    if sift:
        # Tell the user some of their rewrite rules were commented out.
        reporter = zope.component.getUtility(interfaces.IReporter)
        reporter.add_message(
            "Some rewrite rules copied from {0} were disabled in the "
            "vhost for your HTTPS site located at {1} because they have "
            "the potential to create redirection loops.".format(
                vhost.filep, ssl_fp), reporter.MEDIUM_PRIORITY)
    # Reset mtimes so Augeas reparses both files on the next load.
    self.aug.set("/augeas/files%s/mtime" % (self._escape(ssl_fp)), "0")
    self.aug.set("/augeas/files%s/mtime" % (self._escape(vhost.filep)), "0")
def _sift_rewrite_rules(self, contents):
    """ Helper function for _copy_create_ssl_vhost_skeleton to prepare the
    new HTTPS VirtualHost contents. Currently disabling the rewrites

    :param contents: original vhost block lines
    :returns: (rewritten lines, whether any rule was commented out)
    :rtype: tuple
    """
    result = []
    sift = False
    contents = iter(contents)

    comment = ("# Some rewrite rules in this file were "
               "disabled on your HTTPS site,\n"
               "# because they have the potential to create "
               "redirection loops.\n")

    for line in contents:
        A = line.lower().lstrip().startswith("rewritecond")
        B = line.lower().lstrip().startswith("rewriterule")

        if not (A or B):
            result.append(line)
            continue

        # A RewriteRule that doesn't need filtering
        if B and not self._sift_rewrite_rule(line):
            result.append(line)
            continue

        # A RewriteRule that does need filtering
        if B and self._sift_rewrite_rule(line):
            if not sift:
                result.append(comment)
                sift = True
            result.append("# " + line)
            continue

        # We save RewriteCond(s) and their corresponding
        # RewriteRule in 'chunk'.
        # We then decide whether we comment out the entire
        # chunk based on its RewriteRule.
        chunk = []
        if A:
            chunk.append(line)
            line = next(contents)

            # RewriteCond(s) must be followed by one RewriteRule
            # NOTE(review): if the block ended while still inside a run
            # of RewriteConds, next() would raise StopIteration here --
            # assumes well-formed Apache config; confirm upstream.
            while not line.lower().lstrip().startswith("rewriterule"):
                chunk.append(line)
                line = next(contents)

            # Now, current line must start with a RewriteRule
            chunk.append(line)

            if self._sift_rewrite_rule(line):
                if not sift:
                    result.append(comment)
                    sift = True

                result.append('\n'.join(
                    ['# ' + l for l in chunk]))
                continue
            else:
                result.append('\n'.join(chunk))
                continue

    return result, sift
def _get_vhost_block(self, vhost):
""" Helper method to get VirtualHost contents from the original file.
This is done with help of augeas span, which returns the span start and
end positions
:returns: `list` of VirtualHost block content lines without closing tag
"""
try:
span_val = self.aug.span(vhost.path)
except ValueError:
logger.critical("Error while reading the VirtualHost %s from "
"file %s", vhost.name, vhost.filep, exc_info=True)
raise errors.PluginError("Unable to read VirtualHost from file")
span_filep = span_val[0]
span_start = span_val[5]
span_end = span_val[6]
with open(span_filep, 'r') as fh:
fh.seek(span_start)
vh_contents = fh.read(span_end-span_start).split("\n")
self._remove_closing_vhost_tag(vh_contents)
return vh_contents
def _remove_closing_vhost_tag(self, vh_contents):
"""Removes the closing VirtualHost tag if it exists.
This method modifies vh_contents directly to remove the closing
tag. If the closing vhost tag is found, everything on the line
after it is also removed. Whether or not this tag is included
in the result of span depends on the Augeas version.
:param list vh_contents: VirtualHost block contents to check
"""
for offset, line in enumerate(reversed(vh_contents)):
if line:
line_index = line.lower().find("</virtualhost>")
if line_index != -1:
content_index = len(vh_contents) - offset - 1
vh_contents[content_index] = line[:line_index]
break
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _clean_vhost(self, vhost):
# remove duplicated or conflicting ssl directives
self._deduplicate_directives(vhost.path,
["SSLCertificateFile",
"SSLCertificateKeyFile"])
# remove all problematic directives
self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 1:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 0:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
# Only include the TLS configuration if not already included
existing_inc = self.parser.find_dir("Include", self.mod_ssl_conf, vh_path)
if not existing_inc:
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_servername_alias(self, target_name, vhost):
vh_path = vhost.path
sname, saliases = self._get_vhost_names(vh_path)
if target_name == sname or target_name in saliases:
return
if self._has_matching_wildcard(vh_path, target_name):
return
if not self.parser.find_dir("ServerName", None,
start=vh_path, exclude=False):
self.parser.add_dir(vh_path, "ServerName", target_name)
else:
self.parser.add_dir(vh_path, "ServerAlias", target_name)
self._add_servernames(vhost)
def _has_matching_wildcard(self, vh_path, target_name):
"""Is target_name already included in a wildcard in the vhost?
:param str vh_path: Augeas path to the vhost
:param str target_name: name to compare with wildcards
:returns: True if there is a wildcard covering target_name in
the vhost in vhost_path, otherwise, False
:rtype: bool
"""
matches = self.parser.find_dir(
"ServerAlias", start=vh_path, exclude=False)
aliases = (self.aug.get(match) for match in matches)
return self.domain_in_names(aliases, target_name)
    def _add_name_vhost_if_necessary(self, vhost):
        """Add NameVirtualHost Directives if necessary for new vhost.

        NameVirtualHosts was a directive in Apache < 2.4
        https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost

        :param vhost: New virtual host that was recently created.
        :type vhost: :class:`~certbot_apache.obj.VirtualHost`
        """
        need_to_save = False

        # See if the exact address appears in any other vhost
        # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
        for addr in vhost.addrs:
            # In Apache 2.2, when a NameVirtualHost directive is not
            # set, "*" and "_default_" will conflict when sharing a port
            addrs = set((addr,))
            if addr.get_addr() in ("*", "_default_"):
                addrs.update(obj.Addr((a, addr.get_port(),))
                             for a in ("*", "_default_"))

            for test_vh in self.vhosts:
                # A vhost in a different file shares one of these addresses
                # and name-based hosting is not yet enabled for the address
                if (vhost.filep != test_vh.filep and
                        any(test_addr in addrs for
                            test_addr in test_vh.addrs) and
                        not self.is_name_vhost(addr)):
                    self.add_name_vhost(addr)
                    logger.info("Enabling NameVirtualHosts on %s", addr)
                    need_to_save = True
                    # One NameVirtualHost per address is sufficient
                    break

        if need_to_save:
            self.save()
def find_vhost_by_id(self, id_str):
"""
Searches through VirtualHosts and tries to match the id in a comment
:param str id_str: Id string for matching
:returns: The matched VirtualHost or None
:rtype: :class:`~certbot_apache.obj.VirtualHost` or None
:raises .errors.PluginError: If no VirtualHost is found
"""
for vh in self.vhosts:
if self._find_vhost_id(vh) == id_str:
return vh
msg = "No VirtualHost with ID {} was found.".format(id_str)
logger.warning(msg)
raise errors.PluginError(msg)
def _find_vhost_id(self, vhost):
"""Tries to find the unique ID from the VirtualHost comments. This is
used for keeping track of VirtualHost directive over time.
:param vhost: Virtual host to add the id
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: The unique ID or None
:rtype: str or None
"""
# Strip the {} off from the format string
search_comment = constants.MANAGED_COMMENT_ID.format("")
id_comment = self.parser.find_comments(search_comment, vhost.path)
if id_comment:
# Use the first value, multiple ones shouldn't exist
comment = self.parser.get_arg(id_comment[0])
return comment.split(" ")[-1]
return None
def add_vhost_id(self, vhost):
"""Adds an unique ID to the VirtualHost as a comment for mapping back
to it on later invocations, as the config file order might have changed.
If ID already exists, returns that instead.
:param vhost: Virtual host to add or find the id
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: The unique ID for vhost
:rtype: str or None
"""
vh_id = self._find_vhost_id(vhost)
if vh_id:
return vh_id
id_string = apache_util.unique_id()
comment = constants.MANAGED_COMMENT_ID.format(id_string)
self.parser.add_comment(vhost.path, comment)
return id_string
def _escape(self, fp):
fp = fp.replace(",", "\\,")
fp = fp.replace("[", "\\[")
fp = fp.replace("]", "\\]")
fp = fp.replace("|", "\\|")
fp = fp.replace("=", "\\=")
fp = fp.replace("(", "\\(")
fp = fp.replace(")", "\\)")
fp = fp.replace("!", "\\!")
return fp
######################################################################
# Enhancements
######################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect", "ensure-http-header", "staple-ocsp"]
    def enhance(self, domain, enhancement, options=None):
        """Enhance configuration.

        :param str domain: domain to enhance
        :param str enhancement: enhancement type defined in
            :const:`~certbot.constants.ENHANCEMENTS`
        :param options: options for the enhancement
            See :const:`~certbot.constants.ENHANCEMENTS`
            documentation for appropriate parameter.

        :raises .errors.PluginError: If Enhancement is not supported, or if
            there is any other problem with the enhancement.
        """
        # _enhance_func maps enhancement names to their handler methods
        try:
            func = self._enhance_func[enhancement]
        except KeyError:
            raise errors.PluginError(
                "Unsupported enhancement: {0}".format(enhancement))

        matched_vhosts = self.choose_vhosts(domain, create_if_no_ssl=False)
        # We should be handling only SSL vhosts for enhancements
        vhosts = [vhost for vhost in matched_vhosts if vhost.ssl]

        if not vhosts:
            msg_tmpl = ("Certbot was not able to find SSL VirtualHost for a "
                        "domain {0} for enabling enhancement \"{1}\". The requested "
                        "enhancement was not configured.")
            msg_enhancement = enhancement
            if options:
                msg_enhancement += ": " + options
            msg = msg_tmpl.format(domain, msg_enhancement)
            logger.warning(msg)
            raise errors.PluginError(msg)
        try:
            # Apply the enhancement to every matched SSL vhost
            for vhost in vhosts:
                func(vhost, options)
        except errors.PluginError:
            logger.warning("Failed %s for %s", enhancement, domain)
            raise
def _autohsts_increase(self, vhost, id_str, nextstep):
"""Increase the AutoHSTS max-age value
:param vhost: Virtual host object to modify
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:param str id_str: The unique ID string of VirtualHost
:param int nextstep: Next AutoHSTS max-age value index
"""
nextstep_value = constants.AUTOHSTS_STEPS[nextstep]
self._autohsts_write(vhost, nextstep_value)
self._autohsts[id_str] = {"laststep": nextstep, "timestamp": time.time()}
    def _autohsts_write(self, vhost, nextstep_value):
        """
        Write the new HSTS max-age value to the VirtualHost file

        :param vhost: VirtualHost whose HSTS header should be updated
        :type vhost: :class:`~certbot_apache.obj.VirtualHost`

        :param nextstep_value: new max-age value to write into the header

        :raises .errors.PluginError: If no existing HSTS Header directive
            can be found in the VirtualHost
        """
        hsts_dirpath = None
        header_path = self.parser.find_dir("Header", None, vhost.path)
        if header_path:
            # Match "strict-transport-security" as a standalone word in
            # the (lowercased) Header directive arguments
            pat = '(?:[ "]|^)(strict-transport-security)(?:[ "]|$)'
            for match in header_path:
                if re.search(pat, self.aug.get(match).lower()):
                    hsts_dirpath = match
        if not hsts_dirpath:
            err_msg = ("Certbot was unable to find the existing HSTS header "
                       "from the VirtualHost at path {0}.").format(vhost.filep)
            raise errors.PluginError(err_msg)

        # Prepare the HSTS header value
        hsts_maxage = "\"max-age={0}\"".format(nextstep_value)

        # Update the header
        # Our match statement was for string strict-transport-security, but
        # we need to update the value instead. The next index is for the value
        hsts_dirpath = hsts_dirpath.replace("arg[3]", "arg[4]")
        self.aug.set(hsts_dirpath, hsts_maxage)
        note_msg = ("Increasing HSTS max-age value to {0} for VirtualHost "
                    "in {1}\n".format(nextstep_value, vhost.filep))
        logger.debug(note_msg)
        self.save_notes += note_msg
        self.save(note_msg)
def _autohsts_fetch_state(self):
"""
Populates the AutoHSTS state from the pluginstorage
"""
try:
self._autohsts = self.storage.fetch("autohsts")
except KeyError:
self._autohsts = dict()
    def _autohsts_save_state(self):
        """
        Saves the state of AutoHSTS object to pluginstorage
        """
        # Update the stored value, then persist the storage to disk
        self.storage.put("autohsts", self._autohsts)
        self.storage.save()
def _autohsts_vhost_in_lineage(self, vhost, lineage):
"""
Searches AutoHSTS managed VirtualHosts that belong to the lineage.
Matches the private key path.
"""
return bool(
self.parser.find_dir("SSLCertificateKeyFile",
lineage.key_path, vhost.path))
    def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
        """Enables OCSP Stapling

        In OCSP, each client (e.g. browser) would have to query the
        OCSP Responder to validate that the site certificate was not revoked.

        Enabling OCSP Stapling, would allow the web-server to query the OCSP
        Responder, and staple its response to the offered certificate during
        TLS. i.e. clients would not have to query the OCSP responder.

        OCSP Stapling enablement on Apache implicitly depends on
        SSLCertificateChainFile being set by other code.

        .. note:: This function saves the configuration

        :param ssl_vhost: Destination of traffic, an ssl enabled vhost
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

        :param unused_options: Not currently used
        :type unused_options: Not Available

        :raises .errors.PluginError: If the Apache version is older than
            2.3.3 and OCSP directives cannot be set
        """
        min_apache_ver = (2, 3, 3)
        if self.get_version() < min_apache_ver:
            raise errors.PluginError(
                "Unable to set OCSP directives.\n"
                "Apache version is below 2.3.3.")

        # The stapling cache requires the shmcb socache provider module
        if "socache_shmcb_module" not in self.parser.modules:
            self.enable_mod("socache_shmcb")

        # Check if there's an existing SSLUseStapling directive on.
        use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
                "on", start=ssl_vhost.path)
        if not use_stapling_aug_path:
            self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")

        ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))

        # Check if there's an existing SSLStaplingCache directive.
        stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
                None, ssl_vhost_aug_path)

        # We'll simply delete the directive, so that we'll have a
        # consistent OCSP cache path.
        if stapling_cache_aug_path:
            self.aug.remove(
                    re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))

        self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
                "SSLStaplingCache",
                ["shmcb:/var/run/apache2/stapling_cache(128000)"])

        msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
                ssl_vhost.filep)
        self.save_notes += msg
        self.save()
        logger.info(msg)
    def _set_http_header(self, ssl_vhost, header_substring):
        """Enables header that is identified by header_substring on ssl_vhost.

        If the header identified by header_substring is not already set,
        a new Header directive is placed in ssl_vhost's configuration with
        arguments from: constants.HTTP_HEADER[header_substring]

        .. note:: This function saves the configuration

        :param ssl_vhost: Destination of traffic, an ssl enabled vhost
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

        :param str header_substring: string that uniquely identifies a header.
            e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.

        :raises .errors.PluginEnhancementAlreadyPresent: If the header is
            already present (raised by the verification helper).
        """
        if "headers_module" not in self.parser.modules:
            self.enable_mod("headers")

        # Check if selected header is already set
        self._verify_no_matching_http_header(ssl_vhost, header_substring)

        # Add directives to server
        self.parser.add_dir(ssl_vhost.path, "Header",
                            constants.HEADER_ARGS[header_substring])

        self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
                            (header_substring, ssl_vhost.filep))

        self.save()
        logger.info("Adding %s header to ssl vhost in %s", header_substring,
                    ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
"""Checks to see if an there is an existing Header directive that
contains the string header_substring.
:param ssl_vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
:type str
:returns: boolean
:rtype: (bool)
:raises errors.PluginEnhancementAlreadyPresent When header
header_substring exists
"""
header_path = self.parser.find_dir("Header", None,
start=ssl_vhost.path)
if header_path:
# "Existing Header directive for virtualhost"
pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
    def _enable_redirect(self, ssl_vhost, unused_options):
        """Redirect all equivalent HTTP traffic to ssl_vhost.

        .. todo:: This enhancement should be rewritten and will
           unfortunately require lots of debugging by hand.

        Adds Redirect directive to the port 80 equivalent of ssl_vhost
        First the function attempts to find the vhost with equivalent
        ip addresses that serves on non-ssl ports
        The function then adds the directive

        .. note:: This function saves the configuration

        :param ssl_vhost: Destination of traffic, an ssl enabled vhost
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

        :param unused_options: Not currently used
        :type unused_options: Not Available

        :raises .errors.PluginError: If no viable HTTP host can be created or
            used for the redirect.
        """
        if "rewrite_module" not in self.parser.modules:
            self.enable_mod("rewrite")
        general_vh = self._get_http_vhost(ssl_vhost)

        if general_vh is None:
            # Add virtual_server with redirect
            logger.debug("Did not find http version of ssl virtual host "
                         "attempting to create")
            redirect_addrs = self._get_proposed_addrs(ssl_vhost)
            for vhost in self.vhosts:
                if vhost.enabled and vhost.conflicts(redirect_addrs):
                    raise errors.PluginError(
                        "Unable to find corresponding HTTP vhost; "
                        "Unable to create one as intended addresses conflict; "
                        "Current configuration does not support automated "
                        "redirection")
            self._create_redirect_vhost(ssl_vhost)
        else:
            if general_vh in self._enhanced_vhosts["redirect"]:
                logger.debug("Already enabled redirect for this vhost")
                return

            # Check if Certbot redirection already exists
            self._verify_no_certbot_redirect(general_vh)

            # Note: if code flow gets here it means we didn't find the exact
            # certbot RewriteRule config for redirection. Finding
            # another RewriteRule is likely to be fine in most or all cases,
            # but redirect loops are possible in very obscure cases; see #1620
            # for reasoning.
            if self._is_rewrite_exists(general_vh):
                logger.warning("Added an HTTP->HTTPS rewrite in addition to "
                               "other RewriteRules; you may wish to check for "
                               "overall consistency.")

            # Add directives to server
            # Note: These are not immediately searchable in sites-enabled
            #     even with save() and load()
            if not self._is_rewrite_engine_on(general_vh):
                self.parser.add_dir(general_vh.path, "RewriteEngine", "on")

            names = ssl_vhost.get_names()
            for idx, name in enumerate(names):
                args = ["%{SERVER_NAME}", "={0}".format(name), "[OR]"]
                if idx == len(names) - 1:
                    # The last RewriteCond must not carry the [OR] flag
                    args.pop()
                self.parser.add_dir(general_vh.path, "RewriteCond", args)

            self._set_https_redirection_rewrite_rule(general_vh)

            self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
                                (general_vh.filep, ssl_vhost.filep))
            self.save()

            self._enhanced_vhosts["redirect"].add(general_vh)
            logger.info("Redirecting vhost in %s to ssl vhost in %s",
                        general_vh.filep, ssl_vhost.filep)
def _set_https_redirection_rewrite_rule(self, vhost):
if self.get_version() >= (2, 3, 9):
self.parser.add_dir(vhost.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS_WITH_END)
else:
self.parser.add_dir(vhost.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
    def _verify_no_certbot_redirect(self, vhost):
        """Checks to see if a redirect was already installed by certbot.

        Checks to see if virtualhost already contains a rewrite rule that is
        identical to Certbot's redirection rewrite rule.

        For graceful transition to new rewrite rules for HTTPS redirection we
        delete certbot's old rewrite rules and set the new one instead.

        :param vhost: vhost to check
        :type vhost: :class:`~certbot_apache.obj.VirtualHost`

        :raises errors.PluginEnhancementAlreadyPresent: When the exact
            certbot redirection WriteRule exists in virtual host.
        """
        rewrite_path = self.parser.find_dir(
            "RewriteRule", None, start=vhost.path)

        # There can be other RewriteRule directive lines in vhost config.
        # rewrite_args_dict keys are directive ids and the corresponding value
        # for each is a list of arguments to that directive.
        rewrite_args_dict = defaultdict(list)  # type: DefaultDict[str, List[str]]
        pat = r'(.*directive\[\d+\]).*'
        for match in rewrite_path:
            # Group the matched argument paths by their parent directive path
            m = re.match(pat, match)
            if m:
                dir_path = m.group(1)
                rewrite_args_dict[dir_path].append(match)

        if rewrite_args_dict:
            redirect_args = [constants.REWRITE_HTTPS_ARGS,
                             constants.REWRITE_HTTPS_ARGS_WITH_END]

            for dir_path, args_paths in rewrite_args_dict.items():
                # Resolve the argument values for this RewriteRule
                arg_vals = [self.aug.get(x) for x in args_paths]

                # Search for past redirection rule, delete it, set the new one
                if arg_vals in constants.OLD_REWRITE_HTTPS_ARGS:
                    self.aug.remove(dir_path)
                    self._set_https_redirection_rewrite_rule(vhost)
                    self.save()
                    raise errors.PluginEnhancementAlreadyPresent(
                        "Certbot has already enabled redirection")

                if arg_vals in redirect_args:
                    raise errors.PluginEnhancementAlreadyPresent(
                        "Certbot has already enabled redirection")
def _is_rewrite_exists(self, vhost):
"""Checks if there exists a RewriteRule directive in vhost
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: True if a RewriteRule directive exists.
:rtype: bool
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
return bool(rewrite_path)
def _is_rewrite_engine_on(self, vhost):
"""Checks if a RewriteEngine directive is on
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
"""
rewrite_engine_path_list = self.parser.find_dir("RewriteEngine", "on",
start=vhost.path)
if rewrite_engine_path_list:
for re_path in rewrite_engine_path_list:
# A RewriteEngine directive may also be included in per
# directory .htaccess files. We only care about the VirtualHost.
if 'virtualhost' in re_path.lower():
return self.parser.get_arg(re_path)
return False
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~certbot_apache.obj.VirtualHost`)
:rtype: tuple
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(self._escape(redirect_filepath)))
self.vhosts.append(new_vhost)
self._enhanced_vhosts["redirect"].add(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
    def _get_redirect_config_str(self, ssl_vhost):
        """Build the text of a port 80 VirtualHost that redirects to HTTPS.

        Mirrors the ssl_vhost's server names and addresses (with the port
        replaced) in the generated configuration.

        :param ssl_vhost: SSL vhost to build a redirect vhost for
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

        :returns: configuration text for the redirect VirtualHost
        :rtype: str
        """
        # get servernames and serveraliases
        serveralias = ""
        servername = ""

        if ssl_vhost.name is not None:
            servername = "ServerName " + ssl_vhost.name
        if ssl_vhost.aliases:
            serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)

        # Apache >= 2.3.9 supports the newer rewrite argument set
        rewrite_rule_args = []  # type: List[str]
        if self.get_version() >= (2, 3, 9):
            rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
        else:
            rewrite_rule_args = constants.REWRITE_HTTPS_ARGS

        return ("<VirtualHost %s>\n"
                "%s \n"
                "%s \n"
                "ServerSignature Off\n"
                "\n"
                "RewriteEngine On\n"
                "RewriteRule %s\n"
                "\n"
                "ErrorLog %s/redirect.error.log\n"
                "LogLevel warn\n"
                "</VirtualHost>\n"
                % (" ".join(str(addr) for
                            addr in self._get_proposed_addrs(ssl_vhost)),
                   servername, serveralias,
                   " ".join(rewrite_rule_args),
                   self.option("logs_root")))
    def _write_out_redirect(self, ssl_vhost, text):
        """Write the redirect VirtualHost configuration to a new file.

        :param ssl_vhost: SSL vhost the redirect targets; its name is used
            in the redirect file name when available
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

        :param str text: configuration text to write out

        :returns: path of the file that was written
        :rtype: str
        """
        # This is the default name
        redirect_filename = "le-redirect.conf"

        # See if a more appropriate name can be applied
        if ssl_vhost.name is not None:
            # make sure servername doesn't exceed filename length restriction
            if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
                redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name

        redirect_filepath = os.path.join(self.option("vhost_root"),
                                         redirect_filename)

        # Register the new file that will be created
        # Note: always register the creation before writing to ensure file will
        # be removed in case of unexpected program exit
        self.reverter.register_file_creation(False, redirect_filepath)

        # Write out file
        with open(redirect_filepath, "w") as redirect_file:
            redirect_file.write(text)

        # Add new include to configuration if it doesn't exist yet
        if not self.parser.parsed_in_current(redirect_filepath):
            self.parser.parse_file(redirect_filepath)

        logger.info("Created redirect file: %s", redirect_filename)

        return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
if ssl_vhost.ancestor:
return ssl_vhost.ancestor
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
# Third filter - if none with same names, return generic
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost, generic=True):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"):
"""Return all addrs of vhost with the port replaced with the specified.
:param obj.VirtualHost ssl_vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def enable_site(self, vhost):
"""Enables an available site, Apache reload required.
.. note:: Does not make sure that the site correctly works or that all
modules are enabled appropriately.
.. note:: The distribution specific override replaces functionality
of this method where available.
:param vhost: vhost to enable
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if vhost.enabled:
return
if not self.parser.parsed_in_original(vhost.filep):
# Add direct include to root conf
logger.info("Enabling site %s by adding Include to root configuration",
vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
self.parser.add_include(self.parser.loc["default"], vhost.filep)
vhost.enabled = True
return
def enable_mod(self, mod_name, temp=False): # pylint: disable=unused-argument
"""Enables module in Apache.
Both enables and reloads Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
.. note:: The distribution specific override replaces functionality
of this method where available.
:raises .errors.MisconfigurationError: We cannot enable modules in
generic fashion.
"""
mod_message = ("Apache needs to have module \"{0}\" active for the " +
"requested installation options. Unfortunately Certbot is unable " +
"to install or enable it for you. Please install the module, and " +
"run Certbot again.")
raise errors.MisconfigurationError(mod_message.format(mod_name))
    def restart(self):
        """Runs a config test and reloads the Apache server.

        :raises .errors.MisconfigurationError: If either the config test
            or reload fails.
        """
        # Validate the configuration first so a broken config never
        # takes down the running server on reload.
        self.config_test()
        self._reload()
    def _reload(self):
        """Reloads the Apache server.

        Tries the configured restart command first; when that fails and an
        alternative restart command is configured, it is attempted as a
        fallback before giving up.

        :raises .errors.MisconfigurationError: If reload fails
        """
        error = ""
        try:
            util.run_script(self.option("restart_cmd"))
        except errors.SubprocessError as err:
            logger.info("Unable to restart apache using %s",
                        self.option("restart_cmd"))
            alt_restart = self.option("restart_cmd_alt")
            if alt_restart:
                logger.debug("Trying alternative restart command: %s",
                             alt_restart)
                # There is an alternative restart command available
                # This usually is "restart" verb while original is "graceful"
                try:
                    util.run_script(self.option(
                        "restart_cmd_alt"))
                    # Fallback succeeded; nothing to raise
                    return
                except errors.SubprocessError as secerr:
                    error = str(secerr)
            else:
                error = str(err)
            raise errors.MisconfigurationError(error)
    def config_test(self):  # pylint: disable=no-self-use
        """Check the configuration of Apache for errors.

        :raises .errors.MisconfigurationError: If config_test fails
        """
        try:
            util.run_script(self.option("conftest_cmd"))
        except errors.SubprocessError as err:
            # Surface the configtest output as a configuration error
            raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = util.run_script(self.option("version_cmd"))
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" %
self.option("version_cmd"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
    def get_chall_pref(self, unused_domain):  # pylint: disable=no-self-use
        """Return list of challenge preferences.

        :param unused_domain: domain, unused in this generic implementation
        :returns: supported challenge types in preference order
        """
        return [challenges.HTTP01, challenges.TLSSNI01]
    def perform(self, achalls):
        """Perform the configuration related challenge.

        This function currently assumes all challenges will be fulfilled.
        If this turns out not to be the case in the future. Cleanup and
        outstanding challenges will have to be designed better.

        :param achalls: annotated challenges to fulfill
        :returns: challenge responses, in the same order as achalls
        :rtype: list
        """
        self._chall_out.update(achalls)
        responses = [None] * len(achalls)
        http_doer = http_01.ApacheHttp01(self)
        sni_doer = tls_sni_01.ApacheTlsSni01(self)

        for i, achall in enumerate(achalls):
            # Currently also have chall_doer hold associated index of the
            # challenge. This helps to put all of the responses back together
            # when they are all complete.
            if isinstance(achall.chall, challenges.HTTP01):
                http_doer.add_chall(achall, i)
            else:  # tls-sni-01
                sni_doer.add_chall(achall, i)

        http_response = http_doer.perform()
        sni_response = sni_doer.perform()

        if http_response or sni_response:
            # Must reload in order to activate the challenges.
            # Handled here because we may be able to load up other challenge
            # types
            self.restart()

            # TODO: Remove this dirty hack. We need to determine a reliable way
            # of identifying when the new configuration is being used.
            time.sleep(3)

            self._update_responses(responses, http_response, http_doer)
            self._update_responses(responses, sni_response, sni_doer)
        return responses
def _update_responses(self, responses, chall_response, chall_doer):
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(chall_response):
responses[chall_doer.indices[i]] = resp
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
self.parser.reset_modules()
    def install_ssl_options_conf(self, options_ssl, options_ssl_digest):
        """Copy Certbot's SSL options file into the system's config dir if required.

        :param str options_ssl: destination path for the SSL options file
        :param str options_ssl_digest: path for the digest of the file
        """
        # XXX if we ever try to enforce a local privilege boundary (eg, running
        # certbot for unprivileged users via setuid), this function will need
        # to be modified.
        return common.install_version_controlled_file(options_ssl, options_ssl_digest,
            self.option("MOD_SSL_CONF_SRC"), constants.ALL_SSL_OPTIONS_HASHES)
def enable_autohsts(self, _unused_lineage, domains):
"""
Enable the AutoHSTS enhancement for defined domains
:param _unused_lineage: Certificate lineage object, unused
:type _unused_lineage: certbot.storage.RenewableCert
:param domains: List of domains in certificate to enhance
:type domains: str
"""
self._autohsts_fetch_state()
_enhanced_vhosts = []
for d in domains:
matched_vhosts = self.choose_vhosts(d, create_if_no_ssl=False)
# We should be handling only SSL vhosts for AutoHSTS
vhosts = [vhost for vhost in matched_vhosts if vhost.ssl]
if not vhosts:
msg_tmpl = ("Certbot was not able to find SSL VirtualHost for a "
"domain {0} for enabling AutoHSTS enhancement.")
msg = msg_tmpl.format(d)
logger.warning(msg)
raise errors.PluginError(msg)
for vh in vhosts:
try:
self._enable_autohsts_domain(vh)
_enhanced_vhosts.append(vh)
except errors.PluginEnhancementAlreadyPresent:
if vh in _enhanced_vhosts:
continue
msg = ("VirtualHost for domain {0} in file {1} has a " +
"String-Transport-Security header present, exiting.")
raise errors.PluginEnhancementAlreadyPresent(
msg.format(d, vh.filep))
if _enhanced_vhosts:
note_msg = "Enabling AutoHSTS"
self.save(note_msg)
logger.info(note_msg)
self.restart()
# Save the current state to pluginstorage
self._autohsts_save_state()
    def _enable_autohsts_domain(self, ssl_vhost):
        """Do the initial AutoHSTS deployment to a vhost

        :param ssl_vhost: The VirtualHost object to deploy the AutoHSTS
        :type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost` or None

        :raises errors.PluginEnhancementAlreadyPresent: When already enhanced
        """
        # This raises the exception
        self._verify_no_matching_http_header(ssl_vhost,
                                             "Strict-Transport-Security")

        if "headers_module" not in self.parser.modules:
            self.enable_mod("headers")
        # Prepare the HSTS header value: reuse the constant Header args but
        # swap in the smallest AutoHSTS max-age step as the starting value
        hsts_header = constants.HEADER_ARGS["Strict-Transport-Security"][:-1]
        initial_maxage = constants.AUTOHSTS_STEPS[0]
        hsts_header.append("\"max-age={0}\"".format(initial_maxage))

        # Add ID to the VirtualHost for mapping back to it later
        uniq_id = self.add_vhost_id(ssl_vhost)
        self.save_notes += "Adding unique ID {0} to VirtualHost in {1}\n".format(
            uniq_id, ssl_vhost.filep)
        # Add the actual HSTS header
        self.parser.add_dir(ssl_vhost.path, "Header", hsts_header)
        note_msg = ("Adding gradually increasing HSTS header with initial value "
                    "of {0} to VirtualHost in {1}\n".format(
                        initial_maxage, ssl_vhost.filep))
        self.save_notes += note_msg

        # Save the current state to pluginstorage
        self._autohsts[uniq_id] = {"laststep": 0, "timestamp": time.time()}
    def update_autohsts(self, _unused_domain):
        """
        Increase the AutoHSTS values of VirtualHosts that the user has enabled
        this enhancement for.

        A vhost is bumped only when at least AUTOHSTS_FREQ seconds have
        passed since its previous increase; orphaned entries (vhost id no
        longer present in the configuration) are dropped from storage.

        :param _unused_domain: Not currently used
        :type _unused_domain: Not Available
        """
        self._autohsts_fetch_state()
        if not self._autohsts:
            # No AutoHSTS enabled for any domain
            return
        curtime = time.time()
        save_and_restart = False
        # Iterate over a copy: entries may be popped inside the loop
        for id_str, config in list(self._autohsts.items()):
            if config["timestamp"] + constants.AUTOHSTS_FREQ > curtime:
                # Skip if last increase was < AUTOHSTS_FREQ ago
                continue
            nextstep = config["laststep"] + 1
            if nextstep < len(constants.AUTOHSTS_STEPS):
                # If installer hasn't been prepared yet, do it now
                if not self._prepared:
                    self.prepare()
                # Have not reached the max value yet
                try:
                    vhost = self.find_vhost_by_id(id_str)
                except errors.PluginError:
                    msg = ("Could not find VirtualHost with ID {0}, disabling "
                           "AutoHSTS for this VirtualHost").format(id_str)
                    logger.warning(msg)
                    # Remove the orphaned AutoHSTS entry from pluginstorage
                    self._autohsts.pop(id_str)
                    continue
                self._autohsts_increase(vhost, id_str, nextstep)
                msg = ("Increasing HSTS max-age value for VirtualHost with id "
                       "{0}").format(id_str)
                self.save_notes += msg
                save_and_restart = True
        if save_and_restart:
            self.save("Increased HSTS max-age values")
            self.restart()
        self._autohsts_save_state()
    def deploy_autohsts(self, lineage):
        """
        Checks if autohsts vhost has reached maximum auto-increased value
        and changes the HSTS max-age to a high value.

        Only vhosts whose certificate belongs to *lineage* are made
        permanent; their AutoHSTS tracking entries are then removed.

        :param lineage: Certificate lineage object
        :type lineage: certbot.storage.RenewableCert
        """
        self._autohsts_fetch_state()
        if not self._autohsts:
            # No autohsts enabled for any vhost
            return

        vhosts = []
        affected_ids = []
        # Copy, as we are removing from the dict inside the loop
        for id_str, config in list(self._autohsts.items()):
            if config["laststep"]+1 >= len(constants.AUTOHSTS_STEPS):
                # max value reached, try to make permanent
                try:
                    vhost = self.find_vhost_by_id(id_str)
                except errors.PluginError:
                    msg = ("VirtualHost with id {} was not found, unable to "
                           "make HSTS max-age permanent.").format(id_str)
                    logger.warning(msg)
                    self._autohsts.pop(id_str)
                    continue
                if self._autohsts_vhost_in_lineage(vhost, lineage):
                    vhosts.append(vhost)
                    affected_ids.append(id_str)

        save_and_restart = False
        for vhost in vhosts:
            self._autohsts_write(vhost, constants.AUTOHSTS_PERMANENT)
            msg = ("Strict-Transport-Security max-age value for "
                   "VirtualHost in {0} was made permanent.").format(vhost.filep)
            logger.debug(msg)
            self.save_notes += msg+"\n"
            save_and_restart = True

        if save_and_restart:
            self.save("Made HSTS max-age permanent")
            self.restart()

        # Deployed entries no longer need gradual increases
        for id_str in affected_ids:
            self._autohsts.pop(id_str)

        # Update AutoHSTS storage (We potentially removed vhosts from managed)
        self._autohsts_save_state()
# Register the configurator as a provider of the AutoHSTS enhancement via
# abc virtual-subclass registration (no inheritance required).
AutoHSTSEnhancement.register(ApacheConfigurator)  # pylint: disable=no-member
| 40.040191 | 122 | 0.601316 |
import copy
import fnmatch
import logging
import os
import pkg_resources
import re
import six
import socket
import time
import zope.component
import zope.interface
from acme import challenges
from acme.magic_typing import Any, DefaultDict, Dict, List, Set, Union
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge
from certbot.plugins import common
from certbot.plugins.util import path_surgery
from certbot.plugins.enhancements import AutoHSTSEnhancement
from certbot_apache import apache_util
from certbot_apache import augeas_configurator
from certbot_apache import constants
from certbot_apache import display_ops
from certbot_apache import http_01
from certbot_apache import obj
from certbot_apache import parser
from certbot_apache import tls_sni_01
from collections import defaultdict
logger = logging.getLogger(__name__)
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
#
# Note: Apache will not recognize names in sites-enabled that have spaces.
# These are not added to the Apache configuration. It may be wise to warn
# the user if they are trying to use vhost filenames that contain spaces
# and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
@zope.interface.implementer(interfaces.IAuthenticator, interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
    """Apache configurator.

    Handles both authentication (challenges) and installation (certificate
    deployment and enhancements) for the Apache web server, manipulating
    the configuration through Augeas.
    """

    description = "Apache Web Server plugin"
    if os.environ.get("CERTBOT_DOCS") == "1":
        description += (
            " (Please note that the default values of the Apache plugin options"
            " change depending on the operating system Certbot is run on.)"
        )

    # Fallback option values; distro-specific subclasses override these, and
    # CLI flags override both (see _prepare_options()).
    OS_DEFAULTS = dict(
        server_root="/etc/apache2",
        vhost_root="/etc/apache2/sites-available",
        vhost_files="*",
        logs_root="/var/log/apache2",
        ctl="apache2ctl",
        version_cmd=['apache2ctl', '-v'],
        restart_cmd=['apache2ctl', 'graceful'],
        conftest_cmd=['apache2ctl', 'configtest'],
        enmod=None,
        dismod=None,
        le_vhost_ext="-le-ssl.conf",
        handle_modules=False,
        handle_sites=False,
        challenge_location="/etc/apache2",
        MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
            "certbot_apache", "options-ssl-apache.conf")
    )
    def option(self, key):
        """Get a value from the plugin option dictionary.

        :param str key: option name
        :returns: the configured value, or None when *key* is unknown
        """
        return self.options.get(key)
def _prepare_options(self):
opts = ["enmod", "dismod", "le_vhost_ext", "server_root", "vhost_root",
"logs_root", "challenge_location", "handle_modules", "handle_sites",
"ctl"]
for o in opts:
if self.conf(o.replace("_", "-")) is not None:
self.options[o] = self.conf(o.replace("_", "-"))
else:
self.options[o] = self.OS_DEFAULTS[o]
self.options["version_cmd"][0] = self.option("ctl")
self.options["restart_cmd"][0] = self.option("ctl")
self.options["conftest_cmd"][0] = self.option("ctl")
    @classmethod
    def add_parser_arguments(cls, add):
        """Register this plugin's CLI arguments.

        When building documentation (CERTBOT_DOCS=1), the base-class
        defaults are shown instead of distro-specific overrides so the docs
        are platform-independent.

        :param add: callable that registers one argument (name, **kwargs)
        """
        if os.environ.get("CERTBOT_DOCS") == "1":
            DEFAULTS = ApacheConfigurator.OS_DEFAULTS
        else:
            # cls.OS_DEFAULTS may be distribution specific (override classes)
            DEFAULTS = cls.OS_DEFAULTS
        add("enmod", default=DEFAULTS["enmod"],
            help="Path to the Apache 'a2enmod' binary")
        add("dismod", default=DEFAULTS["dismod"],
            help="Path to the Apache 'a2dismod' binary")
        add("le-vhost-ext", default=DEFAULTS["le_vhost_ext"],
            help="SSL vhost configuration extension")
        add("server-root", default=DEFAULTS["server_root"],
            help="Apache server root directory")
        add("vhost-root", default=None,
            help="Apache server VirtualHost configuration root")
        add("logs-root", default=DEFAULTS["logs_root"],
            help="Apache server logs directory")
        add("challenge-location",
            default=DEFAULTS["challenge_location"],
            help="Directory path for challenge configuration")
        add("handle-modules", default=DEFAULTS["handle_modules"],
            help="Let installer handle enabling required modules for you " +
                 "(Only Ubuntu/Debian currently)")
        add("handle-sites", default=DEFAULTS["handle_sites"],
            help="Let installer handle enabling sites for you " +
                 "(Only Ubuntu/Debian currently)")
        add("ctl", default=DEFAULTS["ctl"],
            help="Full path to Apache control script")
        # --apache-init-script is deprecated; kept so old CLIs don't break
        util.add_deprecated_argument(
            add, argument_name="init-script", nargs=1)
    def __init__(self, *args, **kwargs):
        """Initialize an Apache Configurator.

        :param tuple args: Arguments for the superclass
        :param dict kwargs: Keyword arguments for the superclass; the
            optional ``version`` (tuple of ints) skips runtime detection
        """
        version = kwargs.pop("version", None)
        super(ApacheConfigurator, self).__init__(*args, **kwargs)

        # Maps domain name -> chosen VirtualHost
        self.assoc = dict()
        # Outstanding challenges
        self._chall_out = set()
        # Maps a wildcard domain to the vhosts the user selected for it
        self._wildcard_vhosts = dict()
        # Maps enhancement name -> set of vhosts already enhanced
        self._enhanced_vhosts = defaultdict(set)  # type: DefaultDict[str, Set[obj.VirtualHost]]
        # Temporary state for AutoHSTS enhancement
        self._autohsts = {}  # type: Dict[str, Dict[str, Union[int, float]]]
        # These will be set in the prepare function
        self._prepared = False
        self.parser = None
        self.version = version
        self.vhosts = None
        self.options = copy.deepcopy(self.OS_DEFAULTS)
        # Dispatch table used by the generic enhance() machinery
        self._enhance_func = {"redirect": self._enable_redirect,
                              "ensure-http-header": self._set_http_header,
                              "staple-ocsp": self._enable_ocsp_stapling}
    @property
    def mod_ssl_conf(self):
        """Full absolute path to the SSL options configuration file."""
        return os.path.join(self.config.config_dir,
                            constants.MOD_SSL_CONF_DEST)
    @property
    def updated_mod_ssl_conf_digest(self):
        """Full absolute path to the digest of the updated SSL options file."""
        return os.path.join(self.config.config_dir, constants.UPDATED_MOD_SSL_CONF_DIGEST)
    def prepare(self):
        """Prepare the authenticator/installer.

        Initializes Augeas, validates the Apache binary and version, parses
        the configuration, installs the SSL options file, and locks the
        server root against concurrent Certbot instances.

        :raises errors.NoInstallationError: If Augeas or Apache is unusable
        :raises errors.NotSupportedError: On unsupported Apache or Augeas
            versions
        :raises errors.PluginError: If the server root cannot be locked
        """
        # Perform the actual Augeas initialization to be able to react
        try:
            self.init_augeas()
        except ImportError:
            raise errors.NoInstallationError("Problem in Augeas installation")
        self._prepare_options()

        # Verify Apache is installed
        self._verify_exe_availability(self.option("ctl"))

        # Make sure configuration is valid
        self.config_test()

        # Set Version
        if self.version is None:
            self.version = self.get_version()
            logger.debug('Apache version is %s',
                         '.'.join(str(i) for i in self.version))
        if self.version < (2, 2):
            raise errors.NotSupportedError(
                "Apache Version %s not supported.", str(self.version))

        # Case-insensitive Augeas regexp support is required by the parser
        if not self._check_aug_version():
            raise errors.NotSupportedError(
                "Apache plugin support requires libaugeas0 and augeas-lenses "
                "version 1.2.0 or higher, please make sure you have you have "
                "those installed.")

        self.parser = self.get_parser()

        # Check for errors in parsing files with Augeas
        self.check_parsing_errors("httpd.aug")

        # Get all of the available vhosts
        self.vhosts = self.get_virtual_hosts()

        self.install_ssl_options_conf(self.mod_ssl_conf,
                                      self.updated_mod_ssl_conf_digest)

        # Prevent two Apache plugins from modifying a config at once
        try:
            util.lock_dir_until_exit(self.option("server_root"))
        except (OSError, errors.LockError):
            logger.debug("Encountered error:", exc_info=True)
            raise errors.PluginError(
                "Unable to lock %s", self.option("server_root"))
        self._prepared = True
def _verify_exe_availability(self, exe):
if not util.exe_exists(exe):
if not path_surgery(exe):
raise errors.NoInstallationError(
'Cannot find Apache executable {0}'.format(exe))
    def _check_aug_version(self):
        """Verify that Augeas supports case-insensitive regexp matching
        (libaugeas/augeas-lenses >= 1.2.0).

        Works by writing a probe node and trying a regexp('... , 'i') match
        against it; the probe is always removed afterwards.

        :returns: truthy match list on success, False on failure
        """
        self.aug.set("/test/path/testing/arg", "aRgUMeNT")
        try:
            matches = self.aug.match(
                "/test//*[self::arg=~regexp('argument', 'i')]")
        except RuntimeError:
            self.aug.remove("/test/path")
            return False
        self.aug.remove("/test/path")
        return matches
    def get_parser(self):
        """Initialize the ApacheParser for this configuration.

        :rtype: :class:`certbot_apache.parser.ApacheParser`
        """
        # If user provided vhost_root value in command line, use it
        return parser.ApacheParser(
            self.aug, self.option("server_root"), self.conf("vhost-root"),
            self.version, configurator=self)
def _wildcard_domain(self, domain):
if isinstance(domain, six.text_type):
wildcard_marker = u"*."
else:
wildcard_marker = b"*."
return domain.startswith(wildcard_marker)
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None):
vhosts = self.choose_vhosts(domain)
for vhost in vhosts:
self._deploy_cert(vhost, cert_path, key_path, chain_path, fullchain_path)
def choose_vhosts(self, domain, create_if_no_ssl=True):
if self._wildcard_domain(domain):
if domain in self._wildcard_vhosts:
# Vhosts for a wildcard domain were already selected
return self._wildcard_vhosts[domain]
# Ask user which VHosts to support.
# Returned objects are guaranteed to be ssl vhosts
return self._choose_vhosts_wildcard(domain, create_if_no_ssl)
else:
return [self.choose_vhost(domain, create_if_no_ssl)]
def _vhosts_for_wildcard(self, domain):
# Collect all vhosts that match the name
matched = set()
for vhost in self.vhosts:
for name in vhost.get_names():
if self._in_wildcard_scope(name, domain):
matched.add(vhost)
return list(matched)
def _in_wildcard_scope(self, name, domain):
if len(name.split(".")) == len(domain.split(".")):
return fnmatch.fnmatch(name, domain)
    def _choose_vhosts_wildcard(self, domain, create_ssl=True):
        """Prompt the user to pick vhosts for a wildcard domain, preferring
        SSL vhosts and optionally creating SSL twins for HTTP-only picks.

        :param str domain: wildcard domain (e.g. ``*.example.com``)
        :param bool create_ssl: whether missing SSL vhosts may be created
        :returns: list of SSL VirtualHost objects
        :raises errors.PluginError: if the user selects nothing
        """
        # Get all vhosts that are covered by the wildcard domain
        vhosts = self._vhosts_for_wildcard(domain)

        # Go through the vhosts, making sure that we cover all the names
        # present, but preferring the SSL vhosts
        filtered_vhosts = dict()
        for vhost in vhosts:
            for name in vhost.get_names():
                if vhost.ssl:
                    # Always prefer SSL vhosts
                    filtered_vhosts[name] = vhost
                elif name not in filtered_vhosts and create_ssl:
                    # Add if not in list previously
                    filtered_vhosts[name] = vhost

        # Only unique VHost objects
        dialog_input = set([vhost for vhost in filtered_vhosts.values()])

        # Ask the user which of names to enable, expect list of names back
        dialog_output = display_ops.select_vhost_multiple(list(dialog_input))

        if not dialog_output:
            logger.error(
                "No vhost exists with servername or alias for domain %s. "
                "No vhost was selected. Please specify ServerName or ServerAlias "
                "in the Apache config.",
                domain)
            raise errors.PluginError("No vhost selected")

        # Make sure we create SSL vhosts for the ones that are HTTP only
        # if requested.
        return_vhosts = list()
        for vhost in dialog_output:
            if not vhost.ssl:
                return_vhosts.append(self.make_vhost_ssl(vhost))
            else:
                return_vhosts.append(vhost)

        # Cache the selection so later calls for this wildcard are stable
        self._wildcard_vhosts[domain] = return_vhosts
        return return_vhosts
    def _deploy_cert(self, vhost, cert_path, key_path, chain_path, fullchain_path):
        """Helper for deploy_cert: install the certificate into one vhost.

        Ensures HTTPS support, writes placeholder SSL directives, then
        points them at the real certificate/key (fullchain on Apache >=
        2.4.8, cert + chain on older versions), and enables the vhost.

        :raises errors.PluginError: when the required SSL directives or
            required chain/fullchain arguments are missing
        """
        # This is done first so that ssl module is enabled and cert_path,
        # cert_key... can all be parsed appropriately
        self.prepare_server_https("443")

        # Add directives and remove duplicates
        self._add_dummy_ssl_directives(vhost.path)
        self._clean_vhost(vhost)

        path = {"cert_path": self.parser.find_dir("SSLCertificateFile",
                                                  None, vhost.path),
                "cert_key": self.parser.find_dir("SSLCertificateKeyFile",
                                                 None, vhost.path)}

        # Only include if a certificate chain is specified
        if chain_path is not None:
            path["chain_path"] = self.parser.find_dir(
                "SSLCertificateChainFile", None, vhost.path)

        # Handle errors when certificate/key directives cannot be found
        if not path["cert_path"]:
            logger.warning(
                "Cannot find an SSLCertificateFile directive in %s. "
                "VirtualHost was not modified", vhost.path)
            raise errors.PluginError(
                "Unable to find an SSLCertificateFile directive")
        elif not path["cert_key"]:
            logger.warning(
                "Cannot find an SSLCertificateKeyFile directive for "
                "certificate in %s. VirtualHost was not modified", vhost.path)
            raise errors.PluginError(
                "Unable to find an SSLCertificateKeyFile directive for "
                "certificate")

        logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)

        if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
            # install SSLCertificateFile, SSLCertificateKeyFile,
            # and SSLCertificateChainFile directives
            set_cert_path = cert_path
            self.aug.set(path["cert_path"][-1], cert_path)
            self.aug.set(path["cert_key"][-1], key_path)
            if chain_path is not None:
                self.parser.add_dir(vhost.path,
                                    "SSLCertificateChainFile", chain_path)
            else:
                raise errors.PluginError("--chain-path is required for your "
                                         "version of Apache")
        else:
            # Apache >= 2.4.8 accepts the full chain in SSLCertificateFile
            if not fullchain_path:
                raise errors.PluginError("Please provide the --fullchain-path "
                                         "option pointing to your full chain file")
            set_cert_path = fullchain_path
            self.aug.set(path["cert_path"][-1], fullchain_path)
            self.aug.set(path["cert_key"][-1], key_path)

        # Enable the new vhost if needed
        if not vhost.enabled:
            self.enable_site(vhost)

        # Save notes about the transaction that took place
        self.save_notes += ("Changed vhost at %s with addresses of %s\n"
                            "\tSSLCertificateFile %s\n"
                            "\tSSLCertificateKeyFile %s\n" %
                            (vhost.filep,
                             ", ".join(str(addr) for addr in vhost.addrs),
                             set_cert_path, key_path))
        if chain_path is not None:
            self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
    def choose_vhost(self, target_name, create_if_no_ssl=True):
        """Choose a virtual host for *target_name*.

        Tries the cached association, then heuristic scoring, and finally
        interactive selection. The returned vhost is SSL unless
        *create_if_no_ssl* is False.

        :param str target_name: domain name
        :param bool create_if_no_ssl: if the found vhost is not SSL, create
            an SSL twin for it
        :returns: vhost associated with name
        :rtype: :class:`~certbot_apache.obj.VirtualHost`
        :raises errors.PluginError: if no vhost is available or selected
        """
        # Allows for domain names to be associated with a virtual host
        if target_name in self.assoc:
            return self.assoc[target_name]

        # Try to find a reasonable vhost
        vhost = self._find_best_vhost(target_name)
        if vhost is not None:
            if not create_if_no_ssl:
                return vhost
            if not vhost.ssl:
                vhost = self.make_vhost_ssl(vhost)

            self._add_servername_alias(target_name, vhost)
            self.assoc[target_name] = vhost
            return vhost

        # Negate create_if_no_ssl value to indicate if we want a SSL vhost
        # to get created if a non-ssl vhost is selected.
        return self._choose_vhost_from_list(target_name, temp=not create_if_no_ssl)
    def _choose_vhost_from_list(self, target_name, temp=False):
        """Interactively ask the user which vhost to use for *target_name*.

        :param str target_name: domain being configured
        :param bool temp: if True, return the chosen vhost as-is without
            creating an SSL twin or persisting the association
        :raises errors.PluginError: if nothing is selected, or the selection
            would conflict with existing HTTPS vhosts
        """
        # Select a vhost from a list
        vhost = display_ops.select_vhost(target_name, self.vhosts)
        if vhost is None:
            logger.error(
                "No vhost exists with servername or alias of %s. "
                "No vhost was selected. Please specify ServerName or ServerAlias "
                "in the Apache config.",
                target_name)
            raise errors.PluginError("No vhost selected")
        elif temp:
            return vhost
        elif not vhost.ssl:
            addrs = self._get_proposed_addrs(vhost, "443")
            # TODO: Conflicts is too conservative
            if not any(vhost.enabled and vhost.conflicts(addrs) for
                       vhost in self.vhosts):
                vhost = self.make_vhost_ssl(vhost)
            else:
                logger.error(
                    "The selected vhost would conflict with other HTTPS "
                    "VirtualHosts within Apache. Please select another "
                    "vhost or add ServerNames to your configuration.")
                raise errors.PluginError(
                    "VirtualHost not able to be selected.")

        self._add_servername_alias(target_name, vhost)
        self.assoc[target_name] = vhost
        return vhost
def domain_in_names(self, names, target_name):
# use lowercase strings because fnmatch can be case sensitive
target_name = target_name.lower()
for name in names:
name = name.lower()
# fnmatch treats "[seq]" specially and [ or ] characters aren't
if "[" not in name and fnmatch.fnmatch(target_name, name):
return True
return False
def find_best_http_vhost(self, target, filter_defaults, port="80"):
filtered_vhosts = []
for vhost in self.vhosts:
if any(a.is_wildcard() or a.get_port() == port for a in vhost.addrs) and not vhost.ssl:
filtered_vhosts.append(vhost)
return self._find_best_vhost(target, filtered_vhosts, filter_defaults)
    def _find_best_vhost(self, target_name, vhosts=None, filter_defaults=True):
        """Find the vhost that best matches *target_name* by scoring.

        Exact ServerName/Alias beats a wildcard match beats a raw address
        match, and SSL adds a flat +3 so an SSL vhost always outranks its
        non-SSL equivalent. If nothing scores, a lone non-macro (and,
        when *filter_defaults*, non-``_default_``) vhost is returned.

        :param str target_name: domain to serve
        :param vhosts: candidate pool (defaults to self.vhosts)
        :param bool filter_defaults: exclude all-``_default_`` vhosts from
            the single-candidate fallback
        :returns: the best VirtualHost or None
        """
        # Points 6 - Servername SSL
        # Points 5 - Wildcard SSL
        # Points 4 - Address name with SSL
        # Points 3 - Servername no SSL
        # Points 2 - Wildcard no SSL
        # Points 1 - Address name with no SSL
        best_candidate = None
        best_points = 0

        if vhosts is None:
            vhosts = self.vhosts

        for vhost in vhosts:
            if vhost.modmacro is True:
                continue
            names = vhost.get_names()
            if target_name in names:
                points = 3
            elif self.domain_in_names(names, target_name):
                points = 2
            elif any(addr.get_addr() == target_name for addr in vhost.addrs):
                points = 1
            else:
                # No points given if names can't be found.
                continue  # pragma: no cover

            if vhost.ssl:
                points += 3

            if points > best_points:
                best_points = points
                best_candidate = vhost

        # No winners here... is there only one reasonable vhost?
        if best_candidate is None:
            if filter_defaults:
                vhosts = self._non_default_vhosts(vhosts)
            # remove mod_macro hosts from reasonable vhosts
            reasonable_vhosts = [vh for vh
                                 in vhosts if vh.modmacro is False]
            if len(reasonable_vhosts) == 1:
                best_candidate = reasonable_vhosts[0]

        return best_candidate
def _non_default_vhosts(self, vhosts):
return [vh for vh in vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
    def get_all_names(self):
        """Returns all names found in the Apache Configuration.

        Collects ServerNames/ServerAliases and, for address-based vhosts,
        either the hostname-shaped address itself or the reverse-DNS name
        of the IP. mod_macro vhosts trigger a user notification since they
        are unsupported.

        :returns: filtered set of names
        :rtype: set
        """
        all_names = set()  # type: Set[str]

        vhost_macro = []

        for vhost in self.vhosts:
            all_names.update(vhost.get_names())
            if vhost.modmacro:
                vhost_macro.append(vhost.filep)

            for addr in vhost.addrs:
                if common.hostname_regex.match(addr.get_addr()):
                    all_names.add(addr.get_addr())
                else:
                    # Addresses that are not hostnames may still resolve
                    name = self.get_name_from_ip(addr)
                    if name:
                        all_names.add(name)

        if len(vhost_macro) > 0:
            zope.component.getUtility(interfaces.IDisplay).notification(
                "Apache mod_macro seems to be in use in file(s):\n{0}"
                "\n\nUnfortunately mod_macro is not yet supported".format(
                    "\n ".join(vhost_macro)), force_interactive=True)

        return util.get_filtered_names(all_names)
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _get_vhost_names(self, path):
servername_match = self.parser.find_dir(
"ServerName", None, start=path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=path, exclude=False)
serveraliases = []
for alias in serveralias_match:
serveralias = self.parser.get_arg(alias)
serveraliases.append(serveralias)
servername = None
if servername_match:
servername = self.parser.get_arg(servername_match[-1])
return (servername, serveraliases)
def _add_servernames(self, host):
servername, serveraliases = self._get_vhost_names(host.path)
for alias in serveraliases:
if not host.modmacro:
host.aliases.add(alias)
if not host.modmacro:
host.name = servername
    def _create_vhost(self, path):
        """Used by get_virtual_hosts to create vhost objects.

        :param str path: Augeas path to the VirtualHost node

        :returns: newly created vhost, or None when the addresses or the
            backing file cannot be determined
        :rtype: :class:`~certbot_apache.obj.VirtualHost` or None
        """
        addrs = set()
        try:
            args = self.aug.match(path + "/arg")
        except RuntimeError:
            logger.warning("Encountered a problem while parsing file: %s, skipping", path)
            return None
        for arg in args:
            addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
        is_ssl = False

        if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
            is_ssl = True

        # Vhosts bound to port 443 are treated as SSL even without an
        # explicit "SSLEngine on" inside the block
        for addr in addrs:
            if addr.get_port() == "443":
                is_ssl = True

        filename = apache_util.get_file_path(
            self.aug.get("/augeas/files%s/path" % apache_util.get_file_path(path)))
        if filename is None:
            return None

        # Vhosts defined inside mod_macro blocks are parsed under /macro/
        macro = False
        if "/macro/" in path.lower():
            macro = True

        vhost_enabled = self.parser.parsed_in_original(filename)

        vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
                                vhost_enabled, modmacro=macro)
        self._add_servernames(vhost)
        return vhost
    def get_virtual_hosts(self):
        """Returns list of virtual hosts found in the Apache configuration.

        Files reachable through multiple include paths (e.g. symlinks in
        sites-enabled) are deduplicated by resolving symlinks; vhosts read
        from the real path win over symlinked copies.

        :returns: List of :class:`~certbot_apache.obj.VirtualHost`
            objects found in configuration
        :rtype: list
        """
        # realpath -> first seen file path for that file
        file_paths = {}
        # realpath -> set of Augeas-internal vhost paths already recorded
        internal_paths = defaultdict(set)
        vhs = []
        # Copy the parser paths: the dict may change while parsing
        for vhost_path in list(self.parser.parser_paths):
            paths = self.aug.match(
                ("/files%s//*[label()=~regexp('%s')]" %
                 (vhost_path, parser.case_i("VirtualHost"))))
            # Augeas can return non-vhost nodes; keep only VirtualHost ones
            paths = [path for path in paths if
                     "virtualhost" in os.path.basename(path).lower()]
            for path in paths:
                new_vhost = self._create_vhost(path)
                if not new_vhost:
                    continue
                internal_path = apache_util.get_internal_aug_path(new_vhost.path)
                realpath = os.path.realpath(new_vhost.filep)
                if realpath not in file_paths:
                    # First time we see this underlying file
                    file_paths[realpath] = new_vhost.filep
                    internal_paths[realpath].add(internal_path)
                    vhs.append(new_vhost)
                elif (realpath == new_vhost.filep and
                      realpath != file_paths[realpath]):
                    # Prefer real vhost paths over symlinked ones, e.g.
                    # sites-enabled/vh.conf -> sites-available/vh.conf:
                    # drop the previously recorded symlinked copies
                    new_vhs = []
                    for v in vhs:
                        if v.filep == file_paths[realpath]:
                            internal_paths[realpath].remove(
                                apache_util.get_internal_aug_path(v.path))
                        else:
                            new_vhs.append(v)
                    vhs = new_vhs

                    file_paths[realpath] = realpath
                    internal_paths[realpath].add(internal_path)
                    vhs.append(new_vhost)
                elif internal_path not in internal_paths[realpath]:
                    # Same file, but a different vhost block inside it
                    internal_paths[realpath].add(internal_path)
                    vhs.append(new_vhost)
        return vhs
def is_name_vhost(self, target_addr):
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
    def prepare_server_https(self, port, temp=False):
        """Ensure Apache is ready to serve HTTPS on *port*: enable the
        required modules and add Listen directives if missing.

        :param str port: port to listen on
        :param bool temp: whether module enablement is temporary
        """
        self.prepare_https_modules(temp)
        self.ensure_listen(port, https=True)
def ensure_listen(self, port, https=False):
if https and port != "443":
port_service = "%s %s" % (port, "https")
else:
port_service = port
listens = [self.parser.get_arg(x).split()[0] for
x in self.parser.find_dir("Listen")]
if self._has_port_already(listens, port):
return
listen_dirs = set(listens)
if not listens:
listen_dirs.add(port_service)
for listen in listens:
if len(listen.split(":")) == 1:
if port not in listen_dirs and port_service not in listen_dirs:
listen_dirs.add(port_service)
else:
_, ip = listen[::-1].split(":", 1)
ip = ip[::-1]
if "%s:%s" % (ip, port_service) not in listen_dirs and (
"%s:%s" % (ip, port_service) not in listen_dirs):
listen_dirs.add("%s:%s" % (ip, port_service))
if https:
self._add_listens_https(listen_dirs, listens, port)
else:
self._add_listens_http(listen_dirs, listens, port)
    def _add_listens_http(self, listens, listens_orig, port):
        """Add the missing HTTP Listen directives to the configuration.

        :param set listens: complete set of Listen arguments that should be
            in place after this call
        :param list listens_orig: Listen arguments that already existed
        :param str port: port number being requested
        """
        new_listens = listens.difference(listens_orig)

        if port in new_listens:
            # A bare (all-interfaces) port entry covers everything
            self.parser.add_dir(parser.get_aug_path(self.parser.loc["listen"]),
                                "Listen", port)
            self.save_notes += "Added Listen %s directive to %s\n" % (
                port, self.parser.loc["listen"])
        else:
            for listen in new_listens:
                self.parser.add_dir(parser.get_aug_path(
                    self.parser.loc["listen"]), "Listen", listen.split(" "))
                self.save_notes += ("Added Listen %s directive to "
                                    "%s\n") % (listen,
                                               self.parser.loc["listen"])
    def _add_listens_https(self, listens, listens_orig, port):
        """Add the missing HTTPS Listen directives, wrapped in
        <IfModule mod_ssl.c> so the config stays valid without mod_ssl.

        :param set listens: complete set of Listen arguments that should be
            in place after this call
        :param list listens_orig: Listen arguments that already existed
        :param str port: port number being requested
        """
        # Non-standard HTTPS ports need an explicit protocol argument
        if port != "443":
            port_service = "%s %s" % (port, "https")
        else:
            port_service = port

        new_listens = listens.difference(listens_orig)

        if port in new_listens or port_service in new_listens:
            # A bare (all-interfaces) entry covers everything
            self.parser.add_dir_to_ifmodssl(
                parser.get_aug_path(self.parser.loc["listen"]),
                "Listen", port_service.split(" "))
            self.save_notes += "Added Listen %s directive to %s\n" % (
                port_service, self.parser.loc["listen"])
        else:
            for listen in new_listens:
                self.parser.add_dir_to_ifmodssl(
                    parser.get_aug_path(self.parser.loc["listen"]),
                    "Listen", listen.split(" "))
                self.save_notes += ("Added Listen %s directive to "
                                    "%s\n") % (listen,
                                               self.parser.loc["listen"])
def _has_port_already(self, listens, port):
if port in listens:
return True
for listen in listens:
if len(listen.split(":")) > 1:
if listen.split(":")[-1].split(" ")[0] == port:
return True
def prepare_https_modules(self, temp):
if self.option("handle_modules"):
if self.version >= (2, 4) and ("socache_shmcb_module" not in
self.parser.modules):
self.enable_mod("socache_shmcb", temp=temp)
if "ssl_module" not in self.parser.modules:
self.enable_mod("ssl", temp=temp)
def make_addrs_sni_ready(self, addrs):
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
    def make_vhost_ssl(self, nonssl_vhost):
        """Makes an ssl_vhost version of a nonssl_vhost.

        Duplicates the vhost into a new file with the configured Let's
        Encrypt extension, rewrites its addresses to port 443 and registers
        the new VirtualHost object.

        .. note:: This function saves the configuration

        :param nonssl_vhost: Valid VH that doesn't have SSLEngine on
        :type nonssl_vhost: :class:`~certbot_apache.obj.VirtualHost`

        :returns: SSL vhost
        :rtype: :class:`~certbot_apache.obj.VirtualHost`

        :raises errors.PluginError: if the new vhost cannot be located
            after writing, or vhost files cannot be written/read
        """
        avail_fp = nonssl_vhost.filep
        ssl_fp = self._get_ssl_vhost_path(avail_fp)

        # Snapshot existing vhost nodes in the target file so the freshly
        # written one can be identified afterwards
        orig_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                                      (self._escape(ssl_fp),
                                       parser.case_i("VirtualHost")))

        self._copy_create_ssl_vhost_skeleton(nonssl_vhost, ssl_fp)

        # Reload Augeas to pick up the new file, then diff the vhost nodes
        self.aug.load()
        new_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                                     (self._escape(ssl_fp),
                                      parser.case_i("VirtualHost")))

        vh_p = self._get_new_vh_path(orig_matches, new_matches)

        if not vh_p:
            # The file may not be on a parsed path yet; parse and retry
            self.parser.parse_file(ssl_fp)
            new_matches = self.aug.match(
                "/files%s//* [label()=~regexp('%s')]" %
                (self._escape(ssl_fp),
                 parser.case_i("VirtualHost")))
            vh_p = self._get_new_vh_path(orig_matches, new_matches)
            if not vh_p:
                raise errors.PluginError(
                    "Could not reverse map the HTTPS VirtualHost to the original")

        # Rewrite addresses of the copy to port 443
        self._update_ssl_vhosts_addrs(vh_p)

        # Log the vhost creation and persist the configuration
        logger.info("Created an SSL vhost at %s", ssl_fp)
        self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
        self.save()

        # Build the VirtualHost object for the new block and remember its
        # plain-HTTP ancestor
        ssl_vhost = self._create_vhost(vh_p)
        ssl_vhost.ancestor = nonssl_vhost

        self.vhosts.append(ssl_vhost)

        # For Apache < 2.4, the new addresses may also need NameVirtualHost
        self._add_name_vhost_if_necessary(ssl_vhost)

        return ssl_vhost
def _get_new_vh_path(self, orig_matches, new_matches):
orig_matches = [i.replace("[1]", "") for i in orig_matches]
for match in new_matches:
if match.replace("[1]", "") not in orig_matches:
return match
return None
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
if self.conf("vhost-root") and os.path.exists(self.conf("vhost-root")):
fp = os.path.join(os.path.realpath(self.option("vhost_root")),
os.path.basename(non_ssl_vh_fp))
else:
fp = os.path.realpath(non_ssl_vh_fp)
if fp.endswith(".conf"):
return fp[:-(len(".conf"))] + self.option("le_vhost_ext")
else:
return fp + self.option("le_vhost_ext")
def _sift_rewrite_rule(self, line):
if not line.lower().lstrip().startswith("rewriterule"):
return False
target = line.split()[2].strip()
if target[0] in ("'", '"') and target[0] == target[-1]:
target = target[1:-1]
# Sift line if it redirects the request to a HTTPS site
return target.startswith("https://")
    def _copy_create_ssl_vhost_skeleton(self, vhost, ssl_fp):
        """Copy the vhost block into a new file wrapped in
        <IfModule mod_ssl.c>, sifting dangerous rewrite rules.

        The creation (or append) is registered with the reverter first so a
        rollback removes it cleanly. The user is notified if any rewrite
        rules were disabled.

        :param vhost: original VirtualHost object
        :type vhost: :class:`~certbot_apache.obj.VirtualHost`
        :param str ssl_fp: full path where the new ssl vhost will reside

        :raises errors.PluginError: on file read/write failure
        """
        # First register the creation so that it is properly removed if
        # configuration is rolled back
        if os.path.exists(ssl_fp):
            notes = "Appended new VirtualHost directive to file %s" % ssl_fp
            files = set()
            files.add(ssl_fp)
            self.reverter.add_to_checkpoint(files, notes)
        else:
            self.reverter.register_file_creation(False, ssl_fp)
        sift = False

        try:
            orig_contents = self._get_vhost_block(vhost)
            ssl_vh_contents, sift = self._sift_rewrite_rules(orig_contents)

            with open(ssl_fp, "a") as new_file:
                new_file.write("<IfModule mod_ssl.c>\n")
                new_file.write("\n".join(ssl_vh_contents))
                # The content does not include the closing tag, so add it
                new_file.write("</VirtualHost>\n")
                new_file.write("</IfModule>\n")
            # Add new file to augeas paths if we're supposed to handle
            # activation (it's not included as default)
            if not self.parser.parsed_in_current(ssl_fp):
                self.parser.parse_file(ssl_fp)
        except IOError:
            logger.critical("Error writing/reading to file in make_vhost_ssl", exc_info=True)
            raise errors.PluginError("Unable to write/read in make_vhost_ssl")

        if sift:
            reporter = zope.component.getUtility(interfaces.IReporter)
            reporter.add_message(
                "Some rewrite rules copied from {0} were disabled in the "
                "vhost for your HTTPS site located at {1} because they have "
                "the potential to create redirection loops.".format(
                    vhost.filep, ssl_fp), reporter.MEDIUM_PRIORITY)
        # Force Augeas to reparse both files on the next load
        self.aug.set("/augeas/files%s/mtime" % (self._escape(ssl_fp)), "0")
        self.aug.set("/augeas/files%s/mtime" % (self._escape(vhost.filep)), "0")
def _sift_rewrite_rules(self, contents):
result = []
sift = False
contents = iter(contents)
comment = ("# Some rewrite rules in this file were "
"disabled on your HTTPS site,\n"
"# because they have the potential to create "
"redirection loops.\n")
for line in contents:
A = line.lower().lstrip().startswith("rewritecond")
B = line.lower().lstrip().startswith("rewriterule")
if not (A or B):
result.append(line)
continue
# A RewriteRule that doesn't need filtering
if B and not self._sift_rewrite_rule(line):
result.append(line)
continue
# A RewriteRule that does need filtering
if B and self._sift_rewrite_rule(line):
if not sift:
result.append(comment)
sift = True
result.append("
continue
# We save RewriteCond(s) and their corresponding
# RewriteRule in 'chunk'.
# We then decide whether we comment out the entire
# chunk based on its RewriteRule.
chunk = []
if A:
chunk.append(line)
line = next(contents)
# RewriteCond(s) must be followed by one RewriteRule
while not line.lower().lstrip().startswith("rewriterule"):
chunk.append(line)
line = next(contents)
# Now, current line must start with a RewriteRule
chunk.append(line)
if self._sift_rewrite_rule(line):
if not sift:
result.append(comment)
sift = True
result.append('\n'.join(
['# ' + l for l in chunk]))
continue
else:
result.append('\n'.join(chunk))
continue
return result, sift
    def _get_vhost_block(self, vhost):
        """Helper method to get VirtualHost contents from the original file.

        Uses the Augeas span of the vhost node to slice the exact byte
        range out of the file; the closing </VirtualHost> tag is stripped.

        :param vhost: Vhost to get contents of
        :type vhost: :class:`~certbot_apache.obj.VirtualHost`

        :returns: list of the VirtualHost's lines, without the closing tag
        :rtype: list
        :raises errors.PluginError: if Augeas cannot produce a span
        """
        try:
            span_val = self.aug.span(vhost.path)
        except ValueError:
            logger.critical("Error while reading the VirtualHost %s from "
                            "file %s", vhost.name, vhost.filep, exc_info=True)
            raise errors.PluginError("Unable to read VirtualHost from file")
        # span tuple: (filename, ..., span_start at [5], span_end at [6])
        span_filep = span_val[0]
        span_start = span_val[5]
        span_end = span_val[6]
        with open(span_filep, 'r') as fh:
            fh.seek(span_start)
            vh_contents = fh.read(span_end-span_start).split("\n")
        self._remove_closing_vhost_tag(vh_contents)
        return vh_contents
def _remove_closing_vhost_tag(self, vh_contents):
for offset, line in enumerate(reversed(vh_contents)):
if line:
line_index = line.lower().find("</virtualhost>")
if line_index != -1:
content_index = len(vh_contents) - offset - 1
vh_contents[content_index] = line[:line_index]
break
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _clean_vhost(self, vhost):
    """Remove duplicated or obsolete SSL directives from *vhost*."""
    # remove duplicated or conflicting ssl directives
    self._deduplicate_directives(vhost.path,
                                 ["SSLCertificateFile",
                                  "SSLCertificateKeyFile"])
    # remove all problematic directives
    self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
    """Keep exactly one occurrence of each named directive under *vh_path*.

    Repeatedly removes a matching directive node while more than one
    match remains, so a single copy survives.

    :param vh_path: Augeas path of the VirtualHost
    :param directives: list of directive names to deduplicate
    """
    for directive in directives:
        while len(self.parser.find_dir(directive, None,
                                       vh_path, False)) > 1:
            directive_path = self.parser.find_dir(directive, None,
                                                  vh_path, False)
            # find_dir returns argument paths; strip the trailing path
            # component to remove the whole directive node.
            self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
    """Remove every occurrence of each named directive under *vh_path*.

    Unlike :meth:`_deduplicate_directives`, this loops while *any*
    match remains, so no copies survive.

    :param vh_path: Augeas path of the VirtualHost
    :param directives: list of directive names to remove entirely
    """
    for directive in directives:
        while len(self.parser.find_dir(directive, None,
                                       vh_path, False)) > 0:
            directive_path = self.parser.find_dir(directive, None,
                                                  vh_path, False)
            # Strip the trailing path component to remove the whole node.
            self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
# Only include the TLS configuration if not already included
existing_inc = self.parser.find_dir("Include", self.mod_ssl_conf, vh_path)
if not existing_inc:
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_servername_alias(self, target_name, vhost):
vh_path = vhost.path
sname, saliases = self._get_vhost_names(vh_path)
if target_name == sname or target_name in saliases:
return
if self._has_matching_wildcard(vh_path, target_name):
return
if not self.parser.find_dir("ServerName", None,
start=vh_path, exclude=False):
self.parser.add_dir(vh_path, "ServerName", target_name)
else:
self.parser.add_dir(vh_path, "ServerAlias", target_name)
self._add_servernames(vhost)
def _has_matching_wildcard(self, vh_path, target_name):
matches = self.parser.find_dir(
"ServerAlias", start=vh_path, exclude=False)
aliases = (self.aug.get(match) for match in matches)
return self.domain_in_names(aliases, target_name)
def _add_name_vhost_if_necessary(self, vhost):
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
# In Apache 2.2, when a NameVirtualHost directive is not
# set, "*" and "_default_" will conflict when sharing a port
addrs = set((addr,))
if addr.get_addr() in ("*", "_default_"):
addrs.update(obj.Addr((a, addr.get_port(),))
for a in ("*", "_default_"))
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr in addrs for
test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
break
if need_to_save:
self.save()
def find_vhost_by_id(self, id_str):
    """Return the VirtualHost whose managed-comment ID equals *id_str*.

    :param id_str: unique ID string previously written by add_vhost_id
    :returns: the matching VirtualHost object
    :raises errors.PluginError: if no VirtualHost carries that ID
    """
    for vh in self.vhosts:
        if self._find_vhost_id(vh) == id_str:
            return vh
    msg = "No VirtualHost with ID {} was found.".format(id_str)
    logger.warning(msg)
    raise errors.PluginError(msg)
def _find_vhost_id(self, vhost):
    """Return the managed-comment ID of *vhost*, or None if it has none."""
    # Strip the {} off from the format string
    search_comment = constants.MANAGED_COMMENT_ID.format("")
    id_comment = self.parser.find_comments(search_comment, vhost.path)
    if id_comment:
        # Use the first value, multiple ones shouldn't exist
        comment = self.parser.get_arg(id_comment[0])
        # The ID is the last whitespace-separated token of the comment.
        return comment.split(" ")[-1]
    return None
def add_vhost_id(self, vhost):
    """Ensure *vhost* carries a unique managed ID comment; return the ID.

    If the vhost already has an ID comment, that ID is reused instead of
    writing a duplicate.

    :param vhost: VirtualHost object to tag
    :returns: the (existing or newly created) ID string
    """
    vh_id = self._find_vhost_id(vhost)
    if vh_id:
        return vh_id
    id_string = apache_util.unique_id()
    comment = constants.MANAGED_COMMENT_ID.format(id_string)
    self.parser.add_comment(vhost.path, comment)
    return id_string
def _escape(self, fp):
fp = fp.replace(",", "\\,")
fp = fp.replace("[", "\\[")
fp = fp.replace("]", "\\]")
fp = fp.replace("|", "\\|")
fp = fp.replace("=", "\\=")
fp = fp.replace("(", "\\(")
fp = fp.replace(")", "\\)")
fp = fp.replace("!", "\\!")
return fp
######################################################################
# Enhancements
######################################################################
def supported_enhancements(self):  # pylint: disable=no-self-use
    """Return the names of enhancements this installer can apply."""
    return ["redirect", "ensure-http-header", "staple-ocsp"]
def enhance(self, domain, enhancement, options=None):
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
matched_vhosts = self.choose_vhosts(domain, create_if_no_ssl=False)
# We should be handling only SSL vhosts for enhancements
vhosts = [vhost for vhost in matched_vhosts if vhost.ssl]
if not vhosts:
msg_tmpl = ("Certbot was not able to find SSL VirtualHost for a "
"domain {0} for enabling enhancement \"{1}\". The requested "
"enhancement was not configured.")
msg_enhancement = enhancement
if options:
msg_enhancement += ": " + options
msg = msg_tmpl.format(domain, msg_enhancement)
logger.warning(msg)
raise errors.PluginError(msg)
try:
for vhost in vhosts:
func(vhost, options)
except errors.PluginError:
logger.warning("Failed %s for %s", enhancement, domain)
raise
def _autohsts_increase(self, vhost, id_str, nextstep):
    """Advance the AutoHSTS max-age of *vhost* to step *nextstep*.

    Writes the new max-age value into the configuration and records the
    step index and current timestamp in the in-memory AutoHSTS state.

    :param vhost: VirtualHost to update
    :param id_str: the vhost's managed ID (key into self._autohsts)
    :param nextstep: index into constants.AUTOHSTS_STEPS
    """
    nextstep_value = constants.AUTOHSTS_STEPS[nextstep]
    self._autohsts_write(vhost, nextstep_value)
    self._autohsts[id_str] = {"laststep": nextstep, "timestamp": time.time()}
def _autohsts_write(self, vhost, nextstep_value):
hsts_dirpath = None
header_path = self.parser.find_dir("Header", None, vhost.path)
if header_path:
pat = '(?:[ "]|^)(strict-transport-security)(?:[ "]|$)'
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
hsts_dirpath = match
if not hsts_dirpath:
err_msg = ("Certbot was unable to find the existing HSTS header "
"from the VirtualHost at path {0}.").format(vhost.filep)
raise errors.PluginError(err_msg)
# Prepare the HSTS header value
hsts_maxage = "\"max-age={0}\"".format(nextstep_value)
# Update the header
# Our match statement was for string strict-transport-security, but
# we need to update the value instead. The next index is for the value
hsts_dirpath = hsts_dirpath.replace("arg[3]", "arg[4]")
self.aug.set(hsts_dirpath, hsts_maxage)
note_msg = ("Increasing HSTS max-age value to {0} for VirtualHost "
"in {1}\n".format(nextstep_value, vhost.filep))
logger.debug(note_msg)
self.save_notes += note_msg
self.save(note_msg)
def _autohsts_fetch_state(self):
    """Load AutoHSTS state from plugin storage into ``self._autohsts``."""
    try:
        self._autohsts = self.storage.fetch("autohsts")
    except KeyError:
        # Nothing persisted yet; start with an empty state mapping.
        self._autohsts = dict()
def _autohsts_save_state(self):
    """Persist the in-memory AutoHSTS state to plugin storage."""
    self.storage.put("autohsts", self._autohsts)
    self.storage.save()
def _autohsts_vhost_in_lineage(self, vhost, lineage):
    """Return True if *vhost* references the private key of *lineage*.

    Checks for an SSLCertificateKeyFile directive pointing at the
    lineage's key path inside the vhost.
    """
    return bool(
        self.parser.find_dir("SSLCertificateKeyFile",
                             lineage.key_path, vhost.path))
def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_module" not in self.parser.modules:
self.enable_mod("socache_shmcb")
# Check if there's an existing SSLUseStapling directive on.
use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
"on", start=ssl_vhost.path)
if not use_stapling_aug_path:
self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")
ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))
# Check if there's an existing SSLStaplingCache directive.
stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
None, ssl_vhost_aug_path)
# We'll simply delete the directive, so that we'll have a
# consistent OCSP cache path.
if stapling_cache_aug_path:
self.aug.remove(
re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))
self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
"SSLStaplingCache",
["shmcb:/var/run/apache2/stapling_cache(128000)"])
msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
ssl_vhost.filep)
self.save_notes += msg
self.save()
logger.info(msg)
def _set_http_header(self, ssl_vhost, header_substring):
if "headers_module" not in self.parser.modules:
self.enable_mod("headers")
# Check if selected header is already set
self._verify_no_matching_http_header(ssl_vhost, header_substring)
# Add directives to server
self.parser.add_dir(ssl_vhost.path, "Header",
constants.HEADER_ARGS[header_substring])
self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
(header_substring, ssl_vhost.filep))
self.save()
logger.info("Adding %s header to ssl vhost in %s", header_substring,
ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
header_path = self.parser.find_dir("Header", None,
start=ssl_vhost.path)
if header_path:
# "Existing Header directive for virtualhost"
pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
if general_vh in self._enhanced_vhosts["redirect"]:
logger.debug("Already enabled redirect for this vhost")
return
# Check if Certbot redirection already exists
self._verify_no_certbot_redirect(general_vh)
# Note: if code flow gets here it means we didn't find the exact
# certbot RewriteRule config for redirection. Finding
# another RewriteRule is likely to be fine in most or all cases,
# but redirect loops are possible in very obscure cases; see #1620
# for reasoning.
if self._is_rewrite_exists(general_vh):
logger.warning("Added an HTTP->HTTPS rewrite in addition to "
"other RewriteRules; you may wish to check for "
"overall consistency.")
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
if not self._is_rewrite_engine_on(general_vh):
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
names = ssl_vhost.get_names()
for idx, name in enumerate(names):
args = ["%{SERVER_NAME}", "={0}".format(name), "[OR]"]
if idx == len(names) - 1:
args.pop()
self.parser.add_dir(general_vh.path, "RewriteCond", args)
self._set_https_redirection_rewrite_rule(general_vh)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
self._enhanced_vhosts["redirect"].add(general_vh)
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _set_https_redirection_rewrite_rule(self, vhost):
    """Add the HTTP->HTTPS RewriteRule matching the Apache version.

    Apache >= 2.3.9 gets the variant using the [END] rewrite flag.
    """
    if self.get_version() >= (2, 3, 9):
        self.parser.add_dir(vhost.path, "RewriteRule",
                            constants.REWRITE_HTTPS_ARGS_WITH_END)
    else:
        self.parser.add_dir(vhost.path, "RewriteRule",
                            constants.REWRITE_HTTPS_ARGS)
def _verify_no_certbot_redirect(self, vhost):
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
# There can be other RewriteRule directive lines in vhost config.
# rewrite_args_dict keys are directive ids and the corresponding value
# for each is a list of arguments to that directive.
rewrite_args_dict = defaultdict(list) # type: DefaultDict[str, List[str]]
pat = r'(.*directive\[\d+\]).*'
for match in rewrite_path:
m = re.match(pat, match)
if m:
dir_path = m.group(1)
rewrite_args_dict[dir_path].append(match)
if rewrite_args_dict:
redirect_args = [constants.REWRITE_HTTPS_ARGS,
constants.REWRITE_HTTPS_ARGS_WITH_END]
for dir_path, args_paths in rewrite_args_dict.items():
arg_vals = [self.aug.get(x) for x in args_paths]
# Search for past redirection rule, delete it, set the new one
if arg_vals in constants.OLD_REWRITE_HTTPS_ARGS:
self.aug.remove(dir_path)
self._set_https_redirection_rewrite_rule(vhost)
self.save()
raise errors.PluginEnhancementAlreadyPresent(
"Certbot has already enabled redirection")
if arg_vals in redirect_args:
raise errors.PluginEnhancementAlreadyPresent(
"Certbot has already enabled redirection")
def _is_rewrite_exists(self, vhost):
    """Return True if *vhost* contains any RewriteRule directive."""
    rewrite_path = self.parser.find_dir(
        "RewriteRule", None, start=vhost.path)
    return bool(rewrite_path)
def _is_rewrite_engine_on(self, vhost):
    """Check whether RewriteEngine is enabled inside the VirtualHost.

    :returns: the directive's argument if found within the VirtualHost
        itself, otherwise False
    """
    rewrite_engine_path_list = self.parser.find_dir("RewriteEngine", "on",
                                                    start=vhost.path)
    if rewrite_engine_path_list:
        for re_path in rewrite_engine_path_list:
            # A RewriteEngine directive may also be included in per
            # directory .htaccess files. We only care about the VirtualHost.
            if 'virtualhost' in re_path.lower():
                return self.parser.get_arg(re_path)
    return False
def _create_redirect_vhost(self, ssl_vhost):
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(self._escape(redirect_filepath)))
self.vhosts.append(new_vhost)
self._enhanced_vhosts["redirect"].add(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
rewrite_rule_args = [] # type: List[str]
if self.get_version() >= (2, 3, 9):
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
else:
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog %s/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for
addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(rewrite_rule_args),
self.option("logs_root")))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(self.option("vhost_root"),
redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
# Add new include to configuration if it doesn't exist yet
if not self.parser.parsed_in_current(redirect_filepath):
self.parser.parse_file(redirect_filepath)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
# First candidate vhosts filter
if ssl_vhost.ancestor:
return ssl_vhost.ancestor
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
# Third filter - if none with same names, return generic
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost, generic=True):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"):
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def enable_site(self, vhost):
if vhost.enabled:
return
if not self.parser.parsed_in_original(vhost.filep):
# Add direct include to root conf
logger.info("Enabling site %s by adding Include to root configuration",
vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
self.parser.add_include(self.parser.loc["default"], vhost.filep)
vhost.enabled = True
return
def enable_mod(self, mod_name, temp=False): # pylint: disable=unused-argument
mod_message = ("Apache needs to have module \"{0}\" active for the " +
"requested installation options. Unfortunately Certbot is unable " +
"to install or enable it for you. Please install the module, and " +
"run Certbot again.")
raise errors.MisconfigurationError(mod_message.format(mod_name))
def restart(self):
    """Validate the configuration, then reload Apache."""
    self.config_test()
    self._reload()
def _reload(self):
error = ""
try:
util.run_script(self.option("restart_cmd"))
except errors.SubprocessError as err:
logger.info("Unable to restart apache using %s",
self.option("restart_cmd"))
alt_restart = self.option("restart_cmd_alt")
if alt_restart:
logger.debug("Trying alternative restart command: %s",
alt_restart)
# There is an alternative restart command available
# This usually is "restart" verb while original is "graceful"
try:
util.run_script(self.option(
"restart_cmd_alt"))
return
except errors.SubprocessError as secerr:
error = str(secerr)
else:
error = str(err)
raise errors.MisconfigurationError(error)
def config_test(self):  # pylint: disable=no-self-use
    """Run the configured Apache configtest command.

    :raises errors.MisconfigurationError: if the configuration test fails
    """
    try:
        util.run_script(self.option("conftest_cmd"))
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def get_version(self):
    """Return the Apache version as a tuple of ints, e.g. ``(2, 4, 41)``.

    Parses the output of the configured version command for a single
    "Apache/x.y.z" occurrence.

    :raises errors.PluginError: if the command fails or the output does
        not contain exactly one version match
    """
    try:
        stdout, _ = util.run_script(self.option("version_cmd"))
    except errors.SubprocessError:
        raise errors.PluginError(
            "Unable to run %s -v" %
            self.option("version_cmd"))
    regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
    matches = regex.findall(stdout)
    # Exactly one match expected; more or fewer means unparseable output.
    if len(matches) != 1:
        raise errors.PluginError("Unable to find Apache version")
    return tuple([int(i) for i in matches[0].split(".")])
def more_info(self):
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain):  # pylint: disable=no-self-use
    """Return supported challenge types, most preferred first."""
    return [challenges.HTTP01, challenges.TLSSNI01]
def perform(self, achalls):
self._chall_out.update(achalls)
responses = [None] * len(achalls)
http_doer = http_01.ApacheHttp01(self)
sni_doer = tls_sni_01.ApacheTlsSni01(self)
for i, achall in enumerate(achalls):
# Currently also have chall_doer hold associated index of the
# challenge. This helps to put all of the responses back together
# when they are all complete.
if isinstance(achall.chall, challenges.HTTP01):
http_doer.add_chall(achall, i)
else: # tls-sni-01
sni_doer.add_chall(achall, i)
http_response = http_doer.perform()
sni_response = sni_doer.perform()
if http_response or sni_response:
# Must reload in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# TODO: Remove this dirty hack. We need to determine a reliable way
# of identifying when the new configuration is being used.
time.sleep(3)
self._update_responses(responses, http_response, http_doer)
self._update_responses(responses, sni_response, sni_doer)
return responses
def _update_responses(self, responses, chall_response, chall_doer):
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(chall_response):
responses[chall_doer.indices[i]] = resp
def cleanup(self, achalls):
    """Mark challenges as finished; revert config once all are done.

    :param achalls: annotated challenges to remove from the outstanding set
    """
    self._chall_out.difference_update(achalls)
    # If all of the challenges have been finished, clean up everything
    if not self._chall_out:
        self.revert_challenge_config()
        self.restart()
        self.parser.reset_modules()
def install_ssl_options_conf(self, options_ssl, options_ssl_digest):
# XXX if we ever try to enforce a local privilege boundary (eg, running
# certbot for unprivileged users via setuid), this function will need
# to be modified.
return common.install_version_controlled_file(options_ssl, options_ssl_digest,
self.option("MOD_SSL_CONF_SRC"), constants.ALL_SSL_OPTIONS_HASHES)
def enable_autohsts(self, _unused_lineage, domains):
    """Enable the AutoHSTS enhancement for the given domains.

    Adds a managed Strict-Transport-Security header with a small initial
    max-age to every matching SSL VirtualHost; the value is increased in
    steps later by update_autohsts().

    :param _unused_lineage: certificate lineage (unused here)
    :param domains: iterable of domain names to enable AutoHSTS for

    :raises errors.PluginError: if no SSL VirtualHost matches a domain
    :raises errors.PluginEnhancementAlreadyPresent: if a matched
        VirtualHost already has an unmanaged Strict-Transport-Security
        header
    """
    self._autohsts_fetch_state()
    _enhanced_vhosts = []
    for d in domains:
        matched_vhosts = self.choose_vhosts(d, create_if_no_ssl=False)
        # We should be handling only SSL vhosts for AutoHSTS
        vhosts = [vhost for vhost in matched_vhosts if vhost.ssl]
        if not vhosts:
            msg_tmpl = ("Certbot was not able to find SSL VirtualHost for a "
                        "domain {0} for enabling AutoHSTS enhancement.")
            msg = msg_tmpl.format(d)
            logger.warning(msg)
            raise errors.PluginError(msg)
        for vh in vhosts:
            try:
                self._enable_autohsts_domain(vh)
                _enhanced_vhosts.append(vh)
            except errors.PluginEnhancementAlreadyPresent:
                # A vhost can match several domains; one we already
                # enhanced during this run is expected, skip it.
                if vh in _enhanced_vhosts:
                    continue
                # Bug fix: the header is named "Strict-Transport-Security"
                # (see _enable_autohsts_domain), not
                # "String-Transport-Security" as the old message claimed.
                msg = ("VirtualHost for domain {0} in file {1} has a " +
                       "Strict-Transport-Security header present, exiting.")
                raise errors.PluginEnhancementAlreadyPresent(
                    msg.format(d, vh.filep))
    if _enhanced_vhosts:
        note_msg = "Enabling AutoHSTS"
        self.save(note_msg)
        logger.info(note_msg)
        self.restart()
    # Save the current state to pluginstorage
    self._autohsts_save_state()
def _enable_autohsts_domain(self, ssl_vhost):
    """Attach the initial managed HSTS header to *ssl_vhost*.

    :param ssl_vhost: SSL VirtualHost to enhance
    :raises errors.PluginEnhancementAlreadyPresent: if the vhost already
        has a Strict-Transport-Security header
    """
    # This raises the exception
    self._verify_no_matching_http_header(ssl_vhost,
                                         "Strict-Transport-Security")
    if "headers_module" not in self.parser.modules:
        self.enable_mod("headers")
    # Prepare the HSTS header value
    # Slicing copies the constant list, then the canned max-age argument
    # is replaced with the first (smallest) AutoHSTS step value.
    hsts_header = constants.HEADER_ARGS["Strict-Transport-Security"][:-1]
    initial_maxage = constants.AUTOHSTS_STEPS[0]
    hsts_header.append("\"max-age={0}\"".format(initial_maxage))
    # Add ID to the VirtualHost for mapping back to it later
    uniq_id = self.add_vhost_id(ssl_vhost)
    self.save_notes += "Adding unique ID {0} to VirtualHost in {1}\n".format(
        uniq_id, ssl_vhost.filep)
    # Add the actual HSTS header
    self.parser.add_dir(ssl_vhost.path, "Header", hsts_header)
    note_msg = ("Adding gradually increasing HSTS header with initial value "
                "of {0} to VirtualHost in {1}\n".format(
                    initial_maxage, ssl_vhost.filep))
    self.save_notes += note_msg
    # Save the current state to pluginstorage
    self._autohsts[uniq_id] = {"laststep": 0, "timestamp": time.time()}
def update_autohsts(self, _unused_domain):
self._autohsts_fetch_state()
if not self._autohsts:
# No AutoHSTS enabled for any domain
return
curtime = time.time()
save_and_restart = False
for id_str, config in list(self._autohsts.items()):
if config["timestamp"] + constants.AUTOHSTS_FREQ > curtime:
# Skip if last increase was < AUTOHSTS_FREQ ago
continue
nextstep = config["laststep"] + 1
if nextstep < len(constants.AUTOHSTS_STEPS):
# If installer hasn't been prepared yet, do it now
if not self._prepared:
self.prepare()
# Have not reached the max value yet
try:
vhost = self.find_vhost_by_id(id_str)
except errors.PluginError:
msg = ("Could not find VirtualHost with ID {0}, disabling "
"AutoHSTS for this VirtualHost").format(id_str)
logger.warning(msg)
# Remove the orphaned AutoHSTS entry from pluginstorage
self._autohsts.pop(id_str)
continue
self._autohsts_increase(vhost, id_str, nextstep)
msg = ("Increasing HSTS max-age value for VirtualHost with id "
"{0}").format(id_str)
self.save_notes += msg
save_and_restart = True
if save_and_restart:
self.save("Increased HSTS max-age values")
self.restart()
self._autohsts_save_state()
def deploy_autohsts(self, lineage):
self._autohsts_fetch_state()
if not self._autohsts:
# No autohsts enabled for any vhost
return
vhosts = []
affected_ids = []
# Copy, as we are removing from the dict inside the loop
for id_str, config in list(self._autohsts.items()):
if config["laststep"]+1 >= len(constants.AUTOHSTS_STEPS):
# max value reached, try to make permanent
try:
vhost = self.find_vhost_by_id(id_str)
except errors.PluginError:
msg = ("VirtualHost with id {} was not found, unable to "
"make HSTS max-age permanent.").format(id_str)
logger.warning(msg)
self._autohsts.pop(id_str)
continue
if self._autohsts_vhost_in_lineage(vhost, lineage):
vhosts.append(vhost)
affected_ids.append(id_str)
save_and_restart = False
for vhost in vhosts:
self._autohsts_write(vhost, constants.AUTOHSTS_PERMANENT)
msg = ("Strict-Transport-Security max-age value for "
"VirtualHost in {0} was made permanent.").format(vhost.filep)
logger.debug(msg)
self.save_notes += msg+"\n"
save_and_restart = True
if save_and_restart:
self.save("Made HSTS max-age permanent")
self.restart()
for id_str in affected_ids:
self._autohsts.pop(id_str)
# Update AutoHSTS storage (We potentially removed vhosts from managed)
self._autohsts_save_state()
AutoHSTSEnhancement.register(ApacheConfigurator) # pylint: disable=no-member
| true | true |
f7f5013a62598bd972ecd4613d1f94601890043d | 4,189 | py | Python | froide/foirequest/feeds.py | manonthemat/froide | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | [
"MIT"
] | null | null | null | froide/foirequest/feeds.py | manonthemat/froide | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | [
"MIT"
] | null | null | null | froide/foirequest/feeds.py | manonthemat/froide | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | [
"MIT"
] | null | null | null | import re
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.shortcuts import get_object_or_404
from .models import FoiRequest
from .filters import FOIREQUEST_FILTER_DICT
# ASCII control characters except tab (\x09), newline (\x0A) and
# carriage return (\x0D) -- these are invalid in XML feed output.
CONTROLCHARS_RE = re.compile(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]')


def clean(val):
    """Return *val* with disallowed ASCII control characters removed."""
    return re.sub(CONTROLCHARS_RE, '', val)
class LatestFoiRequestsFeed(Feed):
    """RSS feed of the latest FOI requests matching a set of filters.

    Filter values come from ``data`` (a mapping of validated filter
    parameters) and are rendered into the feed title and description.
    """
    url_name = 'foirequest-list_feed'

    def __init__(self, items, data, make_url):
        # NOTE(review): assigning the queryset to ``self.items`` shadows
        # the ``items()`` method below on instances, so Django's feed
        # machinery sees the raw queryset instead of the method's
        # ordered/sliced result. Left unchanged to preserve current
        # behavior -- confirm whether the ordering in items() is applied.
        self.items = items
        self.data = data
        self.make_url = make_url
        super(LatestFoiRequestsFeed, self).__init__()

    def get_filter_string(self):
        """Return a human-readable description of the active filters."""
        by = []
        if self.data.get('q'):
            # Bug fix: interpolate *after* translation. The original code
            # formatted the string first and then passed the result to
            # gettext, so the literal msgid 'search for "%s"' could never
            # match an entry in the translation catalog.
            by.append(_('search for "%s"') % self.data['q'])
        if self.data.get('category'):
            by.append(_('by category %(category)s') % {'category': self.data['category'].name})
        if self.data.get('status'):
            by.append(_('by status %(status)s') % {
                'status': FOIREQUEST_FILTER_DICT[self.data['status']][1]
            })
        if self.data.get('tag'):
            by.append(_('by tag %(tag)s') % {'tag': self.data['tag'].name})
        if self.data.get('jurisdiction'):
            by.append(_('for %(juris)s') % {'juris': self.data['jurisdiction'].name})
        if self.data.get('publicbody'):
            by.append(_('to %(publicbody)s') % {'publicbody': self.data['publicbody'].name})
        # str() forces evaluation of the lazy translation proxies.
        return ' '.join(str(x) for x in by)

    def title(self, obj):
        """Feed title, including the filter description when present."""
        by = self.get_filter_string()
        if by:
            return clean(_("Freedom of Information Requests %(by)s on %(sitename)s") % {
                "sitename": settings.SITE_NAME,
                'by': by
            })
        return clean(_("Freedom of Information Requests on %(sitename)s") % {
            "sitename": settings.SITE_NAME
        })

    def description(self, obj):
        """Feed description, including the filter description when present."""
        by = self.get_filter_string()
        if by:
            return clean(_("This feed contains the Freedom of Information requests %(by)s"
                           " that have been made through %(sitename)s.") % {
                "sitename": settings.SITE_NAME,
                'by': by
            })
        return clean(_("This feed contains the latest Freedom of Information requests"
                       " that have been made through %(sitename)s.") % {
            "sitename": settings.SITE_NAME
        })

    def link(self):
        """URL of the HTML page corresponding to this feed."""
        return self.make_url(self.url_name)

    def items(self):
        """Return the 15 most recent requests (see shadowing note above)."""
        return self.items.order_by("-first_message")[:15]

    def item_title(self, item):
        """Per-item title: request title plus the public body addressed."""
        if item.public_body:
            pb_name = item.public_body.name
        else:
            pb_name = _("Not yet known")
        return clean(_("'%(title)s' to %(publicbody)s") % {
            "title": item.title,
            "publicbody": pb_name
        })

    def item_description(self, item):
        """Per-item body: the request's description, control chars removed."""
        return clean(item.description)

    def item_pubdate(self, item):
        """Publication date: timestamp of the request's first message."""
        return item.first_message
class LatestFoiRequestsFeedAtom(LatestFoiRequestsFeed):
    """Atom flavor of the latest-requests feed."""
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = LatestFoiRequestsFeed.description
    url_name = 'foirequest-list_feed_atom'
class FoiRequestFeed(Feed):
    """RSS feed of the events of a single, public FOI request."""

    def get_object(self, request, slug):
        # Only public requests are exposed via feeds.
        return get_object_or_404(FoiRequest, slug=slug, public=True)

    def title(self, obj):
        return clean(obj.title)

    def link(self, obj):
        return reverse('foirequest-feed', kwargs={"slug": obj.slug})

    def description(self, obj):
        return clean(obj.description)

    def items(self, obj):
        # The 15 most recent events of this request.
        return obj.foievent_set.order_by("-timestamp")[:15]

    def item_title(self, item):
        return clean(item.as_text())

    def item_description(self, item):
        # Same text as the title; both render the event via as_text().
        return clean(item.as_text())

    def item_pubdate(self, item):
        return item.timestamp
class FoiRequestFeedAtom(FoiRequestFeed):
    """Atom flavor of the per-request event feed."""
    feed_type = Atom1Feed
    # Atom uses "subtitle" where RSS uses "description".
    subtitle = FoiRequestFeed.description

    def link(self, obj):
        return reverse('foirequest-feed_atom', kwargs={"slug": obj.slug})
| 31.734848 | 95 | 0.612557 | import re
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.shortcuts import get_object_or_404
from .models import FoiRequest
from .filters import FOIREQUEST_FILTER_DICT
CONTROLCHARS_RE = re.compile(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]')
def clean(val):
return CONTROLCHARS_RE.sub('', val)
class LatestFoiRequestsFeed(Feed):
url_name = 'foirequest-list_feed'
def __init__(self, items, data, make_url):
self.items = items
self.data = data
self.make_url = make_url
super(LatestFoiRequestsFeed, self).__init__()
def get_filter_string(self):
by = []
if self.data.get('q'):
by.append(_('search for "%s"' % self.data['q']))
if self.data.get('category'):
by.append(_('by category %(category)s') % {'category': self.data['category'].name})
if self.data.get('status'):
by.append(_('by status %(status)s') % {
'status': FOIREQUEST_FILTER_DICT[self.data['status']][1]
})
if self.data.get('tag'):
by.append(_('by tag %(tag)s') % {'tag': self.data['tag'].name})
if self.data.get('jurisdiction'):
by.append(_('for %(juris)s') % {'juris': self.data['jurisdiction'].name})
if self.data.get('publicbody'):
by.append(_('to %(publicbody)s') % {'publicbody': self.data['publicbody'].name})
return ' '.join(str(x) for x in by)
def title(self, obj):
by = self.get_filter_string()
if by:
return clean(_("Freedom of Information Requests %(by)s on %(sitename)s") % {
"sitename": settings.SITE_NAME,
'by': by
})
return clean(_("Freedom of Information Requests on %(sitename)s") % {
"sitename": settings.SITE_NAME
})
def description(self, obj):
by = self.get_filter_string()
if by:
return clean(_("This feed contains the Freedom of Information requests %(by)s"
" that have been made through %(sitename)s.") % {
"sitename": settings.SITE_NAME,
'by': by
})
return clean(_("This feed contains the latest Freedom of Information requests"
" that have been made through %(sitename)s.") % {
"sitename": settings.SITE_NAME
})
def link(self):
return self.make_url(self.url_name)
def items(self):
return self.items.order_by("-first_message")[:15]
def item_title(self, item):
if item.public_body:
pb_name = item.public_body.name
else:
pb_name = _("Not yet known")
return clean(_("'%(title)s' to %(publicbody)s") % {
"title": item.title,
"publicbody": pb_name
})
def item_description(self, item):
return clean(item.description)
def item_pubdate(self, item):
return item.first_message
class LatestFoiRequestsFeedAtom(LatestFoiRequestsFeed):
feed_type = Atom1Feed
subtitle = LatestFoiRequestsFeed.description
url_name = 'foirequest-list_feed_atom'
class FoiRequestFeed(Feed):
def get_object(self, request, slug):
return get_object_or_404(FoiRequest, slug=slug, public=True)
def title(self, obj):
return clean(obj.title)
def link(self, obj):
return reverse('foirequest-feed', kwargs={"slug": obj.slug})
def description(self, obj):
return clean(obj.description)
def items(self, obj):
return obj.foievent_set.order_by("-timestamp")[:15]
def item_title(self, item):
return clean(item.as_text())
def item_description(self, item):
return clean(item.as_text())
def item_pubdate(self, item):
return item.timestamp
class FoiRequestFeedAtom(FoiRequestFeed):
feed_type = Atom1Feed
subtitle = FoiRequestFeed.description
def link(self, obj):
return reverse('foirequest-feed_atom', kwargs={"slug": obj.slug})
| true | true |
f7f501490070161e55fc15693ac4df39a7f15947 | 2,029 | py | Python | asphalt/feedreader/stores/mongodb.py | asphalt-framework/asphalt-feedreader | 096df835408ecfcfde593950c9c80d130f62cc5e | [
"Apache-2.0"
] | 1 | 2017-10-30T04:28:53.000Z | 2017-10-30T04:28:53.000Z | asphalt/feedreader/stores/mongodb.py | asphalt-framework/asphalt-feedreader | 096df835408ecfcfde593950c9c80d130f62cc5e | [
"Apache-2.0"
] | null | null | null | asphalt/feedreader/stores/mongodb.py | asphalt-framework/asphalt-feedreader | 096df835408ecfcfde593950c9c80d130f62cc5e | [
"Apache-2.0"
] | null | null | null | from typing import Union
from asphalt.core import Context
from asphalt.serialization.api import Serializer
from asphalt.serialization.serializers.json import JSONSerializer
from motor.motor_asyncio import AsyncIOMotorClient
from typeguard import check_argument_types
from asphalt.feedreader.api import FeedStateStore
class MongoDBStore(FeedStateStore):
"""
Stores feed states in a MongoDB database.
:param client: a Redis client
:param serializer: a serializer or the resource name of one (creates a new JSONSerializer if
none is specified)
:param db: database to store the states in
:param collection: name of the collection in the database
"""
def __init__(self, client: Union[str, AsyncIOMotorClient] = 'default',
serializer: Union[str, Serializer] = None, db: str = 'asphalt',
collection: str = 'feed_states'):
assert check_argument_types()
self.client = client
self.serializer = serializer or JSONSerializer()
self.db = db
self.collection_name = collection
self.collection = None
async def start(self, ctx: Context):
if isinstance(self.serializer, str):
self.serializer = await ctx.request_resource(Serializer, self.serializer)
if isinstance(self.client, str):
self.client = await ctx.request_resource(AsyncIOMotorClient, self.client)
self.collection = self.client[self.db][self.collection_name]
await self.collection.create_index('feed_id')
async def store_state(self, feed_id: str, state) -> None:
serialized = self.serializer.serialize(state)
document = dict(feed_id=feed_id, state=serialized)
await self.collection.find_one_and_replace({'feed_id': feed_id}, document, upsert=True)
async def load_state(self, feed_id: str):
document = await self.collection.find_one({'feed_id': feed_id}, {'state': True})
return self.serializer.deserialize(document['state']) if document else None
| 40.58 | 96 | 0.706259 | from typing import Union
from asphalt.core import Context
from asphalt.serialization.api import Serializer
from asphalt.serialization.serializers.json import JSONSerializer
from motor.motor_asyncio import AsyncIOMotorClient
from typeguard import check_argument_types
from asphalt.feedreader.api import FeedStateStore
class MongoDBStore(FeedStateStore):
def __init__(self, client: Union[str, AsyncIOMotorClient] = 'default',
serializer: Union[str, Serializer] = None, db: str = 'asphalt',
collection: str = 'feed_states'):
assert check_argument_types()
self.client = client
self.serializer = serializer or JSONSerializer()
self.db = db
self.collection_name = collection
self.collection = None
async def start(self, ctx: Context):
if isinstance(self.serializer, str):
self.serializer = await ctx.request_resource(Serializer, self.serializer)
if isinstance(self.client, str):
self.client = await ctx.request_resource(AsyncIOMotorClient, self.client)
self.collection = self.client[self.db][self.collection_name]
await self.collection.create_index('feed_id')
async def store_state(self, feed_id: str, state) -> None:
serialized = self.serializer.serialize(state)
document = dict(feed_id=feed_id, state=serialized)
await self.collection.find_one_and_replace({'feed_id': feed_id}, document, upsert=True)
async def load_state(self, feed_id: str):
document = await self.collection.find_one({'feed_id': feed_id}, {'state': True})
return self.serializer.deserialize(document['state']) if document else None
| true | true |
f7f5024b34218ceb04d13bb351f6d2d302069bce | 5,178 | py | Python | tensorflow/contrib/timeseries/python/timeseries/state_space_models/test_utils.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/contrib/timeseries/python/timeseries/state_space_models/test_utils.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/contrib/timeseries/python/timeseries/state_space_models/test_utils.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing state space models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def transition_power_test_template(test_case, model, num_steps):
"""Tests the transition_to_powers function of a state space model."""
transition_matrix = ops.convert_to_tensor(
model.get_state_transition(), dtype=model.dtype)
step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
previous_matrix = array_ops.placeholder(
shape=[state_dimension, state_dimension], dtype=transition_matrix.dtype)
true_single_step_update = math_ops.matmul(previous_matrix,
transition_matrix)
model_output_tensor = model.transition_to_powers(powers=array_ops.stack(
[step_number, step_number]))
with test_case.test_session():
starting_matrix = linalg_ops.eye(
state_dimension, batch_shape=array_ops.shape(num_steps)).eval()
evaled_current_matrix = starting_matrix
for iteration_number in range(num_steps):
model_output = model_output_tensor.eval(
feed_dict={step_number: iteration_number})
test_case.assertAllClose(
evaled_current_matrix,
model_output[0],
rtol=1e-8 if evaled_current_matrix.dtype == numpy.float64 else 1e-4)
evaled_current_matrix = true_single_step_update.eval(
feed_dict={previous_matrix: evaled_current_matrix})
def noise_accumulator_test_template(test_case, model, num_steps):
"""Tests `model`'s transition_power_noise_accumulator."""
transition_matrix = ops.convert_to_tensor(
model.get_state_transition(), dtype=model.dtype)
noise_transform = ops.convert_to_tensor(
model.get_noise_transform(), dtype=model.dtype)
state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
state_noise_dimension = tensor_shape.dimension_value(noise_transform.shape[1])
gen_noise_addition = math_utils.sign_magnitude_positive_definite(
raw=random_ops.random_normal(
shape=[state_noise_dimension, state_noise_dimension],
dtype=model.dtype))
gen_starting_noise = math_utils.sign_magnitude_positive_definite(
random_ops.random_normal(
shape=[state_dimension, state_dimension], dtype=model.dtype))
starting_noise = array_ops.placeholder(
shape=[state_dimension, state_dimension], dtype=model.dtype)
step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
starting_transitioned = math_ops.matmul(
math_ops.matmul(transition_matrix, starting_noise),
transition_matrix,
adjoint_b=True)
with test_case.test_session():
evaled_starting_noise = gen_starting_noise.eval()
current_starting_noise_transitioned = evaled_starting_noise
current_noise = evaled_starting_noise
evaled_noise_addition = gen_noise_addition.eval()
evaled_noise_addition_transformed = math_ops.matmul(
math_ops.matmul(noise_transform, evaled_noise_addition),
noise_transform,
adjoint_b=True).eval()
model.state_transition_noise_covariance = evaled_noise_addition
model._window_initializer( # pylint: disable=protected-access
times=math_ops.range(num_steps + 1)[..., None], state=(None, None, 0))
model_update = model.transition_power_noise_accumulator(
num_steps=step_number)
for iteration_number in range(num_steps):
model_new_noise = model_update.eval(
feed_dict={step_number: iteration_number})
test_case.assertAllClose(
current_noise,
model_new_noise + current_starting_noise_transitioned,
rtol=1e-8 if current_noise.dtype == numpy.float64 else 1e-3)
current_starting_noise_transitioned = starting_transitioned.eval(
feed_dict={starting_noise: current_starting_noise_transitioned})
current_noise = (
starting_transitioned.eval(
feed_dict={starting_noise: current_noise})
+ evaled_noise_addition_transformed)
| 47.072727 | 80 | 0.751255 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def transition_power_test_template(test_case, model, num_steps):
transition_matrix = ops.convert_to_tensor(
model.get_state_transition(), dtype=model.dtype)
step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
previous_matrix = array_ops.placeholder(
shape=[state_dimension, state_dimension], dtype=transition_matrix.dtype)
true_single_step_update = math_ops.matmul(previous_matrix,
transition_matrix)
model_output_tensor = model.transition_to_powers(powers=array_ops.stack(
[step_number, step_number]))
with test_case.test_session():
starting_matrix = linalg_ops.eye(
state_dimension, batch_shape=array_ops.shape(num_steps)).eval()
evaled_current_matrix = starting_matrix
for iteration_number in range(num_steps):
model_output = model_output_tensor.eval(
feed_dict={step_number: iteration_number})
test_case.assertAllClose(
evaled_current_matrix,
model_output[0],
rtol=1e-8 if evaled_current_matrix.dtype == numpy.float64 else 1e-4)
evaled_current_matrix = true_single_step_update.eval(
feed_dict={previous_matrix: evaled_current_matrix})
def noise_accumulator_test_template(test_case, model, num_steps):
transition_matrix = ops.convert_to_tensor(
model.get_state_transition(), dtype=model.dtype)
noise_transform = ops.convert_to_tensor(
model.get_noise_transform(), dtype=model.dtype)
state_dimension = tensor_shape.dimension_value(transition_matrix.shape[0])
state_noise_dimension = tensor_shape.dimension_value(noise_transform.shape[1])
gen_noise_addition = math_utils.sign_magnitude_positive_definite(
raw=random_ops.random_normal(
shape=[state_noise_dimension, state_noise_dimension],
dtype=model.dtype))
gen_starting_noise = math_utils.sign_magnitude_positive_definite(
random_ops.random_normal(
shape=[state_dimension, state_dimension], dtype=model.dtype))
starting_noise = array_ops.placeholder(
shape=[state_dimension, state_dimension], dtype=model.dtype)
step_number = array_ops.placeholder(shape=[], dtype=dtypes.int64)
starting_transitioned = math_ops.matmul(
math_ops.matmul(transition_matrix, starting_noise),
transition_matrix,
adjoint_b=True)
with test_case.test_session():
evaled_starting_noise = gen_starting_noise.eval()
current_starting_noise_transitioned = evaled_starting_noise
current_noise = evaled_starting_noise
evaled_noise_addition = gen_noise_addition.eval()
evaled_noise_addition_transformed = math_ops.matmul(
math_ops.matmul(noise_transform, evaled_noise_addition),
noise_transform,
adjoint_b=True).eval()
model.state_transition_noise_covariance = evaled_noise_addition
model._window_initializer(
times=math_ops.range(num_steps + 1)[..., None], state=(None, None, 0))
model_update = model.transition_power_noise_accumulator(
num_steps=step_number)
for iteration_number in range(num_steps):
model_new_noise = model_update.eval(
feed_dict={step_number: iteration_number})
test_case.assertAllClose(
current_noise,
model_new_noise + current_starting_noise_transitioned,
rtol=1e-8 if current_noise.dtype == numpy.float64 else 1e-3)
current_starting_noise_transitioned = starting_transitioned.eval(
feed_dict={starting_noise: current_starting_noise_transitioned})
current_noise = (
starting_transitioned.eval(
feed_dict={starting_noise: current_noise})
+ evaled_noise_addition_transformed)
| true | true |
f7f50431c30e501d55825d355936c3e807e15ed3 | 194 | py | Python | bin/bes_vfs.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | bin/bes_vfs.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | bin/bes_vfs.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.vfs.vfs_cli import vfs_cli
if __name__ == '__main__':
vfs_cli.run()
| 24.25 | 90 | 0.680412 |
from bes.vfs.vfs_cli import vfs_cli
if __name__ == '__main__':
vfs_cli.run()
| true | true |
f7f504a93c04702a2fb8beb6835df6288d5581cd | 18,983 | py | Python | core/curses_interface.py | JackieChiles/Cinch | f8b9e8c073f555ff827fa7887153e82b263a8aab | [
"MIT"
] | null | null | null | core/curses_interface.py | JackieChiles/Cinch | f8b9e8c073f555ff827fa7887153e82b263a8aab | [
"MIT"
] | 5 | 2015-01-10T02:18:54.000Z | 2015-12-07T02:07:28.000Z | core/curses_interface.py | JackieChiles/Cinch | f8b9e8c073f555ff827fa7887153e82b263a8aab | [
"MIT"
] | null | null | null | #!/usr/bin/python2
# coding=UTF-8
"""Curses interface for Cinch console."""
# currently being used for debugging.
DESC = "Cinch console curses interface class, along with driver for same."
import curses
import curses.textpad
import sys
import locale
locale.setlocale(locale.LC_ALL,"")
import threading
from time import sleep
from textwrap import TextWrapper
from Queue import Queue
import re
import argparse
import logging
import logging.config
log = logging.getLogger(__name__)
LOG_SHORT ={'d':'DEBUG', 'i':'INFO', 'w':'WARNING', 'e':'ERROR', 'c':'CRITICAL'}
class CinchScreen():
def __init__(self, main_win, global_log):
# Pass logging.getLogger(__name__) to enforce info-level logging.
# Also set other loggers to original log level.
self.log = global_log
self.main_win = main_win
self.ym, self.xm = self.getsizes()
self.PROMPT = "cinch> "
self.textwrapper = TextWrapper(width = self.xm) # used in write()
self.cmd = '' # User input command; starts blank.
self.queue = Queue() # Where parsed commands go.
# These dicts hold the names of valid commands as keys. The values of
# _valid_commands are the regexes corresponding to allowable arguments.
# The values of _command_usage are the strings to print in response to
# invalid input or to a 'help'-class command. They should be accessed
# through register_command() and deregister_command().
self._valid_commands = {}
self._command_usage = {}
self._write_lock = threading.Lock() # Make write() thread-safe.
# Define sub-window dimensions here.
# First define the functional layout.
self.DASHBOARD_HEIGHT = 12
self.COMMAND_HEIGHT = 1
self.TABLE_WIDTH = 26
self.HAND_WIDTH = 6
self.INFO_WIDTH = 30
# Now derive the newwin() calls for each window.
# 'h': height; 'w': width; 'y','x': y-x coord of top left corner
cml = {'h':self.COMMAND_HEIGHT, 'w':self.xm - len(self.PROMPT),
'y':self.ym - self.DASHBOARD_HEIGHT - self.COMMAND_HEIGHT,
'x':len(self.PROMPT) }
cpt = {'h':self.COMMAND_HEIGHT, 'w':len(self.PROMPT) + 1, 'y':cml['y'],
'x':0}
tw = {'h':cpt['y'], 'w':self.xm, 'y':0, 'x':0}
tbl = {'h':self.DASHBOARD_HEIGHT, 'w':self.TABLE_WIDTH,
'y':self.ym - self.DASHBOARD_HEIGHT, 'x':0}
hnd = {'h':self.DASHBOARD_HEIGHT, 'w':self.HAND_WIDTH, 'y':tbl['y'],
'x':self.TABLE_WIDTH}
nfo = {'h':self.DASHBOARD_HEIGHT, 'w':self.INFO_WIDTH, 'y':tbl['y'],
'x':self.TABLE_WIDTH + self.HAND_WIDTH}
# Set up the windows needed for Cinch.
# Command entry:
self.cmdline = curses.newwin(cml['h'], cml['w'], cml['y'], cml['x'])
self.cmdpad = curses.textpad.Textbox(self.cmdline)
self.cmdline.move(0,0)
self.cmdline.refresh()
# Command prompt display:
self.cmdprompt = curses.newwin(cpt['h'], cpt['w'], cpt['y'], cpt['x'])
self.cmdprompt.leaveok(False)
self.cmdprompt.move(0,0)
self.cmdprompt.addstr(self.PROMPT)
self.cmdprompt.refresh()
# Command output display:
self.text_win = curses.newwin(tw['h'], tw['w'], tw['y'], tw['x'])
self.text_win.scrollok(True)
self.text_win.leaveok(False)
# Table display:
self.table = curses.newwin(tbl['h'], tbl['w'], tbl['y'], tbl['x'])
self.table.leaveok(False)
self.table.border()
# Draw table graphic:
self.table.move(4,9)
self.table.addstr("┌──────┐")
for x in range(5,8):
self.table.move(x,9)
self.table.addstr("│ │")
self.table.move(8,9)
self.table.addstr("└──────┘")
# Add constants for writing table data:
self.TBL_NAMES = [( 9, 9), (5, 1), (2, 9), (5,17)]
self.TBL_NAME_LEN = 8
self.TBL_CARDS = [( 7,12), (6,10), (5,12), (6,14)]
self.TBL_BIDS = [(10, 9), (6, 1), (3, 9), (6,17)]
self.TBL_DEALER = [( 7,10), (5,10), (5,15), (7,15)]
self.table.refresh()
# Hand display:
self.hand = curses.newwin(hnd['h'], hnd['w'], hnd['y'], hnd['x'])
self.hand.leaveok(False)
self.hand.border()
self.hand.move(1,1)
self.hand.addstr("HAND")
self.hand.refresh()
# Info display:
self.LAST_TRICK_DISP = [(3,2), (3,5), (3,8), (3,11)]
self.LAST_TAKER_DISP = (2,2)
self.LAST_TAKER_LEN = 14
self.TRUMP_DISP = (self.DASHBOARD_HEIGHT - 2, 9)
self.US_SCORE_DISP = (3, self.INFO_WIDTH - 5)
self.THEM_SCORE_DISP = (6, self.INFO_WIDTH - 5)
self.info = curses.newwin(nfo['h'], nfo['w'], nfo['y'], nfo['x'])
self.info.leaveok(False)
self.info.border()
self.info.move(1,2)
self.info.addstr('--Last trick--')
self.info.move(self.DASHBOARD_HEIGHT - 2, 2)
self.info.addstr('Trump:')
self.SCORES_TEMPLATE = [' Scores ',
' ┌───┐',
' Us: │ │',
' └───┘',
' ┌───┐',
' Them:│ │',
' └───┘']
for x in range(1,8):
self.info.move(x, self.INFO_WIDTH - 12)
self.info.addstr(self.SCORES_TEMPLATE[x-1])
self.info.refresh()
commandline_listener = threading.Thread(target=self._console_input)
commandline_listener.daemon = True
commandline_listener.start()
def __enter__(self):
# Capture stderr.
self._old_stderr = sys.stderr
sys.stderr = self # Capture tracebacks and display nicely
# Capture all loggers.
for x in logging.Logger.manager.loggerDict:
x_log = logging.getLogger(x)
x_log.addHandler(logging.StreamHandler(self))
x_log.setLevel(self.log.level)
# INFO level logging used for most command responses; enable them.
if (self.log.level > logging.INFO) or (self.log.level == 0):
self._old_log_level = self.log.level
self.log.setLevel(logging.INFO)
return self
def __exit__(self, exc_type, exc_value, traceback):
# Reset previous logging (undo INFO level set)
try:
self.log.setLevel(self._old_log_level)
except AttributeError:
pass
# Remove cinchscreen handlers
for x in logging.Logger.manager.loggerDict:
logging.getLogger(x).handlers = []
# Reset stderr
sys.stderr = self._old_stderr
log.debug("Logged after executing self.__exit__()")
def _console_input(self):
# Run in separate input thread.
while True:
self.cmdline.refresh()
self.cmd = self.cmdpad.edit()
self._parse_command()
self.cmdline.erase()
self.cmdline.move(0,0)
self.cmdline.refresh()
def getsizes(self):
# getmaxyx() returns the dimensions, not maxY or maxX.
ym, xm = self.main_win.getmaxyx()
return ym, xm
def _parse_command(self):
'''
Called by console_input. Takes input lines from cmdpad, echoes to the
screen, parses, and adds well-formed commands to the queue. Rejects
bad syntax, but bad input parameters will be checked by the console.
'''
# FUTURE: Write parallel method to handle getch() mode.
# First, echo the command to the output window.
# Later, consider adding an option to set echo to all,
# all but chats or none.
self.write(self.PROMPT + self.cmd)
if self.cmd == '':
return
cmd_name = self.cmd.split()[0]
# Command syntax: "name <args>", where args matches the cmd regex.
if cmd_name in self._valid_commands:
# Valid command name; check arg syntax
cmd_args = self.cmd[len(cmd_name):].strip() # Rem. name & whitespace
if self._valid_commands[cmd_name].match(cmd_args):
# OK syntax; add to queue
self.queue.put({cmd_name:cmd_args})
else:
# Syntax not OK; print usage
self.write(cmd_name + ": " + self._command_usage[cmd_name]
+ " (" + cmd_args + " received)")
else:
# Not a valid command name
self.write(cmd_name + ": not a valid command")
self.cmd = '' # Unblock the listener.
def register_command(self, name, regex=r'^$', usage='usage not supplied'):
'''
The main console calls this on init to add recognized commands.
name: Name of the command. Does not include any cmdline-specific control
characters (such as '/'); this will be CinchScreen's choice.
regex: Raw string containing a regex representing valid arg strings.
User input will be parsed and rejected if it doesn't match.
usage: The console will echo the command name and this string to the
screen if invalid input is detected.
'''
self._valid_commands[name] = re.compile(regex)
self._command_usage[name] = usage
def unregister_command(self, name):
'''
Remove a command from the list of valid commands.
name: name of the command to remove.
'''
try:
del self._valid_commands[name]
del self._command_usage[name]
except KeyError:
self.write("KeyError deleting command " + name + ": not found")
def update_dash(self, msg_type, *args):
'''Display information in the dashboard. Most game events should affect
the dashboard in some way.
msg_type(str): Type of update to process. Allowed values will affect the
required *args.
Currently allowed msg_type values:
msg_type | args
----------|-------
'hand' | A len-[0..9] list of NS-style strings.
'seat' | An apparent_seat and nickname ('' to clear).
'bid' | An apparent_seat and bid (int or None).
'card' | An apparent_seat and card (NS-style or None).
'last' | A line num (0-3) and card (NS-style or None).
'taker' | A username, None, or ''.
'scores' | A 2-tuple with Us/Them scores or None.
'trump' | A suit symbol or None.
'dealer' | <not implemented yet>
'room' | An integer room number.
'''
try:
#----< Hand Window Updates >----#
if msg_type is 'hand':
h_upd = args[0]
# Do some validation on input
if type(h_upd) is not list:
log.error("update_dash(hand): hand data not list")
return
if len(h_upd) > 9:
log.error("update_dash(hand): too many cards")
return
# Pad to 9 entries to overwrite old hand
while len(h_upd) < 9:
h_upd.append(' ')
# Write cards to screen
for k,v in dict(zip(range(2,11),h_upd)).iteritems():
self.hand.move(k,2)
self.hand.addstr(v)
self.hand.refresh()
#----< Table Window Updates >----#
elif msg_type is 'seat':
apparent_seat = args[0]
nickname = args[1]
# Truncate long names
if len(nickname) > self.TBL_NAME_LEN:
nickname = nickname[:(self.TBL_NAME_LEN - 1)] + '>'
# Pad short names
while len(nickname) < self.TBL_NAME_LEN:
nickname += ' '
# Write nickname to appropriate slot
self.table.move(*self.TBL_NAMES[apparent_seat])
self.table.addstr(nickname)
self.table.refresh()
elif msg_type is 'bid':
apparent_seat = args[0]
bid = args[1]
bid_strings = {None:'', 0:'Pass', 1:'Bid 1',
2:'Bid 2', 3:'Bid 3', 4:'Bid 4', 5:'Cinch!'}
try:
bid_str = bid_strings[bid]
except KeyError:
log.exception("Unrecognized bid %s", bid)
bid_str = '????'
while len(bid_str) < self.TBL_NAME_LEN:
bid_str += ' '
self.table.move(*self.TBL_BIDS[apparent_seat])
self.table.addstr(bid_str)
self.table.refresh()
elif msg_type is 'card':
apparent_seat = args[0]
card = args[1]
if card is None:
card = ' '
self.table.move(*self.TBL_CARDS[apparent_seat])
self.table.addstr(card)
self.table.refresh()
elif msg_type is 'room':
try:
room_num = int(args[0])
if room_num < 0:
raise TypeError
except TypeError:
log.exception("Room number argument %s not +int!", args[0])
room_num = 999999
if room_num is 0:
room_str = "LOBBY "
else:
room_str = "ROOM " + str(room_num) + " "
self.table.move(1,1)
self.table.addstr(room_str)
self.table.refresh()
#----< Info Window Updates >----#
elif msg_type is 'last':
line = args[0]
card = args[1]
if card is None:
card = ' '
self.info.move(*self.LAST_TRICK_DISP[line])
self.info.addstr(card)
self.info.refresh()
elif msg_type is 'taker':
taker = args[0]
if (taker is None) or (taker is ''):
taker = ''
else:
taker = taker + ' took:'
if len(taker) > self.LAST_TAKER_LEN:
taker = taker[:self.LAST_TAKER_LEN - 7] + '> took:'
while len(taker) < self.LAST_TAKER_LEN:
taker += ' '
self.info.move(*self.LAST_TAKER_DISP)
self.info.addstr(taker)
self.info.refresh()
elif msg_type is 'scores':
log.debug('cs.update_dash--scores: %s', args)
if args[0] is None:
us_score = ' '
them_score = ' '
else:
us_score = args[0][0]
them_score = args[0][1]
us_score = (str(us_score) + ' ')[:3]
them_score = (str(them_score) + ' ')[:3]
self.info.move(*self.US_SCORE_DISP)
self.info.addstr(us_score)
self.info.move(*self.THEM_SCORE_DISP)
self.info.addstr(them_score)
self.info.refresh()
elif msg_type is 'trump':
log.debug('cs.update_dash--trump: %s', args)
if args[0] is None:
trump = ' '
else:
trump = args[0]
self.info.move(*self.TRUMP_DISP)
self.info.addstr(trump)
self.info.refresh()
elif msg_type is 'dealer':
pass #TODO
#----< Handle invalid input >----*
else:
log.error("msg_type %s not valid in update_dash", msg_type)
finally:
self.cmdline.refresh() # Set displayed cursor back to cmdline.
def write(self, *stuff):
'''Display text in the console text window, scrolling the existing
contents up as needed. This is the only method that should place text in
this window; other methods should pass str/unicode to this one.'''
with self._write_lock:
# First, parse stuff into manageable chunks.
output = []
for thing in stuff:
if type(thing) == unicode:
thing = unicode.splitlines(thing)
else:
thing = str.splitlines(thing)
for line in thing:
output += self.textwrapper.wrap(line)
# Then write each line in order.
for thing in output:
self.text_win.scroll(1)
self.text_win.move(self.text_win.getmaxyx()[0] - 1, 0)
self.text_win.addstr(thing)
self.text_win.refresh()
self.cmdline.refresh() # Set displayed cursor back to cmdline.
def driver(window, flags):
with CinchScreen(window) as cs:
cs.write('Console interface test driver.')
cs.write('------------------------------')
if flags['s']:
suits = u"Suit symbol test\n♥♦♣♠.".encode("UTF-8")
cs.write(suits)
if flags['a']:
cs.register_command('test', r'^[0-9]$', "test N (single digit only)")
if flags['b']:
cs.unregister_command('test')
if flags['w']:
cs.log.critical(cs.log.level)
cs.log.info("i: Testing curses-based logging handlers...")
cs.log.debug("d: Shouldn't see me unless -l d flag set!")
cs.log.error("e: Oh, what a night; Late December back in sixty three; What a very special time for me; As I remember, what a night!")
cs.log.info("i: Cette année-là; je chantais pour la première fois; le public ne me connaissait pas; oh quelle année cette année-la ! (Jumping jacks!)")
cs.write("cs.write call")
while True:
sleep(0.1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = DESC)
parser.add_argument("-l", "--loglevel",
help="set log level (default=WARNING)", type=str,
choices = list(LOG_SHORT.keys()), default='w')
parser.add_argument("-a", help="reg test cmd", action='store_true')
parser.add_argument("-w", help="test logging with write()", action='store_true')
parser.add_argument("-b", help="del test cmd", action='store_true')
parser.add_argument("-s", help="suittest", action='store_true')
args = parser.parse_args()
log.setLevel(LOG_SHORT[args.loglevel])
flags = {'a':args.a, 'b':args.b, 'w':args.w, 's':args.s}
log.debug("%s", flags)
curses.wrapper(driver, flags)
| 38.505071 | 163 | 0.525734 |
DESC = "Cinch console curses interface class, along with driver for same."
import curses
import curses.textpad
import sys
import locale
locale.setlocale(locale.LC_ALL,"")
import threading
from time import sleep
from textwrap import TextWrapper
from Queue import Queue
import re
import argparse
import logging
import logging.config
log = logging.getLogger(__name__)
LOG_SHORT ={'d':'DEBUG', 'i':'INFO', 'w':'WARNING', 'e':'ERROR', 'c':'CRITICAL'}
class CinchScreen():
def __init__(self, main_win, global_log):
self.log = global_log
self.main_win = main_win
self.ym, self.xm = self.getsizes()
self.PROMPT = "cinch> "
self.textwrapper = TextWrapper(width = self.xm)
self.cmd = ''
self.queue = Queue()
self._valid_commands = {}
self._command_usage = {}
self._write_lock = threading.Lock()
self.DASHBOARD_HEIGHT = 12
self.COMMAND_HEIGHT = 1
self.TABLE_WIDTH = 26
self.HAND_WIDTH = 6
self.INFO_WIDTH = 30
cml = {'h':self.COMMAND_HEIGHT, 'w':self.xm - len(self.PROMPT),
'y':self.ym - self.DASHBOARD_HEIGHT - self.COMMAND_HEIGHT,
'x':len(self.PROMPT) }
cpt = {'h':self.COMMAND_HEIGHT, 'w':len(self.PROMPT) + 1, 'y':cml['y'],
'x':0}
tw = {'h':cpt['y'], 'w':self.xm, 'y':0, 'x':0}
tbl = {'h':self.DASHBOARD_HEIGHT, 'w':self.TABLE_WIDTH,
'y':self.ym - self.DASHBOARD_HEIGHT, 'x':0}
hnd = {'h':self.DASHBOARD_HEIGHT, 'w':self.HAND_WIDTH, 'y':tbl['y'],
'x':self.TABLE_WIDTH}
nfo = {'h':self.DASHBOARD_HEIGHT, 'w':self.INFO_WIDTH, 'y':tbl['y'],
'x':self.TABLE_WIDTH + self.HAND_WIDTH}
self.cmdline = curses.newwin(cml['h'], cml['w'], cml['y'], cml['x'])
self.cmdpad = curses.textpad.Textbox(self.cmdline)
self.cmdline.move(0,0)
self.cmdline.refresh()
self.cmdprompt = curses.newwin(cpt['h'], cpt['w'], cpt['y'], cpt['x'])
self.cmdprompt.leaveok(False)
self.cmdprompt.move(0,0)
self.cmdprompt.addstr(self.PROMPT)
self.cmdprompt.refresh()
self.text_win = curses.newwin(tw['h'], tw['w'], tw['y'], tw['x'])
self.text_win.scrollok(True)
self.text_win.leaveok(False)
self.table = curses.newwin(tbl['h'], tbl['w'], tbl['y'], tbl['x'])
self.table.leaveok(False)
self.table.border()
self.table.move(4,9)
self.table.addstr("┌──────┐")
for x in range(5,8):
self.table.move(x,9)
self.table.addstr("│ │")
self.table.move(8,9)
self.table.addstr("└──────┘")
self.TBL_NAMES = [( 9, 9), (5, 1), (2, 9), (5,17)]
self.TBL_NAME_LEN = 8
self.TBL_CARDS = [( 7,12), (6,10), (5,12), (6,14)]
self.TBL_BIDS = [(10, 9), (6, 1), (3, 9), (6,17)]
self.TBL_DEALER = [( 7,10), (5,10), (5,15), (7,15)]
self.table.refresh()
self.hand = curses.newwin(hnd['h'], hnd['w'], hnd['y'], hnd['x'])
self.hand.leaveok(False)
self.hand.border()
self.hand.move(1,1)
self.hand.addstr("HAND")
self.hand.refresh()
self.LAST_TRICK_DISP = [(3,2), (3,5), (3,8), (3,11)]
self.LAST_TAKER_DISP = (2,2)
self.LAST_TAKER_LEN = 14
self.TRUMP_DISP = (self.DASHBOARD_HEIGHT - 2, 9)
self.US_SCORE_DISP = (3, self.INFO_WIDTH - 5)
self.THEM_SCORE_DISP = (6, self.INFO_WIDTH - 5)
self.info = curses.newwin(nfo['h'], nfo['w'], nfo['y'], nfo['x'])
self.info.leaveok(False)
self.info.border()
self.info.move(1,2)
self.info.addstr('--Last trick--')
self.info.move(self.DASHBOARD_HEIGHT - 2, 2)
self.info.addstr('Trump:')
self.SCORES_TEMPLATE = [' Scores ',
' ┌───┐',
' Us: │ │',
' └───┘',
' ┌───┐',
' Them:│ │',
' └───┘']
for x in range(1,8):
self.info.move(x, self.INFO_WIDTH - 12)
self.info.addstr(self.SCORES_TEMPLATE[x-1])
self.info.refresh()
commandline_listener = threading.Thread(target=self._console_input)
commandline_listener.daemon = True
commandline_listener.start()
def __enter__(self):
    """Context-manager entry: route stderr and all logging through this screen.

    Replaces ``sys.stderr`` with this object (which exposes ``write``) and
    attaches a StreamHandler targeting this screen to every logger already
    registered, so tracebacks and log records render inside the curses UI
    instead of corrupting the terminal.
    """
    self._old_stderr = sys.stderr  # saved so __exit__ can restore it
    sys.stderr = self              # self.write() absorbs stderr output
    # Mirror every currently registered logger onto the curses text window.
    for x in logging.Logger.manager.loggerDict:
        x_log = logging.getLogger(x)
        x_log.addHandler(logging.StreamHandler(self))
        x_log.setLevel(self.log.level)
    # Force at least INFO so status messages are visible on screen; remember
    # the previous level so __exit__ can restore it.  Level 0 == NOTSET.
    if (self.log.level > logging.INFO) or (self.log.level == 0):
        self._old_log_level = self.log.level
        self.log.setLevel(logging.INFO)
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Context-manager exit: undo the redirections made in __enter__."""
    try:
        self.log.setLevel(self._old_log_level)
    except AttributeError:
        # _old_log_level only exists if __enter__ had to raise the level.
        pass
    # NOTE(review): this clears ALL handlers on every registered logger,
    # including handlers this class did not add -- confirm that is intended.
    for x in logging.Logger.manager.loggerDict:
        logging.getLogger(x).handlers = []
    sys.stderr = self._old_stderr
    log.debug("Logged after executing self.__exit__()")
def _console_input(self):
    """Background-thread loop: read commands from the curses textpad.

    Runs forever (the thread is daemonized by the constructor); each
    ``Textbox.edit()`` call blocks until the user submits a line, which is
    then parsed and the input line cleared for reuse.
    """
    while True:
        self.cmdline.refresh()
        self.cmd = self.cmdpad.edit()  # blocks until the user hits Enter
        self._parse_command()
        self.cmdline.erase()
        self.cmdline.move(0, 0)
        self.cmdline.refresh()
def getsizes(self):
    """Return the (rows, cols) dimensions of the main window."""
    rows, cols = self.main_win.getmaxyx()
    return rows, cols
def _parse_command(self):
self.write(self.PROMPT + self.cmd)
if self.cmd == '':
return
cmd_name = self.cmd.split()[0]
if cmd_name in self._valid_commands:
cmd_args = self.cmd[len(cmd_name):].strip()
if self._valid_commands[cmd_name].match(cmd_args):
self.queue.put({cmd_name:cmd_args})
else:
self.write(cmd_name + ": " + self._command_usage[cmd_name]
+ " (" + cmd_args + " received)")
else:
self.write(cmd_name + ": not a valid command")
self.cmd = ''
def register_command(self, name, regex=r'^$', usage='usage not supplied'):
    """Make *name* a recognized console command.

    *regex* validates the argument string and *usage* is the hint shown to
    the user when validation fails.
    """
    compiled = re.compile(regex)
    self._valid_commands[name] = compiled
    self._command_usage[name] = usage
def unregister_command(self, name):
    """Forget a console command previously added via register_command."""
    try:
        self._valid_commands.pop(name)
        self._command_usage.pop(name)
    except KeyError:
        # Unknown command: report on screen rather than crash the UI.
        self.write("KeyError deleting command " + name + ": not found")
def update_dash(self, msg_type, *args):
    """Redraw one dashboard widget selected by *msg_type*.

    Supported message types and their payload in *args*:
      'hand'    [cards]            -- up to 9 card strings
      'seat'    seat, nickname     -- player name at a table position
      'bid'     seat, bid          -- bid code (None or 0-5)
      'card'    seat, card         -- card played (None clears the slot)
      'room'    room_number        -- 0 means the lobby
      'last'    line, card         -- one card of the last trick
      'taker'   name or None       -- who took the last trick
      'scores'  (us, them) or None -- score boxes
      'trump'   suit or None       -- trump indicator
      'dealer'                     -- currently a no-op

    Fixed: comparisons against string/int literals now use ``==`` instead
    of the identity operator ``is`` (interning-dependent, unreliable), and
    Python-2-only ``iteritems`` was replaced by ``items``.
    """
    try:
        if msg_type == 'hand':
            h_upd = args[0]
            if type(h_upd) is not list:
                log.error("update_dash(hand): hand data not list")
                return
            if len(h_upd) > 9:
                log.error("update_dash(hand): too many cards")
                return
            # Pad to exactly 9 slots so stale card text gets blanked.
            while len(h_upd) < 9:
                h_upd.append(' ')
            for k, v in dict(zip(range(2, 11), h_upd)).items():
                self.hand.move(k, 2)
                self.hand.addstr(v)
            self.hand.refresh()
        elif msg_type == 'seat':
            apparent_seat = args[0]
            nickname = args[1]
            # Truncate long names with a '>' marker, pad short ones.
            if len(nickname) > self.TBL_NAME_LEN:
                nickname = nickname[:(self.TBL_NAME_LEN - 1)] + '>'
            while len(nickname) < self.TBL_NAME_LEN:
                nickname += ' '
            self.table.move(*self.TBL_NAMES[apparent_seat])
            self.table.addstr(nickname)
            self.table.refresh()
        elif msg_type == 'bid':
            apparent_seat = args[0]
            bid = args[1]
            bid_strings = {None: '', 0: 'Pass', 1: 'Bid 1',
                           2: 'Bid 2', 3: 'Bid 3', 4: 'Bid 4', 5: 'Cinch!'}
            try:
                bid_str = bid_strings[bid]
            except KeyError:
                log.exception("Unrecognized bid %s", bid)
                bid_str = '????'
            while len(bid_str) < self.TBL_NAME_LEN:
                bid_str += ' '
            self.table.move(*self.TBL_BIDS[apparent_seat])
            self.table.addstr(bid_str)
            self.table.refresh()
        elif msg_type == 'card':
            apparent_seat = args[0]
            card = args[1]
            if card is None:
                card = ' '
            self.table.move(*self.TBL_CARDS[apparent_seat])
            self.table.addstr(card)
            self.table.refresh()
        elif msg_type == 'room':
            try:
                room_num = int(args[0])
                if room_num < 0:
                    raise TypeError
            except TypeError:
                log.exception("Room number argument %s not +int!", args[0])
                room_num = 999999
            if room_num == 0:
                room_str = "LOBBY "
            else:
                room_str = "ROOM " + str(room_num) + " "
            self.table.move(1, 1)
            self.table.addstr(room_str)
            self.table.refresh()
        elif msg_type == 'last':
            line = args[0]
            card = args[1]
            if card is None:
                card = ' '
            self.info.move(*self.LAST_TRICK_DISP[line])
            self.info.addstr(card)
            self.info.refresh()
        elif msg_type == 'taker':
            taker = args[0]
            if taker is None or taker == '':
                taker = ''
            else:
                taker = taker + ' took:'
            # Truncate with '>' so the trailing ' took:' always fits.
            if len(taker) > self.LAST_TAKER_LEN:
                taker = taker[:self.LAST_TAKER_LEN - 7] + '> took:'
            while len(taker) < self.LAST_TAKER_LEN:
                taker += ' '
            self.info.move(*self.LAST_TAKER_DISP)
            self.info.addstr(taker)
            self.info.refresh()
        elif msg_type == 'scores':
            log.debug('cs.update_dash--scores: %s', args)
            if args[0] is None:
                us_score = ' '
                them_score = ' '
            else:
                us_score = args[0][0]
                them_score = args[0][1]
            # Left-justify into the 3-character score boxes.
            us_score = (str(us_score) + ' ')[:3]
            them_score = (str(them_score) + ' ')[:3]
            self.info.move(*self.US_SCORE_DISP)
            self.info.addstr(us_score)
            self.info.move(*self.THEM_SCORE_DISP)
            self.info.addstr(them_score)
            self.info.refresh()
        elif msg_type == 'trump':
            log.debug('cs.update_dash--trump: %s', args)
            if args[0] is None:
                trump = ' '
            else:
                trump = args[0]
            self.info.move(*self.TRUMP_DISP)
            self.info.addstr(trump)
            self.info.refresh()
        elif msg_type == 'dealer':
            pass  # dealer marker not implemented yet
        else:
            log.error("msg_type %s not valid in update_dash", msg_type)
    finally:
        # Always park the cursor back in the command line.
        self.cmdline.refresh()
def write(self, *stuff):
    """Append text to the scrollback pane (also serves as a file-like sink).

    Each positional argument may contain embedded newlines; every line is
    word-wrapped to the pane width and scrolled into view.  Fixed: lines
    are split via the argument's own ``splitlines()`` method instead of
    the unbound ``unicode.splitlines``/``str.splitlines`` calls, which
    crashed on the type they were not written for and tied the code to
    Python 2.
    """
    with self._write_lock:
        output = []
        for thing in stuff:
            for line in thing.splitlines():
                output += self.textwrapper.wrap(line)
        for thing in output:
            self.text_win.scroll(1)
            # Write on the bottom row; the scroll above freed it.
            self.text_win.move(self.text_win.getmaxyx()[0] - 1, 0)
            self.text_win.addstr(thing)
            self.text_win.refresh()
        self.cmdline.refresh()  # keep the cursor in the input line
def driver(window, flags):
    """Manual test driver for CinchScreen, run under curses.wrapper().

    Exercises writing, optional suit-glyph output, command
    (un)registration and the logging handlers depending on the
    command-line *flags*, then idles so the screen can be inspected.
    """
    with CinchScreen(window) as cs:
        cs.write('Console interface test driver.')
        cs.write('------------------------------')
        if flags['s']:
            # UTF-8 suit glyphs; verifies terminal/locale handling.
            suits = u"Suit symbol test\n♥♦♣♠.".encode("UTF-8")
            cs.write(suits)
        if flags['a']:
            cs.register_command('test', r'^[0-9]$', "test N (single digit only)")
        if flags['b']:
            cs.unregister_command('test')
        if flags['w']:
            # Emit records at several levels to check on-screen filtering.
            cs.log.critical(cs.log.level)
            cs.log.info("i: Testing curses-based logging handlers...")
            cs.log.debug("d: Shouldn't see me unless -l d flag set!")
            cs.log.error("e: Oh, what a night; Late December back in sixty three; What a very special time for me; As I remember, what a night!")
            cs.log.info("i: Cette année-là; je chantais pour la première fois; le public ne me connaissait pas; oh quelle année cette année-la ! (Jumping jacks!)")
        cs.write("cs.write call")
        # Idle forever; the daemonized input thread keeps handling commands.
        while True:
            sleep(0.1)
if __name__ == "__main__":
    # Stand-alone test entry point: parse flags, then run the driver inside
    # curses.wrapper() so the terminal is restored even on error.
    parser = argparse.ArgumentParser(description=DESC)
    parser.add_argument("-l", "--loglevel",
                        help="set log level (default=WARNING)", type=str,
                        choices=list(LOG_SHORT.keys()), default='w')
    parser.add_argument("-a", help="reg test cmd", action='store_true')
    parser.add_argument("-w", help="test logging with write()", action='store_true')
    parser.add_argument("-b", help="del test cmd", action='store_true')
    parser.add_argument("-s", help="suittest", action='store_true')
    args = parser.parse_args()
    log.setLevel(LOG_SHORT[args.loglevel])
    flags = {'a': args.a, 'b': args.b, 'w': args.w, 's': args.s}
    log.debug("%s", flags)
    curses.wrapper(driver, flags)
| true | true |
f7f5057250989eae8d9b8d9e45508d107c716ea0 | 9,229 | py | Python | ttbd/ttbl/store.py | jhaapako/tcf | ecd75404459c6fec9d9fa1522b70a8deab896644 | [
"Apache-2.0"
] | 24 | 2018-08-21T18:04:48.000Z | 2022-02-07T22:50:06.000Z | ttbd/ttbl/store.py | jhaapako/tcf | ecd75404459c6fec9d9fa1522b70a8deab896644 | [
"Apache-2.0"
] | 16 | 2018-08-21T18:03:52.000Z | 2022-03-01T17:15:42.000Z | ttbd/ttbl/store.py | jhaapako/tcf | ecd75404459c6fec9d9fa1522b70a8deab896644 | [
"Apache-2.0"
] | 29 | 2018-08-22T19:40:59.000Z | 2021-12-21T11:13:23.000Z | #! /usr/bin/python3
#
# Copyright (c) 2017-20 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
File storage interface
----------------------
Export an interface on each target that allows the user to:
- upload files to the servr
- download files from the server
- remove said files form the server
- list them
each user has a separate storage area, flat in structure (no
subdirectories) which can be use to place blobs which might be removed
by the server after a certain time based on policy. Note these storage
areas are common to all the targets for each user.
Examples:
- upload files to the server than then other tools will
use to (eg: burn into a Flash ROM).
"""
import errno
import hashlib
import os
import re
import commonl
import ttbl
#: List of paths in the systems where clients are allowed to read
#: files from
#:
#: Each entry is the path is the top level directory the user can
#: specify and the value is the mapping into the real file system path.
#:
#: In any :ref:`server configuration file <ttbd_configuration>`, add:
#:
#: >>> ttbl.store.paths_allowed['/images'] = '/home/SOMEUSER/images'
#:
#: Note it is not allowed to upload files to these locations, just to
#: list and download.
paths_allowed = {
    # maps client-visible top-level path -> real filesystem path;
    # empty by default, populated by server configuration files
}
class interface(ttbl.tt_interface):
    """Per-user file storage interface (upload/download/list/delete).

    Each user gets a flat storage area shared across all targets; some
    system paths (:data:`paths_allowed`, read-only) and per-target state
    subdirectories (:data:`target_sub_paths`) are also exposed.
    """

    def __init__(self):
        ttbl.tt_interface.__init__(self)

    def _target_setup(self, target, iface_name):
        # No per-target setup needed for storage.
        pass

    def _release_hook(self, target, _force):
        # Nothing to clean up when a target is released.
        pass

    # matches any path containing a ".." traversal component
    _bad_path = re.compile(r"(^\.\.$|^\.\./|/\.\./|/\.\.$)")

    # Paths allowed to access in TARGETSTATEDIR/PATH; value is True when
    # the path is read-write, False when read-only.
    target_sub_paths = {
        # PATH1: False, # read-only
        # PATH2: True,  # read-write
        # For the capture interface FIXME move to ttbl/capture.py
        "capture": False,
    }

    def _validate_file_path(self, target, file_path, user_path):
        """Map a client-supplied file path to a real filesystem path.

        Returns a tuple *(real_path, writable)*; raises ValueError on
        path traversal attempts and RuntimeError on disallowed locations.
        """
        matches = self._bad_path.findall(file_path)
        if matches \
           or os.path.pardir in file_path:
            raise ValueError("%s: file path cannot contains components: "
                             "%s" % (file_path, " ".join(matches)))

        if target:
            for subpath, rw in self.target_sub_paths.items():
                if file_path.startswith(subpath + "/"):
                    # file comes from the target's designated state
                    # directory (capture, certificates); remove any
                    # possible nastiness by keeping only the basename
                    file_name = os.path.basename(file_path)
                    file_path_final = os.path.join(target.state_dir, subpath, file_name)
                    return file_path_final, rw
            # fall through

        if not os.path.isabs(file_path):
            # relative path: file comes from the user's storage area
            file_path_normalized = os.path.normpath(file_path)
            file_path_final = os.path.join(user_path, file_path_normalized)
            return file_path_final, True

        # absolute path: file from the system (mounted FS or similar);
        # double check it is in an allowed location
        for path, path_translated in paths_allowed.items():
            if file_path.startswith(path):
                file_path = file_path.replace(path, path_translated, 1)
                file_path_final = os.path.normpath(file_path)
                return file_path_final, False	# FIXME: always read-only?

        # FIXME: use PermissionError in Python3
        raise RuntimeError(
            "%s: tries to read from a location that is not allowed"
            % file_path)

    def _validate_path(self, target, path):
        """Translate a client-supplied directory path to a real one.

        Raises RuntimeError if the path is not an allowed location.
        """
        if target:
            for subpath, _rw in self.target_sub_paths.items():
                if path.startswith(subpath + "/") or path == subpath:
                    # path maps into the target's designated state
                    # directory (capture, certificates)
                    return os.path.join(target.state_dir, subpath)
        for valid_path, translated_path in paths_allowed.items():
            if path.startswith(valid_path):
                return path.replace(valid_path, translated_path, 1)
        raise RuntimeError("%s: path not allowed" % path)

    #: digests the client may request for get_list()
    valid_digests = {
        "md5": "MD5",
        "sha256": "SHA256",
        "sha512": "SHA512",
        "zero": "no signature"
    }

    def get_list(self, target, _who, args, _files, user_path):
        """List files (with digests) in the user's storage or an allowed path."""
        filenames = self.arg_get(args, 'filenames', list,
                                 allow_missing = True, default = [ ])
        path = self.arg_get(args, 'path', str,
                            allow_missing = True, default = None)
        if path == None:
            path = user_path
        elif path == "/":
            pass	# special handling below
        else:
            path = self._validate_path(target, path)
        digest = self.arg_get(args, 'digest', str,
                              allow_missing = True, default = "sha256")
        if digest not in self.valid_digests:
            # FIX: the format arguments were previously passed outside the
            # % operator, so this line raised TypeError ("not enough
            # arguments for format string") instead of the intended error.
            raise RuntimeError(
                "%s: digest not allowed (only %s)"
                % (digest, ", ".join(self.valid_digests)))

        file_data = {}
        if path == "/":
            # we want the top level list of folders, handle it specially
            for path in paths_allowed:
                file_data[path] = "directory"
            file_data['result'] = dict(file_data)	# COMPAT
            return file_data

        def _list_filename(index_filename, filename):
            # record the digest of *filename* under key *index_filename*
            file_path = os.path.join(path, filename)
            try:
                if digest == "zero":
                    file_data[index_filename] = "0"
                else:
                    # note file path is normalized, so we shouldn't
                    # get multiple cache entries for different paths
                    file_data[index_filename] = commonl.hash_file_cached(file_path, digest)
            except ( OSError, IOError ) as e:
                if e.errno != errno.ENOENT:
                    raise
                # the file does not exist, ignore it

        if filenames:
            for filename in filenames:
                if not isinstance(filename, str):
                    continue
                file_path, _rw = self._validate_file_path(target, filename, path)
                if os.path.isdir(file_path):
                    file_data[filename] = 'directory'
                else:
                    _list_filename(filename, file_path)
        else:
            for _path, dirnames, files in os.walk(path, topdown = True):
                for filename in files:
                    _list_filename(filename, filename)
                for dirname in dirnames:
                    file_data[dirname] = 'directory'
                # WE ONLY generate the list of the path, not for
                # subpaths -- by design we only do the first level
                # because otherwise we could be generating a lot of
                # load in the system if a user makes a mistake and
                # keeps asking for a recursive list.
                # FIXME: throttle this call
                break
        file_data['result'] = dict(file_data)	# COMPAT
        return file_data

    def post_file(self, target, _who, args, files, user_path):
        """Upload a file into the user's storage area (or a RW target path)."""
        # we can only upload to the user's storage path, never to
        # paths_allowed -> hence why we always prefix it.
        file_path = self.arg_get(args, 'file_path', str)
        file_path_final, rw = self._validate_file_path(target, file_path, user_path)
        if not rw:
            raise PermissionError(f"{file_path}: is a read only location")
        file_object = files['file']
        # FIX: create the user's storage directory BEFORE saving; it was
        # previously created after the save, which would fail on the very
        # first upload into a not-yet-created storage area.
        commonl.makedirs_p(user_path)
        file_object.save(file_path_final)
        target.log.debug("%s: saved" % file_path_final)
        return dict()

    def get_file(self, target, _who, args, _files, user_path):
        """Stream a file from the user's storage or an allowed system path."""
        # we can get files from the user's path or from paths_allowed;
        # an absolute path is assumed to come from paths_allowed,
        # otherwise from the user's storage area.
        file_path = self.arg_get(args, 'file_path', str)
        offset = self.arg_get(args, 'offset', int,
                              allow_missing = True, default = 0)
        file_path_final, _ = self._validate_file_path(target, file_path, user_path)
        # interface core has file streaming support builtin
        # already, it will take care of streaming the file to the
        # client
        try:
            generation = os.readlink(file_path_final + ".generation")
        except OSError:
            # no generation marker for this file
            generation = 0
        # ttbd will parse this response in _target_interface() to
        # return a raw file according to these parameters.
        return dict(
            stream_file = file_path_final,
            stream_generation = generation,
            stream_offset = offset,
        )

    def delete_file(self, target, _who, args, _files, user_path):
        """Remove a file from the user's storage (read-write paths only)."""
        file_path = self.arg_get(args, 'file_path', str)
        file_path_final, rw = self._validate_file_path(target, file_path, user_path)
        if not rw:
            raise PermissionError(f"{file_path}: is a read only location")
        commonl.rm_f(file_path_final)
        return dict()
| 38.615063 | 91 | 0.59183 |
import errno
import hashlib
import os
import re
import commonl
import ttbl
paths_allowed = {
}
class interface(ttbl.tt_interface):
def __init__(self):
ttbl.tt_interface.__init__(self)
def _target_setup(self, target, iface_name):
pass
def _release_hook(self, target, _force):
pass
_bad_path = re.compile(r"(^\.\.$|^\.\./|/\.\./|/\.\.$)")
target_sub_paths = {
"capture": False,
}
def _validate_file_path(self, target, file_path, user_path):
matches = self._bad_path.findall(file_path)
if matches \
or os.path.pardir in file_path:
raise ValueError("%s: file path cannot contains components: "
"%s" % (file_path, " ".join(matches)))
if target:
for subpath, rw in self.target_sub_paths.items():
if file_path.startswith(subpath + "/"):
# directory (capture, certificates) remove any
# possible nastiness
file_name = os.path.basename(file_path)
file_path_final = os.path.join(target.state_dir, subpath, file_name)
return file_path_final, rw
# fall through
if not os.path.isabs(file_path):
# file comes from the user's storage
file_path_normalized = os.path.normpath(file_path)
file_path_final = os.path.join(user_path, file_path_normalized)
return file_path_final, True
for path, path_translated in paths_allowed.items():
if file_path.startswith(path):
file_path = file_path.replace(path, path_translated, 1)
file_path_final = os.path.normpath(file_path)
return file_path_final, False
raise RuntimeError(
"%s: tries to read from a location that is not allowed"
% file_path)
def _validate_path(self, target, path):
if target:
for subpath, _rw in self.target_sub_paths.items():
if path.startswith(subpath + "/") or path == subpath:
# directory (capture, certificates) remove any
# possible nastiness
return os.path.join(target.state_dir, subpath)
for valid_path, translated_path in paths_allowed.items():
if path.startswith(valid_path):
return path.replace(valid_path, translated_path, 1)
raise RuntimeError("%s: path not allowed" % path)
valid_digests = {
"md5": "MD5",
"sha256": "SHA256",
"sha512": "SHA512",
"zero": "no signature"
}
def get_list(self, target, _who, args, _files, user_path):
filenames = self.arg_get(args, 'filenames', list,
allow_missing = True, default = [ ])
path = self.arg_get(args, 'path', str,
allow_missing = True, default = None)
if path == None:
path = user_path
elif path == "/":
pass # special handling
else:
path = self._validate_path(target, path)
digest = self.arg_get(args, 'digest', str,
allow_missing = True, default = "sha256")
if digest not in self.valid_digests:
raise RuntimeError("%s: digest not allowed (only %s)"
% digest, ", ".join(self.valid_digests))
file_data = {}
if path == "/":
# we want the top level list of folders, handle it specially
for path in paths_allowed:
file_data[path] = "directory"
file_data['result'] = dict(file_data) # COMPAT
return file_data
def _list_filename(index_filename, filename):
file_path = os.path.join(path, filename)
try:
if digest == "zero":
file_data[index_filename] = "0"
else:
# note file path is normalized, so we shouldn't
file_data[index_filename] = commonl.hash_file_cached(file_path, digest)
except ( OSError, IOError ) as e:
if e.errno != errno.ENOENT:
raise
if filenames:
for filename in filenames:
if not isinstance(filename, str):
continue
file_path, _rw = self._validate_file_path(target, filename, path)
if os.path.isdir(file_path):
file_data[filename] = 'directory'
else:
_list_filename(filename, file_path)
else:
for _path, dirnames, files in os.walk(path, topdown = True):
for filename in files:
_list_filename(filename, filename)
for dirname in dirnames:
file_data[dirname] = 'directory'
break
file_data['result'] = dict(file_data)
return file_data
def post_file(self, target, _who, args, files, user_path):
# paths_allowed -> hence why we alway prefix it.
file_path = self.arg_get(args, 'file_path', str)
file_path_final, rw = self._validate_file_path(target, file_path, user_path)
if not rw:
raise PermissionError(f"{file_path}: is a read only location")
file_object = files['file']
file_object.save(file_path_final)
commonl.makedirs_p(user_path)
target.log.debug("%s: saved" % file_path_final)
return dict()
def get_file(self, target, _who, args, _files, user_path):
# we can get files from the user's path or from paths_allowed;
file_path = self.arg_get(args, 'file_path', str)
offset = self.arg_get(args, 'offset', int,
allow_missing = True, default = 0)
file_path_final, _ = self._validate_file_path(target, file_path, user_path)
# interface core has file streaming support builtin
# already, it will take care of streaming the file to the
# client
try:
generation = os.readlink(file_path_final + ".generation")
except OSError:
generation = 0
# ttbd will parse this response in _target_interface() to
# return a raw file according to these parameters.
return dict(
stream_file = file_path_final,
stream_generation = generation,
stream_offset = offset,
)
def delete_file(self, target, _who, args, _files, user_path):
file_path = self.arg_get(args, 'file_path', str)
file_path_final, rw = self._validate_file_path(target, file_path, user_path)
if not rw:
raise PermissionError(f"{file_path}: is a read only location")
commonl.rm_f(file_path_final)
return dict()
| true | true |
f7f505cc652887e8e2b4485259dc556dc177920c | 723 | py | Python | code/good_dims.py | spragunr/jmu_ml_dimensionality_lab | c6fe5adba8520f2a81f00556dcbf3574c1437016 | [
"MIT"
] | null | null | null | code/good_dims.py | spragunr/jmu_ml_dimensionality_lab | c6fe5adba8520f2a81f00556dcbf3574c1437016 | [
"MIT"
] | null | null | null | code/good_dims.py | spragunr/jmu_ml_dimensionality_lab | c6fe5adba8520f2a81f00556dcbf3574c1437016 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 09:18:20 2020
@author: spragunr
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
num = 100  # number of sample rows

# Six columns with assorted structure:
#   0: standard normal          3: standard normal
#   1: uniform [0, 1)           4: uniform, correlated with column 3
#   2: mostly 0 with spikes     5: product of columns 1 and 3
D = np.zeros((num, 6))
D[:, 0] = np.random.randn(num)
D[:, 1] = np.random.random(num)
D[np.random.randint(num, size=25), 2] = .5
D[np.random.randint(num, size=25), 2] = 1.0
D[:, 3] = np.random.randn(num)
D[:, 4] = np.random.random(num) + .2 * D[:, 3]
D[:, 5] = D[:, 1] * D[:, 3]

print(D)

# 2-D views of the dependent column against each factor...
plt.plot(D[:, 1], D[:, 5], '*')
plt.plot(D[:, 3], D[:, 5], "o")

# ...and a 3-D scatter showing column 5 lies on a 2-D surface.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(D[:, 1], D[:, 3], D[:, 5])

plt.show()

# FIX: extraction artifact (dataset statistics fused onto this line)
# removed; save the matrix as CSV with 5-decimal precision.
np.savetxt('dims.csv', D, fmt='%.5f', delimiter=',')
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
num = 100
D = np.zeros((num, 6))
D[:,0] = np.random.randn(num)
D[:,1] = np.random.random(num)
D[np.random.randint(num, size=25), 2] = .5
D[np.random.randint(num, size=25), 2] = 1.0
D[:,3] = np.random.randn(num)
D[:,4] = np.random.random(num) + .2 * D[:, 3]
D[:,5] = D[:,1] * D[:,3]
print(D)
plt.plot(D[:,1], D[:,5], '*')
plt.plot(D[:,3], D[:,5], "o")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(D[:,1], D[:, 3], D[:,5])
plt.show()
np.savetxt('dims.csv', D, fmt='%.5f', delimiter=',') | true | true |
f7f5077ce84f52eb001d1ed55bce909d535cd05f | 30 | py | Python | PiCN/Layers/__init__.py | NikolaiRutz/PiCN | 7775c61caae506a88af2e4ec34349e8bd9098459 | [
"BSD-3-Clause"
] | null | null | null | PiCN/Layers/__init__.py | NikolaiRutz/PiCN | 7775c61caae506a88af2e4ec34349e8bd9098459 | [
"BSD-3-Clause"
] | 5 | 2020-07-15T09:01:42.000Z | 2020-09-28T08:45:21.000Z | PiCN/Layers/__init__.py | NikolaiRutz/PiCN | 7775c61caae506a88af2e4ec34349e8bd9098459 | [
"BSD-3-Clause"
] | null | null | null | """Layers of the PiCN Stack""" | 30 | 30 | 0.666667 | true | true | |
f7f5079374313f0892e454e222ee2e8eef9ff5f3 | 2,026 | py | Python | setup.py | cchu70/getzlab-SignatureAnalyzer | e83d4673b181b982775b74046ae7ad3f10301def | [
"MIT"
] | 5 | 2021-11-23T16:19:22.000Z | 2022-01-30T01:36:41.000Z | setup.py | cchu70/getzlab-SignatureAnalyzer | e83d4673b181b982775b74046ae7ad3f10301def | [
"MIT"
] | 10 | 2021-11-06T16:49:58.000Z | 2022-02-08T09:52:43.000Z | setup.py | cchu70/getzlab-SignatureAnalyzer | e83d4673b181b982775b74046ae7ad3f10301def | [
"MIT"
] | 6 | 2021-11-09T04:34:15.000Z | 2022-03-23T19:43:52.000Z | from setuptools import setup
import re
import os
import sys
# Refuse to install on interpreters older than the minimum supported.
ver_info = sys.version_info
if ver_info < (3,6,0):
    raise RuntimeError("signatureanalyzer requires at least python 3.6.0")

# Single-source the version: parse __version__ out of the package __init__.
with open(os.path.join(os.path.dirname(__file__), 'signatureanalyzer', '__init__.py')) as r:
    version = re.search(r'__version__ = \'(\d+\.\d+\.\d+[-_a-zA-Z0-9]*)\'', r.read()).group(1)

setup(
    name = 'signatureanalyzer',
    version = version,
    author = 'Shankara Anand & Justin Cha - Broad Institute - Cancer Genome Computational Analysis',
    author_email = 'sanand@broadinstitute.org',
    url = 'https://github.com/broadinstitute/getzlab-SignatureAnalyzer',
    long_description = open("README.md", encoding="utf-8").read(),
    long_description_content_type = 'text/markdown',
    description = 'Bayesian NMF methods for mutational signature analysis & transcriptomic profiling on GPUs (Getz Lab).',
    packages = [
        'signatureanalyzer',
        'signatureanalyzer.plotting',
        'signatureanalyzer.pathways',
        'signatureanalyzer.signatureanalyzer_gpu'
    ],
    install_requires = [
        "gprofiler",
        "h5py>=2.9.0",
        "matplotlib",
        "numpy",
        "pandas>=0.25.0",
        "pyarrow>=0.14.1",
        "scikit-image>=0.15.0",
        "scikit-learn>=0.21.3",
        "scipy",
        "seaborn>=0.9.0",
        "tables>=3.6.1",
        "torch>=1.2.0",
        "tqdm>=4.33.0",
        "twobitreader>=3.1.7",
    ],
    # Reference signature data (COSMIC v2/v3) shipped inside the package.
    package_data = {
        "":[
            "ref/cosmic_v2/sa*",
            "ref/cosmic_v3/sa*"
        ]
    },
    # Installs the `signatureanalyzer` console command.
    entry_points = {
        'console_scripts': [
            'signatureanalyzer = signatureanalyzer.__main__:main'
        ]
    },
    classifiers = [
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
        "Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
    ],
    license="MIT"
)
| 31.65625 | 122 | 0.60464 | from setuptools import setup
import re
import os
import sys
ver_info = sys.version_info
if ver_info < (3,6,0):
raise RuntimeError("signatureanalyzer requires at least python 3.6.0")
with open(os.path.join(os.path.dirname(__file__), 'signatureanalyzer', '__init__.py')) as r:
version = re.search(r'__version__ = \'(\d+\.\d+\.\d+[-_a-zA-Z0-9]*)\'', r.read()).group(1)
setup(
name = 'signatureanalyzer',
version = version,
author = 'Shankara Anand & Justin Cha - Broad Institute - Cancer Genome Computational Analysis',
author_email = 'sanand@broadinstitute.org',
url = 'https://github.com/broadinstitute/getzlab-SignatureAnalyzer',
long_description = open("README.md", encoding="utf-8").read(),
long_description_content_type = 'text/markdown',
description = 'Bayesian NMF methods for mutational signature analysis & transcriptomic profiling on GPUs (Getz Lab).',
packages = [
'signatureanalyzer',
'signatureanalyzer.plotting',
'signatureanalyzer.pathways',
'signatureanalyzer.signatureanalyzer_gpu'
],
install_requires = [
"gprofiler",
"h5py>=2.9.0",
"matplotlib",
"numpy",
"pandas>=0.25.0",
"pyarrow>=0.14.1",
"scikit-image>=0.15.0",
"scikit-learn>=0.21.3",
"scipy",
"seaborn>=0.9.0",
"tables>=3.6.1",
"torch>=1.2.0",
"tqdm>=4.33.0",
"twobitreader>=3.1.7",
],
package_data = {
"":[
"ref/cosmic_v2/sa*",
"ref/cosmic_v3/sa*"
]
},
entry_points = {
'console_scripts': [
'signatureanalyzer = signatureanalyzer.__main__:main'
]
},
classifiers = [
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
],
license="MIT"
)
| true | true |
f7f508af78699ec9bde76677bf0ec282811530b1 | 2,809 | py | Python | translate.py | zhczhong/attention-is-all-you-need-pytorch | f58eeb04e40575def05f5cb1fc2906d55aa05ebe | [
"MIT"
] | null | null | null | translate.py | zhczhong/attention-is-all-you-need-pytorch | f58eeb04e40575def05f5cb1fc2906d55aa05ebe | [
"MIT"
] | null | null | null | translate.py | zhczhong/attention-is-all-you-need-pytorch | f58eeb04e40575def05f5cb1fc2906d55aa05ebe | [
"MIT"
] | null | null | null | ''' Translate input text with trained model. '''
import torch
import torch.utils.data
import argparse
from tqdm import tqdm
from dataset import collate_fn, TranslationDataset
from transformer.Translator import Translator
from preprocess import read_instances_from_file, convert_instance_to_idx_seq
from utils.postprocess import del_repeat
def main():
    """Command-line entry point: translate a source file with a trained model.

    Loads the vocabulary/preprocessing settings, converts the source text
    into index sequences, batches them and writes the beam-search
    decodings to the output file, one sentence per line.
    """
    parser = argparse.ArgumentParser(description='translate.py')

    parser.add_argument('-model', required=True,
                        help='Path to model .pt file')
    parser.add_argument('-src', required=True,
                        help='Source sequence to decode (one line per sequence)')
    parser.add_argument('-vocab', required=True,
                        help='Source sequence to decode (one line per sequence)')
    parser.add_argument('-output', default='pred.txt',
                        help="""Path to output the predictions (each line will
                        be the decoded sequence""")
    parser.add_argument('-beam_size', type=int, default=5,
                        help='Beam size')
    parser.add_argument('-batch_size', type=int, default=30,
                        help='Batch size')
    parser.add_argument('-n_best', type=int, default=1,
                        help="""If verbose is set, will output the n_best
                        decoded sentences""")
    parser.add_argument('-no_cuda', action='store_true')

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda

    # Prepare DataLoader
    preprocess_data = torch.load(opt.vocab)
    preprocess_settings = preprocess_data['settings']
    test_src_word_insts = read_instances_from_file(
        opt.src,
        preprocess_settings.max_word_seq_len,
        preprocess_settings.keep_case)
    test_src_insts = convert_instance_to_idx_seq(
        test_src_word_insts, preprocess_data['dict']['src'])

    test_loader = torch.utils.data.DataLoader(
        TranslationDataset(
            src_word2idx=preprocess_data['dict']['src'],
            tgt_word2idx=preprocess_data['dict']['tgt'],
            src_insts=test_src_insts),
        num_workers=2,
        batch_size=opt.batch_size,
        collate_fn=collate_fn)

    # FIX: load the checkpoint given on the command line; the required
    # -model argument was previously ignored in favor of a hard-coded
    # "./49.pth" path.  The checkpoint is expected to hold the encoder
    # under the "encoder" key, as before.
    encoder = torch.load(opt.model)["encoder"]
    translator = Translator(encoder, opt)

    with open(opt.output, 'w') as f:
        for batch in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
            all_hyp, all_scores = translator.translate_batch(*batch)
            for idx_seqs in all_hyp:
                for idx_seq in idx_seqs:
                    # Map indices back to words, dropping the trailing EOS.
                    pred_line = ' '.join([test_loader.dataset.tgt_idx2word[idx] for idx in idx_seq[:-1]])
                    f.write(pred_line + '\n')
    print('[Info] Finished.')
if __name__ == "__main__":
    main()  # run only when executed as a script, not on import
| 39.013889 | 105 | 0.632253 |
import torch
import torch.utils.data
import argparse
from tqdm import tqdm
from dataset import collate_fn, TranslationDataset
from transformer.Translator import Translator
from preprocess import read_instances_from_file, convert_instance_to_idx_seq
from utils.postprocess import del_repeat
def main():
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-vocab', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-no_cuda', action='store_true')
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
preprocess_data = torch.load(opt.vocab)
preprocess_settings = preprocess_data['settings']
test_src_word_insts = read_instances_from_file(
opt.src,
preprocess_settings.max_word_seq_len,
preprocess_settings.keep_case)
test_src_insts = convert_instance_to_idx_seq(
test_src_word_insts, preprocess_data['dict']['src'])
test_loader = torch.utils.data.DataLoader(
TranslationDataset(
src_word2idx=preprocess_data['dict']['src'],
tgt_word2idx=preprocess_data['dict']['tgt'],
src_insts=test_src_insts),
num_workers=2,
batch_size=opt.batch_size,
collate_fn=collate_fn)
encoder = torch.load("./49.pth")["encoder"]
translator = Translator(encoder,opt)
with open(opt.output, 'w') as f:
for batch in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
all_hyp, all_scores = translator.translate_batch(*batch)
for idx_seqs in all_hyp:
for idx_seq in idx_seqs:
pred_line = ' '.join([test_loader.dataset.tgt_idx2word[idx] for idx in idx_seq[:-1]])
f.write(pred_line + '\n')
print('[Info] Finished.')
if __name__ == "__main__":
main()
| true | true |
f7f508d87c0216f1f37dcfeba4319027f09d8447 | 1,733 | py | Python | flask_api/server.py | pedrocarvalhodev/flask_api | 31f57626f6c5f94c600fb53867b490aee7c74f8c | [
"MIT"
] | 214 | 2017-09-29T06:21:28.000Z | 2022-01-08T00:15:54.000Z | flask_api/server.py | pedrocarvalhodev/flask_api | 31f57626f6c5f94c600fb53867b490aee7c74f8c | [
"MIT"
] | 2 | 2017-09-29T10:34:40.000Z | 2018-06-20T12:55:56.000Z | flask_api/server.py | pedrocarvalhodev/flask_api | 31f57626f6c5f94c600fb53867b490aee7c74f8c | [
"MIT"
] | 141 | 2017-09-29T04:57:39.000Z | 2022-02-09T22:48:43.000Z | import os
import pandas as pd
import dill as pickle
from flask import Flask, jsonify, request
from utils import PreProcessing
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def apicall():
    """Score a JSON payload of loan applications and return predictions.

    Expects a pandas-serializable JSON body (orient='records') containing
    at least 'Loan_ID' and 'Dependents' columns; responds with a JSON list
    of (Loan_ID, prediction) records and status 200, or a 400 response for
    an empty payload.
    """
    try:
        test_json = request.get_json()
        test = pd.read_json(test_json, orient='records')

        # To resolve the issue of TypeError: Cannot compare types
        # 'ndarray(dtype=int64)' and 'str'
        test['Dependents'] = [str(x) for x in list(test['Dependents'])]

        # Keep the Loan_IDs aside to pair with the predictions later.
        loan_ids = test['Loan_ID']

    except Exception:
        # FIX: re-raise with "raise" (not "raise e") so the original
        # traceback is preserved unchanged.
        raise

    clf = 'model_v1.pk'  # serialized model file under ./models/

    if test.empty:
        # Guard clause: nothing to score.
        return bad_request()

    # Load the saved model.
    print("Loading the model...")
    with open('./models/' + clf, 'rb') as f:
        loaded_model = pickle.load(f)

    print("The model has been loaded...doing predictions now...")
    predictions = loaded_model.predict(test)

    # Pair each Loan_ID with its prediction in a new dataframe.
    prediction_series = list(pd.Series(predictions))
    final_predictions = pd.DataFrame(list(zip(loan_ids, prediction_series)))

    # Send the JSON payload with an explicit status code.
    responses = jsonify(predictions=final_predictions.to_json(orient="records"))
    responses.status_code = 200

    return responses
@app.errorhandler(400)
def bad_request(error=None):
message = {
'status': 400,
'message': 'Bad Request: ' + request.url + '--> Please check your data payload...',
}
resp = jsonify(message)
resp.status_code = 400
return resp | 25.485294 | 91 | 0.701096 | import os
import pandas as pd
import dill as pickle
from flask import Flask, jsonify, request
from utils import PreProcessing
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def apicall():
try:
test_json = request.get_json()
test = pd.read_json(test_json, orient='records')
test['Dependents'] = [str(x) for x in list(test['Dependents'])]
loan_ids = test['Loan_ID']
except Exception as e:
raise e
clf = 'model_v1.pk'
if test.empty:
return(bad_request())
else:
print("Loading the model...")
loaded_model = None
with open('./models/'+clf,'rb') as f:
loaded_model = pickle.load(f)
print("The model has been loaded...doing predictions now...")
predictions = loaded_model.predict(test)
"""Add the predictions as Series to a new pandas dataframe
OR
Depending on the use-case, the entire test data appended with the new files
"""
prediction_series = list(pd.Series(predictions))
final_predictions = pd.DataFrame(list(zip(loan_ids, prediction_series)))
"""We can be as creative in sending the responses.
But we need to send the response codes as well.
"""
responses = jsonify(predictions=final_predictions.to_json(orient="records"))
responses.status_code = 200
return (responses)
@app.errorhandler(400)
def bad_request(error=None):
message = {
'status': 400,
'message': 'Bad Request: ' + request.url + '--> Please check your data payload...',
}
resp = jsonify(message)
resp.status_code = 400
return resp | true | true |
f7f50a664ec44b3942333cc05d427de5c1007e01 | 19,671 | py | Python | alf/algorithms/agent.py | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 | [
"Apache-2.0"
] | null | null | null | alf/algorithms/agent.py | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 | [
"Apache-2.0"
] | null | null | null | alf/algorithms/agent.py | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent for integrating multiple algorithms."""
import copy
from typing import Callable
import alf
from alf.algorithms.actor_critic_algorithm import ActorCriticAlgorithm
from alf.algorithms.agent_helpers import AgentHelper
from alf.algorithms.config import TrainerConfig
from alf.algorithms.entropy_target_algorithm import (
EntropyTargetAlgorithm, NestedEntropyTargetAlgorithm)
from alf.algorithms.icm_algorithm import ICMAlgorithm
from alf.algorithms.mbrl_algorithm import LatentMbrlAlgorithm
from alf.algorithms.predictive_representation_learner import \
PredictiveRepresentationLearner
from alf.algorithms.rl_algorithm import RLAlgorithm
from alf.data_structures import AlgStep, Experience
from alf.data_structures import TimeStep, namedtuple
from alf.tensor_specs import TensorSpec
AgentState = namedtuple(
"AgentState", ["rl", "irm", "goal_generator", "repr", "rw"],
default_value=())
AgentInfo = namedtuple(
"AgentInfo",
["rl", "irm", "goal_generator", "entropy_target", "repr", "rw", "rewards"],
default_value=())
@alf.configurable
class Agent(RLAlgorithm):
"""Agent is a master algorithm that integrates different algorithms together.
"""
def __init__(self,
observation_spec,
action_spec,
reward_spec=TensorSpec(()),
env=None,
config: TrainerConfig = None,
rl_algorithm_cls=ActorCriticAlgorithm,
reward_weight_algorithm_cls=None,
representation_learner_cls=None,
goal_generator=None,
intrinsic_reward_module=None,
intrinsic_reward_coef=1.0,
extrinsic_reward_coef=1.0,
enforce_entropy_target=False,
entropy_target_cls=None,
optimizer=None,
debug_summaries=False,
name="AgentAlgorithm"):
"""
Args:
observation_spec (nested TensorSpec): representing the observations.
action_spec (nested BoundedTensorSpec): representing the actions.
reward_spec (TensorSpec): a rank-1 or rank-0 tensor spec representing
the reward(s).
env (Environment): The environment to interact with. ``env`` is a
batched environment, which means that it runs multiple
simulations simultaneously. Running multiple environments in
parallel is crucial to on-policy algorithms as it increases the
diversity of data and decreases temporal correlation. ``env`` only
needs to be provided to the root ``Algorithm``.
config (TrainerConfig): config for training. config only needs to be
provided to the algorithm which performs ``train_iter()`` by
itself.
rl_algorithm_cls (type): The algorithm class for learning the policy.
reward_weight_algorithm_cls (type): The algorithm class for adjusting
reward weights when multi-dim rewards are used. If provided, the
the default ``reward_weights`` of ``rl_algorithm`` will be
overwritten by this algorithm.
representation_learner_cls (type): The algorithm class for learning
the representation. If provided, the constructed learner will
calculate the representation from the original observation as
the observation for downstream algorithms such as ``rl_algorithm``.
intrinsic_reward_module (Algorithm): an algorithm whose outputs
is a scalar intrinsic reward.
goal_generator (Algorithm): an algorithm which outputs a tuple of goal
vector and a reward. The reward can be ``()`` if no reward is given.
intrinsic_reward_coef (float): Coefficient for intrinsic reward
extrinsic_reward_coef (float): Coefficient for extrinsic reward
enforce_entropy_target (bool): If True, use ``(Nested)EntropyTargetAlgorithm``
to dynamically adjust entropy regularization so that entropy is
not smaller than ``entropy_target`` supplied for constructing
``(Nested)EntropyTargetAlgorithm``. If this is enabled, make sure you don't
use ``entropy_regularization`` for loss (see ``ActorCriticLoss`` or
``PPOLoss``). In order to use this, The ``AlgStep.info`` from
``rl_algorithm_cls.train_step()`` and ``rl_algorithm_cls.rollout_step()``
needs to contain ``action_distribution``.
entropy_target_cls (type): If provided, will be used to dynamically
adjust entropy regularization.
optimizer (optimizer): The optimizer for training
debug_summaries (bool): True if debug summaries should be created.
name (str): Name of this algorithm.
"""
agent_helper = AgentHelper(AgentState)
rl_observation_spec = observation_spec
## 0. representation learner
representation_learner = None
if representation_learner_cls is not None:
representation_learner = representation_learner_cls(
observation_spec=rl_observation_spec,
action_spec=action_spec,
debug_summaries=debug_summaries)
rl_observation_spec = representation_learner.output_spec
agent_helper.register_algorithm(representation_learner, "repr")
## 1. goal generator
if goal_generator is not None:
agent_helper.register_algorithm(goal_generator, "goal_generator")
rl_observation_spec = [
rl_observation_spec, goal_generator.action_spec
]
## 2. rl algorithm
rl_algorithm = rl_algorithm_cls(
observation_spec=rl_observation_spec,
action_spec=action_spec,
reward_spec=reward_spec,
debug_summaries=debug_summaries)
agent_helper.register_algorithm(rl_algorithm, "rl")
if isinstance(rl_algorithm, LatentMbrlAlgorithm):
assert isinstance(representation_learner,
PredictiveRepresentationLearner), (
"need to use "
"PredictiveRepresentationLearner")
rl_algorithm.set_latent_predictive_representation_module(
representation_learner)
## 3. intrinsic motivation module
if intrinsic_reward_module is not None:
agent_helper.register_algorithm(intrinsic_reward_module, "irm")
## 4. entropy target
entropy_target_algorithm = None
if entropy_target_cls or enforce_entropy_target:
if entropy_target_cls is None:
if alf.nest.is_nested(action_spec):
entropy_target_cls = NestedEntropyTargetAlgorithm
else:
entropy_target_cls = EntropyTargetAlgorithm
entropy_target_algorithm = entropy_target_cls(
action_spec, debug_summaries=debug_summaries)
agent_helper.register_algorithm(entropy_target_algorithm,
"entropy_target")
# 5. reward weight algorithm
reward_weight_algorithm = None
if reward_weight_algorithm_cls is not None:
reward_weight_algorithm = reward_weight_algorithm_cls(
reward_spec=reward_spec, debug_summaries=debug_summaries)
agent_helper.register_algorithm(reward_weight_algorithm, "rw")
# Initialize the reward weights of the rl algorithm
rl_algorithm.set_reward_weights(
reward_weight_algorithm.reward_weights)
super().__init__(
observation_spec=observation_spec,
action_spec=action_spec,
reward_spec=reward_spec,
optimizer=optimizer,
is_on_policy=rl_algorithm.on_policy,
env=env,
config=config,
debug_summaries=debug_summaries,
name=name,
**agent_helper.state_specs())
for alg in (representation_learner, goal_generator,
intrinsic_reward_module, entropy_target_algorithm,
reward_weight_algorithm):
if alg is not None:
alg.set_on_policy(self.on_policy)
self._representation_learner = representation_learner
self._rl_algorithm = rl_algorithm
self._reward_weight_algorithm = reward_weight_algorithm
self._entropy_target_algorithm = entropy_target_algorithm
self._intrinsic_reward_coef = intrinsic_reward_coef
self._extrinsic_reward_coef = extrinsic_reward_coef
self._irm = intrinsic_reward_module
self._goal_generator = goal_generator
self._agent_helper = agent_helper
# Set ``use_rollout_state``` for all submodules using the setter.
# Need to make sure that no submodules use ``self._use_rollout_state``
# before this line.
self.use_rollout_state = self.use_rollout_state
def set_path(self, path):
super().set_path(path)
self._agent_helper.set_path(path)
def predict_step(self, time_step: TimeStep, state: AgentState):
"""Predict for one step."""
new_state = AgentState()
observation = time_step.observation
info = AgentInfo()
if self._representation_learner is not None:
repr_step = self._representation_learner.predict_step(
time_step, state.repr)
new_state = new_state._replace(repr=repr_step.state)
info = info._replace(repr=repr_step.info)
observation = repr_step.output
if self._goal_generator is not None:
goal_step = self._goal_generator.predict_step(
time_step._replace(observation=observation),
state.goal_generator)
goal, goal_reward = goal_step.output
new_state = new_state._replace(goal_generator=goal_step.state)
info = info._replace(goal_generator=goal_step.info)
observation = [observation, goal]
rl_step = self._rl_algorithm.predict_step(
time_step._replace(observation=observation), state.rl)
new_state = new_state._replace(rl=rl_step.state)
info = info._replace(rl=rl_step.info)
return AlgStep(output=rl_step.output, state=new_state, info=info)
def rollout_step(self, time_step: TimeStep, state: AgentState):
"""Rollout for one step."""
new_state = AgentState()
info = AgentInfo()
observation = time_step.observation
if self._representation_learner is not None:
repr_step = self._representation_learner.rollout_step(
time_step, state.repr)
new_state = new_state._replace(repr=repr_step.state)
info = info._replace(repr=repr_step.info)
observation = repr_step.output
rewards = {}
if self._goal_generator is not None:
goal_step = self._goal_generator.rollout_step(
time_step._replace(observation=observation),
state.goal_generator)
new_state = new_state._replace(goal_generator=goal_step.state)
info = info._replace(goal_generator=goal_step.info)
goal, goal_reward = goal_step.output
observation = [observation, goal]
if goal_reward != ():
rewards['goal_generator'] = goal_reward
if self._irm is not None:
irm_step = self._irm.rollout_step(
time_step._replace(observation=observation), state=state.irm)
info = info._replace(irm=irm_step.info)
new_state = new_state._replace(irm=irm_step.state)
rewards['irm'] = irm_step.output
if rewards:
info = info._replace(rewards=rewards)
overall_reward = self._calc_overall_reward(time_step.reward,
rewards)
else:
overall_reward = time_step.reward
rl_time_step = time_step._replace(
observation=observation, reward=overall_reward)
rl_step = self._rl_algorithm.rollout_step(rl_time_step, state.rl)
new_state = new_state._replace(rl=rl_step.state)
info = info._replace(rl=rl_step.info)
if self._entropy_target_algorithm:
assert 'action_distribution' in rl_step.info._fields, (
"AlgStep from rl_algorithm.rollout() does not contain "
"`action_distribution`, which is required by "
"`enforce_entropy_target`")
et_step = self._entropy_target_algorithm.rollout_step(
(rl_step.info.action_distribution, time_step.step_type))
info = info._replace(entropy_target=et_step.info)
if self._reward_weight_algorithm:
rw_step = self._reward_weight_algorithm.rollout_step(
time_step, state.rw)
info = info._replace(rw=rw_step.info)
return AlgStep(output=rl_step.output, state=new_state, info=info)
def train_step(self, time_step: TimeStep, state, rollout_info):
new_state = AgentState()
info = AgentInfo(rewards=rollout_info.rewards)
observation = time_step.observation
if self._representation_learner is not None:
repr_step = self._representation_learner.train_step(
time_step, state.repr, rollout_info.repr)
new_state = new_state._replace(repr=repr_step.state)
info = info._replace(repr=repr_step.info)
observation = repr_step.output
if self._goal_generator is not None:
goal_step = self._goal_generator.train_step(
time_step._replace(observation=observation),
state.goal_generator, rollout_info.goal_generator)
goal, goal_reward = goal_step.output
info = info._replace(goal_generator=goal_step.info)
new_state = new_state._replace(goal_generator=goal_step.state)
observation = [observation, goal]
if self._irm is not None:
irm_step = self._irm.train_step(
time_step._replace(observation=observation), state=state.irm)
info = info._replace(irm=irm_step.info)
new_state = new_state._replace(irm=irm_step.state)
rl_step = self._rl_algorithm.train_step(
time_step._replace(observation=observation), state.rl,
rollout_info.rl)
new_state = new_state._replace(rl=rl_step.state)
info = info._replace(rl=rl_step.info)
if self._entropy_target_algorithm:
assert 'action_distribution' in rl_step.info._fields, (
"PolicyStep from rl_algorithm.train_step() does not contain "
"`action_distribution`, which is required by "
"`enforce_entropy_target`")
et_step = self._entropy_target_algorithm.train_step(
(rl_step.info.action_distribution, time_step.step_type))
info = info._replace(entropy_target=et_step.info)
return AlgStep(output=rl_step.output, state=new_state, info=info)
def _calc_overall_reward(self, extrinsic_reward, intrinsic_rewards):
overall_reward = extrinsic_reward
if self._extrinsic_reward_coef != 1:
overall_reward *= self._extrinsic_reward_coef
if 'irm' in intrinsic_rewards:
overall_reward += self._intrinsic_reward_coef * intrinsic_rewards[
'irm']
if 'goal_generator' in intrinsic_rewards:
overall_reward += intrinsic_rewards['goal_generator']
return overall_reward
def calc_loss(self, info: AgentInfo):
"""Calculate loss."""
if info.rewards != ():
for name, reward in info.rewards.items():
self.summarize_reward("reward/%s" % name, reward)
algorithms = [
self._representation_learner, self._rl_algorithm, self._irm,
self._goal_generator, self._entropy_target_algorithm
]
algorithms = list(filter(lambda a: a is not None, algorithms))
return self._agent_helper.accumulate_loss_info(algorithms, info)
def after_update(self, experience, train_info: AgentInfo):
"""Call ``after_update()`` of the RL algorithm and goal generator,
respectively.
"""
algorithms = [
self._rl_algorithm, self._representation_learner,
self._goal_generator
]
algorithms = list(filter(lambda a: a is not None, algorithms))
self._agent_helper.after_update(algorithms, experience, train_info)
def after_train_iter(self, experience, info: AgentInfo):
"""Call ``after_train_iter()`` of the RL algorithm and goal generator,
respectively.
"""
algorithms = [
self._rl_algorithm, self._representation_learner,
self._goal_generator, self._reward_weight_algorithm
]
algorithms = list(filter(lambda a: a is not None, algorithms))
self._agent_helper.after_train_iter(algorithms, experience, info)
if self._reward_weight_algorithm:
self._rl_algorithm.set_reward_weights(
self._reward_weight_algorithm.reward_weights)
def preprocess_experience(self, root_inputs, rollout_info, batch_info):
"""Add intrinsic rewards to extrinsic rewards if there is an intrinsic
reward module. Also call ``preprocess_experience()`` of the rl
algorithm.
"""
exp = root_inputs
rewards = rollout_info.rewards
if rewards != ():
rewards = copy.copy(rewards)
rewards['overall'] = self._calc_overall_reward(
root_inputs.reward, rewards)
exp = exp._replace(reward=rewards['overall'])
if self._representation_learner:
exp, repr_info = self._representation_learner.preprocess_experience(
exp, rollout_info.repr, batch_info)
rollout_info = rollout_info._replace(repr=repr_info)
exp, rl_info = self._rl_algorithm.preprocess_experience(
exp, rollout_info.rl, batch_info)
return exp, rollout_info._replace(rl=rl_info)
def summarize_rollout(self, experience):
"""First call ``RLAlgorithm.summarize_rollout()`` to summarize basic
rollout statisics. If the rl algorithm has overridden this function,
then also call its customized version.
"""
super(Agent, self).summarize_rollout(experience)
if (super(Agent, self).summarize_rollout.__func__ !=
self._rl_algorithm.summarize_rollout.__func__):
self._rl_algorithm.summarize_rollout(
experience._replace(rollout_info=experience.rollout_info.rl))
| 45.853147 | 91 | 0.651467 |
import copy
from typing import Callable
import alf
from alf.algorithms.actor_critic_algorithm import ActorCriticAlgorithm
from alf.algorithms.agent_helpers import AgentHelper
from alf.algorithms.config import TrainerConfig
from alf.algorithms.entropy_target_algorithm import (
EntropyTargetAlgorithm, NestedEntropyTargetAlgorithm)
from alf.algorithms.icm_algorithm import ICMAlgorithm
from alf.algorithms.mbrl_algorithm import LatentMbrlAlgorithm
from alf.algorithms.predictive_representation_learner import \
PredictiveRepresentationLearner
from alf.algorithms.rl_algorithm import RLAlgorithm
from alf.data_structures import AlgStep, Experience
from alf.data_structures import TimeStep, namedtuple
from alf.tensor_specs import TensorSpec
AgentState = namedtuple(
"AgentState", ["rl", "irm", "goal_generator", "repr", "rw"],
default_value=())
AgentInfo = namedtuple(
"AgentInfo",
["rl", "irm", "goal_generator", "entropy_target", "repr", "rw", "rewards"],
default_value=())
@alf.configurable
class Agent(RLAlgorithm):
def __init__(self,
observation_spec,
action_spec,
reward_spec=TensorSpec(()),
env=None,
config: TrainerConfig = None,
rl_algorithm_cls=ActorCriticAlgorithm,
reward_weight_algorithm_cls=None,
representation_learner_cls=None,
goal_generator=None,
intrinsic_reward_module=None,
intrinsic_reward_coef=1.0,
extrinsic_reward_coef=1.0,
enforce_entropy_target=False,
entropy_target_cls=None,
optimizer=None,
debug_summaries=False,
name="AgentAlgorithm"):
agent_helper = AgentHelper(AgentState)
rl_observation_spec = observation_spec
rner = None
if representation_learner_cls is not None:
representation_learner = representation_learner_cls(
observation_spec=rl_observation_spec,
action_spec=action_spec,
debug_summaries=debug_summaries)
rl_observation_spec = representation_learner.output_spec
agent_helper.register_algorithm(representation_learner, "repr")
nerator is not None:
agent_helper.register_algorithm(goal_generator, "goal_generator")
rl_observation_spec = [
rl_observation_spec, goal_generator.action_spec
]
ithm = rl_algorithm_cls(
observation_spec=rl_observation_spec,
action_spec=action_spec,
reward_spec=reward_spec,
debug_summaries=debug_summaries)
agent_helper.register_algorithm(rl_algorithm, "rl")
if isinstance(rl_algorithm, LatentMbrlAlgorithm):
assert isinstance(representation_learner,
PredictiveRepresentationLearner), (
"need to use "
"PredictiveRepresentationLearner")
rl_algorithm.set_latent_predictive_representation_module(
representation_learner)
ule is not None:
agent_helper.register_algorithm(intrinsic_reward_module, "irm")
rget_algorithm = None
if entropy_target_cls or enforce_entropy_target:
if entropy_target_cls is None:
if alf.nest.is_nested(action_spec):
entropy_target_cls = NestedEntropyTargetAlgorithm
else:
entropy_target_cls = EntropyTargetAlgorithm
entropy_target_algorithm = entropy_target_cls(
action_spec, debug_summaries=debug_summaries)
agent_helper.register_algorithm(entropy_target_algorithm,
"entropy_target")
reward_weight_algorithm = None
if reward_weight_algorithm_cls is not None:
reward_weight_algorithm = reward_weight_algorithm_cls(
reward_spec=reward_spec, debug_summaries=debug_summaries)
agent_helper.register_algorithm(reward_weight_algorithm, "rw")
rl_algorithm.set_reward_weights(
reward_weight_algorithm.reward_weights)
super().__init__(
observation_spec=observation_spec,
action_spec=action_spec,
reward_spec=reward_spec,
optimizer=optimizer,
is_on_policy=rl_algorithm.on_policy,
env=env,
config=config,
debug_summaries=debug_summaries,
name=name,
**agent_helper.state_specs())
for alg in (representation_learner, goal_generator,
intrinsic_reward_module, entropy_target_algorithm,
reward_weight_algorithm):
if alg is not None:
alg.set_on_policy(self.on_policy)
self._representation_learner = representation_learner
self._rl_algorithm = rl_algorithm
self._reward_weight_algorithm = reward_weight_algorithm
self._entropy_target_algorithm = entropy_target_algorithm
self._intrinsic_reward_coef = intrinsic_reward_coef
self._extrinsic_reward_coef = extrinsic_reward_coef
self._irm = intrinsic_reward_module
self._goal_generator = goal_generator
self._agent_helper = agent_helper
self.use_rollout_state = self.use_rollout_state
def set_path(self, path):
super().set_path(path)
self._agent_helper.set_path(path)
def predict_step(self, time_step: TimeStep, state: AgentState):
new_state = AgentState()
observation = time_step.observation
info = AgentInfo()
if self._representation_learner is not None:
repr_step = self._representation_learner.predict_step(
time_step, state.repr)
new_state = new_state._replace(repr=repr_step.state)
info = info._replace(repr=repr_step.info)
observation = repr_step.output
if self._goal_generator is not None:
goal_step = self._goal_generator.predict_step(
time_step._replace(observation=observation),
state.goal_generator)
goal, goal_reward = goal_step.output
new_state = new_state._replace(goal_generator=goal_step.state)
info = info._replace(goal_generator=goal_step.info)
observation = [observation, goal]
rl_step = self._rl_algorithm.predict_step(
time_step._replace(observation=observation), state.rl)
new_state = new_state._replace(rl=rl_step.state)
info = info._replace(rl=rl_step.info)
return AlgStep(output=rl_step.output, state=new_state, info=info)
def rollout_step(self, time_step: TimeStep, state: AgentState):
new_state = AgentState()
info = AgentInfo()
observation = time_step.observation
if self._representation_learner is not None:
repr_step = self._representation_learner.rollout_step(
time_step, state.repr)
new_state = new_state._replace(repr=repr_step.state)
info = info._replace(repr=repr_step.info)
observation = repr_step.output
rewards = {}
if self._goal_generator is not None:
goal_step = self._goal_generator.rollout_step(
time_step._replace(observation=observation),
state.goal_generator)
new_state = new_state._replace(goal_generator=goal_step.state)
info = info._replace(goal_generator=goal_step.info)
goal, goal_reward = goal_step.output
observation = [observation, goal]
if goal_reward != ():
rewards['goal_generator'] = goal_reward
if self._irm is not None:
irm_step = self._irm.rollout_step(
time_step._replace(observation=observation), state=state.irm)
info = info._replace(irm=irm_step.info)
new_state = new_state._replace(irm=irm_step.state)
rewards['irm'] = irm_step.output
if rewards:
info = info._replace(rewards=rewards)
overall_reward = self._calc_overall_reward(time_step.reward,
rewards)
else:
overall_reward = time_step.reward
rl_time_step = time_step._replace(
observation=observation, reward=overall_reward)
rl_step = self._rl_algorithm.rollout_step(rl_time_step, state.rl)
new_state = new_state._replace(rl=rl_step.state)
info = info._replace(rl=rl_step.info)
if self._entropy_target_algorithm:
assert 'action_distribution' in rl_step.info._fields, (
"AlgStep from rl_algorithm.rollout() does not contain "
"`action_distribution`, which is required by "
"`enforce_entropy_target`")
et_step = self._entropy_target_algorithm.rollout_step(
(rl_step.info.action_distribution, time_step.step_type))
info = info._replace(entropy_target=et_step.info)
if self._reward_weight_algorithm:
rw_step = self._reward_weight_algorithm.rollout_step(
time_step, state.rw)
info = info._replace(rw=rw_step.info)
return AlgStep(output=rl_step.output, state=new_state, info=info)
def train_step(self, time_step: TimeStep, state, rollout_info):
new_state = AgentState()
info = AgentInfo(rewards=rollout_info.rewards)
observation = time_step.observation
if self._representation_learner is not None:
repr_step = self._representation_learner.train_step(
time_step, state.repr, rollout_info.repr)
new_state = new_state._replace(repr=repr_step.state)
info = info._replace(repr=repr_step.info)
observation = repr_step.output
if self._goal_generator is not None:
goal_step = self._goal_generator.train_step(
time_step._replace(observation=observation),
state.goal_generator, rollout_info.goal_generator)
goal, goal_reward = goal_step.output
info = info._replace(goal_generator=goal_step.info)
new_state = new_state._replace(goal_generator=goal_step.state)
observation = [observation, goal]
if self._irm is not None:
irm_step = self._irm.train_step(
time_step._replace(observation=observation), state=state.irm)
info = info._replace(irm=irm_step.info)
new_state = new_state._replace(irm=irm_step.state)
rl_step = self._rl_algorithm.train_step(
time_step._replace(observation=observation), state.rl,
rollout_info.rl)
new_state = new_state._replace(rl=rl_step.state)
info = info._replace(rl=rl_step.info)
if self._entropy_target_algorithm:
assert 'action_distribution' in rl_step.info._fields, (
"PolicyStep from rl_algorithm.train_step() does not contain "
"`action_distribution`, which is required by "
"`enforce_entropy_target`")
et_step = self._entropy_target_algorithm.train_step(
(rl_step.info.action_distribution, time_step.step_type))
info = info._replace(entropy_target=et_step.info)
return AlgStep(output=rl_step.output, state=new_state, info=info)
def _calc_overall_reward(self, extrinsic_reward, intrinsic_rewards):
overall_reward = extrinsic_reward
if self._extrinsic_reward_coef != 1:
overall_reward *= self._extrinsic_reward_coef
if 'irm' in intrinsic_rewards:
overall_reward += self._intrinsic_reward_coef * intrinsic_rewards[
'irm']
if 'goal_generator' in intrinsic_rewards:
overall_reward += intrinsic_rewards['goal_generator']
return overall_reward
def calc_loss(self, info: AgentInfo):
if info.rewards != ():
for name, reward in info.rewards.items():
self.summarize_reward("reward/%s" % name, reward)
algorithms = [
self._representation_learner, self._rl_algorithm, self._irm,
self._goal_generator, self._entropy_target_algorithm
]
algorithms = list(filter(lambda a: a is not None, algorithms))
return self._agent_helper.accumulate_loss_info(algorithms, info)
def after_update(self, experience, train_info: AgentInfo):
algorithms = [
self._rl_algorithm, self._representation_learner,
self._goal_generator
]
algorithms = list(filter(lambda a: a is not None, algorithms))
self._agent_helper.after_update(algorithms, experience, train_info)
def after_train_iter(self, experience, info: AgentInfo):
algorithms = [
self._rl_algorithm, self._representation_learner,
self._goal_generator, self._reward_weight_algorithm
]
algorithms = list(filter(lambda a: a is not None, algorithms))
self._agent_helper.after_train_iter(algorithms, experience, info)
if self._reward_weight_algorithm:
self._rl_algorithm.set_reward_weights(
self._reward_weight_algorithm.reward_weights)
def preprocess_experience(self, root_inputs, rollout_info, batch_info):
exp = root_inputs
rewards = rollout_info.rewards
if rewards != ():
rewards = copy.copy(rewards)
rewards['overall'] = self._calc_overall_reward(
root_inputs.reward, rewards)
exp = exp._replace(reward=rewards['overall'])
if self._representation_learner:
exp, repr_info = self._representation_learner.preprocess_experience(
exp, rollout_info.repr, batch_info)
rollout_info = rollout_info._replace(repr=repr_info)
exp, rl_info = self._rl_algorithm.preprocess_experience(
exp, rollout_info.rl, batch_info)
return exp, rollout_info._replace(rl=rl_info)
def summarize_rollout(self, experience):
super(Agent, self).summarize_rollout(experience)
if (super(Agent, self).summarize_rollout.__func__ !=
self._rl_algorithm.summarize_rollout.__func__):
self._rl_algorithm.summarize_rollout(
experience._replace(rollout_info=experience.rollout_info.rl))
| true | true |
f7f50a6af1a7b21349205ad2a4f263668a442225 | 3,651 | py | Python | segment tree 1/4D.py | iammanish17/CodeforcesEdu | 961543b332c773010320bd0b2e9d4a4b1c8dc0ea | [
"MIT"
] | 6 | 2020-09-14T19:16:23.000Z | 2021-12-10T19:07:51.000Z | segment tree 1/4D.py | iammanish17/CodeforcesEdu | 961543b332c773010320bd0b2e9d4a4b1c8dc0ea | [
"MIT"
] | null | null | null | segment tree 1/4D.py | iammanish17/CodeforcesEdu | 961543b332c773010320bd0b2e9d4a4b1c8dc0ea | [
"MIT"
] | 1 | 2021-08-12T19:37:22.000Z | 2021-08-12T19:37:22.000Z | # By manish.17, contest: ITMO Academy. Дерево отрезков часть 1. 4, problem: (D) Number of Different on Segment
# https://codeforces.com/profile/manish.17
# ------------------- fast io --------------------
import os
import sys
from io import BytesIO, IOBase
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# ------------------- fast io --------------------
from math import inf, log2
class SegmentTree:
    """Segment tree answering "number of distinct values on a segment"
    queries with point updates.

    Every node stores the set of distinct values in its range, so a query
    or update costs O(log n) set unions; worst-case memory is O(n log n).
    Indices are 0-based and query ranges are inclusive at both ends.
    """
    def __init__(self, array):
        self.n = len(array)
        # Smallest power of two >= n (1 when n == 1); leaves live at
        # data[size] .. data[size + n - 1].
        self.size = 2**(int(log2(self.n-1))+1) if self.n != 1 else 1
        self.data = [set() for _ in range(2 * self.size)]
        self.process(array)
    def process(self, array):
        """Fill the leaf sets and merge them bottom-up into internal nodes."""
        # Bug fix: the original iterated `range(n)` over a *global* n, so the
        # class only worked inside the script that happened to define it.
        for i in range(self.n):
            self.data[self.size + i] = {array[i]}
        # Node 0 is unused, so stop the merge at index 1.
        for i in range(self.size - 1, 0, -1):
            self.data[i] = self.data[2 * i] | self.data[2 * i + 1]
    def query(self, alpha, omega):
        """Return the number of distinct values on [alpha, omega] (inclusive)."""
        if alpha == omega:
            # Single element: its leaf set always has exactly one value.
            return len(self.data[alpha + self.size])
        res = set()
        alpha += self.size
        omega += self.size + 1  # half-open [alpha, omega) over leaf indices
        while alpha < omega:
            if alpha & 1:  # alpha is a right child: take it and step right
                res |= self.data[alpha]
                alpha += 1
            if omega & 1:  # omega is past a right child: step left and take it
                omega -= 1
                res |= self.data[omega]
            alpha >>= 1
            omega >>= 1
        return len(res)
    def update(self, index, value):
        """Set array[index] = value and rebuild the affected path to the root."""
        index += self.size
        self.data[index] = {value}
        index >>= 1
        while index:
            self.data[index] = self.data[2 * index] | self.data[2 * index + 1]
            index >>= 1
# Read n (array size) and q (query count), then answer q online queries:
#   "1 x y" -> print the number of distinct values on a[x..y] (1-based),
#   "2 x y" -> set a[x] = y.
n, q = map(int, input().split())
a = list(map(int, input().split()))
st = SegmentTree(a)
for _ in range(q):
    t, x, y = map(int, input().split())
    if t == 1:
        print(st.query(x-1, y-1))  # convert 1-based input to 0-based indices
    else:
        st.update(x-1, y)
| 32.026316 | 110 | 0.548069 |
import os
import sys
from io import BytesIO, IOBase
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
from math import inf, log2
class SegmentTree:
    """Segment tree answering "number of distinct values on a segment"
    queries with point updates.

    Every node stores the set of distinct values in its range, so a query
    or update costs O(log n) set unions; worst-case memory is O(n log n).
    Indices are 0-based and query ranges are inclusive at both ends.
    """
    def __init__(self, array):
        self.n = len(array)
        # Smallest power of two >= n (1 when n == 1); leaves live at
        # data[size] .. data[size + n - 1].
        self.size = 2**(int(log2(self.n-1))+1) if self.n != 1 else 1
        self.data = [set() for _ in range(2 * self.size)]
        self.process(array)
    def process(self, array):
        """Fill the leaf sets and merge them bottom-up into internal nodes."""
        # Bug fix: the original iterated `range(n)` over a *global* n, so the
        # class only worked inside the script that happened to define it.
        for i in range(self.n):
            self.data[self.size + i] = {array[i]}
        # Node 0 is unused, so stop the merge at index 1.
        for i in range(self.size - 1, 0, -1):
            self.data[i] = self.data[2 * i] | self.data[2 * i + 1]
    def query(self, alpha, omega):
        """Return the number of distinct values on [alpha, omega] (inclusive)."""
        if alpha == omega:
            # Single element: its leaf set always has exactly one value.
            return len(self.data[alpha + self.size])
        res = set()
        alpha += self.size
        omega += self.size + 1  # half-open [alpha, omega) over leaf indices
        while alpha < omega:
            if alpha & 1:  # alpha is a right child: take it and step right
                res |= self.data[alpha]
                alpha += 1
            if omega & 1:  # omega is past a right child: step left and take it
                omega -= 1
                res |= self.data[omega]
            alpha >>= 1
            omega >>= 1
        return len(res)
    def update(self, index, value):
        """Set array[index] = value and rebuild the affected path to the root."""
        index += self.size
        self.data[index] = {value}
        index >>= 1
        while index:
            self.data[index] = self.data[2 * index] | self.data[2 * index + 1]
            index >>= 1
n, q = map(int, input().split())
a = list(map(int, input().split()))
st = SegmentTree(a)
for _ in range(q):
t, x, y = map(int, input().split())
if t == 1:
print(st.query(x-1, y-1))
else:
st.update(x-1, y)
| true | true |
f7f50ac99edae4e90f2a335ae97c0574b8cc2a39 | 723 | py | Python | nsm/word_embeddings.py | MartinoMensio/neural-symbolic-machines | 10683b8d7e0a97c2f6f35206deb5d3a31f4796c4 | [
"Apache-2.0"
] | 354 | 2018-07-05T22:18:58.000Z | 2022-03-25T08:07:26.000Z | nsm/word_embeddings.py | MartinoMensio/neural-symbolic-machines | 10683b8d7e0a97c2f6f35206deb5d3a31f4796c4 | [
"Apache-2.0"
] | 33 | 2018-07-19T15:05:57.000Z | 2022-02-09T23:29:30.000Z | nsm/word_embeddings.py | MartinoMensio/neural-symbolic-machines | 10683b8d7e0a97c2f6f35206deb5d3a31f4796c4 | [
"Apache-2.0"
] | 70 | 2018-07-08T20:19:52.000Z | 2022-01-24T15:04:33.000Z | import json
import numpy as np
import gensim
class EmbeddingModel(object):
    """Word-embedding lookup table backed by a saved numpy matrix.

    Args:
        vocab_file: JSON file holding a list of tokens; a token's position
            in the list is its row index in the embedding matrix.
        embedding_file: ``.npy`` file with the embedding matrix (one row
            per vocabulary token).
        normalize_embeddings: if True, L2-normalize each row so lookups
            return unit vectors.
    """
    def __init__(
            self, vocab_file, embedding_file, normalize_embeddings=True):
        with open(embedding_file, 'rb') as f:
            self.embedding_mat = np.load(f)
        if normalize_embeddings:
            self.embedding_mat = self.embedding_mat / np.linalg.norm(
                self.embedding_mat, axis=1, keepdims=True)
        with open(vocab_file, 'r') as f:
            tks = json.load(f)
        # token -> row index (clearer than dict(zip(tks, range(len(tks))))).
        self.vocab = {tok: i for i, tok in enumerate(tks)}

    def __contains__(self, word):
        return word in self.vocab

    def __getitem__(self, word):
        """Return the embedding row for *word*; raise KeyError(word) if unknown."""
        if word in self.vocab:
            return self.embedding_mat[self.vocab[word]]
        # Bug fix: the original raised a bare KeyError with no message,
        # hiding which word was missing.
        raise KeyError(word)
| 26.777778 | 67 | 0.674965 | import json
import numpy as np
import gensim
class EmbeddingModel(object):
    """Word-embedding lookup table backed by a saved numpy matrix.

    Args:
        vocab_file: JSON file holding a list of tokens; a token's position
            in the list is its row index in the embedding matrix.
        embedding_file: ``.npy`` file with the embedding matrix (one row
            per vocabulary token).
        normalize_embeddings: if True, L2-normalize each row so lookups
            return unit vectors.
    """
    def __init__(
            self, vocab_file, embedding_file, normalize_embeddings=True):
        with open(embedding_file, 'rb') as f:
            self.embedding_mat = np.load(f)
        if normalize_embeddings:
            self.embedding_mat = self.embedding_mat / np.linalg.norm(
                self.embedding_mat, axis=1, keepdims=True)
        with open(vocab_file, 'r') as f:
            tks = json.load(f)
        # token -> row index (clearer than dict(zip(tks, range(len(tks))))).
        self.vocab = {tok: i for i, tok in enumerate(tks)}

    def __contains__(self, word):
        return word in self.vocab

    def __getitem__(self, word):
        """Return the embedding row for *word*; raise KeyError(word) if unknown."""
        if word in self.vocab:
            return self.embedding_mat[self.vocab[word]]
        # Bug fix: the original raised a bare KeyError with no message,
        # hiding which word was missing.
        raise KeyError(word)
| true | true |
f7f50ad4dcac198f7d201ed593a03e7d9b1e1c4d | 2,840 | py | Python | cyborg/common/constants.py | GuojianZhou/cyborg | 05c2d5d7b3ba6893b0e68d1591089019aea2d2fd | [
"Apache-2.0"
] | 1 | 2021-06-01T08:50:39.000Z | 2021-06-01T08:50:39.000Z | cyborg/common/constants.py | GuojianZhou/cyborg | 05c2d5d7b3ba6893b0e68d1591089019aea2d2fd | [
"Apache-2.0"
] | null | null | null | cyborg/common/constants.py | GuojianZhou/cyborg | 05c2d5d7b3ba6893b0e68d1591089019aea2d2fd | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
# RPC topics for the two cyborg services.
CONDUCTOR_TOPIC = 'cyborg-conductor'
AGENT_TOPIC = 'cyborg-agent'
# Accelerator device category labels.
DEVICE_GPU = 'GPU'
DEVICE_FPGA = 'FPGA'
DEVICE_AICHIP = 'AICHIP'
DEVICE_VMLU = 'MLU'
# Accelerator request (ARQ) lifecycle states; the tuple-unpacking idiom
# binds each individual name while also exposing the full tuple.
ARQ_STATES = (ARQ_INITIAL, ARQ_BIND_STARTED, ARQ_BOUND, ARQ_UNBOUND,
              ARQ_BIND_FAILED, ARQ_DELETING) = (
    'Initial', 'BindStarted', 'Bound', 'Unbound', 'BindFailed', 'Deleting')
# Groups of states by bind stage: before bind, after bind, outside bind flow.
ARQ_BIND_STAGE = (ARQ_PRE_BIND, ARQ_FINISH_BIND,
                  ARQ_OUFOF_BIND_FLOW) = (
    [ARQ_INITIAL, ARQ_BIND_STARTED], [ARQ_BOUND, ARQ_BIND_FAILED],
    [ARQ_UNBOUND, ARQ_DELETING])
ARQ_BIND_STATUS = (ARQ_BIND_STATUS_FINISH, ARQ_BIND_STATUS_FAILED) = (
    "completed", "failed")
# Terminal ARQ state -> externally reported bind status.
ARQ_BIND_STATES_STATUS_MAP = {
    ARQ_BOUND: ARQ_BIND_STATUS_FINISH,
    ARQ_BIND_FAILED: ARQ_BIND_STATUS_FAILED,
    ARQ_DELETING: ARQ_BIND_STATUS_FAILED
}
# TODO(Shaohe): maybe we can use oslo automaton lib
# ref: https://docs.openstack.org/automaton/latest/user/examples.html
# The states in value list can transfrom to the key state
ARQ_STATES_TRANSFORM_MATRIX = {
    ARQ_INITIAL: [],
    ARQ_BIND_STARTED: [ARQ_INITIAL, ARQ_UNBOUND],
    ARQ_BOUND: [ARQ_BIND_STARTED],
    ARQ_UNBOUND: [ARQ_INITIAL, ARQ_BIND_STARTED, ARQ_BOUND, ARQ_BIND_FAILED],
    ARQ_BIND_FAILED: [ARQ_BIND_STARTED, ARQ_BOUND],
    ARQ_DELETING: [ARQ_INITIAL, ARQ_BIND_STARTED, ARQ_BOUND,
                   ARQ_UNBOUND, ARQ_BIND_FAILED]
}
# Device type
DEVICE_TYPE = (DEVICE_GPU, DEVICE_FPGA, DEVICE_AICHIP, DEVICE_VMLU)
# Attach handle type
# 'TEST_PCI': used by fake driver, ignored by Nova virt driver.
ATTACH_HANDLE_TYPES = (AH_TYPE_PCI, AH_TYPE_MDEV, AH_TYPE_TEST_PCI) = (
    "PCI", "MDEV", "TEST_PCI")
# Control Path ID type
# NOTE(review): the parentheses here do NOT form a tuple, so CPID_TYPE is
# the plain string "PCI", unlike the sibling *_TYPES tuples above —
# confirm whether a 1-tuple ("PCI",) was intended.
CPID_TYPE = (CPID_TYPE_PCI) = ("PCI")
# Resource Class
# Maps cyborg device type names to Placement resource classes.
RESOURCES = {
    "FPGA": orc.FPGA,
    "PGPU": orc.PGPU,
    "VGPU": orc.VGPU,
    "VMLU": orc.VMLU
}
# Extra-spec keys used to request a specific bitstream/function on an FPGA.
ACCEL_SPECS = (
    ACCEL_BITSTREAM_ID,
    ACCEL_FUNCTION_ID
) = (
    "accel:bitstream_id",
    "accel:function_id"
)
SUPPORT_RESOURCES = (
    FPGA, GPU, VGPU, PGPU, VMLU) = (
    "FPGA", "GPU", "VGPU", "PGPU", "VMLU"
)
# Placement traits advertised for FPGA function matching.
FPGA_TRAITS = (
    FPGA_FUNCTION_ID,
) = (
    "CUSTOM_FPGA_FUNCTION_ID",
)
# Prefix for resource-request keys, e.g. "resources:FPGA=1".
RESOURCES_PREFIX = "resources:"
| 26.792453 | 78 | 0.709507 |
import os_resource_classes as orc
CONDUCTOR_TOPIC = 'cyborg-conductor'
AGENT_TOPIC = 'cyborg-agent'
DEVICE_GPU = 'GPU'
DEVICE_FPGA = 'FPGA'
DEVICE_AICHIP = 'AICHIP'
DEVICE_VMLU = 'MLU'
ARQ_STATES = (ARQ_INITIAL, ARQ_BIND_STARTED, ARQ_BOUND, ARQ_UNBOUND,
ARQ_BIND_FAILED, ARQ_DELETING) = (
'Initial', 'BindStarted', 'Bound', 'Unbound', 'BindFailed', 'Deleting')
ARQ_BIND_STAGE = (ARQ_PRE_BIND, ARQ_FINISH_BIND,
ARQ_OUFOF_BIND_FLOW) = (
[ARQ_INITIAL, ARQ_BIND_STARTED], [ARQ_BOUND, ARQ_BIND_FAILED],
[ARQ_UNBOUND, ARQ_DELETING])
ARQ_BIND_STATUS = (ARQ_BIND_STATUS_FINISH, ARQ_BIND_STATUS_FAILED) = (
"completed", "failed")
ARQ_BIND_STATES_STATUS_MAP = {
ARQ_BOUND: ARQ_BIND_STATUS_FINISH,
ARQ_BIND_FAILED: ARQ_BIND_STATUS_FAILED,
ARQ_DELETING: ARQ_BIND_STATUS_FAILED
}
ARQ_STATES_TRANSFORM_MATRIX = {
ARQ_INITIAL: [],
ARQ_BIND_STARTED: [ARQ_INITIAL, ARQ_UNBOUND],
ARQ_BOUND: [ARQ_BIND_STARTED],
ARQ_UNBOUND: [ARQ_INITIAL, ARQ_BIND_STARTED, ARQ_BOUND, ARQ_BIND_FAILED],
ARQ_BIND_FAILED: [ARQ_BIND_STARTED, ARQ_BOUND],
ARQ_DELETING: [ARQ_INITIAL, ARQ_BIND_STARTED, ARQ_BOUND,
ARQ_UNBOUND, ARQ_BIND_FAILED]
}
DEVICE_TYPE = (DEVICE_GPU, DEVICE_FPGA, DEVICE_AICHIP, DEVICE_VMLU)
ATTACH_HANDLE_TYPES = (AH_TYPE_PCI, AH_TYPE_MDEV, AH_TYPE_TEST_PCI) = (
"PCI", "MDEV", "TEST_PCI")
CPID_TYPE = (CPID_TYPE_PCI) = ("PCI")
RESOURCES = {
"FPGA": orc.FPGA,
"PGPU": orc.PGPU,
"VGPU": orc.VGPU,
"VMLU": orc.VMLU
}
ACCEL_SPECS = (
ACCEL_BITSTREAM_ID,
ACCEL_FUNCTION_ID
) = (
"accel:bitstream_id",
"accel:function_id"
)
SUPPORT_RESOURCES = (
FPGA, GPU, VGPU, PGPU, VMLU) = (
"FPGA", "GPU", "VGPU", "PGPU", "VMLU"
)
FPGA_TRAITS = (
FPGA_FUNCTION_ID,
) = (
"CUSTOM_FPGA_FUNCTION_ID",
)
RESOURCES_PREFIX = "resources:"
| true | true |
f7f50aed59175a461b99066303f05fa39de16436 | 3,212 | py | Python | sdk/python/pulumi_aws_native/location/get_tracker.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/location/get_tracker.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/location/get_tracker.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetTrackerResult',
'AwaitableGetTrackerResult',
'get_tracker',
'get_tracker_output',
]
@pulumi.output_type
class GetTrackerResult:
    """Output values returned by the ``aws-native:location:getTracker`` invoke."""
    def __init__(__self__, arn=None, create_time=None, tracker_arn=None, update_time=None):
        # Each argument is validated as a string (when provided) and stored
        # through pulumi.set so the @pulumi.output_type machinery sees it.
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if create_time and not isinstance(create_time, str):
            raise TypeError("Expected argument 'create_time' to be a str")
        pulumi.set(__self__, "create_time", create_time)
        if tracker_arn and not isinstance(tracker_arn, str):
            raise TypeError("Expected argument 'tracker_arn' to be a str")
        pulumi.set(__self__, "tracker_arn", tracker_arn)
        if update_time and not isinstance(update_time, str):
            raise TypeError("Expected argument 'update_time' to be a str")
        pulumi.set(__self__, "update_time", update_time)

    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        """The ``arn`` output value of the tracker, if set."""
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> Optional[str]:
        """The ``createTime`` output value, if set."""
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter(name="trackerArn")
    def tracker_arn(self) -> Optional[str]:
        """The ``trackerArn`` output value, if set."""
        return pulumi.get(self, "tracker_arn")

    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> Optional[str]:
        """The ``updateTime`` output value, if set."""
        return pulumi.get(self, "update_time")
class AwaitableGetTrackerResult(GetTrackerResult):
    """GetTrackerResult that can also be awaited (resolving to a plain result)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator, which
        # is what the awaitable protocol requires; awaiting immediately
        # returns a plain GetTrackerResult copy of this object's fields.
        if False:
            yield self
        return GetTrackerResult(
            arn=self.arn,
            create_time=self.create_time,
            tracker_arn=self.tracker_arn,
            update_time=self.update_time)
def get_tracker(tracker_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTrackerResult:
    """
    Definition of AWS::Location::Tracker Resource Type

    :param tracker_name: Name of the tracker to look up.
    :param opts: Options controlling the invoke (provider, version, ...).
    """
    __args__ = dict()
    __args__['trackerName'] = tracker_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('aws-native:location:getTracker', __args__, opts=opts, typ=GetTrackerResult).value

    return AwaitableGetTrackerResult(
        arn=__ret__.arn,
        create_time=__ret__.create_time,
        tracker_arn=__ret__.tracker_arn,
        update_time=__ret__.update_time)
@_utilities.lift_output_func(get_tracker)
def get_tracker_output(tracker_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTrackerResult]:
    """
    Definition of AWS::Location::Tracker Resource Type

    Output-typed variant of :func:`get_tracker`; the body is generated by
    the ``lift_output_func`` decorator, hence the ``...`` placeholder.
    """
    ...
| 34.170213 | 118 | 0.673101 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetTrackerResult',
'AwaitableGetTrackerResult',
'get_tracker',
'get_tracker_output',
]
@pulumi.output_type
class GetTrackerResult:
def __init__(__self__, arn=None, create_time=None, tracker_arn=None, update_time=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if tracker_arn and not isinstance(tracker_arn, str):
raise TypeError("Expected argument 'tracker_arn' to be a str")
pulumi.set(__self__, "tracker_arn", tracker_arn)
if update_time and not isinstance(update_time, str):
raise TypeError("Expected argument 'update_time' to be a str")
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[str]:
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="trackerArn")
def tracker_arn(self) -> Optional[str]:
return pulumi.get(self, "tracker_arn")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> Optional[str]:
return pulumi.get(self, "update_time")
class AwaitableGetTrackerResult(GetTrackerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTrackerResult(
arn=self.arn,
create_time=self.create_time,
tracker_arn=self.tracker_arn,
update_time=self.update_time)
def get_tracker(tracker_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTrackerResult:
__args__ = dict()
__args__['trackerName'] = tracker_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:location:getTracker', __args__, opts=opts, typ=GetTrackerResult).value
return AwaitableGetTrackerResult(
arn=__ret__.arn,
create_time=__ret__.create_time,
tracker_arn=__ret__.tracker_arn,
update_time=__ret__.update_time)
@_utilities.lift_output_func(get_tracker)
def get_tracker_output(tracker_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTrackerResult]:
...
| true | true |
f7f50ba0c9ca56486a33bfc84bc094e946f123fc | 11,967 | py | Python | lib/daemon.py | gertjaap/electrum-vtc | 85e3432fdb0c221b93fc3ef4a0d4b96397cf4215 | [
"MIT"
] | 57 | 2017-05-30T00:11:19.000Z | 2018-02-01T18:30:18.000Z | lib/daemon.py | gertjaap/electrum-vtc | 85e3432fdb0c221b93fc3ef4a0d4b96397cf4215 | [
"MIT"
] | 63 | 2017-05-13T03:20:47.000Z | 2018-02-04T18:07:06.000Z | lib/daemon.py | gertjaap/electrum-vtc | 85e3432fdb0c221b93fc3ef4a0d4b96397cf4215 | [
"MIT"
] | 27 | 2017-05-11T22:48:37.000Z | 2018-02-03T20:24:32.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ast
import os
import time
import traceback
import sys
# from jsonrpc import JSONRPCResponseManager
import jsonrpclib
from .jsonrpc import VerifyingJSONRPCServer
from .version import ELECTRUM_VERSION
from .network import Network
from .util import json_decode, DaemonThread
from .util import print_error, to_string
from .wallet import Wallet
from .storage import WalletStorage
from .commands import known_commands, Commands
from .simple_config import SimpleConfig
from .exchange_rate import FxThread
from .plugins import run_hook
def get_lockfile(config):
    """Return the path of the daemon lockfile inside the config directory."""
    lockfile_path = os.path.join(config.path, 'daemon')
    return lockfile_path
def remove_lockfile(lockfile):
    """Delete the daemon lockfile (raises OSError if it is already gone)."""
    # os.remove is documented as identical to os.unlink.
    os.remove(lockfile)
def get_fd_or_server(config):
    '''Return (fd, None) or (None, server) for the daemon lockfile.

    Tries to create the lockfile, using O_EXCL to prevent races.  If it
    succeeds it returns the FD of the freshly created lockfile (meaning the
    caller should become the daemon).  Otherwise try and connect to the
    server specified in the lockfile.  If this succeeds, the server proxy is
    returned.  Otherwise the lockfile is stale: remove it and try again.'''
    lockfile = get_lockfile(config)
    while True:
        try:
            # O_EXCL makes creation atomic: only one process can win.
            return os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644), None
        except OSError:
            pass
        server = get_server(config)
        if server is not None:
            return None, server
        # Couldn't connect; remove lockfile and try again.
        remove_lockfile(lockfile)
def get_server(config):
    """Return a jsonrpclib proxy to a running daemon, or None.

    Reads (host, port) and the lockfile's creation time from the lockfile,
    builds an (optionally authenticated) URL and pings the daemon.  If the
    lockfile is older than ~1 second and the ping still fails, the daemon is
    assumed dead and None is returned; otherwise it keeps retrying, since
    the daemon may just be starting up.
    """
    lockfile = get_lockfile(config)
    while True:
        create_time = None
        try:
            with open(lockfile) as f:
                (host, port), create_time = ast.literal_eval(f.read())
            rpc_user, rpc_password = get_rpc_credentials(config)
            if rpc_password == '':
                # authentication disabled
                server_url = 'http://%s:%d' % (host, port)
            else:
                server_url = 'http://%s:%s@%s:%d' % (
                    rpc_user, rpc_password, host, port)
            server = jsonrpclib.Server(server_url)
            # Test daemon is running
            server.ping()
            return server
        except Exception as e:
            print_error("[get_server]", e)
        if not create_time or create_time < time.time() - 1.0:
            return None
        # Sleep a bit and try again; it might have just been started
        time.sleep(1.0)
def get_rpc_credentials(config):
    """Return (rpc_user, rpc_password) for the JSON-RPC server.

    On first use (no credentials stored in the config) a random 128-bit
    URL-safe base64 password is generated and persisted to the config.
    An explicitly empty password means authentication is disabled; a
    warning is printed in that case.
    """
    rpc_user = config.get('rpcuser', None)
    rpc_password = config.get('rpcpassword', None)
    if rpc_user is None or rpc_password is None:
        rpc_user = 'user'
        import ecdsa, base64
        bits = 128
        nbytes = bits // 8 + (bits % 8 > 0)
        # ecdsa's randrange is a cryptographically seeded random integer.
        pw_int = ecdsa.util.randrange(pow(2, bits))
        # URL-safe alphabet ('-_' replaces '+/').
        pw_b64 = base64.b64encode(
            pw_int.to_bytes(nbytes, 'big'), b'-_')
        rpc_password = to_string(pw_b64, 'ascii')
        config.set_key('rpcuser', rpc_user)
        config.set_key('rpcpassword', rpc_password, save=True)
    elif rpc_password == '':
        from .util import print_stderr
        print_stderr('WARNING: RPC authentication is disabled.')
    return rpc_user, rpc_password
class Daemon(DaemonThread):
    """Background Electrum daemon.

    Runs the network thread, keeps wallets loaded, and serves an
    authenticated JSON-RPC interface that the CLI and GUI front ends use
    (ping / daemon subcommands / arbitrary command dispatch).
    """

    def __init__(self, config, fd, is_gui):
        # fd: descriptor of the freshly created lockfile (from
        # get_fd_or_server); the RPC address is published through it.
        DaemonThread.__init__(self)
        self.config = config
        if config.get('offline'):
            self.network = None
        else:
            self.network = Network(config)
            self.network.start()
        self.fx = FxThread(config, self.network)
        if self.network:
            self.network.add_jobs([self.fx])
        self.gui = None
        self.wallets = {}
        # Setup JSONRPC server
        self.init_server(config, fd, is_gui)

    def init_server(self, config, fd, is_gui):
        """Start the JSON-RPC server and write its address into the lockfile fd."""
        host = config.get('rpchost', '127.0.0.1')
        port = config.get('rpcport', 0)
        rpc_user, rpc_password = get_rpc_credentials(config)
        try:
            server = VerifyingJSONRPCServer((host, port), logRequests=False,
                                            rpc_user=rpc_user, rpc_password=rpc_password)
        except Exception as e:
            self.print_error('Warning: cannot initialize RPC server on host', host, e)
            self.server = None
            os.close(fd)
            return
        # Publish ((host, port), creation_time) so get_server() can find us.
        os.write(fd, bytes(repr((server.socket.getsockname(), time.time())), 'utf8'))
        os.close(fd)
        self.server = server
        server.timeout = 0.1
        server.register_function(self.ping, 'ping')
        if is_gui:
            server.register_function(self.run_gui, 'gui')
        else:
            server.register_function(self.run_daemon, 'daemon')
        self.cmd_runner = Commands(self.config, None, self.network)
        # Expose every known Electrum command over RPC.
        for cmdname in known_commands:
            server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
        server.register_function(self.run_cmdline, 'run_cmdline')

    def ping(self):
        """Liveness check used by get_server()."""
        return True

    def run_daemon(self, config_options):
        """Handle `electrum daemon <subcommand>` RPC requests."""
        config = SimpleConfig(config_options)
        sub = config.get('subcommand')
        assert sub in [None, 'start', 'stop', 'status', 'load_wallet', 'close_wallet']
        if sub in [None, 'start']:
            response = "Daemon already running"
        elif sub == 'load_wallet':
            path = config.get_wallet_path()
            wallet = self.load_wallet(path, config.get('password'))
            if wallet is not None:
                self.cmd_runner.wallet = wallet
                run_hook('load_wallet', wallet, None)
            response = wallet is not None
        elif sub == 'close_wallet':
            path = config.get_wallet_path()
            if path in self.wallets:
                self.stop_wallet(path)
                response = True
            else:
                response = False
        elif sub == 'status':
            if self.network:
                p = self.network.get_parameters()
                current_wallet = self.cmd_runner.wallet
                current_wallet_path = current_wallet.storage.path \
                                      if current_wallet else None
                response = {
                    'path': self.network.config.path,
                    'server': p[0],
                    'blockchain_height': self.network.get_local_height(),
                    'server_height': self.network.get_server_height(),
                    'spv_nodes': len(self.network.get_interfaces()),
                    'connected': self.network.is_connected(),
                    'auto_connect': p[4],
                    'version': ELECTRUM_VERSION,
                    'wallets': {k: w.is_up_to_date()
                                for k, w in self.wallets.items()},
                    'current_wallet': current_wallet_path,
                    'fee_per_kb': self.config.fee_per_kb(),
                }
            else:
                response = "Daemon offline"
        elif sub == 'stop':
            self.stop()
            response = "Daemon stopped"
        return response

    def run_gui(self, config_options):
        """Handle the 'gui' RPC: refuse, since a daemon/GUI is already running."""
        config = SimpleConfig(config_options)
        if self.gui:
            #if hasattr(self.gui, 'new_window'):
            #    path = config.get_wallet_path()
            #    self.gui.new_window(path, config.get('url'))
            #    response = "ok"
            #else:
            #    response = "error: current GUI does not support multiple windows"
            response = "error: Electrum GUI already running"
        else:
            response = "Error: Electrum is running in daemon mode. Please stop the daemon first."
        return response

    def load_wallet(self, path, password):
        """Load (or return the already-loaded) wallet at `path`.

        Returns None when the file is missing, is encrypted and no password
        was given, requires a storage split, or has a pending action —
        in those cases the install wizard should be launched instead.
        """
        # wizard will be launched if we return
        if path in self.wallets:
            wallet = self.wallets[path]
            return wallet
        storage = WalletStorage(path, manual_upgrades=True)
        if not storage.file_exists():
            return
        if storage.is_encrypted():
            if not password:
                return
            storage.decrypt(password)
        if storage.requires_split():
            return
        if storage.get_action():
            return
        wallet = Wallet(storage)
        wallet.start_threads(self.network)
        self.wallets[path] = wallet
        return wallet

    def add_wallet(self, wallet):
        """Register an externally created wallet with the daemon."""
        path = wallet.storage.path
        self.wallets[path] = wallet

    def get_wallet(self, path):
        """Return the loaded wallet at `path`, or None."""
        return self.wallets.get(path)

    def stop_wallet(self, path):
        """Unload the wallet at `path` and stop its threads."""
        wallet = self.wallets.pop(path)
        wallet.stop_threads()

    def run_cmdline(self, config_options):
        """Execute one Electrum CLI command on behalf of an RPC client."""
        password = config_options.get('password')
        new_password = config_options.get('new_password')
        config = SimpleConfig(config_options)
        # FIXME this is ugly...
        config.fee_estimates = self.network.config.fee_estimates.copy()
        config.mempool_fees = self.network.config.mempool_fees.copy()
        cmdname = config.get('cmd')
        cmd = known_commands[cmdname]
        if cmd.requires_wallet:
            path = config.get_wallet_path()
            wallet = self.wallets.get(path)
            if wallet is None:
                return {'error': 'Wallet "%s" is not loaded. Use "electrum daemon load_wallet"'%os.path.basename(path) }
        else:
            wallet = None
        # arguments passed to function
        args = map(lambda x: config.get(x), cmd.params)
        # decode json arguments
        args = [json_decode(i) for i in args]
        # options
        kwargs = {}
        for x in cmd.options:
            # Passwords come from the raw options so they are never persisted.
            kwargs[x] = (config_options.get(x) if x in ['password', 'new_password'] else config.get(x))
        cmd_runner = Commands(config, wallet, self.network)
        func = getattr(cmd_runner, cmd.name)
        result = func(*args, **kwargs)
        return result

    def run(self):
        """Main loop: serve RPC until stopped, then shut everything down."""
        while self.is_running():
            self.server.handle_request() if self.server else time.sleep(0.1)
        for k, wallet in self.wallets.items():
            wallet.stop_threads()
        if self.network:
            self.print_error("shutting down network")
            self.network.stop()
            self.network.join()
        self.on_stop()

    def stop(self):
        """Stop the daemon thread and remove the lockfile."""
        self.print_error("stopping, removing lockfile")
        remove_lockfile(get_lockfile(self.config))
        DaemonThread.stop(self)

    def init_gui(self, config, plugins):
        """Import and run the configured GUI (blocks until the GUI exits)."""
        gui_name = config.get('gui', 'qt')
        if gui_name in ['lite', 'classic']:
            gui_name = 'qt'
        gui = __import__('electrum_vtc_gui.' + gui_name, fromlist=['electrum_vtc_gui'])
        self.gui = gui.ElectrumGui(config, self, plugins)
        try:
            self.gui.main()
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
        # app will exit now
| 37.750789 | 120 | 0.604496 |
import ast
import os
import time
import traceback
import sys
import jsonrpclib
from .jsonrpc import VerifyingJSONRPCServer
from .version import ELECTRUM_VERSION
from .network import Network
from .util import json_decode, DaemonThread
from .util import print_error, to_string
from .wallet import Wallet
from .storage import WalletStorage
from .commands import known_commands, Commands
from .simple_config import SimpleConfig
from .exchange_rate import FxThread
from .plugins import run_hook
def get_lockfile(config):
return os.path.join(config.path, 'daemon')
def remove_lockfile(lockfile):
os.unlink(lockfile)
def get_fd_or_server(config):
lockfile = get_lockfile(config)
while True:
try:
return os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644), None
except OSError:
pass
server = get_server(config)
if server is not None:
return None, server
remove_lockfile(lockfile)
def get_server(config):
lockfile = get_lockfile(config)
while True:
create_time = None
try:
with open(lockfile) as f:
(host, port), create_time = ast.literal_eval(f.read())
rpc_user, rpc_password = get_rpc_credentials(config)
if rpc_password == '':
# authentication disabled
server_url = 'http://%s:%d' % (host, port)
else:
server_url = 'http://%s:%s@%s:%d' % (
rpc_user, rpc_password, host, port)
server = jsonrpclib.Server(server_url)
# Test daemon is running
server.ping()
return server
except Exception as e:
print_error("[get_server]", e)
if not create_time or create_time < time.time() - 1.0:
return None
# Sleep a bit and try again; it might have just been started
time.sleep(1.0)
def get_rpc_credentials(config):
rpc_user = config.get('rpcuser', None)
rpc_password = config.get('rpcpassword', None)
if rpc_user is None or rpc_password is None:
rpc_user = 'user'
import ecdsa, base64
bits = 128
nbytes = bits // 8 + (bits % 8 > 0)
pw_int = ecdsa.util.randrange(pow(2, bits))
pw_b64 = base64.b64encode(
pw_int.to_bytes(nbytes, 'big'), b'-_')
rpc_password = to_string(pw_b64, 'ascii')
config.set_key('rpcuser', rpc_user)
config.set_key('rpcpassword', rpc_password, save=True)
elif rpc_password == '':
from .util import print_stderr
print_stderr('WARNING: RPC authentication is disabled.')
return rpc_user, rpc_password
class Daemon(DaemonThread):
def __init__(self, config, fd, is_gui):
DaemonThread.__init__(self)
self.config = config
if config.get('offline'):
self.network = None
else:
self.network = Network(config)
self.network.start()
self.fx = FxThread(config, self.network)
if self.network:
self.network.add_jobs([self.fx])
self.gui = None
self.wallets = {}
# Setup JSONRPC server
self.init_server(config, fd, is_gui)
def init_server(self, config, fd, is_gui):
host = config.get('rpchost', '127.0.0.1')
port = config.get('rpcport', 0)
rpc_user, rpc_password = get_rpc_credentials(config)
try:
server = VerifyingJSONRPCServer((host, port), logRequests=False,
rpc_user=rpc_user, rpc_password=rpc_password)
except Exception as e:
self.print_error('Warning: cannot initialize RPC server on host', host, e)
self.server = None
os.close(fd)
return
os.write(fd, bytes(repr((server.socket.getsockname(), time.time())), 'utf8'))
os.close(fd)
self.server = server
server.timeout = 0.1
server.register_function(self.ping, 'ping')
if is_gui:
server.register_function(self.run_gui, 'gui')
else:
server.register_function(self.run_daemon, 'daemon')
self.cmd_runner = Commands(self.config, None, self.network)
for cmdname in known_commands:
server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
server.register_function(self.run_cmdline, 'run_cmdline')
def ping(self):
return True
def run_daemon(self, config_options):
config = SimpleConfig(config_options)
sub = config.get('subcommand')
assert sub in [None, 'start', 'stop', 'status', 'load_wallet', 'close_wallet']
if sub in [None, 'start']:
response = "Daemon already running"
elif sub == 'load_wallet':
path = config.get_wallet_path()
wallet = self.load_wallet(path, config.get('password'))
if wallet is not None:
self.cmd_runner.wallet = wallet
run_hook('load_wallet', wallet, None)
response = wallet is not None
elif sub == 'close_wallet':
path = config.get_wallet_path()
if path in self.wallets:
self.stop_wallet(path)
response = True
else:
response = False
elif sub == 'status':
if self.network:
p = self.network.get_parameters()
current_wallet = self.cmd_runner.wallet
current_wallet_path = current_wallet.storage.path \
if current_wallet else None
response = {
'path': self.network.config.path,
'server': p[0],
'blockchain_height': self.network.get_local_height(),
'server_height': self.network.get_server_height(),
'spv_nodes': len(self.network.get_interfaces()),
'connected': self.network.is_connected(),
'auto_connect': p[4],
'version': ELECTRUM_VERSION,
'wallets': {k: w.is_up_to_date()
for k, w in self.wallets.items()},
'current_wallet': current_wallet_path,
'fee_per_kb': self.config.fee_per_kb(),
}
else:
response = "Daemon offline"
elif sub == 'stop':
self.stop()
response = "Daemon stopped"
return response
def run_gui(self, config_options):
    """Handle a `gui` RPC request from a second Electrum invocation.

    The GUI can only run inside the original process, so this always
    answers with an error string explaining why a new GUI cannot start.
    """
    config = SimpleConfig(config_options)  # NOTE(review): parsed but currently unused
    if self.gui:
        #if hasattr(self.gui, 'new_window'):
        #    path = config.get_wallet_path()
        #    self.gui.new_window(path, config.get('url'))
        #    response = "ok"
        #else:
        #    response = "error: current GUI does not support multiple windows"
        response = "error: Electrum GUI already running"
    else:
        response = "Error: Electrum is running in daemon mode. Please stop the daemon first."
    return response
def load_wallet(self, path, password):
    """Open (or fetch the already-open) wallet stored at *path*.

    Returns the Wallet instance on success, or None whenever the wallet
    cannot be opened non-interactively (missing file, absent password for
    an encrypted store, or a storage still needing the install wizard).
    """
    # wizard will be launched if we return
    if path in self.wallets:
        # Already open: return the cached instance.
        wallet = self.wallets[path]
        return wallet
    storage = WalletStorage(path, manual_upgrades=True)
    if not storage.file_exists():
        return
    if storage.is_encrypted():
        if not password:
            return
        storage.decrypt(password)
    if storage.requires_split():
        # Legacy multi-account wallet: must be split interactively first.
        return
    if storage.get_action():
        # Storage still requires a wizard action (e.g. unfinished setup).
        return
    wallet = Wallet(storage)
    wallet.start_threads(self.network)
    self.wallets[path] = wallet
    return wallet
def add_wallet(self, wallet):
    """Register an already-open wallet under its storage path."""
    self.wallets[wallet.storage.path] = wallet
def get_wallet(self, path):
    """Look up the open wallet registered at *path*; None if absent."""
    try:
        return self.wallets[path]
    except KeyError:
        return None
def stop_wallet(self, path):
    """Deregister the wallet at *path* and stop its background threads.

    Raises KeyError if no wallet is currently open at *path*.
    """
    self.wallets.pop(path).stop_threads()
def run_cmdline(self, config_options):
    """Execute a regular CLI command on behalf of a client process.

    Looks the command up in known_commands, resolves its wallet (if it
    needs one), decodes positional args from JSON, collects options, and
    dispatches to a Commands instance. Returns the command's result, or
    an {'error': ...} dict when the required wallet is not loaded.
    """
    password = config_options.get('password')
    # NOTE(review): new_password is bound but never used locally; the value
    # still reaches the command via the kwargs loop below.
    new_password = config_options.get('new_password')
    config = SimpleConfig(config_options)
    # FIXME this is ugly...
    # Copy live fee data from the daemon's network config into the
    # per-request config so fee-dependent commands see current estimates.
    config.fee_estimates = self.network.config.fee_estimates.copy()
    config.mempool_fees = self.network.config.mempool_fees.copy()
    cmdname = config.get('cmd')
    cmd = known_commands[cmdname]
    if cmd.requires_wallet:
        path = config.get_wallet_path()
        wallet = self.wallets.get(path)
        if wallet is None:
            return {'error': 'Wallet "%s" is not loaded. Use "electrum daemon load_wallet"'%os.path.basename(path) }
    else:
        wallet = None
    # arguments passed to function
    args = map(lambda x: config.get(x), cmd.params)
    # decode json arguments
    args = [json_decode(i) for i in args]
    # options
    kwargs = {}
    for x in cmd.options:
        # Passwords come from the raw client options (never persisted in
        # config); everything else goes through the SimpleConfig lookup.
        kwargs[x] = (config_options.get(x) if x in ['password', 'new_password'] else config.get(x))
    cmd_runner = Commands(config, wallet, self.network)
    func = getattr(cmd_runner, cmd.name)
    result = func(*args, **kwargs)
    return result
def run(self):
    """Main daemon loop: serve RPC requests until stopped, then tear down.

    On exit, stops every open wallet's threads, shuts the network down,
    and finally runs the DaemonThread on_stop hook.
    """
    while self.is_running():
        # Block on the next JSON-RPC request; without a server, just idle.
        self.server.handle_request() if self.server else time.sleep(0.1)
    for k, wallet in self.wallets.items():
        wallet.stop_threads()
    if self.network:
        self.print_error("shutting down network")
        self.network.stop()
        self.network.join()
    self.on_stop()
def stop(self):
    """Request daemon shutdown: release the lockfile, then stop the thread."""
    self.print_error("stopping, removing lockfile")
    remove_lockfile(get_lockfile(self.config))
    DaemonThread.stop(self)
def init_gui(self, config, plugins):
    """Import the configured GUI package and run it; blocks until GUI exit.

    Exceptions from the GUI main loop are printed rather than propagated
    so the daemon can still shut down cleanly afterwards.
    """
    gui_name = config.get('gui', 'qt')
    if gui_name in ['lite', 'classic']:
        # Legacy GUI names both map onto the Qt implementation.
        gui_name = 'qt'
    gui = __import__('electrum_vtc_gui.' + gui_name, fromlist=['electrum_vtc_gui'])
    self.gui = gui.ElectrumGui(config, self, plugins)
    try:
        self.gui.main()
    except BaseException as e:
        traceback.print_exc(file=sys.stdout)
    # app will exit now
| true | true |
f7f50ba5cb6eef728d25241e98cb94b70973b622 | 2,562 | py | Python | env/common/files/bin/systemd_status.py | kjetilkl/infrastructure-playbook | 4347aa6e6a251a18228adae2c0db3744f78f4b45 | [
"MIT"
] | null | null | null | env/common/files/bin/systemd_status.py | kjetilkl/infrastructure-playbook | 4347aa6e6a251a18228adae2c0db3744f78f4b45 | [
"MIT"
] | null | null | null | env/common/files/bin/systemd_status.py | kjetilkl/infrastructure-playbook | 4347aa6e6a251a18228adae2c0db3744f78f4b45 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Kim Brugger (25 Sep 2020), contact: kim@brugger.dk
"""
import socket
import subprocess
import argparse
import shlex
import json
import datetime
# Numeric status codes reported for each systemd unit state
# (ActiveState/LoadState values as printed by `systemctl show`).
states = {'active': 1,
          'inactive': 2,
          'activating': 3,
          'deactivating': 4,
          'failed': 5,
          'not-found': 6,
          'dead': 7,}
def get_host_name() -> str:
    """Return this machine's fully qualified domain name."""
    fqdn = socket.getfqdn()
    return fqdn
def get_state(service_name) -> dict:
    """Return a status summary for one systemd unit.

    Runs `systemctl show --no-page <service>` and extracts:
      name        -- the service name queried
      status      -- ActiveState (or 'not-found' / 'status-error')
      status_code -- numeric code from the module-level `states` mapping
      uptime      -- seconds since ExecMainStartTimestamp, when available
    """
    cmd = f"systemctl show --no-page {service_name}"
    p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # NOQA
    stdout, stderr = p.communicate()
    stdout = stdout.decode('utf-8')
    stderr = stderr.decode('utf-8')
    res = {'name': service_name}
    if stderr:
        # BUG FIX: this used to read `res['status':'status-error']`, a dict
        # slice expression that raises TypeError (unhashable slice) instead
        # of recording the error.  Also supply a status_code so main() can
        # format the result without a KeyError.
        res['status'] = 'status-error'
        res['status_code'] = 0
        return res
    for line in str(stdout).split("\n"):
        if "=" not in line:
            continue
        key, value = line.split("=", 1)
        if key == 'ActiveState' and 'status' not in res:
            res['status'] = value
        if key == 'LoadState' and value == 'not-found':
            res['status'] = "not-found"
        if key == 'ExecMainStartTimestamp' and value:
            # An empty value means the unit never started; skip it to avoid
            # a ValueError from strptime("").
            ts = datetime.datetime.strptime(value, "%a %Y-%m-%d %H:%M:%S %Z")
            now = datetime.datetime.now()
            res['uptime'] = (now - ts).total_seconds()
    res["status_code"] = states[res['status']]
    return res
def main():
    """CLI entry point: print the status of each requested systemd service."""
    parser = argparse.ArgumentParser(description='systemd service status reporter')
    parser.add_argument('-t', '--telegraf', default=False, action="store_true", help="telegraf compatible format")
    parser.add_argument('-j', '--json', default=False, action="store_true", help="telegraf compatible format")
    parser.add_argument('services', nargs='+', help="service(s) to check")
    args = parser.parse_args()

    collected = []
    for name in args.services:
        info = get_state(name)
        if args.telegraf:
            # Influx line protocol: one measurement per service.
            out = f"service,host={get_host_name()},service={name} status_code={info['status_code']}"
            if 'uptime' in info:
                out += f",uptime={info['uptime']}"
            print(out)
        elif args.json:
            collected.append(info)
        else:
            out = f"{name:20s} {info['status_code']}/{info['status']:15}"
            if 'uptime' in info:
                out += f" {info['uptime']}s"
            print(out)

    if args.json:
        print(json.dumps(collected))


if __name__ == "__main__":
    main()
| 26.412371 | 117 | 0.575722 |
import socket
import subprocess
import argparse
import shlex
import json
import datetime
states = {'active': 1,
'inactive': 2,
'activating': 3,
'deactivating': 4,
'failed': 5,
'not-found': 6,
'dead': 7,}
def get_host_name() -> str:
return socket.getfqdn()
def get_state(service_name) -> int:
cmd = f"systemctl show --no-page {service_name}"
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
res = {'name': service_name}
if stderr:
res['status':'status-error']
return res
for line in str(stdout).split("\n"):
if "=" not in line:
continue
key, value = line.split("=",1)
if key == 'ActiveState' and 'status' not in res:
res['status'] = value
if key == 'LoadState' and value == 'not-found':
res['status'] = "not-found"
if key == 'ExecMainStartTimestamp':
ts = datetime.datetime.strptime(value, "%a %Y-%m-%d %H:%M:%S %Z")
now = datetime.datetime.now()
res['uptime'] = (now - ts).total_seconds()
res[ "status_code" ] = states[ res['status']]
return res
def main():
parser = argparse.ArgumentParser(description='systemd service status reporter')
parser.add_argument('-t', '--telegraf', default=False, action="store_true", help="telegraf compatible format")
parser.add_argument('-j', '--json', default=False, action="store_true", help="telegraf compatible format")
parser.add_argument('services', nargs='+', help="service(s) to check")
args = parser.parse_args()
statuses = []
for service in args.services:
status = get_state(service)
if args.telegraf:
line = f"service,host={get_host_name()},service={service} status_code={status['status_code']}"
if 'uptime' in status:
line += f",uptime={status['uptime']}"
print( line )
elif args.json:
statuses.append( status )
else:
line = f"{service:20s} {status['status_code']}/{status['status']:15}"
if 'uptime' in status:
line += f" {status['uptime']}s"
print( line )
if args.json:
print(json.dumps( statuses))
if __name__ == "__main__":
main()
| true | true |
f7f50bc400c9db3745673ea7bfddecbc34de52ad | 5,153 | py | Python | varlink/tests/test_scanner.py | varlink/python | ea5de247d8d86767095d50a13d0ce3dbc80c877f | [
"Apache-2.0"
] | 30 | 2018-04-12T16:05:47.000Z | 2021-09-02T15:03:24.000Z | varlink/tests/test_scanner.py | varlink/python-varlink | ea5de247d8d86767095d50a13d0ce3dbc80c877f | [
"Apache-2.0"
] | 23 | 2018-05-10T09:29:09.000Z | 2022-01-12T09:24:16.000Z | varlink/tests/test_scanner.py | varlink/python | ea5de247d8d86767095d50a13d0ce3dbc80c877f | [
"Apache-2.0"
] | 8 | 2018-05-14T08:38:24.000Z | 2021-12-06T11:46:53.000Z | from __future__ import print_function
from __future__ import unicode_literals
import unittest
import varlink
class TestScanner(unittest.TestCase):
def test_scanner_1(self):
interface = varlink.Interface("""# Example Varlink service
interface org.example.more
# Enum, returning either start, progress or end
# progress: [0-100]
type State (
start: ?bool,
progress: ?int,
end: ?bool
)
method TestMap(map: [string]string) -> (map: [string](i: int, val: string))
# Returns the same string
method Ping(ping: string) -> (pong: string)
# Dummy progress method
# n: number of progress steps
method TestMore(n: int) -> (state: State)
# Stop serving
method StopServing() -> ()
type ErrorChain (
description: string,
caused_by: ?ErrorChain
)
error ActionFailed (reason: ?ErrorChain)
""")
self.assertEqual(interface.name, "org.example.more")
self.assertIsNotNone(interface.get_method("Ping"))
self.assertIsNotNone(interface.get_method("TestMore"))
self.assertIsNotNone(interface.get_method("TestMap"))
self.assertIsNotNone(interface.get_method("StopServing"))
self.assertIsInstance(interface.members.get("ActionFailed"), varlink.scanner._Error)
self.assertIsInstance(interface.members.get("State"), varlink.scanner._Alias)
def test_doubleoption(self):
interface = None
try:
interface = varlink.Interface("""
interface org.example.doubleoption
method Foo(a: ??string) -> ()
""")
except SyntaxError:
pass
self.assertIsNone(interface)
def test_complex(self):
interface = varlink.Interface("""
interface org.example.complex
type TypeEnum ( a, b, c )
type TypeFoo (
bool: bool,
int: int,
float: float,
string: ?string,
enum: ?[]( foo, bar, baz ),
type: ?TypeEnum,
anon: ( foo: bool, bar: int, baz: [](a: int, b: int) ),
object: object
)
method Foo(a: (b: bool, c: int), foo: TypeFoo) -> (a: [](b: bool, c: int), foo: TypeFoo)
error ErrorFoo (a: (b: bool, c: int), foo: TypeFoo)
""")
self.assertEqual(interface.name, "org.example.complex")
self.assertIsNotNone(interface.get_method("Foo"))
self.assertIsInstance(interface.members.get("ErrorFoo"), varlink.scanner._Error)
self.assertIsInstance(interface.members.get("TypeEnum"), varlink.scanner._Alias)
def test_interfacename(self):
self.assertRaises(SyntaxError, varlink.Interface, "interface .a.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface com.-example.leadinghyphen\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface com.example-.danglinghyphen-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface co9.example.number-toplevel\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface 1om.example.number-toplevel\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface ab\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface .a.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.b.c.\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a..b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface 1.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface 8a.0.0\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface -a.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.b.c-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.b-.c-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.-b.c-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.-.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.*.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.?\nmethod F()->()")
self.assertIsNotNone(varlink.Interface("interface a.b\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface a.b.c\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface a.1\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface a.0.0\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface org.varlink.service\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface com.example.0example\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface com.example.example-dash\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface xn--lgbbat1ad8j.example.algeria\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface xn--c1yn36f.xn--c1yn36f.xn--c1yn36f\nmethod F()->()").name)
| 44.808696 | 117 | 0.671842 | from __future__ import print_function
from __future__ import unicode_literals
import unittest
import varlink
class TestScanner(unittest.TestCase):
def test_scanner_1(self):
interface = varlink.Interface("""# Example Varlink service
interface org.example.more
# Enum, returning either start, progress or end
# progress: [0-100]
type State (
start: ?bool,
progress: ?int,
end: ?bool
)
method TestMap(map: [string]string) -> (map: [string](i: int, val: string))
# Returns the same string
method Ping(ping: string) -> (pong: string)
# Dummy progress method
# n: number of progress steps
method TestMore(n: int) -> (state: State)
# Stop serving
method StopServing() -> ()
type ErrorChain (
description: string,
caused_by: ?ErrorChain
)
error ActionFailed (reason: ?ErrorChain)
""")
self.assertEqual(interface.name, "org.example.more")
self.assertIsNotNone(interface.get_method("Ping"))
self.assertIsNotNone(interface.get_method("TestMore"))
self.assertIsNotNone(interface.get_method("TestMap"))
self.assertIsNotNone(interface.get_method("StopServing"))
self.assertIsInstance(interface.members.get("ActionFailed"), varlink.scanner._Error)
self.assertIsInstance(interface.members.get("State"), varlink.scanner._Alias)
def test_doubleoption(self):
interface = None
try:
interface = varlink.Interface("""
interface org.example.doubleoption
method Foo(a: ??string) -> ()
""")
except SyntaxError:
pass
self.assertIsNone(interface)
def test_complex(self):
interface = varlink.Interface("""
interface org.example.complex
type TypeEnum ( a, b, c )
type TypeFoo (
bool: bool,
int: int,
float: float,
string: ?string,
enum: ?[]( foo, bar, baz ),
type: ?TypeEnum,
anon: ( foo: bool, bar: int, baz: [](a: int, b: int) ),
object: object
)
method Foo(a: (b: bool, c: int), foo: TypeFoo) -> (a: [](b: bool, c: int), foo: TypeFoo)
error ErrorFoo (a: (b: bool, c: int), foo: TypeFoo)
""")
self.assertEqual(interface.name, "org.example.complex")
self.assertIsNotNone(interface.get_method("Foo"))
self.assertIsInstance(interface.members.get("ErrorFoo"), varlink.scanner._Error)
self.assertIsInstance(interface.members.get("TypeEnum"), varlink.scanner._Alias)
def test_interfacename(self):
self.assertRaises(SyntaxError, varlink.Interface, "interface .a.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface com.-example.leadinghyphen\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface com.example-.danglinghyphen-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface co9.example.number-toplevel\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface 1om.example.number-toplevel\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface ab\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface .a.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.b.c.\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a..b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface 1.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface 8a.0.0\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface -a.b.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.b.c-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.b-.c-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.-b.c-\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.-.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.*.c\nmethod F()->()")
self.assertRaises(SyntaxError, varlink.Interface, "interface a.?\nmethod F()->()")
self.assertIsNotNone(varlink.Interface("interface a.b\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface a.b.c\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface a.1\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface a.0.0\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface org.varlink.service\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface com.example.0example\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface com.example.example-dash\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface xn--lgbbat1ad8j.example.algeria\nmethod F()->()").name)
self.assertIsNotNone(varlink.Interface("interface xn--c1yn36f.xn--c1yn36f.xn--c1yn36f\nmethod F()->()").name)
| true | true |
f7f50cdaae738f60bcdafb10c0ba9026db9bf9ef | 10,706 | py | Python | src/money/money.py | cool-RR/money | 7209d4c3bd3ee0d0a8ea5e8a900c51fa81f0526e | [
"MIT"
] | 1 | 2019-09-04T06:04:33.000Z | 2019-09-04T06:04:33.000Z | src/money/money.py | cool-RR/money | 7209d4c3bd3ee0d0a8ea5e8a900c51fa81f0526e | [
"MIT"
] | null | null | null | src/money/money.py | cool-RR/money | 7209d4c3bd3ee0d0a8ea5e8a900c51fa81f0526e | [
"MIT"
] | null | null | null | """
Money classes
"""
import decimal
import re
from .exchange import xrates
from .exceptions import CurrencyMismatch, ExchangeRateNotFound
__all__ = ['Money', 'XMoney']
BABEL_AVAILABLE = False
REGEX_CURRENCY_CODE = re.compile("^[A-Z]{3}$")
try:
import babel
import babel.numbers
BABEL_AVAILABLE = True
except ImportError:
pass
class Money(object):
    """Money class with a decimal amount and a currency.

    Comparisons and arithmetic are defined against other Money of the SAME
    currency (mixing currencies raises CurrencyMismatch) or against plain
    numbers, which are applied to the decimal amount directly.
    """

    # Deliberately unhashable: instances are mutable value objects even
    # though __eq__ is defined.
    __hash__ = None

    def __init__(self, amount="0", currency=None):
        """Build a Money value.

        amount: anything decimal.Decimal() accepts (string preferred).
        currency: mandatory three-letter uppercase ISO 4217 code.
        Raises ValueError on a non-decimal amount or an invalid currency.
        """
        try:
            self.amount = decimal.Decimal(amount)
        except decimal.InvalidOperation:
            raise ValueError("amount value could not be converted to "
                             "Decimal(): '{}'".format(amount)) from None
        if currency in [None, False, '']:
            raise ValueError("invalid currency value: '{}'".format(currency))
        if not REGEX_CURRENCY_CODE.match(currency):
            raise ValueError("currency not in ISO 4217 format: "
                             "'{}'".format(currency))
        self.currency = currency

    def __repr__(self):
        # Machine form: "XXX 123.456" — round-trippable via Money.loads().
        return "{} {}".format(self.currency, self.amount)

    def __str__(self):
        # Human form: thousands separators and exactly two decimals.
        return "{} {:,.2f}".format(self.currency, self.amount)

    def __lt__(self, other):
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '<')
            other = other.amount
        return self.amount < other

    def __le__(self, other):
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '<=')
            other = other.amount
        return self.amount <= other

    def __eq__(self, other):
        # Equal only to another Money with the same amount AND currency;
        # never equal to plain numbers.
        if isinstance(other, Money):
            return ((self.amount == other.amount) and
                    (self.currency == other.currency))
        return False

    def __ne__(self, other):
        return not self == other

    def __gt__(self, other):
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '>')
            other = other.amount
        return self.amount > other

    def __ge__(self, other):
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '>=')
            other = other.amount
        return self.amount >= other

    def __bool__(self):
        """
        Considering Money a numeric type (on ``amount``):

        bool(Money(2, 'XXX')) --> True
        bool(Money(0, 'XXX')) --> False
        """
        return bool(self.amount)

    def __add__(self, other):
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '+')
            other = other.amount
        amount = self.amount + other
        return self.__class__(amount, self.currency)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '-')
            other = other.amount
        amount = self.amount - other
        return self.__class__(amount, self.currency)

    def __rsub__(self, other):
        # number - money == (-money) + number
        return (-self).__add__(other)

    def __mul__(self, other):
        if isinstance(other, Money):
            raise TypeError("multiplication is unsupported between "
                            "two money objects")
        amount = self.amount * other
        return self.__class__(amount, self.currency)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        # Money / Money -> plain Decimal ratio; Money / number -> Money.
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '/')
            elif other.amount == 0:
                raise ZeroDivisionError()
            return self.amount / other.amount
        else:
            if other == 0:
                raise ZeroDivisionError()
            amount = self.amount / other
            return self.__class__(amount, self.currency)

    def __floordiv__(self, other):
        # Same asymmetry as __truediv__: Money // Money -> Decimal quotient.
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, '//')
            elif other.amount == 0:
                raise ZeroDivisionError()
            return self.amount // other.amount
        else:
            if other == 0:
                raise ZeroDivisionError()
            amount = self.amount // other
            return self.__class__(amount, self.currency)

    def __mod__(self, other):
        if isinstance(other, Money):
            raise TypeError("modulo is unsupported between two '{}' "
                            "objects".format(self.__class__.__name__))
        if other == 0:
            raise ZeroDivisionError()
        amount = self.amount % other
        return self.__class__(amount, self.currency)

    def __divmod__(self, other):
        # divmod(Money, Money) -> (Decimal, Decimal);
        # divmod(Money, number) -> (Money, Money).
        if isinstance(other, Money):
            if other.currency != self.currency:
                raise CurrencyMismatch(self.currency, other.currency, 'divmod')
            elif other.amount == 0:
                raise ZeroDivisionError()
            return divmod(self.amount, other.amount)
        else:
            if other == 0:
                raise ZeroDivisionError()
            whole, remainder = divmod(self.amount, other)
            return (self.__class__(whole, self.currency),
                    self.__class__(remainder, self.currency))

    def __pow__(self, other):
        if isinstance(other, Money):
            raise TypeError("power operator is unsupported between two '{}' "
                            "objects".format(self.__class__.__name__))
        amount = self.amount ** other
        return self.__class__(amount, self.currency)

    def __neg__(self):
        return self.__class__(-self.amount, self.currency)

    def __pos__(self):
        return self.__class__(+self.amount, self.currency)

    def __abs__(self):
        return self.__class__(abs(self.amount), self.currency)

    def __int__(self):
        return int(self.amount)

    def __float__(self):
        return float(self.amount)

    def __round__(self, ndigits=0):
        return self.__class__(round(self.amount, ndigits), self.currency)

    def to(self, currency):
        """Return equivalent money object in another currency."""
        if currency == self.currency:
            return self
        rate = xrates.quotation(self.currency, currency)
        if rate is None:
            raise ExchangeRateNotFound(xrates.backend_name,
                                       self.currency, currency)
        amount = self.amount * rate
        return self.__class__(amount, currency)

    def format(self, locale=None, pattern=None):
        """
        Return a locale-aware, currency-formatted string.

        This method emulates babel.numbers.format_currency().

        A specific locale identifier (language[_territory]) can be passed,
        otherwise the system's default locale will be used. A custom
        formatting pattern of the form "¤#,##0.00;(¤#,##0.00)"
        (positive[;negative]) can also be passed, otherwise it will be
        determined from the locale and the CLDR (Unicode Common Locale Data
        Repository) included with Babel.

        >>> m = Money('1234.567', 'EUR')
        >>> m.format() # assuming the system's locale is 'en_US'
        €1,234.57
        >>> m.format('de_DE') # German formatting
        1.234,57 €
        >>> m.format('de', '#,##0 ¤') # German formatting (short), no cents
        1.235 €
        >>> m.format(pattern='#,##0.00 ¤¤¤') # Default locale, full name
        1,234.57 euro

        Learn more about this formatting syntaxis at:
        http://www.unicode.org/reports/tr35/tr35-numbers.html
        """
        if BABEL_AVAILABLE:
            if not locale:
                locale = babel.default_locale('LC_NUMERIC')
            locale = babel.Locale.parse(locale)
            if not pattern:
                # With pattern=None this fetches the locale's default
                # currency pattern (older Babel keys it under None).
                pattern = locale.currency_formats.get(pattern)
            pattern = babel.numbers.parse_pattern(pattern)
            return pattern.apply(self.amount, locale, currency=self.currency)
        else:
            raise NotImplementedError("formatting requires Babel "
                                      "(https://pypi.python.org/pypi/Babel)")

    @classmethod
    def loads(cls, s):
        """Parse from a string representation (repr)."""
        try:
            currency, amount = s.strip().split(' ')
            return cls(amount, currency)
        except ValueError as err:
            raise ValueError("failed to parse string '{}': "
                             "{}".format(s, err)) from None
class XMoney(Money):
    """Money subclass with implicit currency conversion.

    Each binary operation first converts *other* into self.currency through
    the configured exchange backend (Money.to), then defers to Money's
    same-currency semantics, so mixed-currency operands never raise
    CurrencyMismatch (they may raise ExchangeRateNotFound instead).
    """

    def __lt__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__lt__(other)

    def __le__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__le__(other)

    def __gt__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__gt__(other)

    def __ge__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__ge__(other)

    def __add__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__add__(other)

    def __sub__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__sub__(other)

    def __truediv__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__truediv__(other)

    def __floordiv__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__floordiv__(other)

    def __divmod__(self, other):
        if isinstance(other, Money):
            other = other.to(self.currency)
        return super().__divmod__(other)
| 34.314103 | 79 | 0.573977 | import decimal
import re
from .exchange import xrates
from .exceptions import CurrencyMismatch, ExchangeRateNotFound
__all__ = ['Money', 'XMoney']
BABEL_AVAILABLE = False
REGEX_CURRENCY_CODE = re.compile("^[A-Z]{3}$")
try:
import babel
import babel.numbers
BABEL_AVAILABLE = True
except ImportError:
pass
class Money(object):
__hash__ = None
def __init__(self, amount="0", currency=None):
try:
self.amount = decimal.Decimal(amount)
except decimal.InvalidOperation:
raise ValueError("amount value could not be converted to "
"Decimal(): '{}'".format(amount)) from None
if currency in [None, False, '']:
raise ValueError("invalid currency value: '{}'".format(currency))
if not REGEX_CURRENCY_CODE.match(currency):
raise ValueError("currency not in ISO 4217 format: "
"'{}'".format(currency))
self.currency = currency
def __repr__(self):
return "{} {}".format(self.currency, self.amount)
def __str__(self):
return "{} {:,.2f}".format(self.currency, self.amount)
def __lt__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '<')
other = other.amount
return self.amount < other
def __le__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '<=')
other = other.amount
return self.amount <= other
def __eq__(self, other):
if isinstance(other, Money):
return ((self.amount == other.amount) and
(self.currency == other.currency))
return False
def __ne__(self, other):
return not self == other
def __gt__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '>')
other = other.amount
return self.amount > other
def __ge__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '>=')
other = other.amount
return self.amount >= other
def __bool__(self):
return bool(self.amount)
def __add__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '+')
other = other.amount
amount = self.amount + other
return self.__class__(amount, self.currency)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '-')
other = other.amount
amount = self.amount - other
return self.__class__(amount, self.currency)
def __rsub__(self, other):
return (-self).__add__(other)
def __mul__(self, other):
if isinstance(other, Money):
raise TypeError("multiplication is unsupported between "
"two money objects")
amount = self.amount * other
return self.__class__(amount, self.currency)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '/')
elif other.amount == 0:
raise ZeroDivisionError()
return self.amount / other.amount
else:
if other == 0:
raise ZeroDivisionError()
amount = self.amount / other
return self.__class__(amount, self.currency)
def __floordiv__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, '//')
elif other.amount == 0:
raise ZeroDivisionError()
return self.amount // other.amount
else:
if other == 0:
raise ZeroDivisionError()
amount = self.amount // other
return self.__class__(amount, self.currency)
def __mod__(self, other):
if isinstance(other, Money):
raise TypeError("modulo is unsupported between two '{}' "
"objects".format(self.__class__.__name__))
if other == 0:
raise ZeroDivisionError()
amount = self.amount % other
return self.__class__(amount, self.currency)
def __divmod__(self, other):
if isinstance(other, Money):
if other.currency != self.currency:
raise CurrencyMismatch(self.currency, other.currency, 'divmod')
elif other.amount == 0:
raise ZeroDivisionError()
return divmod(self.amount, other.amount)
else:
if other == 0:
raise ZeroDivisionError()
whole, remainder = divmod(self.amount, other)
return (self.__class__(whole, self.currency),
self.__class__(remainder, self.currency))
def __pow__(self, other):
if isinstance(other, Money):
raise TypeError("power operator is unsupported between two '{}' "
"objects".format(self.__class__.__name__))
amount = self.amount ** other
return self.__class__(amount, self.currency)
def __neg__(self):
return self.__class__(-self.amount, self.currency)
def __pos__(self):
return self.__class__(+self.amount, self.currency)
def __abs__(self):
return self.__class__(abs(self.amount), self.currency)
def __int__(self):
return int(self.amount)
def __float__(self):
return float(self.amount)
def __round__(self, ndigits=0):
return self.__class__(round(self.amount, ndigits), self.currency)
def to(self, currency):
if currency == self.currency:
return self
rate = xrates.quotation(self.currency, currency)
if rate is None:
raise ExchangeRateNotFound(xrates.backend_name,
self.currency, currency)
amount = self.amount * rate
return self.__class__(amount, currency)
def format(self, locale=None, pattern=None):
if BABEL_AVAILABLE:
if not locale:
locale = babel.default_locale('LC_NUMERIC')
locale = babel.Locale.parse(locale)
if not pattern:
pattern = locale.currency_formats.get(pattern)
pattern = babel.numbers.parse_pattern(pattern)
return pattern.apply(self.amount, locale, currency=self.currency)
else:
raise NotImplementedError("formatting requires Babel "
"(https://pypi.python.org/pypi/Babel)")
@classmethod
def loads(cls, s):
try:
currency, amount = s.strip().split(' ')
return cls(amount, currency)
except ValueError as err:
raise ValueError("failed to parse string '{}': "
"{}".format(s, err)) from None
class XMoney(Money):
    """Money subclass whose binary operations convert currencies on the fly.

    Whenever the right-hand operand is a Money in another currency it is
    first converted into this object's currency (via ``to``), after which
    the ordinary Money semantics apply.
    """
    def _coerced(self, operand):
        # Convert a Money operand into this object's currency; any other
        # operand (plain numbers, etc.) is passed through untouched.
        if isinstance(operand, Money):
            return operand.to(self.currency)
        return operand

    def __lt__(self, other):
        return super().__lt__(self._coerced(other))

    def __le__(self, other):
        return super().__le__(self._coerced(other))

    def __gt__(self, other):
        return super().__gt__(self._coerced(other))

    def __ge__(self, other):
        return super().__ge__(self._coerced(other))

    def __add__(self, other):
        return super().__add__(self._coerced(other))

    def __sub__(self, other):
        return super().__sub__(self._coerced(other))

    def __truediv__(self, other):
        return super().__truediv__(self._coerced(other))

    def __floordiv__(self, other):
        return super().__floordiv__(self._coerced(other))

    def __divmod__(self, other):
        return super().__divmod__(self._coerced(other))
| true | true |
f7f50d3859dc37348d989f476844d4fc5754a40a | 7,891 | py | Python | dockupdater/lib/config.py | francois4224/dockupdater | 367057140cfeb19d653df2e70da24e4c51e45d38 | [
"MIT"
] | 62 | 2019-03-08T15:08:01.000Z | 2022-03-24T09:13:56.000Z | dockupdater/lib/config.py | francois4224/dockupdater | 367057140cfeb19d653df2e70da24e4c51e45d38 | [
"MIT"
] | 25 | 2019-03-09T03:19:32.000Z | 2022-03-02T16:39:04.000Z | dockupdater/lib/config.py | francois4224/dockupdater | 367057140cfeb19d653df2e70da24e4c51e45d38 | [
"MIT"
] | 12 | 2019-03-08T15:10:55.000Z | 2021-12-27T15:09:07.000Z | import re
from copy import deepcopy
from logging import getLogger
from os import environ
from pathlib import Path
from ..helpers.helpers import convert_to_boolean
from .logger import BlacklistFilter
OPTION_REGEX_PATTERN = r"^(?:weight:(?P<weight>\d+),)?(?P<regex>.*)$"
DEFAULT_REGEX_WEIGHT = 100
MINIMUM_INTERVAL = 30
ENABLE_LABEL = "dockupdater.enable"
DISABLE_LABEL = "dockupdater.disable"
LABELS_MAPPING = {
"dockupdater.latest": "latest",
"dockupdater.notifiers": "notifiers",
"dockupdater.stop_signal": "stop_signal",
"dockupdater.cleanup": "cleanup",
"dockupdater.template_file": "template_file",
"dockupdater.wait": "wait",
"dockupdater.recreate_first": "recreate_first",
"dockupdater.starts": "starts",
"dockupdater.stops": "stops",
}
class DefaultConfig(object):
    """Default configuration.

    These class attributes are the fall-back values for every option; the
    runtime ``Config`` object copies and overrides them from arguments and
    (per container/service) from docker labels.
    """
    hostname = environ.get('HOSTNAME')
    # Scheduling: fixed interval in seconds, or a 5-field cron spec (exclusive).
    interval = 300
    cron = None
    # Docker daemon connection settings.
    docker_sockets = ['unix://var/run/docker.sock']
    docker_tls = False
    docker_tls_verify = True
    log_level = 'info'
    # Update behaviour toggles.
    cleanup = False
    run_once = False
    label = False
    stop_signal = None
    disable_containers_check = False
    disable_services_check = False
    latest = False
    wait = 0
    recreate_first = False
    # Patterns (parsed into OptionRegex) of things to stop/start around updates.
    stops = []
    starts = []
    # Private registry credentials.
    repo_user = None
    repo_pass = None
    # Notification settings.
    notifiers = []
    skip_start_notif = False
    template_file = None
class OptionRegex(object):
    """A name matcher parsed from a start/stop option string.

    Accepted pattern formats:
      - ``weight:<digits>,<regex_pattern>`` (explicit ordering weight)
      - ``<regex_pattern>`` (weight defaults to ``DEFAULT_REGEX_WEIGHT``)
    """
    def __init__(self, pattern):
        """Parse *pattern*; raises AttributeError when it is not usable."""
        match = re.fullmatch(OPTION_REGEX_PATTERN, pattern, re.IGNORECASE)
        if match is None:
            # e.g. a pattern containing a newline never matches
            # OPTION_REGEX_PATTERN; raise a clear error instead of the
            # cryptic NoneType AttributeError the old code produced.
            raise AttributeError("Invalid pattern {} for option start or stop.".format(pattern))
        groups = match.groupdict()
        try:
            re.compile(groups.get("regex"))  # Validate the regex early
            self.regex = groups.get("regex")
        except re.error:
            raise AttributeError("Invalid regex {} for option start or stop.".format(groups.get("regex")))
        self.weight = int(groups.get("weight") or DEFAULT_REGEX_WEIGHT)
        # Optional mapping of "{token}" placeholders substituted in match().
        self.tokens = None
    def match(self, name):
        """Return True when *name* fully matches the (token-expanded) regex."""
        regex = self.regex
        if self.tokens:
            for token, value in self.tokens.items():
                if token and value:
                    regex = regex.replace("{" + str(token) + "}", value)
        return bool(re.fullmatch(regex, name))
    def __repr__(self):
        return f"<Option {self.regex}[{self.weight}]>"
class Config(object):
    """Runtime configuration for dockupdater.

    All option values live in the ``options`` dict; attribute access is
    proxied to it through ``__getattr__``/``__setattr__``.
    """
    def __init__(self, **kwargs):
        # Bypass our own __setattr__ — it needs self.options to exist first.
        super().__setattr__("options", kwargs)
        self.logger = getLogger()
        self.compute_args()
        self.filtered_strings = None
    @classmethod
    def from_labels(cls, config, labels):
        """Create a new config object from an existing config and a dict of docker labels"""
        options = deepcopy(config.options)
        if labels:
            for label, value in labels.items():
                if label in LABELS_MAPPING:
                    # Coerce the raw label string into the option's type.
                    if label in ["dockupdater.notifiers"]:
                        options[LABELS_MAPPING[label]] = value.split(" ")
                    elif label in ["dockupdater.latest", "dockupdater.cleanup"]:
                        options[LABELS_MAPPING[label]] = convert_to_boolean(value)
                    elif label in ["dockupdater.wait"]:
                        options[LABELS_MAPPING[label]] = int(value)
                    elif label in ["dockupdater.stops", "dockupdater.starts"]:
                        options[LABELS_MAPPING[label]] = [OptionRegex(item) for item in value.split(" ")]
                    else:
                        options[LABELS_MAPPING[label]] = value
                    if label == "dockupdater.template_file":
                        # Reload template
                        options["template"] = Config.load_template(options.get('template_file'))
                elif label.startswith("dockupdater."):
                    config.logger.warning("Warning label %s doesn't exist", label)
        return cls(**options)
    def __setattr__(self, key, value):
        # Known option names are stored in the options dict; anything else
        # (logger, filtered_strings, ...) becomes a plain attribute.
        if key in self.options:
            self.options[key] = value
        else:
            super().__setattr__(key, value)
    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; fall back to options.
        try:
            return self.options[attr]
        except KeyError:
            raise AttributeError
    def config_blacklist(self):
        """Mask sensitive data from logs"""
        filtered_strings = [
            getattr(self, key.lower()) for key, value in self.options.items()
            if key.lower() in BlacklistFilter.blacklisted_keys
        ]
        # Clear None values
        self.filtered_strings = list(filter(None, filtered_strings))
        # take lists inside of list and append to list
        for index, value in enumerate(self.filtered_strings, 0):
            if isinstance(value, list) or isinstance(value, tuple):
                self.filtered_strings.extend(self.filtered_strings.pop(index))
                self.filtered_strings.insert(index, self.filtered_strings[-1:][0])
        # Filter out no string item
        self.filtered_strings = [item for item in self.filtered_strings if isinstance(item, str)]
        # Added matching for ports
        ports = [string.split(':')[0] for string in self.filtered_strings if ':' in string]
        self.filtered_strings.extend(ports)
        # Added matching for tcp sockets. ConnectionPool ignores the tcp://
        tcp_sockets = [string.split('//')[1] for string in self.filtered_strings if '//' in string]
        self.filtered_strings.extend(tcp_sockets)
        # Get JUST hostname from tcp//unix
        for socket in getattr(self, 'docker_sockets'):
            self.filtered_strings.append(socket.split('//')[1].split(':')[0])
        for handler in self.logger.handlers:
            handler.addFilter(BlacklistFilter(set(self.filtered_strings)))
    def compute_args(self):
        """Validate and normalize options after construction (auth, cron, regexes)."""
        if self.repo_user and self.repo_pass:
            self.options['auth_json'] = {'Username': self.repo_user, 'Password': self.repo_pass}
        else:
            self.options['auth_json'] = None
        if self.disable_containers_check and self.disable_services_check:
            raise AttributeError("Error you can't disable all monitoring (containers/services).")
        # Config sanity checks
        if self.cron:
            # A list means the cron spec was already parsed/validated earlier.
            if not isinstance(self.cron, list):
                cron_times = self.cron.strip().split(' ')
                if len(cron_times) != 5:
                    self.logger.critical("Cron must be in cron syntax. e.g. * * * * * (5 places).")
                    raise AttributeError("Invalid cron")
                else:
                    self.logger.info("Cron configuration is valid. Using Cron schedule %s", cron_times)
                    self.cron = cron_times
                    self.interval = None
        else:
            if self.interval < MINIMUM_INTERVAL:
                self.logger.warning('Minimum value for interval was 30 seconds.')
                self.interval = MINIMUM_INTERVAL
        # Convert parameters to regex object
        self.stops = [OptionRegex(stop) if not isinstance(stop, OptionRegex) else stop for stop in self.stops]
        self.stops.sort(key=lambda x: x.weight)
        self.starts = [OptionRegex(start) if not isinstance(start, OptionRegex) else start for start in self.starts]
        self.starts.sort(key=lambda x: x.weight)
        self.options['template'] = Config.load_template(self.template_file)
    @staticmethod
    def load_template(template_file):
        """Read the notification template; fall back to the bundled default."""
        # Load default template file
        if not template_file:
            dir_path = Path().absolute()
            template_file = dir_path.joinpath("dockupdater/templates/notification.j2")
        if Path(template_file).exists():
            with open(template_file) as f:
                return f.read()
        else:
            raise AttributeError(f"Template file {template_file} not found")
| 38.305825 | 116 | 0.61285 | import re
from copy import deepcopy
from logging import getLogger
from os import environ
from pathlib import Path
from ..helpers.helpers import convert_to_boolean
from .logger import BlacklistFilter
OPTION_REGEX_PATTERN = r"^(?:weight:(?P<weight>\d+),)?(?P<regex>.*)$"
DEFAULT_REGEX_WEIGHT = 100
MINIMUM_INTERVAL = 30
ENABLE_LABEL = "dockupdater.enable"
DISABLE_LABEL = "dockupdater.disable"
LABELS_MAPPING = {
"dockupdater.latest": "latest",
"dockupdater.notifiers": "notifiers",
"dockupdater.stop_signal": "stop_signal",
"dockupdater.cleanup": "cleanup",
"dockupdater.template_file": "template_file",
"dockupdater.wait": "wait",
"dockupdater.recreate_first": "recreate_first",
"dockupdater.starts": "starts",
"dockupdater.stops": "stops",
}
class DefaultConfig(object):
hostname = environ.get('HOSTNAME')
interval = 300
cron = None
docker_sockets = ['unix://var/run/docker.sock']
docker_tls = False
docker_tls_verify = True
log_level = 'info'
cleanup = False
run_once = False
label = False
stop_signal = None
disable_containers_check = False
disable_services_check = False
latest = False
wait = 0
recreate_first = False
stops = []
starts = []
repo_user = None
repo_pass = None
notifiers = []
skip_start_notif = False
template_file = None
class OptionRegex(object):
def __init__(self, pattern):
match = re.fullmatch(OPTION_REGEX_PATTERN, pattern, re.IGNORECASE).groupdict()
try:
re.compile(match.get("regex"))
self.regex = match.get("regex")
except re.error:
raise AttributeError("Invalid regex {} for option start or stop.".format(match.get("regex")))
self.weight = int(match.get("weight") or DEFAULT_REGEX_WEIGHT)
self.tokens = None
def match(self, name):
regex = self.regex
if self.tokens:
for token, value in self.tokens.items():
if token and value:
regex = regex.replace("{" + str(token) + "}", value)
return bool(re.fullmatch(regex, name))
def __repr__(self):
return f"<Option {self.regex}[{self.weight}]>"
class Config(object):
def __init__(self, **kwargs):
super().__setattr__("options", kwargs)
self.logger = getLogger()
self.compute_args()
self.filtered_strings = None
@classmethod
def from_labels(cls, config, labels):
options = deepcopy(config.options)
if labels:
for label, value in labels.items():
if label in LABELS_MAPPING:
if label in ["dockupdater.notifiers"]:
options[LABELS_MAPPING[label]] = value.split(" ")
elif label in ["dockupdater.latest", "dockupdater.cleanup"]:
options[LABELS_MAPPING[label]] = convert_to_boolean(value)
elif label in ["dockupdater.wait"]:
options[LABELS_MAPPING[label]] = int(value)
elif label in ["dockupdater.stops", "dockupdater.starts"]:
options[LABELS_MAPPING[label]] = [OptionRegex(item) for item in value.split(" ")]
else:
options[LABELS_MAPPING[label]] = value
if label == "dockupdater.template_file":
options["template"] = Config.load_template(options.get('template_file'))
elif label.startswith("dockupdater."):
config.logger.warning("Warning label %s doesn't exist", label)
return cls(**options)
def __setattr__(self, key, value):
if key in self.options:
self.options[key] = value
else:
super().__setattr__(key, value)
def __getattr__(self, attr):
try:
return self.options[attr]
except KeyError:
raise AttributeError
def config_blacklist(self):
filtered_strings = [
getattr(self, key.lower()) for key, value in self.options.items()
if key.lower() in BlacklistFilter.blacklisted_keys
]
# Clear None values
self.filtered_strings = list(filter(None, filtered_strings))
# take lists inside of list and append to list
for index, value in enumerate(self.filtered_strings, 0):
if isinstance(value, list) or isinstance(value, tuple):
self.filtered_strings.extend(self.filtered_strings.pop(index))
self.filtered_strings.insert(index, self.filtered_strings[-1:][0])
# Filter out no string item
self.filtered_strings = [item for item in self.filtered_strings if isinstance(item, str)]
# Added matching for ports
ports = [string.split(':')[0] for string in self.filtered_strings if ':' in string]
self.filtered_strings.extend(ports)
# Added matching for tcp sockets. ConnectionPool ignores the tcp://
tcp_sockets = [string.split('//')[1] for string in self.filtered_strings if '//' in string]
self.filtered_strings.extend(tcp_sockets)
# Get JUST hostname from tcp//unix
for socket in getattr(self, 'docker_sockets'):
self.filtered_strings.append(socket.split('//')[1].split(':')[0])
for handler in self.logger.handlers:
handler.addFilter(BlacklistFilter(set(self.filtered_strings)))
def compute_args(self):
if self.repo_user and self.repo_pass:
self.options['auth_json'] = {'Username': self.repo_user, 'Password': self.repo_pass}
else:
self.options['auth_json'] = None
if self.disable_containers_check and self.disable_services_check:
raise AttributeError("Error you can't disable all monitoring (containers/services).")
if self.cron:
if not isinstance(self.cron, list):
cron_times = self.cron.strip().split(' ')
if len(cron_times) != 5:
self.logger.critical("Cron must be in cron syntax. e.g. * * * * * (5 places).")
raise AttributeError("Invalid cron")
else:
self.logger.info("Cron configuration is valid. Using Cron schedule %s", cron_times)
self.cron = cron_times
self.interval = None
else:
if self.interval < MINIMUM_INTERVAL:
self.logger.warning('Minimum value for interval was 30 seconds.')
self.interval = MINIMUM_INTERVAL
self.stops = [OptionRegex(stop) if not isinstance(stop, OptionRegex) else stop for stop in self.stops]
self.stops.sort(key=lambda x: x.weight)
self.starts = [OptionRegex(start) if not isinstance(start, OptionRegex) else start for start in self.starts]
self.starts.sort(key=lambda x: x.weight)
self.options['template'] = Config.load_template(self.template_file)
@staticmethod
def load_template(template_file):
if not template_file:
dir_path = Path().absolute()
template_file = dir_path.joinpath("dockupdater/templates/notification.j2")
if Path(template_file).exists():
with open(template_file) as f:
return f.read()
else:
raise AttributeError(f"Template file {template_file} not found")
| true | true |
f7f50da159ea586a20e7f2b05bf3de4efdd09183 | 865 | py | Python | sited_py/lib/org_noear_siteder_dao_db_DbApi.py | wistn/sited_py | fbecf09f410bd2494a952383073956946d9df813 | [
"Apache-2.0"
] | null | null | null | sited_py/lib/org_noear_siteder_dao_db_DbApi.py | wistn/sited_py | fbecf09f410bd2494a952383073956946d9df813 | [
"Apache-2.0"
] | null | null | null | sited_py/lib/org_noear_siteder_dao_db_DbApi.py | wistn/sited_py | fbecf09f410bd2494a952383073956946d9df813 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
Author:wistn
since:2020-06-11
LastEditors:Do not edit
LastEditTime:2020-10-06
Description:
"""
class DbApi:
    """Persistence stubs for book metadata (the database layer is not wired up)."""

    @classmethod
    def isFaved(cls, book):
        """Whether *book* is in the favourites; always False in this stub."""
        return False

    @classmethod
    def logBID(cls, source, bookKey, bookUrl):
        """Record a book (source/key/url); no-op until a DB backend exists."""
        return None

    @classmethod
    def getBID(cls, bookKey):
        """Look up the numeric id for *bookKey*; stub returns None."""
        return None
| 22.763158 | 81 | 0.487861 |
class DbApi:
@classmethod
def isFaved(cls, book):
return False
@classmethod
def logBID(cls, source, bookKey, bookUrl):
pass
@classmethod
def getBID(cls, bookKey):
pass
| true | true |
f7f50e51123eed9027284ef4bf9f78fd4b253fb6 | 16,078 | py | Python | tests/test_leafmap.py | 1tylermitchell/leafmap | 9d5107758a85c407ea23c95df3c2bf97efda4d33 | [
"MIT"
] | null | null | null | tests/test_leafmap.py | 1tylermitchell/leafmap | 9d5107758a85c407ea23c95df3c2bf97efda4d33 | [
"MIT"
] | null | null | null | tests/test_leafmap.py | 1tylermitchell/leafmap | 9d5107758a85c407ea23c95df3c2bf97efda4d33 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `leafmap` module."""
import os
import unittest
from leafmap import leafmap
import geopandas as gpd
from unittest.mock import patch
class TestLeafmap(unittest.TestCase):
"""Tests for `leafmap` module."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_add_basemap(self):
"""Check basemaps"""
m = leafmap.Map()
m.add_basemap("TERRAIN")
out_str = m.to_html()
assert "Google Terrain" in out_str
# def test_add_cog_layer(self):
# """Check COG layer"""
# m = leafmap.Map()
# url = "https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-02-16/pine-gulch-fire20/1030010076004E00.tif"
# m.add_cog_layer(url, name="Fire (pre-event)")
# out_str = m.to_html()
# assert "Fire (pre-event)" in out_str
# def test_add_cog_mosaic(self):
# """Check COG mosaic"""
# m = leafmap.Map()
# links = [
# "https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-02-16/pine-gulch-fire20/1030010076004E00.tif",
# "https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-08-18/pine-gulch-fire20/1040010041D3B300.tif",
# ]
# m.add_cog_mosaic(links, name="COG mosaic", attribution="MAXAR")
# out_str = m.to_html()
# assert "COG mosaic" in out_str
# assert "MAXAR" in out_str
def test_add_colorbar(self):
"""Check colorbar"""
m = leafmap.Map()
colors = ["006633", "E5FFCC", "662A00", "D8D8D8", "F5F5F5"]
vmin = 0
vmax = 4000
m.add_colorbar(colors=colors, vmin=vmin, vmax=vmax, caption="Elevation")
out_str = m.to_html()
assert "Elevation" in out_str
@patch("matplotlib.pyplot.show")
def test_add_colormap(self, mock_show):
"""Check colormap"""
m = leafmap.Map()
m.add_colormap(cmap="gray", label="Elevation")
out_str = m.to_html()
assert "Elevation" in out_str
def test_add_gdf(self):
"""Check GeoDataFrame"""
m = leafmap.Map()
gdf = gpd.read_file(
"https://github.com/giswqs/leafmap/raw/master/examples/data/cable-geo.geojson"
)
m.add_gdf(gdf, layer_name="Cable lines")
out_str = m.to_html()
assert "Cable lines" in out_str
def test_add_gdf_from_postgis(self):
"""Check PostGIS"""
m = leafmap.Map()
try:
con = leafmap.connect_postgis(
database="nyc",
host="localhost",
user=None,
password=None,
use_env_var=True,
)
sql = "SELECT * FROM nyc_neighborhoods"
gdf = leafmap.read_postgis(sql, con)
m.add_gdf(gdf, layer_name="NYC")
m.add_gdf_from_postgis(
sql,
con,
layer_name="NYC Neighborhoods",
fill_colors=["red", "green", "blue"],
)
out_str = m.to_html()
assert "NYC Neighborhoods" in out_str
except Exception as _:
out_str = m.to_html()
assert "NYC Neighborhoods" not in out_str
def test_add_geojson(self):
"""Check GeoJSON"""
m = leafmap.Map()
in_geojson = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/cable-geo.geojson"
m.add_geojson(in_geojson, layer_name="Cable lines")
out_str = m.to_html()
assert "Cable lines" in out_str
def test_add_heatmap(self):
"""Check heat map"""
m = leafmap.Map()
in_csv = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv"
m.add_heatmap(
in_csv,
latitude="latitude",
longitude="longitude",
value="pop_max",
name="Heat map",
radius=20,
)
out_str = m.to_html()
assert "Heat map" in out_str
def test_add_kml(self):
"""Check KML"""
m = leafmap.Map()
in_kml = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us-states.kml"
m.add_kml(in_kml, layer_name="US States KML")
out_str = m.to_html()
assert "US States KML" in out_str
def test_add_legend(self):
"""Check legend"""
m = leafmap.Map()
url = (
"https://www.mrlc.gov/geoserver/mrlc_display/NLCD_2016_Land_Cover_L48/wms?"
)
m.add_wms_layer(
url,
layers="NLCD_2016_Land_Cover_L48",
name="NLCD 2016 CONUS Land Cover",
format="image/png",
transparent=True,
)
m.add_legend(builtin_legend="NLCD")
out_str = m.to_html()
assert "NLCD" in out_str
def test_add_marker_cluster(self):
"""Check marker cluster"""
m = leafmap.Map()
m.add_marker_cluster()
out_str = m.to_html()
assert "Marker Cluster" in out_str
def test_add_minimap(self):
"""Check minimap"""
m = leafmap.Map()
m.add_minimap()
out_str = m.to_html()
assert "150px" in out_str
def test_add_osm_from_address(self):
"""Check OSM data from address"""
m = leafmap.Map()
m.add_osm_from_address(
address="New York City",
tags={"amenity": "bar"},
dist=1500,
layer_name="NYC bars",
)
out_str = m.to_html()
assert "NYC bars" in out_str
def test_add_osm_from_bbox(self):
"""Check OSM data from bbox"""
m = leafmap.Map()
north, south, east, west = 40.7551, 40.7454, -73.9738, -73.9965
m.add_osm_from_bbox(
north, south, east, west, tags={"amenity": "bar"}, layer_name="NYC bars"
)
out_str = m.to_html()
assert "NYC bars" in out_str
def test_add_osm_from_geocode(self):
"""Check OSM data from geocode"""
m = leafmap.Map()
m.add_osm_from_geocode("New York City", layer_name="NYC")
out_str = m.to_html()
assert "NYC" in out_str
def test_add_osm_from_place(self):
"""Check OSM data from place"""
m = leafmap.Map()
place = "Bunker Hill, Los Angeles, California"
tags = {"building": True}
m.add_osm_from_place(place, tags, layer_name="Los Angeles, CA")
out_str = m.to_html()
assert "Los Angeles, CA" in out_str
def test_add_osm_from_point(self):
"""Check OSM data from point"""
m = leafmap.Map()
m.add_osm_from_point(
center_point=(46.7808, -96.0156),
tags={"natural": "water"},
dist=10000,
layer_name="Lakes",
)
out_str = m.to_html()
assert "Lakes" in out_str
def test_add_osm_from_polygon(self):
"""Check OSM data from polygon"""
from shapely.geometry import Polygon
m = leafmap.Map()
polygon = Polygon(
[
[-73.996784, 40.725046],
[-73.996784, 40.734282],
[-73.983052, 40.734282],
[-73.983052, 40.725046],
[-73.996784, 40.725046],
]
)
tags = {"building": True}
m.add_osm_from_polygon(polygon, tags, layer_name="NYC Buildings")
out_str = m.to_html()
assert "NYC Buildings" in out_str
def test_add_osm_from_view(self):
"""Check OSM data from view"""
m = leafmap.Map()
m.add_osm_from_view(tags={"building": True}, layer_name="NYC buildings")
out_str = m.to_html()
assert "NYC buildings" in out_str
def test_add_planet_by_month(self):
"""Check Planet monthly imagery"""
m = leafmap.Map()
m.add_planet_by_month(year=2020, month=8)
out_str = m.to_html()
assert "Planet_2020_08" in out_str
def test_add_planet_by_quarter(self):
"""Check Planet quarterly imagery"""
m = leafmap.Map()
m.add_planet_by_quarter(year=2019, quarter=2)
out_str = m.to_html()
assert "Planet_2019_q2" in out_str
def test_add_point_layer(self):
"""Check adding point layer"""
m = leafmap.Map()
url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_cities.geojson"
m.add_point_layer(url, popup=["name", "pop_max"], layer_name="US Cities")
out_str = m.to_html()
assert "US Cities" in out_str
    def test_add_raster(self):
        """Check loading raster data"""
        m = leafmap.Map()
        # NOTE(review): this test depends on the network (Google Drive) and
        # leaves "dem.tif" in the working directory with no cleanup — confirm
        # whether a local fixture and tearDown removal are wanted.
        landsat_url = "https://drive.google.com/file/d/1vRkAWQYsLWCi6vcTMk8vLxoXMFbdMFn8/view?usp=sharing"
        leafmap.download_from_gdrive(landsat_url, "dem.tif", unzip=False)
        m.add_raster("dem.tif", colormap="terrain", layer_name="DEM")
        out_str = m.to_html()
        assert "DEM" in out_str
def test_add_shp(self):
"""Check adding shapefile"""
m = leafmap.Map()
in_shp = (
"https://github.com/giswqs/leafmap/raw/master/examples/data/countries.zip"
)
m.add_shp(in_shp, layer_name="Countries")
out_str = m.to_html()
assert "Countries" in out_str
def test_add_stac_layer(self):
"""Check adding STAC layer"""
m = leafmap.Map()
url = "https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json"
m.add_stac_layer(url, bands=["B3", "B2", "B1"], name="False color")
out_str = m.to_html()
assert "False color" in out_str
def test_add_tile_layer(self):
"""Check adding tile layer"""
m = leafmap.Map()
m.add_tile_layer(
url="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}",
name="Google Satellite",
attribution="Google",
)
out_str = m.to_html()
assert "Google Satellite" in out_str
def test_add_time_slider(self):
"""Check adding time slider"""
m = leafmap.Map()
layers_dict = leafmap.planet_quarterly_tiles()
m.add_time_slider(layers_dict, time_interval=1)
out_str = m.to_html()
assert "Planet_2019_q2" in out_str
def test_add_vector(self):
"""Check adding vector"""
m = leafmap.Map()
url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson"
m.add_vector(
url,
layer_name="Countries",
fill_colors=["red", "yellow", "green", "orange"],
)
out_str = m.to_html()
assert "Countries" in out_str
def test_add_vector_tile_layer(self):
"""Check adding vector tile layer"""
m = leafmap.Map()
url = "https://tile.nextzen.org/tilezen/vector/v1/512/all/{z}/{x}/{y}.mvt?api_key=gCZXZglvRQa6sB2z7JzL1w"
attribution = "Nextzen"
m.add_vector_tile_layer(url, attribution)
out_str = m.to_html()
assert "Nextzen" in out_str
def test_add_wms_layer(self):
"""Check adding WMS layer"""
m = leafmap.Map()
naip_url = "https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?"
m.add_wms_layer(
url=naip_url,
layers="0",
name="NAIP Imagery",
format="image/png",
shown=True,
)
out_str = m.to_html()
assert "NAIP Imagery" in out_str
def test_add_xy_data(self):
"""Check adding xy data"""
m = leafmap.Map()
in_csv = (
"https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv"
)
m.add_xy_data(in_csv, x="longitude", y="latitude", layer_name="World Cities")
out_str = m.to_html()
assert "World Cities" in out_str
def test_basemap_demo(self):
"""Check basemap demo"""
m = leafmap.Map()
m.basemap_demo()
out_str = m.to_html()
assert "Basemaps" in out_str
def test_find_layer(self):
"""Check finding layer"""
m = leafmap.Map()
self.assertIsNone(m.find_layer("HYBRID"))
self.assertIsNotNone(m.find_layer("Google Maps"))
def test_find_layer_index(self):
"""Check finding layer index"""
m = leafmap.Map()
self.assertEqual(m.find_layer_index("Google Maps"), 1)
def test_get_layer_names(self):
"""Check getting layer names"""
m = leafmap.Map()
assert "Google Maps" in m.get_layer_names()
def test_get_scale(self):
"""Check getting scale"""
m = leafmap.Map()
self.assertEqual(m.get_scale(), 9783.94)
def test_image_overlay(self):
"""Check image overlay"""
m = leafmap.Map()
url = "https://www.mrlc.gov/sites/default/files/NLCD_Colour_Classification_Update.jpg"
bounds = [(28, -128), (35, -123)]
m.image_overlay(url=url, bounds=bounds, name="NLCD legend")
out_str = m.to_html()
assert "NLCD legend" in out_str
def test_layer_opacity(self):
"""Check layer opacity"""
m = leafmap.Map()
m.layer_opacity("Google Maps", 0.5)
layer = m.find_layer("Google Maps")
self.assertEqual(layer.opacity, 0.5)
def test_set_center(self):
"""Check set map center"""
m = leafmap.Map()
m.set_center(lon=100, lat=40)
self.assertEqual(m.center, [40, 100])
def test_split_map(self):
"""Check split-panel map"""
m = leafmap.Map()
m.split_map(left_layer="HYBRID", right_layer="ESRI")
out_str = m.to_html()
assert "ESRI" in out_str
def test_to_html(self):
"""Check map to html"""
m = leafmap.Map()
out_str = m.to_html()
assert "Google Maps" in out_str
# def test_to_image(self):
# """Check map to image"""
# m = leafmap.Map()
# out_file = os.path.abspath("map.png")
# m.to_image(out_file)
# self.assertTrue(os.path.exists(out_file))
def test_toolbar_reset(self):
"""Check toolbar reset"""
m = leafmap.Map()
m.toolbar_reset()
toolbar_grid = m.toolbar
for tool in toolbar_grid.children:
self.assertFalse(tool.value)
def test_video_overlay(self):
"""Check video overlay"""
m = leafmap.Map()
url = "https://www.mapbox.com/bites/00188/patricia_nasa.webm"
bounds = [(13, -130), (32, -100)]
m.video_overlay(url=url, bounds=bounds, name="Video")
out_str = m.to_html()
assert "Video" in out_str
def test_zoom_to_bounds(self):
"""Check zoom to bounds"""
m = leafmap.Map()
bounds = [13, -130, 32, -100]
m.zoom_to_bounds(bounds)
out_str = m.to_html()
assert "Google Maps" in out_str
def test_zoom_to_gdf(self):
"""Check zoom to GeoDataFrame"""
m = leafmap.Map()
gdf = gpd.read_file(
"https://github.com/giswqs/leafmap/raw/master/examples/data/cable-geo.geojson"
)
m.zoom_to_gdf(gdf)
out_str = m.to_html()
assert "Google Maps" in out_str
def test_leafmap_split_map(self):
"""Check split-panel map"""
m = leafmap.split_map(left_layer="ROADMAP", right_layer="HYBRID")
out_str = m.to_html()
assert "Google Maps" in out_str
def test_linked_maps(self):
"""Check linked maps"""
layers = ["ROADMAP", "HYBRID"]
m = leafmap.linked_maps(rows=1, cols=2, height="400px", layers=layers)
self.assertEqual(m.n_rows, 1)
self.assertEqual(m.n_columns, 2)
# def test_basemap_tiles(self):
# self.assertIsInstance(leafmap.basemap_tiles.to_dict(), dict)
if __name__ == "__main__":
unittest.main()
| 33.426195 | 166 | 0.581851 |
import os
import unittest
from leafmap import leafmap
import geopandas as gpd
from unittest.mock import patch
class TestLeafmap(unittest.TestCase):
def setUp(self):
def tearDown(self):
def test_add_basemap(self):
m = leafmap.Map()
m.add_basemap("TERRAIN")
out_str = m.to_html()
assert "Google Terrain" in out_str
def test_add_colorbar(self):
m = leafmap.Map()
colors = ["006633", "E5FFCC", "662A00", "D8D8D8", "F5F5F5"]
vmin = 0
vmax = 4000
m.add_colorbar(colors=colors, vmin=vmin, vmax=vmax, caption="Elevation")
out_str = m.to_html()
assert "Elevation" in out_str
@patch("matplotlib.pyplot.show")
def test_add_colormap(self, mock_show):
m = leafmap.Map()
m.add_colormap(cmap="gray", label="Elevation")
out_str = m.to_html()
assert "Elevation" in out_str
def test_add_gdf(self):
m = leafmap.Map()
gdf = gpd.read_file(
"https://github.com/giswqs/leafmap/raw/master/examples/data/cable-geo.geojson"
)
m.add_gdf(gdf, layer_name="Cable lines")
out_str = m.to_html()
assert "Cable lines" in out_str
def test_add_gdf_from_postgis(self):
m = leafmap.Map()
try:
con = leafmap.connect_postgis(
database="nyc",
host="localhost",
user=None,
password=None,
use_env_var=True,
)
sql = "SELECT * FROM nyc_neighborhoods"
gdf = leafmap.read_postgis(sql, con)
m.add_gdf(gdf, layer_name="NYC")
m.add_gdf_from_postgis(
sql,
con,
layer_name="NYC Neighborhoods",
fill_colors=["red", "green", "blue"],
)
out_str = m.to_html()
assert "NYC Neighborhoods" in out_str
except Exception as _:
out_str = m.to_html()
assert "NYC Neighborhoods" not in out_str
def test_add_geojson(self):
m = leafmap.Map()
in_geojson = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/cable-geo.geojson"
m.add_geojson(in_geojson, layer_name="Cable lines")
out_str = m.to_html()
assert "Cable lines" in out_str
def test_add_heatmap(self):
m = leafmap.Map()
in_csv = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/world_cities.csv"
m.add_heatmap(
in_csv,
latitude="latitude",
longitude="longitude",
value="pop_max",
name="Heat map",
radius=20,
)
out_str = m.to_html()
assert "Heat map" in out_str
def test_add_kml(self):
m = leafmap.Map()
in_kml = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us-states.kml"
m.add_kml(in_kml, layer_name="US States KML")
out_str = m.to_html()
assert "US States KML" in out_str
def test_add_legend(self):
m = leafmap.Map()
url = (
"https://www.mrlc.gov/geoserver/mrlc_display/NLCD_2016_Land_Cover_L48/wms?"
)
m.add_wms_layer(
url,
layers="NLCD_2016_Land_Cover_L48",
name="NLCD 2016 CONUS Land Cover",
format="image/png",
transparent=True,
)
m.add_legend(builtin_legend="NLCD")
out_str = m.to_html()
assert "NLCD" in out_str
def test_add_marker_cluster(self):
m = leafmap.Map()
m.add_marker_cluster()
out_str = m.to_html()
assert "Marker Cluster" in out_str
def test_add_minimap(self):
m = leafmap.Map()
m.add_minimap()
out_str = m.to_html()
assert "150px" in out_str
def test_add_osm_from_address(self):
m = leafmap.Map()
m.add_osm_from_address(
address="New York City",
tags={"amenity": "bar"},
dist=1500,
layer_name="NYC bars",
)
out_str = m.to_html()
assert "NYC bars" in out_str
def test_add_osm_from_bbox(self):
m = leafmap.Map()
north, south, east, west = 40.7551, 40.7454, -73.9738, -73.9965
m.add_osm_from_bbox(
north, south, east, west, tags={"amenity": "bar"}, layer_name="NYC bars"
)
out_str = m.to_html()
assert "NYC bars" in out_str
def test_add_osm_from_geocode(self):
m = leafmap.Map()
m.add_osm_from_geocode("New York City", layer_name="NYC")
out_str = m.to_html()
assert "NYC" in out_str
def test_add_osm_from_place(self):
m = leafmap.Map()
place = "Bunker Hill, Los Angeles, California"
tags = {"building": True}
m.add_osm_from_place(place, tags, layer_name="Los Angeles, CA")
out_str = m.to_html()
assert "Los Angeles, CA" in out_str
def test_add_osm_from_point(self):
m = leafmap.Map()
m.add_osm_from_point(
center_point=(46.7808, -96.0156),
tags={"natural": "water"},
dist=10000,
layer_name="Lakes",
)
out_str = m.to_html()
assert "Lakes" in out_str
def test_add_osm_from_polygon(self):
from shapely.geometry import Polygon
m = leafmap.Map()
polygon = Polygon(
[
[-73.996784, 40.725046],
[-73.996784, 40.734282],
[-73.983052, 40.734282],
[-73.983052, 40.725046],
[-73.996784, 40.725046],
]
)
tags = {"building": True}
m.add_osm_from_polygon(polygon, tags, layer_name="NYC Buildings")
out_str = m.to_html()
assert "NYC Buildings" in out_str
def test_add_osm_from_view(self):
m = leafmap.Map()
m.add_osm_from_view(tags={"building": True}, layer_name="NYC buildings")
out_str = m.to_html()
assert "NYC buildings" in out_str
def test_add_planet_by_month(self):
m = leafmap.Map()
m.add_planet_by_month(year=2020, month=8)
out_str = m.to_html()
assert "Planet_2020_08" in out_str
def test_add_planet_by_quarter(self):
m = leafmap.Map()
m.add_planet_by_quarter(year=2019, quarter=2)
out_str = m.to_html()
assert "Planet_2019_q2" in out_str
def test_add_point_layer(self):
m = leafmap.Map()
url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/us_cities.geojson"
m.add_point_layer(url, popup=["name", "pop_max"], layer_name="US Cities")
out_str = m.to_html()
assert "US Cities" in out_str
def test_add_raster(self):
m = leafmap.Map()
landsat_url = "https://drive.google.com/file/d/1vRkAWQYsLWCi6vcTMk8vLxoXMFbdMFn8/view?usp=sharing"
leafmap.download_from_gdrive(landsat_url, "dem.tif", unzip=False)
m.add_raster("dem.tif", colormap="terrain", layer_name="DEM")
out_str = m.to_html()
assert "DEM" in out_str
def test_add_shp(self):
m = leafmap.Map()
in_shp = (
"https://github.com/giswqs/leafmap/raw/master/examples/data/countries.zip"
)
m.add_shp(in_shp, layer_name="Countries")
out_str = m.to_html()
assert "Countries" in out_str
def test_add_stac_layer(self):
m = leafmap.Map()
url = "https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json"
m.add_stac_layer(url, bands=["B3", "B2", "B1"], name="False color")
out_str = m.to_html()
assert "False color" in out_str
def test_add_tile_layer(self):
m = leafmap.Map()
m.add_tile_layer(
url="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}",
name="Google Satellite",
attribution="Google",
)
out_str = m.to_html()
assert "Google Satellite" in out_str
def test_add_time_slider(self):
m = leafmap.Map()
layers_dict = leafmap.planet_quarterly_tiles()
m.add_time_slider(layers_dict, time_interval=1)
out_str = m.to_html()
assert "Planet_2019_q2" in out_str
def test_add_vector(self):
m = leafmap.Map()
url = "https://raw.githubusercontent.com/giswqs/leafmap/master/examples/data/countries.geojson"
m.add_vector(
url,
layer_name="Countries",
fill_colors=["red", "yellow", "green", "orange"],
)
out_str = m.to_html()
assert "Countries" in out_str
def test_add_vector_tile_layer(self):
m = leafmap.Map()
url = "https://tile.nextzen.org/tilezen/vector/v1/512/all/{z}/{x}/{y}.mvt?api_key=gCZXZglvRQa6sB2z7JzL1w"
attribution = "Nextzen"
m.add_vector_tile_layer(url, attribution)
out_str = m.to_html()
assert "Nextzen" in out_str
def test_add_wms_layer(self):
m = leafmap.Map()
naip_url = "https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?"
m.add_wms_layer(
url=naip_url,
layers="0",
name="NAIP Imagery",
format="image/png",
shown=True,
)
out_str = m.to_html()
assert "NAIP Imagery" in out_str
def test_add_xy_data(self):
m = leafmap.Map()
in_csv = (
"https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv"
)
m.add_xy_data(in_csv, x="longitude", y="latitude", layer_name="World Cities")
out_str = m.to_html()
assert "World Cities" in out_str
def test_basemap_demo(self):
m = leafmap.Map()
m.basemap_demo()
out_str = m.to_html()
assert "Basemaps" in out_str
def test_find_layer(self):
m = leafmap.Map()
self.assertIsNone(m.find_layer("HYBRID"))
self.assertIsNotNone(m.find_layer("Google Maps"))
def test_find_layer_index(self):
m = leafmap.Map()
self.assertEqual(m.find_layer_index("Google Maps"), 1)
def test_get_layer_names(self):
m = leafmap.Map()
assert "Google Maps" in m.get_layer_names()
    def test_get_scale(self):
        """get_scale at the default zoom/center returns ipyleaflet's value."""
        m = leafmap.Map()
        # 9783.94 m/px is the known scale for the default zoom level.
        self.assertEqual(m.get_scale(), 9783.94)
def test_image_overlay(self):
m = leafmap.Map()
url = "https://www.mrlc.gov/sites/default/files/NLCD_Colour_Classification_Update.jpg"
bounds = [(28, -128), (35, -123)]
m.image_overlay(url=url, bounds=bounds, name="NLCD legend")
out_str = m.to_html()
assert "NLCD legend" in out_str
def test_layer_opacity(self):
m = leafmap.Map()
m.layer_opacity("Google Maps", 0.5)
layer = m.find_layer("Google Maps")
self.assertEqual(layer.opacity, 0.5)
def test_set_center(self):
m = leafmap.Map()
m.set_center(lon=100, lat=40)
self.assertEqual(m.center, [40, 100])
def test_split_map(self):
m = leafmap.Map()
m.split_map(left_layer="HYBRID", right_layer="ESRI")
out_str = m.to_html()
assert "ESRI" in out_str
def test_to_html(self):
m = leafmap.Map()
out_str = m.to_html()
assert "Google Maps" in out_str
def test_toolbar_reset(self):
m = leafmap.Map()
m.toolbar_reset()
toolbar_grid = m.toolbar
for tool in toolbar_grid.children:
self.assertFalse(tool.value)
def test_video_overlay(self):
m = leafmap.Map()
url = "https://www.mapbox.com/bites/00188/patricia_nasa.webm"
bounds = [(13, -130), (32, -100)]
m.video_overlay(url=url, bounds=bounds, name="Video")
out_str = m.to_html()
assert "Video" in out_str
def test_zoom_to_bounds(self):
m = leafmap.Map()
bounds = [13, -130, 32, -100]
m.zoom_to_bounds(bounds)
out_str = m.to_html()
assert "Google Maps" in out_str
def test_zoom_to_gdf(self):
m = leafmap.Map()
gdf = gpd.read_file(
"https://github.com/giswqs/leafmap/raw/master/examples/data/cable-geo.geojson"
)
m.zoom_to_gdf(gdf)
out_str = m.to_html()
assert "Google Maps" in out_str
def test_leafmap_split_map(self):
m = leafmap.split_map(left_layer="ROADMAP", right_layer="HYBRID")
out_str = m.to_html()
assert "Google Maps" in out_str
def test_linked_maps(self):
layers = ["ROADMAP", "HYBRID"]
m = leafmap.linked_maps(rows=1, cols=2, height="400px", layers=layers)
self.assertEqual(m.n_rows, 1)
self.assertEqual(m.n_columns, 2)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| true | true |
f7f50e7055d5b6f3f0c8269186a5c673279e5136 | 11,839 | py | Python | custom_components/deepstack_face/image_processing.py | Hoellenwesen/HASS-Deepstack-face | 5ca4a75ec7aab092124edf77aa692c78bedb2a96 | [
"MIT"
] | null | null | null | custom_components/deepstack_face/image_processing.py | Hoellenwesen/HASS-Deepstack-face | 5ca4a75ec7aab092124edf77aa692c78bedb2a96 | [
"MIT"
] | null | null | null | custom_components/deepstack_face/image_processing.py | Hoellenwesen/HASS-Deepstack-face | 5ca4a75ec7aab092124edf77aa692c78bedb2a96 | [
"MIT"
] | null | null | null | """
Component that will perform facial recognition via deepstack.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.deepstack_face
"""
import io
import logging
import re
import time
from pathlib import Path
import requests
from PIL import Image, ImageDraw
import deepstack.core as ds
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.components.image_processing import (
ATTR_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingFaceEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_IP_ADDRESS,
CONF_PORT,
)
from homeassistant.core import split_entity_id
_LOGGER = logging.getLogger(__name__)
# rgb(red, green, blue)
RED = (255, 0, 0) # For objects within the ROI
YELLOW = (255,255,0)
GREEN = (34,139,34)
BLUE = (0,0,255)
CONF_API_KEY = "api_key"
CONF_TIMEOUT = "timeout"
CONF_DETECT_ONLY = "detect_only"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
CONF_SAVE_FACES_FOLDER = "save_faces_folder"
CONF_SAVE_FACES = "save_faces"
CONF_SHOW_BOXES = "show_boxes"
CONF_BOX_COLOR = "box_color"
DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10
DOMAIN = "deepstack_face"
CLASSIFIER = "deepstack_face"
DATA_DEEPSTACK = "deepstack_classifiers"
FILE_PATH = "file_path"
SERVICE_TEACH_FACE = "teach_face"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FACES_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_FACES, default=False): cv.boolean,
vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
vol.Optional(CONF_BOX_COLOR, default=RED): cv.string,
}
)
SERVICE_TEACH_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
}
)
def get_valid_filename(name: str) -> str:
    """Sanitize *name* for use as a filename.

    Leading/trailing whitespace is trimmed, interior spaces become
    underscores, and any character other than word characters, dashes,
    and dots is removed.
    """
    normalized = str(name).strip().replace(" ", "_")
    return re.sub(r"(?u)[^-\w.]", "", normalized)
def get_faces(predictions: list, img_width: int, img_height: int):
    """Return faces with formatting for annotating images.

    Each prediction dict (pixel coordinates plus a confidence in [0, 1])
    is converted to a dict with a percentage confidence and a bounding
    box normalized to the image dimensions; the raw prediction is kept
    under the "prediction" key.
    """
    decimal_places = 3

    def _format(pred):
        # Unrecognized faces carry no "userid" key.
        label = pred.get("userid", "unknown")
        score = round(pred["confidence"] * 100, decimal_places)
        width_px = pred["x_max"] - pred["x_min"]
        height_px = pred["y_max"] - pred["y_min"]
        normalized_box = {
            "height": round(height_px / img_height, decimal_places),
            "width": round(width_px / img_width, decimal_places),
            "y_min": round(pred["y_min"] / img_height, decimal_places),
            "x_min": round(pred["x_min"] / img_width, decimal_places),
            "y_max": round(pred["y_max"] / img_height, decimal_places),
            "x_max": round(pred["x_max"] / img_width, decimal_places),
        }
        return {
            "name": label,
            "confidence": score,
            "bounding_box": normalized_box,
            "prediction": pred,
        }

    return [_format(pred) for pred in predictions]
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier.

    Creates one FaceClassifyEntity per configured camera, registers each
    in hass.data so the teach_face service can reach it, and registers
    the teach_face service itself.
    """
    if DATA_DEEPSTACK not in hass.data:
        hass.data[DATA_DEEPSTACK] = []

    # Optional folders come in as strings; normalize to Path objects.
    save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
    if save_file_folder:
        save_file_folder = Path(save_file_folder)

    save_faces_folder = config.get(CONF_SAVE_FACES_FOLDER)
    if save_faces_folder:
        save_faces_folder = Path(save_faces_folder)

    entities = []
    for camera in config[CONF_SOURCE]:
        face_entity = FaceClassifyEntity(
            config[CONF_IP_ADDRESS],
            config[CONF_PORT],
            config.get(CONF_API_KEY),
            config.get(CONF_TIMEOUT),
            config.get(CONF_DETECT_ONLY),
            save_file_folder,
            config.get(CONF_SAVE_TIMESTAMPTED_FILE),
            save_faces_folder,
            config.get(CONF_SAVE_FACES),
            config[CONF_SHOW_BOXES],
            config.get(CONF_BOX_COLOR),
            camera[CONF_ENTITY_ID],
            camera.get(CONF_NAME),
        )
        entities.append(face_entity)
        # Track every classifier so service calls can be fanned out later.
        hass.data[DATA_DEEPSTACK].append(face_entity)
    add_devices(entities)

    def service_handle(service):
        """Handle for services."""
        entity_ids = service.data.get("entity_id")

        # Without an entity_id filter the service targets all classifiers.
        classifiers = hass.data[DATA_DEEPSTACK]
        if entity_ids:
            classifiers = [c for c in classifiers if c.entity_id in entity_ids]

        for classifier in classifiers:
            name = service.data.get(ATTR_NAME)
            file_path = service.data.get(FILE_PATH)
            classifier.teach(name, file_path)

    hass.services.register(
        DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
    )
class FaceClassifyEntity(ImageProcessingFaceEntity):
    """Perform a face classification via a Deepstack server."""

    def __init__(
        self,
        ip_address,
        port,
        api_key,
        timeout,
        detect_only,
        save_file_folder,
        save_timestamped_file,
        save_faces_folder,
        save_faces,
        show_boxes,
        box_color,
        camera_entity,
        name=None,
    ):
        """Init with the API key and model id."""
        super().__init__()
        self._dsface = ds.DeepstackFace(
            ip=ip_address, port=port, api_key=api_key, timeout=timeout
        )
        self._detect_only = detect_only
        self._show_boxes = show_boxes
        self._box_color = box_color
        self._last_detection = None
        self._save_file_folder = save_file_folder
        self._save_timestamped_file = save_timestamped_file
        self._save_faces_folder = save_faces_folder
        self._save_faces = save_faces
        self._camera = camera_entity
        if name:
            self._name = name
        else:
            # Derive a default entity name from the source camera entity id.
            camera_name = split_entity_id(camera_entity)[1]
            self._name = "{} {}".format(CLASSIFIER, camera_name)
        self._predictions = []
        self._matched = {}
        self.total_faces = None

    def process_image(self, image):
        """Process an image, comes in as bytes."""
        # Fix: UnidentifiedImageError was referenced in the except clause
        # below without ever being imported, so a corrupt frame raised a
        # NameError instead of logging a warning. Import it here, with a
        # fallback for Pillow < 7.0 where Image.open raised plain OSError.
        try:
            from PIL import UnidentifiedImageError
        except ImportError:
            UnidentifiedImageError = OSError

        self._predictions = []
        self._matched = {}
        self.total_faces = None

        try:
            pil_image = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
        except UnidentifiedImageError:
            _LOGGER.warning("Deepstack unable to process image, bad data")
            return
        image_width, image_height = pil_image.size

        try:
            # detect_only skips the recognition model and returns anonymous faces.
            if self._detect_only:
                self._predictions = self._dsface.detect(image)
            else:
                self._predictions = self._dsface.recognize(image)
        except ds.DeepstackException as exc:
            _LOGGER.error("Depstack error : %s", exc)
            return

        if len(self._predictions) > 0:
            self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
            self.total_faces = len(self._predictions)
            self._matched = ds.get_recognized_faces(self._predictions)
            self.faces = get_faces(self._predictions, image_width, image_height)
            self.process_faces(
                self.faces, self.total_faces,
            )  # fire image_processing.detect_face

            if not self._detect_only:
                # Face crops and the annotated frame are only saved in
                # recognition mode, where faces carry names.
                if self._save_faces and self._save_faces_folder:
                    self.save_faces(
                        pil_image, self._save_faces_folder
                    )
                if self._save_file_folder:
                    self.save_image(
                        pil_image, self._save_file_folder,
                    )
        else:
            self.total_faces = None
            self._matched = {}

    def teach(self, name: str, file_path: str):
        """Teach classifier a face name."""
        # Home Assistant restricts file access to allowlisted paths.
        if not self.hass.config.is_allowed_path(file_path):
            return

        with open(file_path, "rb") as image:
            self._dsface.register(name, image)
            _LOGGER.info("Depstack face taught name : %s", name)

        event_data = {
            "person_name": name,
            "file_path": file_path,
        }
        self.hass.bus.async_fire(f"{DOMAIN}_teach_face", event_data)

    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Ensure consistent state."""
        return self.total_faces

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    @property
    def force_update(self):
        """Force update to fire state events even if state has not changed."""
        return True

    @property
    def extra_state_attributes(self):
        """Return the classifier attributes."""
        attr = {}
        if self._detect_only:
            attr[CONF_DETECT_ONLY] = self._detect_only
        if not self._detect_only:
            attr["total_matched_faces"] = len(self._matched)
            attr["matched_faces"] = self._matched
        if self._last_detection:
            attr["last_detection"] = self._last_detection
        return attr

    def save_faces(self, pil_image: Image, directory: Path):
        """Saves recognized faces as individual cropped jpg files."""
        for face in self.faces:
            box = face["prediction"]
            confidence = face["confidence"]
            face_name = face["name"]
            # Crop the face out of the full frame using pixel coordinates.
            cropped_image = pil_image.crop(
                (box["x_min"], box["y_min"], box["x_max"], box["y_max"])
            )
            timestamp_save_path = directory / f"{face_name}_{confidence:.1f}_{self._last_detection}.jpg"
            cropped_image.save(timestamp_save_path)
            _LOGGER.info("Deepstack saved face %s", timestamp_save_path)

    def save_image(self, pil_image: Image, directory: Path):
        """Draws the actual bounding box of the detected objects."""
        image_width, image_height = pil_image.size
        draw = ImageDraw.Draw(pil_image)

        for face in self.faces:
            if not self._show_boxes:
                break  # nothing to draw when boxes are disabled
            name = face["name"]
            confidence = face["confidence"]
            box = face["bounding_box"]
            box_label = f"{name}: {confidence:.1f}%"
            draw_box(
                draw,
                (box["y_min"], box["x_min"], box["y_max"], box["x_max"]),
                image_width,
                image_height,
                text=box_label,
                color=self._box_color.upper(),
            )

        # A "_latest" file is always overwritten with the newest frame.
        latest_save_path = (
            directory / f"{get_valid_filename(self._name).lower()}_latest.jpg"
        )
        pil_image.save(latest_save_path)

        if self._save_timestamped_file:
            timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
            pil_image.save(timestamp_save_path)
            _LOGGER.info("Deepstack saved file %s", timestamp_save_path)
| 32.70442 | 104 | 0.62235 | import io
import logging
import re
import time
from pathlib import Path
import requests
from PIL import Image, ImageDraw
import deepstack.core as ds
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.components.image_processing import (
ATTR_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingFaceEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_IP_ADDRESS,
CONF_PORT,
)
from homeassistant.core import split_entity_id
_LOGGER = logging.getLogger(__name__)
RED = (255, 0, 0)
YELLOW = (255,255,0)
GREEN = (34,139,34)
BLUE = (0,0,255)
CONF_API_KEY = "api_key"
CONF_TIMEOUT = "timeout"
CONF_DETECT_ONLY = "detect_only"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
CONF_SAVE_FACES_FOLDER = "save_faces_folder"
CONF_SAVE_FACES = "save_faces"
CONF_SHOW_BOXES = "show_boxes"
CONF_BOX_COLOR = "box_color"
DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10
DOMAIN = "deepstack_face"
CLASSIFIER = "deepstack_face"
DATA_DEEPSTACK = "deepstack_classifiers"
FILE_PATH = "file_path"
SERVICE_TEACH_FACE = "teach_face"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FACES_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_FACES, default=False): cv.boolean,
vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
vol.Optional(CONF_BOX_COLOR, default=RED): cv.string,
}
)
SERVICE_TEACH_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
}
)
def get_valid_filename(name: str) -> str:
return re.sub(r"(?u)[^-\w.]", "", str(name).strip().replace(" ", "_"))
def get_faces(predictions: list, img_width: int, img_height: int):
faces = []
decimal_places = 3
for pred in predictions:
if not "userid" in pred.keys():
name = "unknown"
else:
name = pred["userid"]
confidence = round(pred["confidence"] * 100, decimal_places)
box_width = pred["x_max"] - pred["x_min"]
box_height = pred["y_max"] - pred["y_min"]
box = {
"height": round(box_height / img_height, decimal_places),
"width": round(box_width / img_width, decimal_places),
"y_min": round(pred["y_min"] / img_height, decimal_places),
"x_min": round(pred["x_min"] / img_width, decimal_places),
"y_max": round(pred["y_max"] / img_height, decimal_places),
"x_max": round(pred["x_max"] / img_width, decimal_places),
}
faces.append(
{"name": name, "confidence": confidence, "bounding_box": box, "prediction": pred}
)
return faces
def setup_platform(hass, config, add_devices, discovery_info=None):
if DATA_DEEPSTACK not in hass.data:
hass.data[DATA_DEEPSTACK] = []
save_file_folder = config.get(CONF_SAVE_FILE_FOLDER)
if save_file_folder:
save_file_folder = Path(save_file_folder)
save_faces_folder = config.get(CONF_SAVE_FACES_FOLDER)
if save_faces_folder:
save_faces_folder = Path(save_faces_folder)
entities = []
for camera in config[CONF_SOURCE]:
face_entity = FaceClassifyEntity(
config[CONF_IP_ADDRESS],
config[CONF_PORT],
config.get(CONF_API_KEY),
config.get(CONF_TIMEOUT),
config.get(CONF_DETECT_ONLY),
save_file_folder,
config.get(CONF_SAVE_TIMESTAMPTED_FILE),
save_faces_folder,
config.get(CONF_SAVE_FACES),
config[CONF_SHOW_BOXES],
config.get(CONF_BOX_COLOR),
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
)
entities.append(face_entity)
hass.data[DATA_DEEPSTACK].append(face_entity)
add_devices(entities)
def service_handle(service):
entity_ids = service.data.get("entity_id")
classifiers = hass.data[DATA_DEEPSTACK]
if entity_ids:
classifiers = [c for c in classifiers if c.entity_id in entity_ids]
for classifier in classifiers:
name = service.data.get(ATTR_NAME)
file_path = service.data.get(FILE_PATH)
classifier.teach(name, file_path)
hass.services.register(
DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
)
class FaceClassifyEntity(ImageProcessingFaceEntity):
def __init__(
self,
ip_address,
port,
api_key,
timeout,
detect_only,
save_file_folder,
save_timestamped_file,
save_faces_folder,
save_faces,
show_boxes,
box_color,
camera_entity,
name=None,
):
super().__init__()
self._dsface = ds.DeepstackFace(
ip=ip_address, port=port, api_key=api_key, timeout=timeout
)
self._detect_only = detect_only
self._show_boxes = show_boxes
self._box_color = box_color
self._last_detection = None
self._save_file_folder = save_file_folder
self._save_timestamped_file = save_timestamped_file
self._save_faces_folder = save_faces_folder
self._save_faces = save_faces
self._camera = camera_entity
if name:
self._name = name
else:
camera_name = split_entity_id(camera_entity)[1]
self._name = "{} {}".format(CLASSIFIER, camera_name)
self._predictions = []
self._matched = {}
self.total_faces = None
def process_image(self, image):
self._predictions = []
self._matched = {}
self.total_faces = None
try:
pil_image = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Deepstack unable to process image, bad data")
return
image_width, image_height = pil_image.size
try:
if self._detect_only:
self._predictions = self._dsface.detect(image)
else:
self._predictions = self._dsface.recognize(image)
except ds.DeepstackException as exc:
_LOGGER.error("Depstack error : %s", exc)
return
if len(self._predictions) > 0:
self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
self.total_faces = len(self._predictions)
self._matched = ds.get_recognized_faces(self._predictions)
self.faces = get_faces(self._predictions, image_width, image_height)
self.process_faces(
self.faces, self.total_faces,
)
if not self._detect_only:
if self._save_faces and self._save_faces_folder:
self.save_faces(
pil_image, self._save_faces_folder
)
if self._save_file_folder:
self.save_image(
pil_image, self._save_file_folder,
)
else:
self.total_faces = None
self._matched = {}
def teach(self, name: str, file_path: str):
if not self.hass.config.is_allowed_path(file_path):
return
with open(file_path, "rb") as image:
self._dsface.register(name, image)
_LOGGER.info("Depstack face taught name : %s", name)
event_data = {
"person_name": name,
"file_path": file_path,
}
self.hass.bus.async_fire(f"{DOMAIN}_teach_face", event_data)
@property
def camera_entity(self):
return self._camera
@property
def name(self):
return self._name
@property
def state(self):
return self.total_faces
@property
def should_poll(self):
return False
@property
def force_update(self):
return True
@property
def extra_state_attributes(self):
attr = {}
if self._detect_only:
attr[CONF_DETECT_ONLY] = self._detect_only
if not self._detect_only:
attr["total_matched_faces"] = len(self._matched)
attr["matched_faces"] = self._matched
if self._last_detection:
attr["last_detection"] = self._last_detection
return attr
def save_faces(self, pil_image: Image, directory: Path):
for face in self.faces:
box = face["prediction"]
name = face["name"]
confidence = face["confidence"]
face_name = face["name"]
cropped_image = pil_image.crop(
(box["x_min"], box["y_min"], box["x_max"], box["y_max"])
)
timestamp_save_path = directory / f"{face_name}_{confidence:.1f}_{self._last_detection}.jpg"
cropped_image.save(timestamp_save_path)
_LOGGER.info("Deepstack saved face %s", timestamp_save_path)
def save_image(self, pil_image: Image, directory: Path):
image_width, image_height = pil_image.size
draw = ImageDraw.Draw(pil_image)
for face in self.faces:
if not self._show_boxes:
break
name = face["name"]
confidence = face["confidence"]
box = face["bounding_box"]
box_label = f"{name}: {confidence:.1f}%"
box_color = self._box_color
draw_box(
draw,
(box["y_min"], box["x_min"], box["y_max"], box["x_max"]),
image_width,
image_height,
text=box_label,
color=box_color.upper(),
)
latest_save_path = (
directory / f"{get_valid_filename(self._name).lower()}_latest.jpg"
)
pil_image.save(latest_save_path)
if self._save_timestamped_file:
timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
pil_image.save(timestamp_save_path)
_LOGGER.info("Deepstack saved file %s", timestamp_save_path)
| true | true |
f7f50e76af61b9b530f207d296ad3d77d467050d | 60,957 | py | Python | python/paddle/distributed/fleet/launch_utils.py | ucsk/Paddle | 1d4566592287d84b39f7f3cab2f00e9d3f993d92 | [
"Apache-2.0"
] | 2 | 2022-01-04T10:51:58.000Z | 2022-01-10T12:29:08.000Z | python/paddle/distributed/fleet/launch_utils.py | ucsk/Paddle | 1d4566592287d84b39f7f3cab2f00e9d3f993d92 | [
"Apache-2.0"
] | null | null | null | python/paddle/distributed/fleet/launch_utils.py | ucsk/Paddle | 1d4566592287d84b39f7f3cab2f00e9d3f993d92 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import os
import signal
import copy
import sys
import subprocess
import tempfile
import shutil
from contextlib import closing
import multiprocessing
import socket
import warnings
import six
import struct
import json
import paddle
import paddle.fluid as fluid
from distutils.util import strtobool
import paddle.utils.cpp_extension.extension_utils as utils
logger = logging.getLogger("root")
logger.propagate = False
class DistributeMode():
    """
    There are various mode for fleetrun, each of them is designed for different model.
    """
    COLLECTIVE = 0  # collective (all-reduce style) training
    PS = 1  # parameter-server training
    PS_HETER = 2  # parameter-server training with heterogeneous workers
class DeviceMode():
    """
    Training devices type.

    Fix: the original class re-assigned ``UNKNOWN = 3`` after
    ``ASCEND_NPU = 3``, which both clobbered the -1 sentinel and made
    UNKNOWN indistinguishable from ASCEND_NPU. The duplicate assignment
    is removed so UNKNOWN keeps its sentinel value of -1.
    """
    UNKNOWN = -1
    CPU = 0
    GPU = 1
    # KUNLUN and XPU are aliases for the same Baidu Kunlun accelerator.
    KUNLUN = 2
    XPU = 2
    ASCEND_NPU = 3
class Cluster(object):
    """Topology of one distributed job: all pods (nodes) and their trainers."""

    def __init__(self, hdfs):
        # NOTE(review): the `hdfs` argument is accepted but ignored —
        # self.hdfs is unconditionally reset to None. Presumably a leftover;
        # confirm before relying on the parameter.
        self.job_server = None
        self.pods = []
        self.hdfs = None
        self.job_stage_flag = None

    def __str__(self):
        return "job_server:{} pods:{} job_stage_flag:{} hdfs:{}".format(
            self.job_server, [str(pod) for pod in self.pods],
            self.job_stage_flag, self.hdfs)

    def __eq__(self, cluster):
        # Clusters are equal when their pods match pairwise (in order)
        # and the job stage flag is the same.
        if len(self.pods) != len(cluster.pods):
            return False

        for a, b in zip(self.pods, cluster.pods):
            if a != b:
                return False

        if self.job_stage_flag != cluster.job_stage_flag:
            return False

        return True

    def __ne__(self, cluster):
        return not self.__eq__(cluster)

    def update_pods(self, cluster):
        """Replace this cluster's pods with a shallow copy of another's."""
        self.pods = copy.copy(cluster.pods)

    def trainers_nranks(self):
        """Total number of trainer processes across all pods."""
        return len(self.trainers_endpoints())

    def pods_nranks(self):
        """Number of pods (nodes) in the cluster."""
        return len(self.pods)

    def trainers_endpoints(self):
        """Flat list of every trainer endpoint, pod by pod."""
        r = []
        for pod in self.pods:
            for t in pod.trainers:
                r.append(t.endpoint)
        return r

    def world_device_ids(self):
        """Per-trainer lists of accelerator ids, stringified."""
        r = []
        for pod in self.pods:
            for t in pod.trainers:
                str_accelerators = [str(acc) for acc in t.accelerators]
                r.append(str_accelerators)
        return r

    def pods_endpoints(self):
        """One "addr:port" endpoint per pod; asserts both parts are set."""
        r = []
        for pod in self.pods:
            ep = "{}:{}".format(pod.addr, pod.port)
            assert pod.port != None and pod.addr != None, "{} not a valid endpoint".format(
                ep)
            r.append(ep)
        return r

    def get_pod_by_id(self, pod_id):
        """Return the pod whose id matches pod_id (string compare), else None."""
        for pod in self.pods:
            if str(pod_id) == str(pod.id):
                return pod

        return None
class JobServer(object):
    """Endpoint of the job server coordinating a distributed job."""

    def __init__(self):
        self.endpoint = None

    def __str__(self):
        return "{}".format(self.endpoint)

    def __eq__(self, j):
        # Fix: the original compared `self.endpint` (typo), which raised
        # AttributeError whenever two JobServer objects were compared.
        return self.endpoint == j.endpoint

    def __ne__(self, j):
        return not self == j
class Trainer(object):
    """A single training process: its devices, endpoint, and global rank."""

    def __init__(self):
        self.accelerators = []  # device ids visible to this trainer
        self.endpoint = None  # "ip:port" this trainer listens on
        self.rank = None  # global trainer rank
        self.stage = None  # pipeline/heter stage, when applicable

    def __str__(self):
        return "accelerator:{} endpoint:{} rank:{}".format(
            self.accelerators, self.endpoint, self.rank)

    def __eq__(self, t):
        # Trainers match when endpoint, rank, and the full accelerator
        # list (order-sensitive) are identical.
        if len(self.accelerators) != len(t.accelerators):
            return False

        if self.endpoint != t.endpoint or \
                self.rank != t.rank:
            return False

        for a, b in zip(self.accelerators, t.accelerators):
            if a != b:
                return False

        return True

    def __ne__(self, t):
        return not self == t

    # NOTE: the original `def rank(self): return self.rank` method was dead
    # code — the instance attribute `self.rank` assigned in __init__ shadows
    # it, and calling it would just return itself-as-attribute anyway.
    # It has been removed; callers read the `rank` attribute directly.
class Pod(object):
    """One node of the job: its address/port and every process it hosts."""

    def __init__(self):
        self.rank = None  # node rank within the cluster
        self.id = None  # pod identifier (compared as string)
        self.addr = None  # node IP address
        self.port = None  # pod port
        self.trainers = []  # Trainer objects running on this node
        self.servers = []  # parameter-server processes
        self.workers = []  # PS-mode worker processes
        self.heter_workers = []  # heterogeneous (accelerator) workers
        self.accelerators = []  # all accelerator ids visible on this node
        self.device_mode = None  # a DeviceMode value

    def __str__(self):
        return "rank:{} id:{} addr:{} port:{} visible_accelerator:{} trainers:{} servers:{} \
            workers:{} heter_workers:{}".format(
            self.rank, self.id, self.addr, self.port, self.accelerators, [
                str(t) for t in self.trainers
            ], [str(s) for s in self.servers], [str(w) for w in self.workers],
            [str(h) for h in self.heter_workers])

    def __eq__(self, pod):
        # Field-by-field comparison; each mismatch is logged at debug level
        # so cluster-change detection can explain why a restart happens.
        if self.rank != pod.rank or \
                self.id != pod.id or \
                self.addr != pod.addr or \
                self.port != pod.port:
            logger.debug("pod {} != {}".format(self, pod))
            return False

        if len(self.trainers) != len(pod.trainers):
            logger.debug("trainers {} != {}".format(self.trainers,
                                                    pod.trainers))
            return False

        for i in range(len(self.trainers)):
            if self.trainers[i] != pod.trainers[i]:
                logger.debug("trainer {} != {}".format(self.trainers[i],
                                                       pod.trainers[i]))
                return False

        if len(self.servers) != len(pod.servers):
            logger.debug("servers {} != {}".format(self.servers, pod.servers))
            return False

        for i in range(len(self.servers)):
            if self.servers[i] != pod.servers[i]:
                logger.debug("servers {} != {}".format(self.servers[i],
                                                       pod.servers[i]))
                return False

        if len(self.workers) != len(pod.workers):
            logger.debug("workers {} != {}".format(self.workers, pod.workers))
            return False

        for i in range(len(self.workers)):
            if self.workers[i] != pod.workers[i]:
                logger.debug("workers {} != {}".format(self.workers[i],
                                                       pod.workers[i]))
                return False

        return True

    def __ne__(self, pod):
        return not self == pod

    def parse_response(self, res_pods):
        # Intentionally a no-op placeholder; no implementation exists here.
        pass

    # NOTE(review): this method is unreachable — the `self.rank` attribute
    # set in __init__ shadows it on every instance.
    def rank(self):
        return self.rank

    def get_visible_accelerators(self):
        """Comma-separated accelerator id string, e.g. "0,1,2"."""
        r = ""
        for g in self.accelerators:
            r += "{},".format(g)

        assert r != "", "this pod {} can't see any accelerators".format(self)

        r = r[:-1]
        return r
def get_logger(log_level=20, name="root"):
    """Return a logger configured with a formatted stream handler.

    Fix: the original unconditionally attached a new StreamHandler on
    every call, so repeated calls made each log record print multiple
    times. The handler is now added only if the logger has none yet.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level)

    if not logger.handlers:
        log_handler = logging.StreamHandler()
        log_format = logging.Formatter(
            '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')
        log_handler.setFormatter(log_format)
        logger.addHandler(log_handler)
    return logger
def get_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                devices_per_proc):
    """Build a Cluster/Pod topology from node IPs and trainer endpoints.

    trainer_endpoints is a per-node list of endpoint lists; each node gets
    one Trainer per entry in devices_per_proc, with globally increasing
    ranks. Returns (cluster, pod_for_node_ip).
    """
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    cluster = Cluster(hdfs=None)
    trainer_rank = 0
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode
        cur_node_endpoints = trainer_endpoints[node_rank]
        # when use paddlecloud, endpoints may > devices_per_proc(user_defined)
        assert len(cur_node_endpoints) >= len(
            devices_per_proc
        ), "current trainer_endpoints size should be greater equal than acclerators size."
        for i in range(len(devices_per_proc)):
            trainer = Trainer()
            # GPU/NPU: record devices on both the trainer and the pod;
            # XPU: only on the trainer. A per-proc entry may itself be a
            # list/tuple when one process drives several devices.
            if device_mode == DeviceMode.GPU or device_mode == DeviceMode.ASCEND_NPU:
                if isinstance(devices_per_proc[i], (list, tuple)):
                    trainer.accelerators.extend(devices_per_proc[i])
                    pod.accelerators.extend(devices_per_proc[i])
                else:
                    trainer.accelerators.append(devices_per_proc[i])
                    pod.accelerators.append(devices_per_proc[i])
            elif device_mode == DeviceMode.XPU:
                if isinstance(devices_per_proc[i], (list, tuple)):
                    trainer.accelerators.extend(devices_per_proc[i])
                else:
                    trainer.accelerators.append(devices_per_proc[i])
            trainer.endpoint = "%s" % (cur_node_endpoints[i])
            trainer.rank = trainer_rank
            trainer_rank += 1

            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]
def terminate_local_procs(procs):
    """Terminate every TrainerProc in *procs*, escalating in three steps:
    SIGTERM to the process group (POSIX only), then ``terminate()``, then
    repeated SIGKILL.  Exits the launcher with status 1 if anything survives.
    """
    # try to terminate process by group, this happend in multiprocess senario in user process
    # NOTE: killpg/getpgid do not exist on Windows, hence the os.name guard.
    if os.name != 'nt':
        for p in procs:
            if p.proc.poll() is None:
                os.killpg(os.getpgid(p.proc.pid), signal.SIGTERM)
                if p.log_fn:
                    p.log_fn.close()
                logger.info("terminate process group gid:{}".format(
                    p.proc.pid))
        time.sleep(1)

    # second pass: terminate any process still running individually
    for p in procs:
        if p.proc.poll() is None:
            p.proc.terminate()
            if p.log_fn:
                p.log_fn.close()
            logger.debug("terminate process id:{}".format(p.proc.pid))

    # wait all process terminiated
    time.sleep(3)
    # final pass: SIGKILL stragglers, re-checking up to 50 times (3s apart)
    for step in range(0, 50):
        alive = False
        for p in procs:
            if p.proc.poll() is None:  # not termniate
                os.kill(p.proc.pid, signal.SIGKILL)
                alive = True

        if not alive:
            logger.info("terminate all the procs")
            return

        time.sleep(3)

    logger.fatal("can't kill all process and exit")
    exit(1)
def get_host_name_ip():
    """Return ``(hostname, ip)`` for the local machine, or None when the
    name lookup fails.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt and SystemExit; it is narrowed to ``socket.error``
    (an alias of OSError on Python 3, covering gaierror/herror).
    """
    try:
        host_name = socket.gethostname()
        host_ip = socket.gethostbyname(host_name)
        return host_name, host_ip
    except socket.error:
        return None
def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Register ``--<argname>`` on *argparser* with a default and help text.

    Usage:
    .. code-block:: python

        parser = argparse.ArgumentParser()
        add_argument("name", str, "Jonh", "User name.", parser)
        args = parser.parse_args()
    """
    # bool flags go through strtobool so "0"/"false"/"yes"/... all parse
    arg_type = strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=arg_type,
        help=help + ' Default: %(default)s.',
        **kwargs)
def find_free_ports(num):
    """Return a set of *num* currently-free TCP port numbers, or None when
    that many distinct ports could not be found within 401 probes."""

    def _probe_port():
        # Note(wangxi): Close the connection with a TCP RST instead
        # of a TCP FIN, to avoid time_wait state.
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                         struct.pack('ii', 1, 0))
            s.bind(('', 0))
            return s.getsockname()[1]

    selected = set()
    # the original probed at most 401 times (step 1..401) before giving up
    for _ in range(401):
        selected.add(_probe_port())
        if len(selected) >= num:
            return selected

    print("can't find avilable port and use the specified static port now!")
    return None
def get_ports(num, offset):
    """Return *num* ports for a new role.

    With FLAGS_START_PORT set, returns the fixed range starting at
    ``FLAGS_START_PORT + offset``; otherwise probes for free ports (which
    may yield None when probing fails).
    """
    start_port = os.environ.get('FLAGS_START_PORT')
    if start_port is None:
        free = find_free_ports(num)
        return list(free) if free is not None else None
    base = int(start_port) + offset
    return range(base, base + num, 1)
def pretty_print_envs(envs, header=None):
spacing = 2
max_k = 40
max_v = 45
for k, v in envs.items():
max_k = max(max_k, len(k))
h_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format(max_k, " " * spacing,
max_v)
l_format = " " + "|{{:>{}s}}{{}}{{:^{}s}}|\n".format(max_k, max_v)
length = max_k + max_v + spacing
border = " +" + "".join(["="] * length) + "+"
line = " +" + "".join(["-"] * length) + "+"
draws = ""
draws += border + "\n"
if header:
draws += h_format.format(header[0], header[1])
else:
draws += h_format.format("fleetrun Distributed Envs", "Value")
draws += line + "\n"
for k, v in envs.items():
if isinstance(v, str) and len(v) >= max_v:
str_v = "... " + v[-41:]
else:
str_v = v
draws += l_format.format(k, " " * spacing, str(str_v))
draws += border
_str = "\n{}\n".format(draws)
return _str
class TrainerProc(object):
    """Bookkeeping record for one locally launched trainer subprocess."""

    def __init__(self):
        self.proc = None  # subprocess.Popen handle
        self.log_fn = None  # open log file object, or None when not logging
        self.log_offset = None  # byte offset used when tailing the log file
        self.rank = None  # global trainer rank
        self.local_rank = None  # rank within this node/pod
        self.cmd = None  # argv list the process was started with
# module-wide flag: when True, trainers are launched under `coverage run`
_run_with_coverage = False


def run_with_coverage(*args):
    """Get (no args) or set (exactly one bool arg) the coverage flag."""
    global _run_with_coverage
    assert len(args) <= 1, "len(args) {} should <= 1".format(len(args))
    if args:
        assert isinstance(args[0], bool)
        _run_with_coverage = args[0]
    return _run_with_coverage
def start_local_trainers(cluster,
                         pod,
                         training_script,
                         training_script_args,
                         log_dir=None,
                         envs=None):
    """Spawn one subprocess per trainer in *pod* and return their handles.

    Args:
        cluster: Cluster for the whole job (supplies endpoints and world
            device ids).
        pod: local Pod whose trainers are launched.
        training_script (str): script each subprocess runs.
        training_script_args (list[str]): arguments forwarded to the script.
        log_dir (str): when set, per-process output goes to
            {log_dir}/workerlog.N and endpoints to {log_dir}/endpoints.log;
            otherwise subprocesses inherit this process's stdio.
        envs (dict): base environment; defaults to a copy of os.environ.

    Returns:
        list[TrainerProc]: one entry per started subprocess.
    """
    if envs is None:
        current_env = copy.copy(os.environ.copy())
    else:
        current_env = copy.copy(envs)

    # paddle broadcast ncclUniqueId use socket, and
    # proxy maybe make trainers unreachable, so delete them.
    # if we set them to "", grpc will log error message "bad uri"
    # so just delete them.
    current_env.pop("http_proxy", None)
    current_env.pop("https_proxy", None)

    ids = cluster.world_device_ids()
    res = [':'.join(ele) for ele in ids]
    procs = []
    for idx, t in enumerate(pod.trainers):
        # distributed identity for this trainer process
        proc_env = {
            "PADDLE_TRAINER_ID": "%d" % t.rank,
            "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint,
            "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(),
            "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
            "PADDLE_RANK_IN_NODE": str(idx),
            "PADDLE_LOCAL_DEVICE_IDS":
            ",".join([str(acc) for acc in t.accelerators]),
            "PADDLE_WORLD_DEVICE_IDS": ",".join(res),
        }

        # per-device-type selection flags
        if len(t.accelerators) > 0 and pod.device_mode == DeviceMode.GPU:
            proc_env["FLAGS_selected_gpus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])

        elif len(t.
                 accelerators) > 0 and pod.device_mode == DeviceMode.ASCEND_NPU:
            proc_env["FLAGS_selected_npus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])

        if len(t.accelerators) > 0:
            proc_env["FLAGS_selected_accelerators"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])
        # to do: same code style in future
        if fluid.core.is_compiled_with_xpu() and len(t.accelerators) > 0:
            proc_env["FLAGS_selected_xpus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])

        current_env.update(proc_env)

        coverage_args = []
        if run_with_coverage():
            # run trainers under coverage in parallel mode (-p data files)
            coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
        cmd = [sys.executable, "-u"] + coverage_args + [training_script
                                                        ] + training_script_args

        logger.debug("start trainer proc{} env:{}".format(cmd, current_env))

        if idx == 0:
            # only print the full env table for the first process
            logger.info("Local start {} processes. First process distributed "
                        "environment info (Only For Debug): {}".format(
                            len(pod.trainers),
                            pretty_print_envs(proc_env, ("Distributed Envs",
                                                         "Value"))))
            logger.info(
                "details about PADDLE_TRAINER_ENDPOINTS can be found in "
                "{}/endpoints.log, and detail running logs maybe found in "
                "{}/workerlog.0".format(log_dir, log_dir))
        fn = None
        # detach each trainer into its own process group (POSIX) so that
        # terminate_local_procs can signal the whole group
        pre_fn = None if os.name == 'nt' else os.setsid
        if log_dir is not None:
            os.system("mkdir -p {}".format(log_dir))
            if os.path.exists("%s/endpoints.log" % log_dir):
                os.system("rm -f {}/endpoints.log".format(log_dir))
            with open("%s/endpoints.log" % log_dir, "w") as f:
                f.write("PADDLE_TRAINER_ENDPOINTS: \n")
                f.write("\n".join(cluster.trainers_endpoints()))
            fn = open("%s/workerlog.%d" % (log_dir, idx), "a")
            proc = subprocess.Popen(
                cmd, env=current_env, stdout=fn, stderr=fn, preexec_fn=pre_fn)
        else:
            proc = subprocess.Popen(cmd, env=current_env, preexec_fn=pre_fn)

        tp = TrainerProc()
        tp.proc = proc
        tp.rank = t.rank
        tp.local_rank = idx
        tp.log_fn = fn
        tp.log_offset = fn.tell() if fn else None
        tp.cmd = cmd

        procs.append(tp)

    return procs
def pull_worker_log(tp):
    """Stream new lines from *tp*'s log file to stdout and advance its
    ``log_offset``.  Lines that cannot be encoded for the current stdout
    are replaced with a pointer to the original log file."""
    if not tp.log_fn:
        return
    with open(tp.log_fn.name, 'r') as fin:
        fin.seek(tp.log_offset, 0)
        for line in fin:
            try:
                sys.stdout.write(line)
            except UnicodeEncodeError:
                sys.stdout.write(
                    'UnicodeEncodeError occurs at this line. '
                    'Please refer to the original log file "%s"\n' %
                    tp.log_fn.name)
        tp.log_offset = fin.tell()
def watch_local_trainers(procs, nranks):
    """Poll local trainer processes once, relaying local-rank-0's log output.

    Args:
        procs (list[TrainerProc]): processes from start_local_trainers.
        nranks (int): total trainer count — used only in abort messages.

    Returns:
        True when at least one process is still running, False when every
        process has exited with code 0, and None after an interrupt/abort
        (the bare ``return`` paths).  When any trainer exited non-zero, all
        processes are terminated and the launcher exits with status 1.
    """
    try:
        error = False
        error_rank = []
        # wait all process finish or one error
        alive = False
        for p in procs:
            if p.log_fn and p.local_rank == 0:
                pull_worker_log(p)

            ret = p.proc.poll()
            if ret is None:
                alive = True
            elif ret != 0:
                error = True
                error_rank.append(p.rank)

        if error:
            terminate_local_procs(procs)
            # exit(1) raises SystemExit inside this try block — it is caught
            # by the `except SystemExit` handler below, which logs the
            # failing ranks before returning.
            exit(1)

    except KeyboardInterrupt:
        logger.warning("KeyboardInterrupt, exit")
        terminate_local_procs(procs)
        return
    except SystemExit:
        logger.error(
            "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".
            format(nranks, error_rank))
        terminate_local_procs(procs)
        return
    except:
        # NOTE(review): bare except — any programming error in the loop above
        # is also reported as a trainer abort here.
        logger.error(
            "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".
            format(nranks, error_rank))
        terminate_local_procs(procs)
        return

    return alive
def get_gpus(gpus):
    """Resolve the ``--gpus`` argument into a list of device-id strings.

    When *gpus* is None every visible CUDA device is used.  When
    CUDA_VISIBLE_DEVICES is set, the requested ids are translated into
    indices relative to that list (e.g. CUDA_VISIBLE_DEVICES=4,5,6,7 with
    gpus=4,5,6,7 yields 0,1,2,3).
    """
    if gpus is None:
        num_gpus = fluid.core.get_cuda_device_count()
        return [str(i) for i in range(0, num_gpus)]

    cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
    if not cuda_visible_devices:
        return [item.strip() for item in gpus.split(',')]

    visible = cuda_visible_devices.split(',')
    for item in gpus.split(','):
        assert item in visible, "Can't find "\
            "your gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
            % (item, cuda_visible_devices)
    res_gpus = [visible.index(item.strip()) for item in gpus.split(',')]
    logger.info("Change selected_gpus into reletive values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "CUDA_VISIBLE_DEVICES:{}".format(gpus, res_gpus, visible))
    return res_gpus
def get_xpus(xpus):
    """Resolve the ``--xpus`` argument into a list of device-id strings.

    Mirrors get_gpus: None selects every visible XPU; when
    XPU_VISIBLE_DEVICES is set, the requested ids are translated into
    indices relative to that list (e.g. XPU_VISIBLE_DEVICES=4,5,6,7 with
    xpus=4,5,6,7 yields 0,1,2,3).
    """
    if xpus is None:
        num_xpus = fluid.core.get_xpu_device_count()
        return [str(i) for i in range(0, num_xpus)]

    xpu_visible_devices = os.getenv("XPU_VISIBLE_DEVICES")
    if not xpu_visible_devices:
        return [item.strip() for item in xpus.split(',')]

    visible = xpu_visible_devices.split(',')
    for item in xpus.split(','):
        assert item in visible, "Can't find "\
            "your xpus %s in XPU_VISIBLE_DEVICES[%s]."\
            % (item, xpu_visible_devices)
    res_xpus = [visible.index(item.strip()) for item in xpus.split(',')]
    logger.info("Change selected_xpus into reletive values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "XPU_VISIBLE_DEVICES:{}".format(xpus, res_xpus, visible))
    return res_xpus
def get_device_mode(backend):
    """Map a collective backend name onto a DeviceMode.

    NPU is checked first and wins regardless of *backend* when the build
    supports it and devices are present; then 'nccl'+GPUs -> GPU,
    'bkcl'+XPUs -> XPU, 'gloo' -> CPU.

    Raises:
        RuntimeError: when no supported device/backend combination matches.
    """
    if fluid.core.is_compiled_with_npu() and \
            fluid.core.get_npu_device_count() > 0:
        print("launch train in ascend npu mode!")
        return DeviceMode.ASCEND_NPU

    if backend == 'nccl' and \
            fluid.core.get_cuda_device_count() > 0:
        print("launch train in GPU mode!")
        return DeviceMode.GPU

    if backend == 'bkcl' and fluid.core.get_xpu_device_count() > 0:
        print("launch train in XPU mode")
        return DeviceMode.XPU

    if backend == 'gloo':
        print("launch train in CPU mode")
        return DeviceMode.CPU

    raise RuntimeError("Don't supported devices")
def get_device_proc_info(args):
    """Return ``(device_mode, devices_per_proc)`` for the requested backend.

    With ``--nproc_per_node`` set, visible devices are partitioned into that
    many equally sized groups (one group per process); otherwise each device
    gets its own process.  CPU mode yields logical slot indices; ASCEND_NPU
    yields None (device assignment handled elsewhere).
    """
    # device_mode
    device_mode = get_device_mode(args.backend)

    # devices
    devices_per_proc = []
    if device_mode == DeviceMode.GPU:
        gpus = get_gpus(args.gpus)
        if args.nproc_per_node is not None:
            assert (len(gpus) % int(args.nproc_per_node)) ==0, \
                "gpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(gpus), args.nproc_per_node)

            n = int(len(gpus) / int(args.nproc_per_node))
            devices_per_proc = [
                gpus[i:i + n] for i in six.moves.range(0, len(gpus), n)
            ]
        else:
            devices_per_proc = gpus
    elif device_mode == DeviceMode.ASCEND_NPU:
        devices_per_proc = None
    elif device_mode == DeviceMode.XPU:
        xpus = get_xpus(args.xpus)
        if args.nproc_per_node is not None:
            assert (len(xpus) % int(args.nproc_per_node)) == 0, \
                "xpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(xpus), args.nproc_per_node)

            n = int(len(xpus) / int(args.nproc_per_node))
            devices_per_proc = [
                xpus[i:i + n] for i in six.moves.range(0, len(xpus), n)
            ]
        else:
            devices_per_proc = xpus
    elif device_mode == DeviceMode.CPU:
        if hasattr(args, "paddle_cpuonly") and args.nproc_per_node is None:
            #NOTE (xiongkun03) set it to cpu core number
            args.nproc_per_node = multiprocessing.cpu_count()
        if args.nproc_per_node is None:
            devices_per_proc = [0]
        else:
            devices_per_proc = [x for x in range(0, args.nproc_per_node)]
    else:
        assert False, "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
            device_mode)

    return (device_mode, devices_per_proc)
def direct_start(args):
    """Run the training script directly with the current environment
    (ps-cpu mode on paddlecloud) and block until it exits."""
    launch_cmd = [sys.executable, "-u", args.training_script] + \
        args.training_script_args
    subprocess.Popen(launch_cmd).wait()
    return
def get_custom_endpoints(origin_endpoints, offset=0):
    """Shift every port in a comma-separated "ip:port" list by *offset*.

    origin_endpoint: ip:port
    user_define_endpoint: ip:(port+offset)
    """
    assert origin_endpoints != None
    shifted = []
    for endpoint in origin_endpoints.split(","):
        parts = endpoint.split(":")
        shifted.append("{}:{}".format(parts[0], int(parts[1]) + offset))
    return ",".join(shifted)
#def cloud_ps_heter_env_set(args):
# environs = {}
#
# paddle_trainer_endpoints = os.getenv("TRAINER_IP_PORT_LIST", "")
# assert paddle_trainer_endpoints != None
#
# paddle_pserver_endpoints = os.getenv("PSERVER_IP_PORT_LIST", "")
# assert paddle_pserver_endpoints != None
#
# # hard code for paddlecloud custom-framework
# avilable_ports = os.getenv("TRAINER_PORTS", "").split(",")
# assert len(
# avilable_ports
# ) >= 2, "set paddle_ports_num >= 2 in config.ini for paddlecloud job submit"
#
# # hard code for paddlecloud custom-framework
# trainers_num = len(paddle_pserver_endpoints.split(","))
# assert trainers_num != 0
# environs["PADDLE_TRAINERS_NUM"] = trainers_num
# environs["TRAINERS_NUM"] = trainers_num
#
# # hard code for paddlecloud custom-framework
# environs["PADDLE_HETER_TRAINER_IP_PORT_LIST"] = paddle_trainer_endpoints
# environs["PADDLE_PSERVERS_IP_PORT_LIST"] = paddle_pserver_endpoints
# environs["PADDLE_TRAINER_ENDPOINTS"] = get_custom_endpoints(
# paddle_pserver_endpoints, 1)
# heter_worker_num = len(paddle_trainer_endpoints.split(","))
# if (args.heter_worker_num != None) and (
# heter_worker_num != args.heter_worker_num):
# warnings.warn(
# "Your fleetrun setting: heter_worker_num is {}, but we find {} device can be used, this setting has been changed.".
# format(args.heter_worker_num, heter_worker_num))
# args.heter_worker_num = heter_worker_num
#
# for k, v in environs.items():
# os.environ[k] = str(v)
# logger.info("Set heter parameter server env: {}".format(
# pretty_print_envs(environs)))
def get_mapped_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                       node_mapping_ranks):
    """Build a Cluster whose trainer ranks follow an explicit ip->ranks map.

    Args:
        node_ips (list[str]): IPs of every node.
        node_ip (str): IP of the local node; selects the returned pod.
        trainer_endpoints (list[list[str]]): per-node "ip:port" lists.
        device_mode: must be DeviceMode.GPU (only supported mode).
        node_mapping_ranks (list[list[int]]): global trainer ranks hosted on
            each node, indexed by node rank.

    Returns:
        (Cluster, Pod): the whole cluster and the pod for *node_ip*.
    """
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    assert device_mode == DeviceMode.GPU, \
        "Only support get mapped cluster for gpu now."
    cluster = Cluster(hdfs=None)
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode
        cur_node_endpoints = trainer_endpoints[node_rank]

        # choose rank from global mapped ranks and set it to the trainer.
        ranks_per_node = node_mapping_ranks[node_rank]
        for i in range(len(ranks_per_node)):
            trainer = Trainer()
            # change global rank(mapped) to local rank within each node.
            # e.g. mapped ranks of node: 3,4,7 -> 0,1,2
            # NOTE(review): index(ranks_per_node[i]) equals i when a node's
            # ranks are unique; with duplicates it would pick the first match.
            local_rank = ranks_per_node.index(ranks_per_node[i])
            trainer.accelerators.append(local_rank)
            trainer.endpoint = "%s" % (cur_node_endpoints[i])
            # global mapped ranks
            trainer.rank = ranks_per_node[i]

            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]
def get_mapped_cluster_from_args(args, device_mode):
    """Build a rank-mapped cluster from the ``--rank_mapping_file`` JSON.

    Expects JSON of the form
    ``{"ip_ranks": [{"ip": ..., "ranks": [...]}, ...]}``.  Only
    DeviceMode.GPU is supported.  Returns ``(cluster, local pod)`` via
    get_mapped_cluster.
    """
    assert device_mode == DeviceMode.GPU, \
        "Only support get mapped cluster for gpu now."
    gpus_num = fluid.core.get_cuda_device_count()

    # parse ip-ranks json file
    json_data = None
    with args.rank_mapping_file as json_file:
        json_data = json.load(json_file)

    node_ips = []
    node_ranks_mapping = []
    ip_ranks_list = json_data['ip_ranks']
    for ip_ranks in ip_ranks_list:
        node_ips.append(ip_ranks['ip'])
        node_ranks_mapping.append(ip_ranks['ranks'])

    # determine the local node's ip: trivial for a single node, otherwise
    # prefer --host and fall back to hostname resolution
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        if args.host:
            node_ip = args.host
        else:
            _, node_ip = get_host_name_ip()

    assert node_ip in node_ips, \
        "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    assert len(node_ranks_mapping[node_rank]) <= gpus_num, \
        "number of ranks mapped to one node should not exceed the avaiable ones."
    assert len(node_ranks_mapping) == len(node_ips), \
        "ranks length should be equal to ips length."

    logger.debug("parsed from args: node_ips:{} node_ip:{} "
                 "node_rank:{} node_ranks_mapping:{}".format(
                     node_ips, node_ip, node_rank, node_ranks_mapping[
                         node_rank]))

    # NOTE: there are different number of global mapped ranks on each node.
    free_ports = []
    trainer_endpoints = []
    for ip in node_ips:
        node_rank = node_ips.index(ip)
        if os.environ.get('FLAGS_START_PORT') is not None:
            # fixed port layout when FLAGS_START_PORT is configured
            start_port = int(os.environ.get('FLAGS_START_PORT'))
            free_ports = [
                x
                for x in range(start_port, start_port + len(node_ranks_mapping[
                    node_rank]))
            ]
        else:
            free_ports = find_free_ports(len(node_ranks_mapping[node_rank]))
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
    return get_mapped_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                              node_ranks_mapping)
class ParameterServerLauncher(object):
    """Launcher for parameter-server training: starts servers, workers and
    (in PS_HETER mode) heterogeneous workers, locally or across nodes."""

    def __init__(self, args, distribute_mode):
        """Store launch arguments and derive all role endpoints.

        Args:
            args: parsed launcher command-line arguments.
            distribute_mode: DistributeMode value; PS_HETER enables the
                heterogeneous-worker stage bookkeeping below.
        """
        self.args = args
        self.distribute_mode = distribute_mode

        # role counts — filled in by get_role_endpoints()
        self.server_num = 0
        self.worker_num = 0
        self.heter_worker_num = 0

        # endpoint strings ("ip:port,ip:port,...") and their split-out forms
        self.server_endpoints = ""
        self.server_endpoints_ips = []
        self.server_endpoints_port = []
        self.worker_endpoints = ""
        self.worker_endpoints_ips = []
        self.worker_endpoints_port = []
        self.heter_worker_endpoints = ""
        self.heter_worker_endpoints_ips = []
        self.heter_worker_endpoints_port = []

        # whether every role lives on this single machine
        self.is_local = True
        self.current_node_ip = ""

        # heter-pipeline stage bookkeeping (used only in PS_HETER mode)
        self.stage_trainer_num = []
        self.stage_heter_map = {}
        self.stage_list = []
        self.stage_device_map = {}
        self.stage_num = 0

        self.get_role_endpoints(args)
    def get_role_endpoints(self, args):
        """Derive server / worker / heter-worker endpoints from *args*.

        Counts may come from ``--server_num``/``--worker_num``/
        ``--heter_worker_num`` (ports auto-assigned via get_ports) or from
        explicit ``--servers``/``--workers``/``--heter_workers`` endpoint
        lists.  Also computes the gloo http endpoint, the set of node ips,
        whether the job is local, and this node's rank.
        """
        # ---- server endpoints ----
        if args.server_num:
            self.server_num = args.server_num
            if args.servers:
                assert len(
                    args.servers.split(",")
                ) == self.server_num, "The server_num and servers doesn't match. Expect servers endpoints num epual to server_num, but received servers enpoint num: {} and server_num {}".format(
                    len(args.servers.split(",")), self.server_num)
                self.server_endpoints = args.servers
            else:
                ports = get_ports(self.server_num, 0)
                self.server_endpoints = ",".join(
                    ["127.0.0.1:" + str(x) for x in ports])
        else:
            assert args.servers != "", "The setting of Parameter-Server must has server_num or servers."
            self.server_endpoints = args.servers
            self.server_num = len(self.server_endpoints.split(","))

        # get worker envs
        if args.worker_num:
            self.worker_num = args.worker_num
            if args.workers:
                assert len(
                    args.workers.split(",")
                ) == self.worker_num, "The worker_num and workers doesn't match. Expect workers endpoints num epual to worker_num, but received workers enpoint num: {} and worker_num {}".format(
                    len(args.workers.split(",")), self.worker_num)

                self.worker_endpoints = args.workers
            else:
                # worker ports start after the server ports
                ports = get_ports(self.worker_num, self.server_num)
                self.worker_endpoints = ",".join(
                    ["127.0.0.1:" + str(x) for x in ports])
        else:
            assert args.workers != "", "The setting of Parameter-Server must has worker_num or workers."
            worker_endpoints_ips = [
                x.strip().split(":")[0] for x in args.workers.split(",")
            ]
            self.worker_num = len(worker_endpoints_ips)
            worker_endpoints_len = [
                len(x.strip().split(":")) for x in args.workers.split(",")
            ]

            if 1 in worker_endpoints_len:
                # if no port value in worker_endpoints, will set default port values.
                start_port = 6170
                worker_endpoints_port = range(
                    start_port + self.server_num,
                    start_port + self.server_num + self.worker_num, 1)
                # create endpoints str
                worker_endpoints = []
                for i in range(self.worker_num):
                    worker_endpoints.append(":".join((worker_endpoints_ips[
                        i], str(worker_endpoints_port[i]))))
                self.worker_endpoints = ",".join(worker_endpoints)
            else:
                self.worker_endpoints = args.workers

        # get heter worker envs
        if self.distribute_mode == DistributeMode.PS_HETER:
            assert args.heter_devices != "", "The setting of Parameter-Server heter mode must has heter_devices."
            self.stage_device_map[1] = "cpu"  # for cpu trainer
            # stages 2..N each get one device type from --heter_devices
            heter_devices_list = args.heter_devices.split(";")
            for i in range(len(heter_devices_list)):
                self.stage_device_map[i + 2] = heter_devices_list[i]

            # stage 1 is the plain (cpu) trainer stage
            self.stage_heter_map[1] = self.worker_endpoints
            if args.heter_worker_num:
                self.stage_heter_trainer_num = args.heter_worker_num.split(";")
                self.stage_heter_trainer_num = [
                    int(trainer_num)
                    for trainer_num in self.stage_heter_trainer_num
                ]

                if args.heter_workers:
                    # explicit endpoints per stage — counts must agree
                    assert len(args.heter_workers.split(";")) == len(
                        self.stage_heter_trainer_num
                    ), "The stage_num and heter_workers doesn't match. Expect heter_workers endpoints stage num epual to heter_worker_num stage, but received heter_workers enpoint stage num: {} and heter_worker_num stage {}".format(
                        len(args.heter_workers.split(";")),
                        len(self.stage_heter_trainer_num))
                    heter_worker_endpoints_list = args.heter_workers.split(";")
                    self.heter_worker_endpoints = ""
                    for i in range(len(self.stage_heter_trainer_num)):
                        if self.heter_worker_endpoints != "":
                            self.heter_worker_endpoints += ","
                        heter_worker_endpoints = heter_worker_endpoints_list[
                            i].split(",")
                        assert len(
                            heter_worker_endpoints
                        ) == self.stage_heter_trainer_num[
                            i], "The heter trainer num in stage {} is not equal in args.heter_worker_num and args.heter_workers".format(
                                i)

                        heter_worker_endpoints_ips = [
                            x.strip().split(":")[0]
                            for x in heter_worker_endpoints
                        ]
                        heter_worker_endpoints_len = [
                            len(x.strip().split(":"))
                            for x in heter_worker_endpoints
                        ]

                        if 1 in heter_worker_endpoints_len:
                            # if no port value in heter_worker_endpoint, will set default port values.
                            heter_worker_endpoints_port = get_ports(
                                len(heter_worker_endpoints_ips), self.worker_num
                                + self.server_num + self.heter_worker_num)

                            new_heter_worker_endpoints = []
                            for j in range(len(heter_worker_endpoints_ips)):
                                new_heter_worker_endpoints.append(":".join((
                                    heter_worker_endpoints_ips[j], str(
                                        heter_worker_endpoints_port[j]))))
                            ip_port_list = ",".join(new_heter_worker_endpoints)
                        else:
                            ip_port_list = ",".join(heter_worker_endpoints)

                        self.stage_heter_map[i + 2] = ip_port_list
                        self.stage_list.extend([i + 2] *
                                               len(ip_port_list.split(',')))

                        self.heter_worker_num += self.stage_heter_trainer_num[i]
                        self.heter_worker_endpoints += ip_port_list
                else:
                    # no explicit endpoints: auto-assign localhost ports
                    for i in range(len(self.stage_heter_trainer_num)):
                        heter_trainer_num = self.stage_heter_trainer_num[i]
                        ports = get_ports(heter_trainer_num,
                                          self.server_num + self.worker_num +
                                          self.heter_worker_num)
                        ip_port_list = ",".join(
                            ["127.0.0.1:" + str(x) for x in ports])
                        self.stage_heter_map[i + 2] = ip_port_list
                        self.stage_list.extend([i + 2] *
                                               len(ip_port_list.split(',')))
                        self.heter_worker_num += heter_trainer_num
                        if self.heter_worker_endpoints != "":
                            self.heter_worker_endpoints += ","
                        self.heter_worker_endpoints += ip_port_list
            else:
                # only --heter_workers given: derive per-stage counts from it
                assert args.heter_workers != "", "The setting of Parameter-Server heter mode must has heter_worker_num or heter_workers."
                self.stage_heter_trainer_num = []
                heter_worker_endpoints_list = args.heter_workers.split(";")
                self.heter_worker_endpoints = ""
                for i in range(len(heter_worker_endpoints_list)):
                    heter_worker_endpoints = heter_worker_endpoints_list[
                        i].split(",")
                    self.stage_heter_trainer_num.append(
                        len(heter_worker_endpoints))
                    heter_worker_endpoints_ips = [
                        x.strip().split(":")[0] for x in heter_worker_endpoints
                    ]
                    heter_worker_endpoints_len = [
                        len(x.strip().split(":"))
                        for x in heter_worker_endpoints
                    ]
                    if 1 in heter_worker_endpoints_len:
                        # if no port value in heter_worker_endpoint, will set default port values.
                        heter_worker_endpoints_port = get_ports(
                            len(heter_worker_endpoints_ips), self.worker_num +
                            self.server_num + self.heter_worker_num)
                        new_heter_worker_endpoints = []
                        for j in range(len(heter_worker_endpoints_ips)):
                            new_heter_worker_endpoints.append(":".join((
                                heter_worker_endpoints_ips[j], str(
                                    heter_worker_endpoints_port[j]))))
                        ip_port_list = ",".join(new_heter_worker_endpoints)
                    else:
                        ip_port_list = ",".join(heter_worker_endpoints)

                    self.stage_heter_map[i + 2] = ip_port_list
                    self.stage_list.extend([i + 2] *
                                           len(ip_port_list.split(',')))

                    self.heter_worker_num += self.stage_heter_trainer_num[-1]
                    if self.heter_worker_endpoints != "":
                        self.heter_worker_endpoints += ","
                    self.heter_worker_endpoints += ip_port_list

            # stage 1 (cpu trainers) first, then each heter stage
            self.stage_trainer_num = [self.worker_num
                                      ] + self.stage_heter_trainer_num
            self.stage_num = len(self.stage_trainer_num)

        # get http_port
        if args.http_port:
            http_port = [args.http_port]
        else:
            http_port = get_ports(
                1, self.server_num + self.worker_num + self.heter_worker_num)
        http_ip = self.server_endpoints.split(",")[0].split(":")[0]
        self.http_port = http_ip + ":" + str(http_port[0])

        # check local or user define
        self.server_endpoints_ips = [
            x.strip().split(":")[0] for x in self.server_endpoints.split(",")
        ]
        self.worker_endpoints_ips = [
            x.strip().split(":")[0] for x in self.worker_endpoints.split(",")
        ]
        self.server_endpoints_port = [
            x.strip().split(":")[1] for x in self.server_endpoints.split(",")
        ]
        self.worker_endpoints_port = [
            x.strip().split(":")[1] for x in self.worker_endpoints.split(",")
        ]
        self.node_ips = []
        for ip in self.server_endpoints_ips:
            if ip not in self.node_ips:
                self.node_ips.append(ip)
        for ip in self.worker_endpoints_ips:
            if ip not in self.node_ips:
                self.node_ips.append(ip)

        if self.distribute_mode == DistributeMode.PS_HETER:
            self.heter_worker_endpoints_ips = [
                x.strip().split(":")[0]
                for x in self.heter_worker_endpoints.split(",")
            ]
            self.heter_worker_endpoints_port = [
                x.strip().split(":")[1]
                for x in self.heter_worker_endpoints.split(",")
            ]
            for ip in self.heter_worker_endpoints_ips:
                if ip not in self.node_ips:
                    self.node_ips.append(ip)

        # a single distinct ip means everything runs on this machine
        if len(set(self.node_ips)) == 1:
            self.is_local = True
            self.current_node_ip = self.node_ips[0]
        else:
            self.is_local = False
            pod_ip = os.getenv("POD_IP", None)
            if pod_ip == None:
                _, self.current_node_ip = get_host_name_ip()
            else:
                self.current_node_ip = pod_ip
            if not self.distribute_mode == DistributeMode.PS_HETER:
                assert self.current_node_ip in self.node_ips, "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" \
                    % (self.current_node_ip, self.node_ips)
        if self.current_node_ip in self.node_ips:
            self.node_rank = self.node_ips.index(self.current_node_ip)
            logger.debug(
                "parsed from args: node_ips:{} current_node_ip:{} node_rank:{}".
                format(self.node_ips, self.current_node_ip, self.node_rank))
    def start_ps(self):
        """Build this node's pod and start its servers, workers and (in
        PS_HETER mode) heter workers, then block until training finishes.

        With local workers, waits for the workers and then terminates the
        servers/heter workers; without local workers, blocks on the
        server/heter-worker processes instead.  Finally removes the gloo
        rendezvous directory.
        """
        # nodes that host none of the endpoints have nothing to start
        if not self.current_node_ip in self.node_ips:
            return
        cluster = Cluster(hdfs=None)
        server_rank = 0
        worker_rank = 0
        heter_worker_rank = 0
        for node_rank, ip in enumerate(self.node_ips):
            pod = Pod()
            pod.rank = node_rank
            pod.addr = ip
            for i in range(len(self.server_endpoints_ips)):
                if ip == self.server_endpoints_ips[i]:
                    server = Trainer()
                    server.endpoint = "%s:%s" % (ip,
                                                 self.server_endpoints_port[i])
                    server.rank = server_rank
                    server_rank += 1
                    pod.servers.append(server)
            for j in range(len(self.worker_endpoints_ips)):
                if ip == self.worker_endpoints_ips[j]:
                    worker = Trainer()
                    worker.endpoint = "%s:%s" % (ip,
                                                 self.worker_endpoints_port[j])
                    worker.rank = worker_rank
                    worker.stage = 1
                    worker_rank += 1
                    pod.workers.append(worker)
            # heter_worker_endpoints_ips is empty outside PS_HETER mode,
            # so this loop is a no-op there
            for k in range(len(self.heter_worker_endpoints_ips)):
                if ip == self.heter_worker_endpoints_ips[k]:
                    heter_worker = Trainer()
                    heter_worker.endpoint = "%s:%s" % (
                        ip, self.heter_worker_endpoints_port[k])
                    heter_worker.rank = heter_worker_rank
                    heter_worker.stage = self.stage_list[k]
                    heter_worker_rank += 1
                    pod.heter_workers.append(heter_worker)
            cluster.pods.append(pod)

        pod = cluster.pods[self.node_rank]
        self.gloo_rendezvous_dir = tempfile.mkdtemp()

        # 3. subproces start
        self.procs = {"worker": [], "server": [], "heter_worker": []}
        self.cmds = {"worker": [], "server": [], "heter_worker": []}
        self.log_fns = {"worker": [], "server": [], "heter_worker": []}

        self.start_pod_server(self.args, pod)
        self.start_pod_worker(self.args, pod)
        if self.distribute_mode == DistributeMode.PS_HETER:
            self.start_pod_heter_worker(self.args, pod)

        logger.info(
            "Please check servers, workers and heter_worker logs in {}/workerlog.*, {}/serverlog.* and {}/heterlog.*".
            format(self.args.log_dir, self.args.log_dir, self.args.log_dir))

        # 4. wait for finish training
        if len(self.procs["worker"]) > 0:
            # if node has worker procs
            # only wait worker to finish here
            for i, proc in enumerate(self.procs["worker"]):
                self.procs["worker"][i].proc.wait()
                if len(self.log_fns["worker"]) > 0:
                    self.log_fns["worker"][i].close()
            logger.info(
                "all workers exit, going to finish parameter server and heter_worker."
            )
            if len(self.procs["heter_worker"]) > 0:
                for i, proc in enumerate(self.procs["heter_worker"]):
                    self.log_fns["heter_worker"][i].close()
                    self.procs["heter_worker"][i].proc.terminate()
                logger.info("all heter_worker are killed")

            if len(self.procs["server"]) > 0:
                for i, proc in enumerate(self.procs["server"]):
                    self.log_fns["server"][i].close()
                    self.procs["server"][i].proc.terminate()
                logger.info("all parameter server are killed")

        else:
            # if node has not worker procs
            # blocking training process
            if len(self.procs["server"]) > 0:
                for i, proc in enumerate(self.procs["server"]):
                    self.procs["server"][i].proc.wait()

            if len(self.procs["heter_worker"]) > 0:
                for i, proc in enumerate(self.procs["heter_worker"]):
                    self.procs["heter_worker"][i].proc.wait()

        if os.path.exists(self.gloo_rendezvous_dir):
            shutil.rmtree(self.gloo_rendezvous_dir)
def start_pod_server(self, args, pod):
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
for idx, cur_server in enumerate(pod.servers):
if self.distribute_mode == DistributeMode.PS_HETER:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST":
self.heter_worker_endpoints,
"PADDLE_PORT": cur_server.endpoint.split(":")[1],
"TRAINING_ROLE": "PSERVER",
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"POD_IP": cur_server.endpoint.split(":")[0],
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
else:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_PORT": cur_server.endpoint.split(":")[1],
"TRAINING_ROLE": "PSERVER",
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"POD_IP": cur_server.endpoint.split(":")[0],
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
self.cmds["server"].append(cmd)
if idx == 0:
logger.info(
"Local server start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.servers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"
))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/serverlog.%d" % (args.log_dir, idx), "w")
self.log_fns["server"].append(fn)
proc = subprocess.Popen(
cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_server.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
self.procs["server"].append(tp)
def start_pod_worker(self, args, pod):
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
heter_device_num = 0
device_list = []
if fluid.core.is_compiled_with_cuda():
device_list = get_gpus(args.gpus)
heter_device_num = len(device_list)
elif fluid.core.is_compiled_with_xpu():
heter_device_num = fluid.core.get_xpu_device_count()
device_list = [str(x) for x in range(0, heter_device_num)]
for idx, cur_worker in enumerate(pod.workers):
device_id = "0" if heter_device_num == 0 else str(device_list[(
idx) % heter_device_num])
if self.distribute_mode == DistributeMode.PS_HETER:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num),
"STAGE_ID": "1",
"STAGE_NUM": str(self.stage_num),
"PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": "",
"PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST":
self.stage_heter_map[2],
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST":
self.heter_worker_endpoints,
"HETER_DEVICE_TYPE": self.stage_device_map[1],
"TRAINING_ROLE": "TRAINER",
"POD_IP": cur_worker.endpoint.split(":")[0],
"PADDLE_PORT": cur_worker.endpoint.split(":")[1],
"PADDLE_TRAINER_ID": str(cur_worker.rank),
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"FLAGS_selected_gpus": "0",
"FLAGS_selected_xpus": "0",
"CUDA_VISIBLE_DEVICES": device_id,
"XPU_VISIBLE_DEVICES": device_id,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
else:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"TRAINING_ROLE": "TRAINER",
"POD_IP": cur_worker.endpoint.split(":")[0],
"PADDLE_PORT": cur_worker.endpoint.split(":")[1],
"PADDLE_TRAINER_ID": str(cur_worker.rank),
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"FLAGS_selected_gpus": "0",
"FLAGS_selected_xpus": "0",
"CUDA_VISIBLE_DEVICES": device_id,
"XPU_VISIBLE_DEVICES": device_id,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
self.cmds["worker"].append(cmd)
if idx == 0:
logger.info(
"Local worker start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.workers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"
))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/workerlog.%d" % (args.log_dir, idx), "w")
self.log_fns["worker"].append(fn)
proc = subprocess.Popen(
cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_worker.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
self.procs["worker"].append(tp)
def start_pod_heter_worker(self, args, pod):
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
heter_device_num = 0
device_list = []
if fluid.core.is_compiled_with_cuda():
device_list = get_gpus(args.gpus)
heter_device_num = len(device_list)
elif fluid.core.is_compiled_with_xpu():
heter_device_num = fluid.core.get_xpu_device_count()
device_list = [str(x) for x in range(0, heter_device_num)]
for idx, cur_heter_worker in enumerate(pod.heter_workers):
device_id = "0" if heter_device_num == 0 else str(device_list[(
idx) % heter_device_num])
stage_id = cur_heter_worker.stage
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST":
self.stage_heter_map[stage_id + 1]
if stage_id <= self.stage_num - 1 else "",
"PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST":
self.stage_heter_map[stage_id - 1],
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST":
self.heter_worker_endpoints,
"HETER_DEVICE_TYPE": self.stage_device_map[stage_id],
"STAGE_ID": str(stage_id),
"STAGE_NUM": str(self.stage_num),
"PADDLE_PORT": cur_heter_worker.endpoint.split(":")[1],
"TRAINING_ROLE": "HETER_TRAINER",
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num),
"POD_IP": cur_heter_worker.endpoint.split(":")[0],
"PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"FLAGS_selected_gpus": "0",
"FLAGS_selected_xpus": "0",
"CUDA_VISIBLE_DEVICES": device_id,
"XPU_VISIBLE_DEVICES": device_id,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
self.cmds["heter_worker"].append(cmd)
if idx == 0:
logger.info(
"Local heter_worker start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.heter_workers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"
))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/heterlog.%d" % (args.log_dir, idx), "w")
self.log_fns["heter_worker"].append(fn)
proc = subprocess.Popen(
cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_heter_worker.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
self.procs["heter_worker"].append(tp)
def check_backend(backend):
    """Validate a distributed backend name against this Paddle build.

    Raises:
        ValueError: if ``backend`` is not one of 'nccl'/'gloo'/'bkcl'/'auto',
            or if it requires a device stack (CUDA for 'nccl', XPU for
            'bkcl') that this build was not compiled with.
    """
    known_backends = ('nccl', 'gloo', 'bkcl', 'auto')
    if backend not in known_backends:
        raise ValueError(
            "paddle.distributed initialize error, "
            "backend argument can only be one of 'nccl', 'gloo', 'bkcl', 'auto', but got %s"
            % backend)

    if backend == 'nccl' and not fluid.core.is_compiled_with_cuda():
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with cuda but you assign 'nccl' as backend."
        )

    if backend == 'bkcl' and not fluid.core.is_compiled_with_xpu():
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with xpu but you assign 'bkcl' as backend."
        )
def block_windows_and_macos(backend):
    """Reject the 'gloo' backend on platforms where it is unsupported.

    A no-op for any backend other than 'gloo'.  Raises ValueError on macOS
    and on Windows, where gloo is currently not available.
    """
    if backend == 'gloo':
        if utils.OS_NAME.startswith('darwin'):  # gloo unsupported on macOS
            raise ValueError(
                "You are going to using gloo on macos, but currently is not supported"
            )
        if utils.IS_WINDOWS:  # gloo unsupported on Windows
            raise ValueError(
                "You are going to using gloo on windows, but currently is not supported"
            )
def get_backend_by_compile_flag():
    """Pick the default communication backend for this Paddle build:
    'nccl' for CUDA builds, 'bkcl' for XPU builds, 'gloo' otherwise."""
    for is_compiled, backend in ((fluid.core.is_compiled_with_cuda, 'nccl'),
                                 (fluid.core.is_compiled_with_xpu, 'bkcl')):
        if is_compiled():
            return backend
    return 'gloo'
| 38.752066 | 232 | 0.552537 |
import logging
import time
import os
import signal
import copy
import sys
import subprocess
import tempfile
import shutil
from contextlib import closing
import multiprocessing
import socket
import warnings
import six
import struct
import json
import paddle
import paddle.fluid as fluid
from distutils.util import strtobool
import paddle.utils.cpp_extension.extension_utils as utils
# Module-wide logger.  Propagation is disabled so records are emitted only by
# handlers attached to this logger (see get_logger) and not duplicated by the
# root logger's handlers.
logger = logging.getLogger("root")
logger.propagate = False
class DistributeMode():
    """Fleet launch mode: pure collective training, parameter-server
    training, or heterogeneous parameter-server training."""
    COLLECTIVE = 0
    PS = 1
    PS_HETER = 2
class DeviceMode():
    """Training device type constants."""
    UNKNOWN = -1
    CPU = 0
    GPU = 1
    KUNLUN = 2
    # KUNLUN and XPU deliberately share the value 2 (aliases).
    XPU = 2
    ASCEND_NPU = 3
    # NOTE(review): this second assignment overrides UNKNOWN = -1 above, so
    # DeviceMode.UNKNOWN actually equals 3 -- the same value as ASCEND_NPU.
    # This looks like a merge artifact; confirm which value callers expect
    # before changing it.
    UNKNOWN = 3
class Cluster(object):
    """Topology of one distributed job: a list of Pod objects plus optional
    job-server and HDFS metadata."""

    def __init__(self, hdfs):
        """Create an empty cluster.

        Args:
            hdfs: optional HDFS configuration for the job (may be None).
        """
        self.job_server = None
        self.pods = []
        # BUGFIX: the constructor previously ignored its ``hdfs`` argument
        # and always stored None; keep whatever the caller passed.
        self.hdfs = hdfs
        self.job_stage_flag = None

    def __str__(self):
        return "job_server:{} pods:{} job_stage_flag:{} hdfs:{}".format(
            self.job_server, [str(pod) for pod in self.pods],
            self.job_stage_flag, self.hdfs)

    def __eq__(self, cluster):
        """Clusters are equal when their pods match pairwise and their
        job_stage_flag is identical."""
        if len(self.pods) != len(cluster.pods):
            return False

        for a, b in zip(self.pods, cluster.pods):
            if a != b:
                return False

        if self.job_stage_flag != cluster.job_stage_flag:
            return False

        return True

    def __ne__(self, cluster):
        return not self.__eq__(cluster)

    def update_pods(self, cluster):
        # Shallow copy: pod objects themselves remain shared with ``cluster``.
        self.pods = copy.copy(cluster.pods)

    def trainers_nranks(self):
        """Total number of trainer processes across all pods."""
        return len(self.trainers_endpoints())

    def pods_nranks(self):
        return len(self.pods)

    def trainers_endpoints(self):
        """Flat list of every trainer endpoint ("ip:port"), in pod order."""
        r = []
        for pod in self.pods:
            for t in pod.trainers:
                r.append(t.endpoint)
        return r

    def world_device_ids(self):
        """Per-trainer accelerator-id lists (as strings), flattened over pods."""
        r = []
        for pod in self.pods:
            for t in pod.trainers:
                str_accelerators = [str(acc) for acc in t.accelerators]
                r.append(str_accelerators)
        return r

    def pods_endpoints(self):
        """One "addr:port" endpoint per pod; asserts both fields are set."""
        r = []
        for pod in self.pods:
            ep = "{}:{}".format(pod.addr, pod.port)
            assert pod.port is not None and pod.addr is not None, \
                "{} not a valid endpoint".format(ep)
            r.append(ep)
        return r

    def get_pod_by_id(self, pod_id):
        """Return the pod whose id matches ``pod_id`` (string compare), or None."""
        for pod in self.pods:
            if str(pod_id) == str(pod.id):
                return pod

        return None
class JobServer(object):
    """Endpoint of the (optional) job server coordinating the cluster."""

    def __init__(self):
        self.endpoint = None

    def __str__(self):
        return "{}".format(self.endpoint)

    def __eq__(self, j):
        # BUGFIX: this used to read ``self.endpint`` (typo), which raised
        # AttributeError on every comparison.
        return self.endpoint == j.endpoint

    def __ne__(self, j):
        return not self == j
class Trainer(object):
    """One trainer process: its accelerator ids, endpoint, global rank and
    (for heterogeneous training) pipeline stage."""

    def __init__(self):
        self.accelerators = []  # accelerator ids assigned to this trainer
        self.endpoint = None    # "ip:port" of the trainer
        self.rank = None        # global trainer rank
        self.stage = None       # pipeline stage (heter training only)

    def __str__(self):
        return "accelerator:{} endpoint:{} rank:{}".format(
            self.accelerators, self.endpoint, self.rank)

    def __eq__(self, t):
        """Trainers are equal when endpoint, rank and accelerator list match."""
        if len(self.accelerators) != len(t.accelerators):
            return False

        if self.endpoint != t.endpoint or \
                self.rank != t.rank:
            return False

        for a, b in zip(self.accelerators, t.accelerators):
            if a != b:
                return False

        return True

    def __ne__(self, t):
        return not self == t

    # NOTE: a dead ``def rank(self)`` accessor was removed here -- the
    # ``self.rank`` attribute assigned in __init__ shadows the method on
    # every instance, so it was unreachable.
class Pod(object):
    """All processes hosted on one physical node: trainers for collective
    mode, servers/workers/heter_workers for parameter-server mode."""

    def __init__(self):
        self.rank = None          # node rank within the cluster
        self.id = None            # pod identifier (compared as str)
        self.addr = None          # node ip
        self.port = None          # pod port
        self.trainers = []
        self.servers = []
        self.workers = []
        self.heter_workers = []
        self.accelerators = []    # accelerator ids visible to this pod
        self.device_mode = None   # a DeviceMode constant

    def __str__(self):
        return "rank:{} id:{} addr:{} port:{} visible_accelerator:{} trainers:{} servers:{} \
workers:{} heter_workers:{}".format(
            self.rank, self.id, self.addr, self.port, self.accelerators, [
                str(t) for t in self.trainers
            ], [str(s) for s in self.servers], [str(w) for w in self.workers],
            [str(h) for h in self.heter_workers])

    def __eq__(self, pod):
        """Compare identity fields plus trainers/servers/workers pairwise.

        NOTE(review): ``heter_workers`` is *not* part of the comparison even
        though __str__ prints it -- confirm whether that is intentional.
        """
        if self.rank != pod.rank or \
                self.id != pod.id or \
                self.addr != pod.addr or \
                self.port != pod.port:
            logger.debug("pod {} != {}".format(self, pod))
            return False

        if len(self.trainers) != len(pod.trainers):
            logger.debug("trainers {} != {}".format(self.trainers,
                                                    pod.trainers))
            return False

        for i in range(len(self.trainers)):
            if self.trainers[i] != pod.trainers[i]:
                logger.debug("trainer {} != {}".format(self.trainers[i],
                                                       pod.trainers[i]))
                return False

        if len(self.servers) != len(pod.servers):
            logger.debug("servers {} != {}".format(self.servers, pod.servers))
            return False

        for i in range(len(self.servers)):
            if self.servers[i] != pod.servers[i]:
                logger.debug("servers {} != {}".format(self.servers[i],
                                                       pod.servers[i]))
                return False

        if len(self.workers) != len(pod.workers):
            logger.debug("workers {} != {}".format(self.workers, pod.workers))
            return False

        for i in range(len(self.workers)):
            if self.workers[i] != pod.workers[i]:
                logger.debug("workers {} != {}".format(self.workers[i],
                                                       pod.workers[i]))
                return False

        return True

    def __ne__(self, pod):
        return not self == pod

    def parse_response(self, res_pods):
        # Placeholder: intentionally empty in this implementation.
        pass

    # NOTE: a dead ``def rank(self)`` accessor was removed here -- the
    # ``self.rank`` attribute assigned in __init__ shadows the method on
    # every instance, so it was unreachable.

    def get_visible_accelerators(self):
        """Comma-join this pod's accelerator ids; asserts at least one."""
        r = ""
        for g in self.accelerators:
            r += "{},".format(g)

        assert r != "", "this pod {} can't see any accelerators".format(self)

        r = r[:-1]
        return r
def get_logger(log_level=20, name="root"):
    """Return logger ``name`` set to ``log_level`` with a stream handler
    using the fleet log format.

    Note: each call attaches a fresh handler to the named logger, mirroring
    the historical behavior of this helper.
    """
    log = logging.getLogger(name)
    log.setLevel(log_level)

    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(
            '%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'))
    log.addHandler(handler)

    return log
def get_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                devices_per_proc):
    """Build a Cluster description from per-node endpoints and the device
    grouping, assigning globally increasing trainer ranks.

    Returns:
        (cluster, pod_of_local_node) where the pod is the entry whose ip
        equals ``node_ip``.
    """
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    cluster = Cluster(hdfs=None)
    next_rank = 0
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode
        endpoints_of_node = trainer_endpoints[node_rank]
        # when use paddlecloud, endpoints may > devices_per_proc(user_defined)
        assert len(endpoints_of_node) >= len(
            devices_per_proc
        ), "current trainer_endpoints size should be greater equal than acclerators size."
        for proc_idx, devices in enumerate(devices_per_proc):
            trainer = Trainer()
            if device_mode in (DeviceMode.GPU, DeviceMode.ASCEND_NPU):
                # GPU/NPU ids are tracked both per trainer and per pod.
                if isinstance(devices, (list, tuple)):
                    trainer.accelerators.extend(devices)
                    pod.accelerators.extend(devices)
                else:
                    trainer.accelerators.append(devices)
                    pod.accelerators.append(devices)
            elif device_mode == DeviceMode.XPU:
                # XPU ids are tracked per trainer only.
                if isinstance(devices, (list, tuple)):
                    trainer.accelerators.extend(devices)
                else:
                    trainer.accelerators.append(devices)
            trainer.endpoint = "%s" % (endpoints_of_node[proc_idx])
            trainer.rank = next_rank
            next_rank += 1

            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]
def terminate_local_procs(procs):
    """Terminate every trainer process in ``procs``, escalating from a
    process-group SIGTERM through Popen.terminate() to per-pid SIGKILL.

    Each process's log file is closed as it is signalled.  If some process
    survives ~50 SIGKILL rounds, the launcher itself exits with status 1.
    """
    # try to terminate process by group, this happend in multiprocess senario in user process
    if os.name != 'nt':
        for p in procs:
            if p.proc.poll() is None:
                # Kill the whole process group so grandchildren die too
                # (trainers are started with preexec_fn=os.setsid, see
                # start_local_trainers).
                os.killpg(os.getpgid(p.proc.pid), signal.SIGTERM)
                if p.log_fn:
                    p.log_fn.close()
                logger.info("terminate process group gid:{}".format(
                    p.proc.pid))

        time.sleep(1)

    # Second pass: direct terminate for anything still running (and the
    # Windows path, which has no process groups).
    for p in procs:
        if p.proc.poll() is None:
            p.proc.terminate()
            if p.log_fn:
                p.log_fn.close()
            logger.debug("terminate process id:{}".format(p.proc.pid))

    # wait all process terminiated
    time.sleep(3)
    for step in range(0, 50):
        alive = False
        for p in procs:
            if p.proc.poll() is None:  # not termniate
                os.kill(p.proc.pid, signal.SIGKILL)
                alive = True

        if not alive:
            logger.info("terminate all the procs")
            return

        time.sleep(3)

    logger.fatal("can't kill all process and exit")
    exit(1)
def get_host_name_ip():
    """Return ``(hostname, ip)`` for the local machine, or None when name
    resolution fails (e.g. the hostname has no DNS/hosts entry)."""
    try:
        host_name = socket.gethostname()
        host_ip = socket.gethostbyname(host_name)
        return host_name, host_ip
    except OSError:
        # BUGFIX: was a bare ``except:`` that also swallowed
        # KeyboardInterrupt/SystemExit; socket failures raise OSError
        # (socket.gaierror is a subclass).
        return None
def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Register ``--<argname>`` on ``argparser`` with the given default and
    help text.  Bool options are parsed with ``strtobool`` so values like
    "1"/"true"/"yes" work on the command line.
    """
    parse_type = strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=parse_type,
        help=help + ' Default: %(default)s.',
        **kwargs)
def find_free_ports(num):
    """Return a set of ``num`` currently-free TCP port numbers, or None if
    400 bind attempts were not enough to collect that many distinct ports."""

    def _grab_port():
        # SO_LINGER with timeout 0 forces an immediate close, so the
        # kernel frees the port right away instead of leaving TIME_WAIT.
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                         struct.pack('ii', 1, 0))
            s.bind(('', 0))
            return s.getsockname()[1]

    selected = set()
    attempts = 0
    while True:
        selected.add(_grab_port())
        if len(selected) >= num:
            return selected

        attempts += 1
        if attempts > 400:
            print(
                "can't find avilable port and use the specified static port now!"
            )
            return None
def get_ports(num, offset):
    """Pick ``num`` ports for a role.

    When FLAGS_START_PORT is set, return the contiguous range starting at
    ``FLAGS_START_PORT + offset``; otherwise probe ``num`` free ports and
    return them as a list (or None when none could be found).
    """
    env_start = os.environ.get('FLAGS_START_PORT')
    if env_start is not None:
        base = int(env_start) + offset
        return range(base, base + num, 1)

    free = find_free_ports(num)
    return list(free) if free is not None else None
def pretty_print_envs(envs, header=None):
    """Render ``envs`` as a boxed two-column ASCII table and return it.

    ``header`` is a (left, right) tuple of column titles; when omitted the
    default "fleetrun Distributed Envs" / "Value" titles are used.  Long
    values are truncated from the left with a "... " prefix.
    """
    spacing = 2
    max_k = 40
    max_v = 45

    # Widen the key column to fit the longest key.
    for key in envs:
        max_k = max(max_k, len(key))

    h_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format(max_k, " " * spacing,
                                                       max_v)
    l_format = " " + "|{{:>{}s}}{{}}{{:^{}s}}|\n".format(max_k, max_v)
    length = max_k + max_v + spacing

    border = " +" + "=" * length + "+"
    line = " +" + "-" * length + "+"

    title = header if header else ("fleetrun Distributed Envs", "Value")

    parts = [border + "\n", h_format.format(title[0], title[1]), line + "\n"]
    for k, v in envs.items():
        if isinstance(v, str) and len(v) >= max_v:
            shown = "... " + v[-41:]
        else:
            shown = v
        parts.append(l_format.format(k, " " * spacing, str(shown)))
    parts.append(border)

    return "\n{}\n".format("".join(parts))
class TrainerProc(object):
    """Bookkeeping for one launched trainer process."""

    def __init__(self):
        self.proc = None        # subprocess.Popen handle
        self.log_fn = None      # open log file object, or None
        self.log_offset = None  # last read position within the log file
        self.rank = None        # global trainer rank
        self.local_rank = None  # index of this trainer within its pod
        self.cmd = None         # argv list used to start the process
# Module-level switch: when True, trainers are launched under ``coverage``.
_run_with_coverage = False


def run_with_coverage(*args):
    """Get or set the coverage flag.

    Called with no argument this returns the current value; called with a
    single bool it stores that value first.  More than one argument is an
    error.
    """
    global _run_with_coverage
    assert len(args) <= 1, "len(args) {} should <= 1".format(len(args))
    if args:
        flag = args[0]
        assert isinstance(flag, bool)
        _run_with_coverage = flag
    return _run_with_coverage
def start_local_trainers(cluster,
                         pod,
                         training_script,
                         training_script_args,
                         log_dir=None,
                         envs=None):
    """Launch one subprocess per trainer in ``pod`` for collective training.

    Each trainer gets the standard PADDLE_* environment (id, endpoint,
    world size, endpoints list, selected devices) merged over ``envs`` (or
    over os.environ when ``envs`` is None).  With ``log_dir`` set, trainer
    output is appended to ``workerlog.<idx>`` and the endpoint list is
    written to ``endpoints.log``.  Returns the list of TrainerProc records.
    """
    if envs is None:
        current_env = copy.copy(os.environ.copy())
    else:
        current_env = copy.copy(envs)

    # paddle broadcast ncclUniqueId use socket, and
    # proxy maybe make trainers unreachable, so delete them.
    # if we set them to "", grpc will log error message "bad uri"
    # so just delete them.
    current_env.pop("http_proxy", None)
    current_env.pop("https_proxy", None)

    ids = cluster.world_device_ids()
    # Per-trainer device lists joined as "id:id:..." strings.
    res = [':'.join(ele) for ele in ids]
    procs = []
    for idx, t in enumerate(pod.trainers):
        proc_env = {
            "PADDLE_TRAINER_ID": "%d" % t.rank,
            "PADDLE_CURRENT_ENDPOINT": "%s" % t.endpoint,
            "PADDLE_TRAINERS_NUM": "%d" % cluster.trainers_nranks(),
            "PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
            "PADDLE_RANK_IN_NODE": str(idx),
            "PADDLE_LOCAL_DEVICE_IDS":
            ",".join([str(acc) for acc in t.accelerators]),
            "PADDLE_WORLD_DEVICE_IDS": ",".join(res),
        }

        # Device-specific selection flags; GPU and NPU are exclusive,
        # the generic FLAGS_selected_accelerators is always set.
        if len(t.accelerators) > 0 and pod.device_mode == DeviceMode.GPU:
            proc_env["FLAGS_selected_gpus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])
        elif len(t.
                 accelerators) > 0 and pod.device_mode == DeviceMode.ASCEND_NPU:
            proc_env["FLAGS_selected_npus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])

        if len(t.accelerators) > 0:
            proc_env["FLAGS_selected_accelerators"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])

        # to do: same code style in future
        if fluid.core.is_compiled_with_xpu() and len(t.accelerators) > 0:
            proc_env["FLAGS_selected_xpus"] = "%s" % ",".join(
                [str(g) for g in t.accelerators])

        current_env.update(proc_env)

        coverage_args = []
        if run_with_coverage():
            # Collect branch coverage per process (parallel-mode data files).
            coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
        cmd = [sys.executable, "-u"] + coverage_args + [training_script
                                                        ] + training_script_args

        logger.debug("start trainer proc{} env:{}".format(cmd, current_env))

        if idx == 0:
            logger.info("Local start {} processes. First process distributed "
                        "environment info (Only For Debug): {}".format(
                            len(pod.trainers),
                            pretty_print_envs(proc_env, ("Distributed Envs",
                                                         "Value"))))
            logger.info(
                "details about PADDLE_TRAINER_ENDPOINTS can be found in "
                "{}/endpoints.log, and detail running logs maybe found in "
                "{}/workerlog.0".format(log_dir, log_dir))
        fn = None
        # New session per child (except on Windows) so the whole process
        # group can be signalled later by terminate_local_procs.
        pre_fn = None if os.name == 'nt' else os.setsid
        if log_dir is not None:
            os.system("mkdir -p {}".format(log_dir))
            if os.path.exists("%s/endpoints.log" % log_dir):
                os.system("rm -f {}/endpoints.log".format(log_dir))
            with open("%s/endpoints.log" % log_dir, "w") as f:
                f.write("PADDLE_TRAINER_ENDPOINTS: \n")
                f.write("\n".join(cluster.trainers_endpoints()))
            fn = open("%s/workerlog.%d" % (log_dir, idx), "a")
            proc = subprocess.Popen(
                cmd, env=current_env, stdout=fn, stderr=fn, preexec_fn=pre_fn)
        else:
            proc = subprocess.Popen(cmd, env=current_env, preexec_fn=pre_fn)

        tp = TrainerProc()
        tp.proc = proc
        tp.rank = t.rank
        tp.local_rank = idx
        tp.log_fn = fn
        tp.log_offset = fn.tell() if fn else None
        tp.cmd = cmd

        procs.append(tp)

    return procs
def pull_worker_log(tp):
    """Stream any new lines from ``tp``'s log file to stdout.

    Reading resumes from ``tp.log_offset`` and the new offset is stored
    afterwards, so repeated calls print only what was appended since the
    previous call.  Lines the terminal cannot encode are replaced with a
    pointer to the original log file.  No-op when ``tp.log_fn`` is unset.
    """
    if not tp.log_fn:
        return
    with open(tp.log_fn.name, 'r') as fin:
        fin.seek(tp.log_offset, 0)
        for line in fin:
            try:
                sys.stdout.write(line)
            except UnicodeEncodeError:
                sys.stdout.write(
                    'UnicodeEncodeError occurs at this line. '
                    'Please refer to the original log file "%s"\n' %
                    tp.log_fn.name)
        tp.log_offset = fin.tell()
def watch_local_trainers(procs, nranks):
    """Poll the local trainer processes once.

    Streams new log output of the local rank-0 trainer, then checks exit
    codes: if any process exited non-zero, every process is terminated and
    the launcher exits with status 1.

    Returns:
        True while at least one process is still running, False when all
        finished cleanly, and None when an interrupt/abort was handled.
    """
    try:
        error = False
        error_rank = []
        # wait all process finish or one error
        alive = False
        for p in procs:
            if p.log_fn and p.local_rank == 0:
                pull_worker_log(p)

            ret = p.proc.poll()
            if ret is None:
                alive = True
            elif ret != 0:
                error = True
                error_rank.append(p.rank)

        if error:
            terminate_local_procs(procs)
            # exit() raises SystemExit, which is reported by the handler
            # below (so the abort message includes error_rank).
            exit(1)

    except KeyboardInterrupt:
        logger.warning("KeyboardInterrupt, exit")
        terminate_local_procs(procs)
        return
    except SystemExit:
        logger.error(
            "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".
            format(nranks, error_rank))
        terminate_local_procs(procs)
        return
    except:
        # NOTE(review): bare except also swallows unexpected BaseExceptions
        # and hides the traceback; consider ``except Exception`` plus
        # logger.exception() here.
        logger.error(
            "ABORT!!! Out of all {} trainers, the trainer process with rank={} was aborted. Please check its log.".
            format(nranks, error_rank))
        terminate_local_procs(procs)
        return

    return alive
def get_gpus(gpus):
    """Resolve a gpu selection string into device ids.

    With ``gpus`` unset every visible CUDA device is used.  When
    CUDA_VISIBLE_DEVICES restricts the visible set, the requested ids are
    translated into indices relative to that set (returned as ints);
    otherwise the comma-separated ids are returned as given (strings).
    """
    if gpus is None:
        count = fluid.core.get_cuda_device_count()
        return [str(x) for x in range(0, count)]

    cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
    if not cuda_visible_devices:
        return [x.strip() for x in gpus.split(',')]

    # change gpus into relative values
    # e.g. CUDA_VISIBLE_DEVICES=4,5,6,7; args.gpus=4,5,6,7;
    # therefore gpus=0,1,2,3
    visible_list = cuda_visible_devices.split(',')
    for x in gpus.split(','):
        assert x in visible_list, "Can't find "\
            "your gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
            % (x, cuda_visible_devices)
    res_gpus = [visible_list.index(x.strip()) for x in gpus.split(',')]
    logger.info("Change selected_gpus into reletive values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "CUDA_VISIBLE_DEVICES:{}".format(gpus, res_gpus,
                                                 visible_list))
    return res_gpus
def get_xpus(xpus):
    """Resolve an xpu selection string into device ids.

    With ``xpus`` unset every visible XPU device is used.  When
    XPU_VISIBLE_DEVICES restricts the visible set, the requested ids are
    translated into indices relative to that set (returned as ints);
    otherwise the comma-separated ids are returned as given (strings).
    """
    if xpus is None:
        count = fluid.core.get_xpu_device_count()
        return [str(x) for x in range(0, count)]

    xpu_visible_devices = os.getenv("XPU_VISIBLE_DEVICES")
    if not xpu_visible_devices:
        return [x.strip() for x in xpus.split(',')]

    visible_list = xpu_visible_devices.split(',')
    for x in xpus.split(','):
        assert x in visible_list, "Can't find "\
            "your xpus %s in XPU_VISIBLE_DEVICES[%s]."\
            % (x, xpu_visible_devices)
    res_xpus = [visible_list.index(x.strip()) for x in xpus.split(',')]
    logger.info("Change selected_xpus into reletive values. --ips:{} "
                "will change into relative_ips:{} according to your "
                "XPU_VISIBLE_DEVICES:{}".format(xpus, res_xpus,
                                                visible_list))
    return res_xpus
def get_device_mode(backend):
    """Map the chosen backend (plus the compiled device stacks) to a
    DeviceMode constant.

    NPU builds with at least one device win regardless of ``backend``; then
    'nccl' maps to GPU, 'bkcl' to XPU (each only when devices are present),
    and 'gloo' to CPU.  Anything else raises RuntimeError.
    """
    if fluid.core.is_compiled_with_npu() and \
            fluid.core.get_npu_device_count() > 0:
        print("launch train in ascend npu mode!")
        return DeviceMode.ASCEND_NPU

    if backend == 'nccl' and fluid.core.get_cuda_device_count() > 0:
        print("launch train in GPU mode!")
        return DeviceMode.GPU

    if backend == 'bkcl' and fluid.core.get_xpu_device_count() > 0:
        print("launch train in XPU mode")
        return DeviceMode.XPU

    if backend == 'gloo':
        print("launch train in CPU mode")
        return DeviceMode.CPU

    raise RuntimeError("Don't supported devices")
def get_device_proc_info(args):
    """Return ``(device_mode, devices_per_proc)`` for the launch arguments.

    When ``--nproc_per_node`` is given, the visible devices are split into
    that many equal groups (one group per local process); otherwise each
    device gets its own process.  For CPU mode the entries are process
    indices rather than device ids; for NPU mode the list is None.
    """

    def _split_evenly(devices, label):
        # Partition ``devices`` into args.nproc_per_node equal chunks.
        n_proc = int(args.nproc_per_node)
        assert (len(devices) % n_proc) == 0, \
            "{}' number:{} mod args.nproc_per_node:{} must == 0".format(
                label, len(devices), args.nproc_per_node)
        chunk = int(len(devices) / n_proc)
        return [
            devices[i:i + chunk]
            for i in six.moves.range(0, len(devices), chunk)
        ]

    device_mode = get_device_mode(args.backend)

    if device_mode == DeviceMode.GPU:
        gpus = get_gpus(args.gpus)
        if args.nproc_per_node is not None:
            devices_per_proc = _split_evenly(gpus, "gpus")
        else:
            devices_per_proc = gpus
    elif device_mode == DeviceMode.ASCEND_NPU:
        devices_per_proc = None
    elif device_mode == DeviceMode.XPU:
        xpus = get_xpus(args.xpus)
        if args.nproc_per_node is not None:
            devices_per_proc = _split_evenly(xpus, "xpus")
        else:
            devices_per_proc = xpus
    elif device_mode == DeviceMode.CPU:
        if hasattr(args, "paddle_cpuonly") and args.nproc_per_node is None:
            # Default to one process per CPU core for a cpu-only launch.
            args.nproc_per_node = multiprocessing.cpu_count()
        if args.nproc_per_node is None:
            devices_per_proc = [0]
        else:
            devices_per_proc = [x for x in range(0, args.nproc_per_node)]
    else:
        assert False, "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
            device_mode)

    return (device_mode, devices_per_proc)
def direct_start(args):
    """Run the training script as a single child process that inherits the
    current environment, and wait for it to finish."""
    # run ps-cpu mode on paddlecloud, using given envs
    full_cmd = [sys.executable, "-u", args.training_script] + \
        args.training_script_args
    child = subprocess.Popen(full_cmd)
    child.wait()
def get_custom_endpoints(origin_endpoints, offset=0):
    """Shift the port of every "ip:port" item in a comma-separated endpoint
    list by ``offset`` and return the rebuilt comma-separated string."""
    assert origin_endpoints is not None
    shifted = []
    for ip_port in origin_endpoints.split(","):
        host = ip_port.split(":")[0]
        port = int(ip_port.split(":")[1]) + offset
        shifted.append(":".join((host, str(port))))
    return ",".join(shifted)
#def cloud_ps_heter_env_set(args):
# environs = {}
#
# paddle_trainer_endpoints = os.getenv("TRAINER_IP_PORT_LIST", "")
# assert paddle_trainer_endpoints != None
#
# paddle_pserver_endpoints = os.getenv("PSERVER_IP_PORT_LIST", "")
# assert paddle_pserver_endpoints != None
#
# # hard code for paddlecloud custom-framework
# avilable_ports = os.getenv("TRAINER_PORTS", "").split(",")
# assert len(
# avilable_ports
# ) >= 2, "set paddle_ports_num >= 2 in config.ini for paddlecloud job submit"
#
# # hard code for paddlecloud custom-framework
# trainers_num = len(paddle_pserver_endpoints.split(","))
# assert trainers_num != 0
# environs["PADDLE_TRAINERS_NUM"] = trainers_num
# environs["TRAINERS_NUM"] = trainers_num
#
# # hard code for paddlecloud custom-framework
# environs["PADDLE_HETER_TRAINER_IP_PORT_LIST"] = paddle_trainer_endpoints
# environs["PADDLE_PSERVERS_IP_PORT_LIST"] = paddle_pserver_endpoints
# environs["PADDLE_TRAINER_ENDPOINTS"] = get_custom_endpoints(
# paddle_pserver_endpoints, 1)
# heter_worker_num = len(paddle_trainer_endpoints.split(","))
# if (args.heter_worker_num != None) and (
# heter_worker_num != args.heter_worker_num):
# warnings.warn(
# "Your fleetrun setting: heter_worker_num is {}, but we find {} device can be used, this setting has been changed.".
# format(args.heter_worker_num, heter_worker_num))
# args.heter_worker_num = heter_worker_num
#
# for k, v in environs.items():
# os.environ[k] = str(v)
# logger.info("Set heter parameter server env: {}".format(
# pretty_print_envs(environs)))
def get_mapped_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                       node_mapping_ranks):
    """Build a Cluster whose trainer ranks come from an explicit per-node
    rank mapping (GPU mode only).

    Args:
        node_ips: ips of every node in the job.
        node_ip: ip of the local node (must appear in ``node_ips``).
        trainer_endpoints: per-node list of "ip:port" trainer endpoints.
        device_mode: must be DeviceMode.GPU.
        node_mapping_ranks: per-node list of global trainer ranks.

    Returns:
        (cluster, pod_of_local_node)
    """
    assert type(trainer_endpoints) is list, "trainer_endpoints must be list"
    assert device_mode == DeviceMode.GPU, \
        "Only support get mapped cluster for gpu now."
    cluster = Cluster(hdfs=None)
    for node_rank, ip in enumerate(node_ips):
        pod = Pod()
        pod.rank = node_rank
        pod.addr = ip
        pod.device_mode = device_mode
        cur_node_endpoints = trainer_endpoints[node_rank]

        # choose rank from global mapped ranks and set it to the trainer.
        ranks_per_node = node_mapping_ranks[node_rank]
        for i in range(len(ranks_per_node)):
            trainer = Trainer()
            # change global rank(mapped) to local rank within each node.
            # e.g. mapped ranks of node: 3,4,7 -> 0,1,2
            # NOTE(review): index() of the i-th element is simply ``i`` when
            # a node's ranks are unique, and the first duplicate's position
            # otherwise -- confirm uniqueness is guaranteed by the mapping
            # file.
            local_rank = ranks_per_node.index(ranks_per_node[i])
            trainer.accelerators.append(local_rank)
            trainer.endpoint = "%s" % (cur_node_endpoints[i])
            # global mapped ranks
            trainer.rank = ranks_per_node[i]

            pod.trainers.append(trainer)
        cluster.pods.append(pod)

    pod_rank = node_ips.index(node_ip)
    return cluster, cluster.pods[pod_rank]
def get_mapped_cluster_from_args(args, device_mode):
    """Build a rank-mapped Cluster from ``args.rank_mapping_file`` (GPU only).

    The JSON file must look like ``{"ip_ranks": [{"ip": ..., "ranks":
    [...]}, ...]}``.  Trainer ports come from FLAGS_START_PORT when set,
    otherwise free ports are probed per node.

    Returns:
        (cluster, pod_of_local_node)
    """
    assert device_mode == DeviceMode.GPU, \
        "Only support get mapped cluster for gpu now."
    gpus_num = fluid.core.get_cuda_device_count()

    # parse ip-ranks json file
    json_data = None
    with args.rank_mapping_file as json_file:
        json_data = json.load(json_file)

    node_ips = []
    node_ranks_mapping = []
    ip_ranks_list = json_data['ip_ranks']
    for ip_ranks in ip_ranks_list:
        node_ips.append(ip_ranks['ip'])
        node_ranks_mapping.append(ip_ranks['ranks'])

    # Determine which node we are: trivial for a single node, otherwise use
    # --host or fall back to hostname resolution.
    if len(node_ips) == 1:
        node_ip = node_ips[0]
    else:
        if args.host:
            node_ip = args.host
        else:
            _, node_ip = get_host_name_ip()

    assert node_ip in node_ips, \
        "Can't find your local ip {%s} in node_ips: {%s}" % (node_ip, node_ips)
    node_rank = node_ips.index(node_ip)

    assert len(node_ranks_mapping[node_rank]) <= gpus_num, \
        "number of ranks mapped to one node should not exceed the avaiable ones."
    assert len(node_ranks_mapping) == len(node_ips), \
        "ranks length should be equal to ips length."

    logger.debug("parsed from args: node_ips:{} node_ip:{} "
                 "node_rank:{} node_ranks_mapping:{}".format(
                     node_ips, node_ip, node_rank, node_ranks_mapping[
                         node_rank]))

    # Assign one endpoint per mapped rank on every node.
    free_ports = []
    trainer_endpoints = []
    for ip in node_ips:
        node_rank = node_ips.index(ip)
        if os.environ.get('FLAGS_START_PORT') is not None:
            start_port = int(os.environ.get('FLAGS_START_PORT'))
            free_ports = [
                x
                for x in range(start_port, start_port + len(node_ranks_mapping[
                    node_rank]))
            ]
        else:
            free_ports = find_free_ports(len(node_ranks_mapping[node_rank]))
        trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])

    return get_mapped_cluster(node_ips, node_ip, trainer_endpoints, device_mode,
                              node_ranks_mapping)
class ParameterServerLauncher(object):
    def __init__(self, args, distribute_mode):
        """Initialize launch state and resolve role endpoints.

        Args:
            args: parsed fleetrun command-line arguments.
            distribute_mode: a DistributeMode constant (PS or PS_HETER).
        """
        self.args = args
        self.distribute_mode = distribute_mode
        # Role counts; filled in by get_role_endpoints() below.
        self.server_num = 0
        self.worker_num = 0
        self.heter_worker_num = 0

        # Per role: the full "ip:port,ip:port,..." string plus split-out
        # ip and port lists.
        self.server_endpoints = ""
        self.server_endpoints_ips = []
        self.server_endpoints_port = []
        self.worker_endpoints = ""
        self.worker_endpoints_ips = []
        self.worker_endpoints_port = []
        self.heter_worker_endpoints = ""
        self.heter_worker_endpoints_ips = []
        self.heter_worker_endpoints_port = []

        # presumably True for a single-machine launch -- set by
        # get_role_endpoints(); verify against that method.
        self.is_local = True
        self.current_node_ip = ""

        # Heterogeneous-pipeline bookkeeping (stage -> trainers/devices).
        self.stage_trainer_num = []
        self.stage_heter_map = {}
        self.stage_list = []
        self.stage_device_map = {}
        self.stage_num = 0

        self.get_role_endpoints(args)
def get_role_endpoints(self, args):
if args.server_num:
self.server_num = args.server_num
if args.servers:
assert len(
args.servers.split(",")
) == self.server_num, "The server_num and servers doesn't match. Expect servers endpoints num epual to server_num, but received servers enpoint num: {} and server_num {}".format(
len(args.servers.split(",")), self.server_num)
self.server_endpoints = args.servers
else:
ports = get_ports(self.server_num, 0)
self.server_endpoints = ",".join(
["127.0.0.1:" + str(x) for x in ports])
else:
assert args.servers != "", "The setting of Parameter-Server must has server_num or servers."
self.server_endpoints = args.servers
self.server_num = len(self.server_endpoints.split(","))
# get worker envs
if args.worker_num:
self.worker_num = args.worker_num
if args.workers:
assert len(
args.workers.split(",")
) == self.worker_num, "The worker_num and workers doesn't match. Expect workers endpoints num epual to worker_num, but received workers enpoint num: {} and worker_num {}".format(
len(args.workers.split(",")), self.worker_num)
self.worker_endpoints = args.workers
else:
ports = get_ports(self.worker_num, self.server_num)
self.worker_endpoints = ",".join(
["127.0.0.1:" + str(x) for x in ports])
else:
assert args.workers != "", "The setting of Parameter-Server must has worker_num or workers."
worker_endpoints_ips = [
x.strip().split(":")[0] for x in args.workers.split(",")
]
self.worker_num = len(worker_endpoints_ips)
worker_endpoints_len = [
len(x.strip().split(":")) for x in args.workers.split(",")
]
if 1 in worker_endpoints_len:
start_port = 6170
worker_endpoints_port = range(
start_port + self.server_num,
start_port + self.server_num + self.worker_num, 1)
worker_endpoints = []
for i in range(self.worker_num):
worker_endpoints.append(":".join((worker_endpoints_ips[
i], str(worker_endpoints_port[i]))))
self.worker_endpoints = ",".join(worker_endpoints)
else:
self.worker_endpoints = args.workers
if self.distribute_mode == DistributeMode.PS_HETER:
assert args.heter_devices != "", "The setting of Parameter-Server heter mode must has heter_devices."
self.stage_device_map[1] = "cpu"
heter_devices_list = args.heter_devices.split(";")
for i in range(len(heter_devices_list)):
self.stage_device_map[i + 2] = heter_devices_list[i]
self.stage_heter_map[1] = self.worker_endpoints
if args.heter_worker_num:
self.stage_heter_trainer_num = args.heter_worker_num.split(";")
self.stage_heter_trainer_num = [
int(trainer_num)
for trainer_num in self.stage_heter_trainer_num
]
if args.heter_workers:
assert len(args.heter_workers.split(";")) == len(
self.stage_heter_trainer_num
), "The stage_num and heter_workers doesn't match. Expect heter_workers endpoints stage num epual to heter_worker_num stage, but received heter_workers enpoint stage num: {} and heter_worker_num stage {}".format(
len(args.heter_workers.split(";")),
len(self.stage_heter_trainer_num))
heter_worker_endpoints_list = args.heter_workers.split(";")
self.heter_worker_endpoints = ""
for i in range(len(self.stage_heter_trainer_num)):
if self.heter_worker_endpoints != "":
self.heter_worker_endpoints += ","
heter_worker_endpoints = heter_worker_endpoints_list[
i].split(",")
assert len(
heter_worker_endpoints
) == self.stage_heter_trainer_num[
i], "The heter trainer num in stage {} is not equal in args.heter_worker_num and args.heter_workers".format(
i)
heter_worker_endpoints_ips = [
x.strip().split(":")[0]
for x in heter_worker_endpoints
]
heter_worker_endpoints_len = [
len(x.strip().split(":"))
for x in heter_worker_endpoints
]
if 1 in heter_worker_endpoints_len:
# if no port value in heter_worker_endpoint, will set default port values.
heter_worker_endpoints_port = get_ports(
len(heter_worker_endpoints_ips), self.worker_num
+ self.server_num + self.heter_worker_num)
new_heter_worker_endpoints = []
for j in range(len(heter_worker_endpoints_ips)):
new_heter_worker_endpoints.append(":".join((
heter_worker_endpoints_ips[j], str(
heter_worker_endpoints_port[j]))))
ip_port_list = ",".join(new_heter_worker_endpoints)
else:
ip_port_list = ",".join(heter_worker_endpoints)
self.stage_heter_map[i + 2] = ip_port_list
self.stage_list.extend([i + 2] *
len(ip_port_list.split(',')))
self.heter_worker_num += self.stage_heter_trainer_num[i]
self.heter_worker_endpoints += ip_port_list
else:
for i in range(len(self.stage_heter_trainer_num)):
heter_trainer_num = self.stage_heter_trainer_num[i]
ports = get_ports(heter_trainer_num,
self.server_num + self.worker_num +
self.heter_worker_num)
ip_port_list = ",".join(
["127.0.0.1:" + str(x) for x in ports])
self.stage_heter_map[i + 2] = ip_port_list
self.stage_list.extend([i + 2] *
len(ip_port_list.split(',')))
self.heter_worker_num += heter_trainer_num
if self.heter_worker_endpoints != "":
self.heter_worker_endpoints += ","
self.heter_worker_endpoints += ip_port_list
else:
assert args.heter_workers != "", "The setting of Parameter-Server heter mode must has heter_worker_num or heter_workers."
self.stage_heter_trainer_num = []
heter_worker_endpoints_list = args.heter_workers.split(";")
self.heter_worker_endpoints = ""
for i in range(len(heter_worker_endpoints_list)):
heter_worker_endpoints = heter_worker_endpoints_list[
i].split(",")
self.stage_heter_trainer_num.append(
len(heter_worker_endpoints))
heter_worker_endpoints_ips = [
x.strip().split(":")[0] for x in heter_worker_endpoints
]
heter_worker_endpoints_len = [
len(x.strip().split(":"))
for x in heter_worker_endpoints
]
if 1 in heter_worker_endpoints_len:
# if no port value in heter_worker_endpoint, will set default port values.
heter_worker_endpoints_port = get_ports(
len(heter_worker_endpoints_ips), self.worker_num +
self.server_num + self.heter_worker_num)
new_heter_worker_endpoints = []
for j in range(len(heter_worker_endpoints_ips)):
new_heter_worker_endpoints.append(":".join((
heter_worker_endpoints_ips[j], str(
heter_worker_endpoints_port[j]))))
ip_port_list = ",".join(new_heter_worker_endpoints)
else:
ip_port_list = ",".join(heter_worker_endpoints)
self.stage_heter_map[i + 2] = ip_port_list
self.stage_list.extend([i + 2] *
len(ip_port_list.split(',')))
self.heter_worker_num += self.stage_heter_trainer_num[-1]
if self.heter_worker_endpoints != "":
self.heter_worker_endpoints += ","
self.heter_worker_endpoints += ip_port_list
self.stage_trainer_num = [self.worker_num
] + self.stage_heter_trainer_num
self.stage_num = len(self.stage_trainer_num)
# get http_port
if args.http_port:
http_port = [args.http_port]
else:
http_port = get_ports(
1, self.server_num + self.worker_num + self.heter_worker_num)
http_ip = self.server_endpoints.split(",")[0].split(":")[0]
self.http_port = http_ip + ":" + str(http_port[0])
# check local or user define
self.server_endpoints_ips = [
x.strip().split(":")[0] for x in self.server_endpoints.split(",")
]
self.worker_endpoints_ips = [
x.strip().split(":")[0] for x in self.worker_endpoints.split(",")
]
self.server_endpoints_port = [
x.strip().split(":")[1] for x in self.server_endpoints.split(",")
]
self.worker_endpoints_port = [
x.strip().split(":")[1] for x in self.worker_endpoints.split(",")
]
self.node_ips = []
for ip in self.server_endpoints_ips:
if ip not in self.node_ips:
self.node_ips.append(ip)
for ip in self.worker_endpoints_ips:
if ip not in self.node_ips:
self.node_ips.append(ip)
if self.distribute_mode == DistributeMode.PS_HETER:
self.heter_worker_endpoints_ips = [
x.strip().split(":")[0]
for x in self.heter_worker_endpoints.split(",")
]
self.heter_worker_endpoints_port = [
x.strip().split(":")[1]
for x in self.heter_worker_endpoints.split(",")
]
for ip in self.heter_worker_endpoints_ips:
if ip not in self.node_ips:
self.node_ips.append(ip)
if len(set(self.node_ips)) == 1:
self.is_local = True
self.current_node_ip = self.node_ips[0]
else:
self.is_local = False
pod_ip = os.getenv("POD_IP", None)
if pod_ip == None:
_, self.current_node_ip = get_host_name_ip()
else:
self.current_node_ip = pod_ip
if not self.distribute_mode == DistributeMode.PS_HETER:
assert self.current_node_ip in self.node_ips, "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" \
% (self.current_node_ip, self.node_ips)
if self.current_node_ip in self.node_ips:
self.node_rank = self.node_ips.index(self.current_node_ip)
logger.debug(
"parsed from args: node_ips:{} current_node_ip:{} node_rank:{}".
format(self.node_ips, self.current_node_ip, self.node_rank))
def start_ps(self):
if not self.current_node_ip in self.node_ips:
return
cluster = Cluster(hdfs=None)
server_rank = 0
worker_rank = 0
heter_worker_rank = 0
for node_rank, ip in enumerate(self.node_ips):
pod = Pod()
pod.rank = node_rank
pod.addr = ip
for i in range(len(self.server_endpoints_ips)):
if ip == self.server_endpoints_ips[i]:
server = Trainer()
server.endpoint = "%s:%s" % (ip,
self.server_endpoints_port[i])
server.rank = server_rank
server_rank += 1
pod.servers.append(server)
for j in range(len(self.worker_endpoints_ips)):
if ip == self.worker_endpoints_ips[j]:
worker = Trainer()
worker.endpoint = "%s:%s" % (ip,
self.worker_endpoints_port[j])
worker.rank = worker_rank
worker.stage = 1
worker_rank += 1
pod.workers.append(worker)
for k in range(len(self.heter_worker_endpoints_ips)):
if ip == self.heter_worker_endpoints_ips[k]:
heter_worker = Trainer()
heter_worker.endpoint = "%s:%s" % (
ip, self.heter_worker_endpoints_port[k])
heter_worker.rank = heter_worker_rank
heter_worker.stage = self.stage_list[k]
heter_worker_rank += 1
pod.heter_workers.append(heter_worker)
cluster.pods.append(pod)
pod = cluster.pods[self.node_rank]
self.gloo_rendezvous_dir = tempfile.mkdtemp()
self.procs = {"worker": [], "server": [], "heter_worker": []}
self.cmds = {"worker": [], "server": [], "heter_worker": []}
self.log_fns = {"worker": [], "server": [], "heter_worker": []}
self.start_pod_server(self.args, pod)
self.start_pod_worker(self.args, pod)
if self.distribute_mode == DistributeMode.PS_HETER:
self.start_pod_heter_worker(self.args, pod)
logger.info(
"Please check servers, workers and heter_worker logs in {}/workerlog.*, {}/serverlog.* and {}/heterlog.*".
format(self.args.log_dir, self.args.log_dir, self.args.log_dir))
if len(self.procs["worker"]) > 0:
for i, proc in enumerate(self.procs["worker"]):
self.procs["worker"][i].proc.wait()
if len(self.log_fns["worker"]) > 0:
self.log_fns["worker"][i].close()
logger.info(
"all workers exit, going to finish parameter server and heter_worker."
)
if len(self.procs["heter_worker"]) > 0:
for i, proc in enumerate(self.procs["heter_worker"]):
self.log_fns["heter_worker"][i].close()
self.procs["heter_worker"][i].proc.terminate()
logger.info("all heter_worker are killed")
if len(self.procs["server"]) > 0:
for i, proc in enumerate(self.procs["server"]):
self.log_fns["server"][i].close()
self.procs["server"][i].proc.terminate()
logger.info("all parameter server are killed")
else:
if len(self.procs["server"]) > 0:
for i, proc in enumerate(self.procs["server"]):
self.procs["server"][i].proc.wait()
if len(self.procs["heter_worker"]) > 0:
for i, proc in enumerate(self.procs["heter_worker"]):
self.procs["heter_worker"][i].proc.wait()
if os.path.exists(self.gloo_rendezvous_dir):
shutil.rmtree(self.gloo_rendezvous_dir)
def start_pod_server(self, args, pod):
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
for idx, cur_server in enumerate(pod.servers):
if self.distribute_mode == DistributeMode.PS_HETER:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST":
self.heter_worker_endpoints,
"PADDLE_PORT": cur_server.endpoint.split(":")[1],
"TRAINING_ROLE": "PSERVER",
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"POD_IP": cur_server.endpoint.split(":")[0],
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
else:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_PORT": cur_server.endpoint.split(":")[1],
"TRAINING_ROLE": "PSERVER",
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"POD_IP": cur_server.endpoint.split(":")[0],
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
self.cmds["server"].append(cmd)
if idx == 0:
logger.info(
"Local server start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.servers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"
))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/serverlog.%d" % (args.log_dir, idx), "w")
self.log_fns["server"].append(fn)
proc = subprocess.Popen(
cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_server.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
self.procs["server"].append(tp)
def start_pod_worker(self, args, pod):
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
heter_device_num = 0
device_list = []
if fluid.core.is_compiled_with_cuda():
device_list = get_gpus(args.gpus)
heter_device_num = len(device_list)
elif fluid.core.is_compiled_with_xpu():
heter_device_num = fluid.core.get_xpu_device_count()
device_list = [str(x) for x in range(0, heter_device_num)]
for idx, cur_worker in enumerate(pod.workers):
device_id = "0" if heter_device_num == 0 else str(device_list[(
idx) % heter_device_num])
if self.distribute_mode == DistributeMode.PS_HETER:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num),
"STAGE_ID": "1",
"STAGE_NUM": str(self.stage_num),
"PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST": "",
"PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST":
self.stage_heter_map[2],
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST":
self.heter_worker_endpoints,
"HETER_DEVICE_TYPE": self.stage_device_map[1],
"TRAINING_ROLE": "TRAINER",
"POD_IP": cur_worker.endpoint.split(":")[0],
"PADDLE_PORT": cur_worker.endpoint.split(":")[1],
"PADDLE_TRAINER_ID": str(cur_worker.rank),
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"FLAGS_selected_gpus": "0",
"FLAGS_selected_xpus": "0",
"CUDA_VISIBLE_DEVICES": device_id,
"XPU_VISIBLE_DEVICES": device_id,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
else:
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"TRAINING_ROLE": "TRAINER",
"POD_IP": cur_worker.endpoint.split(":")[0],
"PADDLE_PORT": cur_worker.endpoint.split(":")[1],
"PADDLE_TRAINER_ID": str(cur_worker.rank),
"PADDLE_WITH_GLOO":
str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"FLAGS_selected_gpus": "0",
"FLAGS_selected_xpus": "0",
"CUDA_VISIBLE_DEVICES": device_id,
"XPU_VISIBLE_DEVICES": device_id,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
self.cmds["worker"].append(cmd)
if idx == 0:
logger.info(
"Local worker start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.workers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"
))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/workerlog.%d" % (args.log_dir, idx), "w")
self.log_fns["worker"].append(fn)
proc = subprocess.Popen(
cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_worker.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
self.procs["worker"].append(tp)
def start_pod_heter_worker(self, args, pod):
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
heter_device_num = 0
device_list = []
if fluid.core.is_compiled_with_cuda():
device_list = get_gpus(args.gpus)
heter_device_num = len(device_list)
elif fluid.core.is_compiled_with_xpu():
heter_device_num = fluid.core.get_xpu_device_count()
device_list = [str(x) for x in range(0, heter_device_num)]
for idx, cur_heter_worker in enumerate(pod.heter_workers):
device_id = "0" if heter_device_num == 0 else str(device_list[(
idx) % heter_device_num])
stage_id = cur_heter_worker.stage
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": self.server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": self.worker_endpoints,
"PADDLE_NEXT_HETER_TRAINER_IP_PORT_LIST":
self.stage_heter_map[stage_id + 1]
if stage_id <= self.stage_num - 1 else "",
"PADDLE_PREVIOUS_HETER_TRAINER_IP_PORT_LIST":
self.stage_heter_map[stage_id - 1],
"PADDLE_ALL_HETER_TRAINER_IP_PORT_LIST":
self.heter_worker_endpoints,
"HETER_DEVICE_TYPE": self.stage_device_map[stage_id],
"STAGE_ID": str(stage_id),
"STAGE_NUM": str(self.stage_num),
"PADDLE_PORT": cur_heter_worker.endpoint.split(":")[1],
"TRAINING_ROLE": "HETER_TRAINER",
"PADDLE_TRAINERS_NUM": str(self.worker_num),
"PADDLE_STAGE_TRAINERS_NUM": str(self.stage_trainer_num),
"POD_IP": cur_heter_worker.endpoint.split(":")[0],
"PADDLE_WITH_GLOO": str(os.getenv("PADDLE_WITH_GLOO", "0")),
"PADDLE_GLOO_RENDEZVOUS": "3",
"PADDLE_GLOO_FS_PATH": self.gloo_rendezvous_dir,
"FLAGS_selected_gpus": "0",
"FLAGS_selected_xpus": "0",
"CUDA_VISIBLE_DEVICES": device_id,
"XPU_VISIBLE_DEVICES": device_id,
"PADDLE_GLOO_HTTP_ENDPOINT": self.http_port
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
self.cmds["heter_worker"].append(cmd)
if idx == 0:
logger.info(
"Local heter_worker start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.heter_workers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"
))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/heterlog.%d" % (args.log_dir, idx), "w")
self.log_fns["heter_worker"].append(fn)
proc = subprocess.Popen(
cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_heter_worker.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
self.procs["heter_worker"].append(tp)
def check_backend(backend):
    """Validate a collective backend name against this build's capabilities.

    Raises ValueError for unknown names, for 'nccl' on a non-CUDA build and
    for 'bkcl' on a non-XPU build; otherwise returns None.
    """
    known_backends = ('nccl', 'gloo', 'bkcl', 'auto')
    if backend not in known_backends:
        raise ValueError(
            "paddle.distributed initialize error, "
            "backend argument can only be one of 'nccl', 'gloo', 'bkcl', 'auto', but got %s"
            % backend)

    cuda_missing = backend == 'nccl' and not fluid.core.is_compiled_with_cuda()
    if cuda_missing:
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with cuda but you assign 'nccl' as backend."
        )

    xpu_missing = backend == 'bkcl' and not fluid.core.is_compiled_with_xpu()
    if xpu_missing:
        raise ValueError(
            "paddle.distributed initialize error, "
            "your paddle is not compiled with xpu but you assign 'bkcl' as backend."
        )
def block_windows_and_macos(backend):
    """Reject the 'gloo' backend on macOS and Windows (unsupported there).

    Any other backend is accepted silently.
    """
    if backend != 'gloo':
        return

    running_on_macos = utils.OS_NAME.startswith('darwin')
    if running_on_macos:
        raise ValueError(
            "You are going to using gloo on macos, but currently is not supported"
        )
    if utils.IS_WINDOWS:
        raise ValueError(
            "You are going to using gloo on windows, but currently is not supported"
        )
def get_backend_by_compile_flag():
    """Pick the default collective backend from the paddle build flags:
    'nccl' for CUDA builds, 'bkcl' for XPU builds, otherwise 'gloo'."""
    if fluid.core.is_compiled_with_cuda():
        return 'nccl'
    return 'bkcl' if fluid.core.is_compiled_with_xpu() else 'gloo'
| true | true |
f7f50f4589dcc8f5ffae7557b59427a601c3edb8 | 11,936 | py | Python | mystore/serializers.py | oreon/rtbp | c35f1a712bdc36c725e68a98b21105654c5f5fdc | [
"MIT"
] | 1 | 2017-12-12T17:28:16.000Z | 2017-12-12T17:28:16.000Z | mystore/serializers.py | oreon/rtbp | c35f1a712bdc36c725e68a98b21105654c5f5fdc | [
"MIT"
] | null | null | null | mystore/serializers.py | oreon/rtbp | c35f1a712bdc36c725e68a98b21105654c5f5fdc | [
"MIT"
] | null | null | null |
import sys
from django.db import transaction
from django.forms.models import ModelForm
from rest_framework import serializers, status
from rest_framework.response import Response
from users.serializers import AppUserLookupSerializer
from .models import *
class ProductLookupSerializer(serializers.ModelSerializer):
    """Minimal Product representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = Product
        fields = ('displayName', 'id',)
class CategoryLookupSerializer(serializers.ModelSerializer):
    """Minimal Category representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = Category
        fields = ('displayName', 'id',)
class CustomerLookupSerializer(serializers.ModelSerializer):
    """Minimal Customer representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = Customer
        fields = ('displayName', 'id',)
class CustomerOrderLookupSerializer(serializers.ModelSerializer):
    """Minimal CustomerOrder representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = CustomerOrder
        fields = ('displayName', 'id',)
class OrderItemLookupSerializer(serializers.ModelSerializer):
    """Minimal OrderItem representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = OrderItem
        fields = ('displayName', 'id',)
class EmployeeLookupSerializer(serializers.ModelSerializer):
    """Minimal Employee representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = Employee
        fields = ('displayName', 'id',)
class CustomerReviewLookupSerializer(serializers.ModelSerializer):
    """Minimal CustomerReview representation (id + displayName) for lookup lists."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = CustomerReview
        fields = ('displayName', 'id',)
class ProductSerializer(serializers.ModelSerializer):
    """Read serializer for Product, adding the computed displayName."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Product
class CategorySerializer(serializers.ModelSerializer):
    """Read serializer for Category, adding the computed displayName."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Category
class OrderItemSerializer(serializers.ModelSerializer):
    """Read serializer for OrderItem with nested lookup views of its
    parent order and product."""
    displayName = serializers.ReadOnlyField()
    customerOrder = CustomerOrderLookupSerializer()
    product = ProductLookupSerializer()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = OrderItem
class CustomerOrderSerializer(serializers.ModelSerializer):
    """Read serializer for CustomerOrder with a nested customer lookup
    and the full (read-only) list of order items."""
    displayName = serializers.ReadOnlyField()
    customer = CustomerLookupSerializer()
    orderItems = OrderItemSerializer(many=True, read_only = True)
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = CustomerOrder
class CustomerReviewSerializer(serializers.ModelSerializer):
    """Read serializer for CustomerReview with a nested customer lookup."""
    displayName = serializers.ReadOnlyField()
    customer = CustomerLookupSerializer()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = CustomerReview
class CustomerSerializer(serializers.ModelSerializer):
    """Read serializer for Customer including read-only nested orders and
    reviews."""
    displayName = serializers.ReadOnlyField()
    customerOrders = CustomerOrderSerializer(many=True, read_only = True)
    customerReviews = CustomerReviewSerializer(many=True, read_only = True)
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Customer
class EmployeeSerializer(serializers.ModelSerializer):
    """Read serializer for Employee with a nested application-user lookup."""
    displayName = serializers.ReadOnlyField()
    appUser = AppUserLookupSerializer()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Employee
class ProductWritableSerializer(serializers.ModelSerializer):
    """Writable serializer for Product (flat, no nested relations)."""
    displayName = serializers.ReadOnlyField()
    #image = serializers.ReadOnlyField()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Product
class CategoryWritableSerializer(serializers.ModelSerializer):
    """Writable serializer for Category (flat, no nested relations)."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Category
class OrderItemWritableSerializer(serializers.ModelSerializer):
    """Writable serializer for OrderItem; the parent order is excluded and
    supplied by the parent CustomerOrder serializer, the product is set by
    primary key."""
    displayName = serializers.ReadOnlyField()
    customerOrder_displayName = serializers.ReadOnlyField(source='customerOrderDisplayName')
    product_displayName = serializers.ReadOnlyField(source='productDisplayName')
    product = serializers.PrimaryKeyRelatedField(queryset = Product.objects.all())
    class Meta:
        model = OrderItem
        # customerOrder is injected by the enclosing order serializer
        exclude = ('customerOrder',)
class CustomerOrderWritableSerializer(serializers.ModelSerializer):
    """Writable serializer for CustomerOrder with nested order items.

    ``create`` builds the order plus its items inside one transaction;
    ``update`` syncs the nested items (delete removed, re-save the rest)
    before updating the order's own fields.
    """
    displayName = serializers.ReadOnlyField()
    customer_displayName = serializers.ReadOnlyField(source='customerDisplayName')
    orderItems = OrderItemWritableSerializer(many=True)
    customer = serializers.PrimaryKeyRelatedField(queryset=Customer.objects.all())

    @transaction.atomic
    def create(self, validated_data):
        """Create the order and every nested item; rolls back on any error."""
        try:
            orderItemsCurrent = validated_data.pop('orderItems')
            customerOrder = CustomerOrder.objects.create(**validated_data)
            for item in orderItemsCurrent:
                item['customerOrder'] = customerOrder
                s = OrderItemWritableSerializer(data=item)
                if s.is_valid():
                    s.create(item)
                else:
                    # BUG FIX: invalid items used to be silently dropped;
                    # raise (as CustomerWritableSerializer does) so the
                    # transaction rolls back instead of losing data.
                    raise Exception(s.errors)
            return customerOrder
        except Exception:
            print(sys.exc_info()[0])
            raise

    @transaction.atomic
    def update(self, instance, validated_data):
        """Sync nested items, then update the order's own fields."""
        try:
            self.updateOrderItems(instance, validated_data)
            return super(CustomerOrderWritableSerializer, self).update(
                instance, validated_data)
        except Exception:
            # BUG FIX: the exception used to be swallowed here (only
            # sys.exc_info() was read), making update() silently return
            # None; re-raise so DRF reports the error and the transaction
            # rolls back.
            print(sys.exc_info()[0])
            raise

    def updateOrderItems(self, instance, validated_data):
        """Delete items removed from the payload, (re)save the remaining
        ones. No-op when the payload carries no 'orderItems' key."""
        if 'orderItems' not in validated_data:
            return
        orderItemsCurrent = validated_data.pop('orderItems')
        ids = [item['id'] for item in orderItemsCurrent if 'id' in item]
        for item in instance.orderItems.all():
            if item.id not in ids:
                item.delete()
        for item in orderItemsCurrent:
            OrderItem(customerOrder=instance, **item).save()

    class Meta:
        model = CustomerOrder
        # customer is injected by the enclosing customer serializer
        exclude = ('customer',)
class CustomerReviewWritableSerializer(serializers.ModelSerializer):
    """Writable serializer for CustomerReview; the owning customer is
    excluded and supplied by the parent Customer serializer."""
    displayName = serializers.ReadOnlyField()
    customer_displayName = serializers.ReadOnlyField(source='customerDisplayName')
    class Meta:
        model = CustomerReview
        # customer is injected by the enclosing customer serializer
        exclude = ('customer',)
class CustomerForm(ModelForm):
    """Plain Django ModelForm over Customer, used for request validation
    inside CustomerWritableSerializer.create."""
    class Meta:
        model = Customer
        fields = '__all__'
class CustomerWritableSerializer(serializers.ModelSerializer):
    """Writable Customer serializer with nested orders and reviews.

    ``create`` validates the raw request via CustomerForm; ``update``
    rebuilds the nested orders wholesale and syncs the nested reviews,
    all inside one transaction.
    """
    displayName = serializers.ReadOnlyField()
    customerOrders = CustomerOrderWritableSerializer(many=True)
    customerReviews = CustomerReviewWritableSerializer(many=True)

    def create(self, validated_data):
        """Create a Customer from the raw request data via CustomerForm."""
        form = CustomerForm(data=self.context['request'].data)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.save()
            return instance
        # BUG FIX: returning a Response object from a serializer's create()
        # makes DRF treat it as the created instance; raise a validation
        # error instead so the view produces a proper 400.
        raise serializers.ValidationError(form.errors)

    @transaction.atomic
    def createme(self, validated_data):
        """Alternative nested-create path.

        NOTE(review): not called anywhere in this module (the ``create``
        above takes precedence) -- confirm callers before removing.
        """
        customerOrdersCurrent = validated_data.pop('customerOrders')
        customerReviewsCurrent = validated_data.pop('customerReviews')
        customer = Customer.objects.create(**validated_data)
        for item in customerOrdersCurrent:
            item['customer'] = customer
            CustomerOrder.objects.create_Or_update_custom(item)
        for item in customerReviewsCurrent:
            CustomerReview(customer=customer, **item).save()
        return customer

    @transaction.atomic
    def update(self, instance, validated_data):
        """Rebuild nested orders, sync nested reviews, then update the
        customer's own fields."""
        customerOrdersCurrent = validated_data.pop('customerOrders')
        instance.customerOrders.all().delete()
        for item in customerOrdersCurrent:
            item['customer'] = instance
            CustomerOrder.objects.create_Or_update_custom(item)
        # BUG FIX: 'customerReviews' used to be popped here too, which made
        # updateCustomerReviews() (which looks the key up itself) a silent
        # no-op, so reviews were never updated. Leave the key for the helper.
        self.updateCustomerReviews(instance, validated_data)
        return super(CustomerWritableSerializer, self).update(
            instance, validated_data)

    def updateCustomerOrders(self, instance, validated_data):
        """Create-or-update nested orders in place.

        NOTE(review): not called anywhere in this module (``update`` above
        rebuilds orders wholesale instead) -- confirm before removing.
        """
        if 'customerOrders' not in validated_data:
            return
        customerOrdersCurrent = validated_data.pop('customerOrders')
        for item in customerOrdersCurrent:
            if 'id' not in item:
                item['customer'] = instance
                s = CustomerOrderWritableSerializer(data=item)
                if s.is_valid():
                    s.create(item)
                else:
                    raise Exception(s.errors)
            else:
                CustomerOrder.objects.update(**item)

    def updateCustomerReviews(self, instance, validated_data):
        """Delete reviews removed from the payload, (re)save the remaining
        ones. No-op when the payload carries no 'customerReviews' key."""
        if 'customerReviews' not in validated_data:
            return
        customerReviewsCurrent = validated_data.pop('customerReviews')
        ids = [item['id'] for item in customerReviewsCurrent if 'id' in item]
        for item in instance.customerReviews.all():
            if item.id not in ids:
                item.delete()
        for item in customerReviewsCurrent:
            CustomerReview(customer=instance, **item).save()

    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Customer
class EmployeeWritableSerializer(serializers.ModelSerializer):
    """Writable serializer for Employee (appUser written by id, its display
    name exposed read-only)."""
    displayName = serializers.ReadOnlyField()
    appUser_displayName = serializers.ReadOnlyField(source='appUserDisplayName')
    class Meta:
        # NOTE(review): no fields/exclude declared -- relies on pre-3.3 DRF
        # defaulting to all model fields; confirm the installed DRF version.
        model = Employee
class FullProductSerializer(ProductSerializer):
    """Detail variant of ProductSerializer; currently adds nothing beyond the base."""
    class Meta(ProductSerializer.Meta):
        model = Product
class FullCategorySerializer(CategorySerializer):
    """Detail variant of CategorySerializer; currently adds nothing beyond the base."""
    class Meta(CategorySerializer.Meta):
        model = Category
class FullCustomerSerializer(CustomerSerializer):
    """Detail variant of CustomerSerializer; currently adds nothing beyond the base."""
    class Meta(CustomerSerializer.Meta):
        model = Customer
class FullCustomerOrderSerializer(CustomerOrderSerializer):
    """Detail variant of CustomerOrderSerializer; currently adds nothing beyond the base."""
    class Meta(CustomerOrderSerializer.Meta):
        model = CustomerOrder
class FullOrderItemSerializer(OrderItemSerializer):
    """Detail variant of OrderItemSerializer; currently adds nothing beyond the base."""
    class Meta(OrderItemSerializer.Meta):
        model = OrderItem
class FullEmployeeSerializer(EmployeeSerializer):
    """Detail variant of EmployeeSerializer; currently adds nothing beyond the base."""
    class Meta(EmployeeSerializer.Meta):
        model = Employee
class FullCustomerReviewSerializer(CustomerReviewSerializer):
    """Detail variant of CustomerReviewSerializer; currently adds nothing beyond the base."""
    class Meta(CustomerReviewSerializer.Meta):
        model = CustomerReview
| 24.559671 | 98 | 0.594169 |
import sys
from django.db import transaction
from django.forms.models import ModelForm
from rest_framework import serializers, status
from rest_framework.response import Response
from users.serializers import AppUserLookupSerializer
from .models import *
class ProductLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = Product
fields = ('displayName', 'id',)
class CategoryLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = Category
fields = ('displayName', 'id',)
class CustomerLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = Customer
fields = ('displayName', 'id',)
class CustomerOrderLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = CustomerOrder
fields = ('displayName', 'id',)
class OrderItemLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = OrderItem
fields = ('displayName', 'id',)
class EmployeeLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = Employee
fields = ('displayName', 'id',)
class CustomerReviewLookupSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = CustomerReview
fields = ('displayName', 'id',)
class ProductSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = Product
class CategorySerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
class Meta:
model = Category
class OrderItemSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
customerOrder = CustomerOrderLookupSerializer()
product = ProductLookupSerializer()
class Meta:
model = OrderItem
class CustomerOrderSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
customer = CustomerLookupSerializer()
orderItems = OrderItemSerializer(many=True, read_only = True)
class Meta:
model = CustomerOrder
class CustomerReviewSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
customer = CustomerLookupSerializer()
class Meta:
model = CustomerReview
class CustomerSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
customerOrders = CustomerOrderSerializer(many=True, read_only = True)
customerReviews = CustomerReviewSerializer(many=True, read_only = True)
class Meta:
model = Customer
class EmployeeSerializer(serializers.ModelSerializer):
displayName = serializers.ReadOnlyField()
appUser = AppUserLookupSerializer()
class Meta:
model = Employee
# Writable counterparts: nested relations are exposed as read-only
# "<relation>_displayName" fields plus a writable primary-key field.
class ProductWritableSerializer(serializers.ModelSerializer):
    """Writable Product serializer."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = Product
class CategoryWritableSerializer(serializers.ModelSerializer):
    """Writable Category serializer."""
    displayName = serializers.ReadOnlyField()
    class Meta:
        model = Category
class OrderItemWritableSerializer(serializers.ModelSerializer):
    """Writable OrderItem serializer.

    `customerOrder` is excluded because the parent order serializer injects
    it when creating nested items; the product is written by primary key.
    """
    displayName = serializers.ReadOnlyField()
    customerOrder_displayName = serializers.ReadOnlyField(source='customerOrderDisplayName')
    product_displayName = serializers.ReadOnlyField(source='productDisplayName')
    product = serializers.PrimaryKeyRelatedField(queryset = Product.objects.all())
    class Meta:
        model = OrderItem
        exclude = ('customerOrder',)
class CustomerOrderWritableSerializer(serializers.ModelSerializer):
    """Writable CustomerOrder serializer with nested order items.

    `orderItems` is accepted as nested writable data; `customer` is written
    by primary key.  create/update run inside a transaction so a failure in
    a nested item rolls back the whole order.
    """
    displayName = serializers.ReadOnlyField()
    customer_displayName = serializers.ReadOnlyField(source='customerDisplayName')
    orderItems = OrderItemWritableSerializer(many=True)
    customer = serializers.PrimaryKeyRelatedField(queryset=Customer.objects.all())

    @transaction.atomic
    def create(self, validated_data):
        """Create the order, then create each nested order item against it."""
        orderItemsCurrent = validated_data.pop('orderItems')
        customerOrder = CustomerOrder.objects.create(**validated_data)
        for item in orderItemsCurrent:
            item['customerOrder'] = customerOrder
            item_serializer = OrderItemWritableSerializer(data=item)
            if item_serializer.is_valid():
                item_serializer.create(item)
            else:
                # BUG FIX: invalid nested items were silently dropped before;
                # surface the errors so the transaction rolls back (matches
                # CustomerWritableSerializer.updateCustomerOrders behaviour).
                raise serializers.ValidationError(item_serializer.errors)
        return customerOrder

    @transaction.atomic
    def update(self, instance, validated_data):
        """Sync the nested items, then apply the remaining flat fields.

        BUG FIX: the original wrapped this in a bare `except` that neither
        re-raised nor returned, so every failure made update() return None
        and the atomic block was never rolled back.  Exceptions now
        propagate to the caller.
        """
        self.updateOrderItems(instance, validated_data)
        return super(CustomerOrderWritableSerializer, self).update(instance, validated_data)

    def updateOrderItems(self, instance, validated_data):
        """Delete items absent from the payload, then (re)write the rest."""
        if 'orderItems' not in validated_data:
            return
        orderItemsCurrent = validated_data.pop('orderItems')
        # Ids present in the payload survive; anything else is removed.
        ids = [item['id'] for item in orderItemsCurrent if 'id' in item]
        for item in instance.orderItems.all():
            if item.id not in ids:
                item.delete()
        for item in orderItemsCurrent:
            OrderItem(customerOrder=instance, **item).save()

    class Meta:
        model = CustomerOrder
        exclude = ('customer',)
class CustomerReviewWritableSerializer(serializers.ModelSerializer):
    """Writable CustomerReview serializer.

    `customer` is excluded because the parent Customer serializer injects it
    when persisting nested reviews.
    """
    displayName = serializers.ReadOnlyField()
    customer_displayName = serializers.ReadOnlyField(source='customerDisplayName')
    class Meta:
        model = CustomerReview
        exclude = ('customer',)
class CustomerForm(ModelForm):
    """Model form covering every Customer field; used for payload validation."""
    class Meta:
        model = Customer
        fields = '__all__'
class CustomerWritableSerializer(serializers.ModelSerializer):
    """Writable Customer serializer with nested orders and reviews."""
    displayName = serializers.ReadOnlyField()
    customerOrders = CustomerOrderWritableSerializer(many=True)
    customerReviews = CustomerReviewWritableSerializer(many=True)

    def create(self, validated_data):
        """Create a Customer from the raw request payload via CustomerForm.

        BUG FIX: on validation failure the original returned a DRF Response
        object from the serializer; DRF expects create() to return an
        instance or raise, so the form errors are now raised as a
        ValidationError (rendered as the same HTTP 400 by the view layer).
        NOTE(review): nested customerOrders/customerReviews are not created
        here (the form only saves flat fields) -- confirm this is intended;
        compare with `createme` below.
        """
        form = CustomerForm(data=self.context['request'].data)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.save()
            return instance
        raise serializers.ValidationError(form.errors)

    @transaction.atomic
    def createme(self, validated_data):
        """Alternative create that also persists nested orders and reviews.

        NOTE(review): never invoked by DRF (only create/update are called);
        presumably a superseded implementation kept for reference -- confirm
        before removing.
        """
        customerOrdersCurrent = validated_data.pop('customerOrders')
        customerReviewsCurrent = validated_data.pop('customerReviews')
        customer = Customer.objects.create(**validated_data)
        for item in customerOrdersCurrent:
            item['customer'] = customer
            CustomerOrder.objects.create_Or_update_custom(item)
        for item in customerReviewsCurrent:
            CustomerReview(customer=customer, **item).save()
        return customer

    @transaction.atomic
    def update(self, instance, validated_data):
        """Recreate nested orders, sync reviews by id, then update flat fields.

        BUG FIX: the original popped 'customerReviews' *before* calling
        updateCustomerReviews(), so the helper found no reviews left in
        validated_data and nested reviews were silently never synced.
        The helper now performs the pop itself.
        """
        customerOrdersCurrent = validated_data.pop('customerOrders')
        instance.customerOrders.all().delete()
        for item in customerOrdersCurrent:
            item['customer'] = instance
            CustomerOrder.objects.create_Or_update_custom(item)
        self.updateCustomerReviews(instance, validated_data)
        return super(CustomerWritableSerializer, self).update(instance, validated_data)

    def updateCustomerOrders(self, instance, validated_data):
        """Create-or-update nested orders in place.

        NOTE(review): not called by update() above, which recreates the
        orders wholesale instead -- confirm before removing.
        """
        if 'customerOrders' not in validated_data:
            return
        customerOrdersCurrent = validated_data.pop('customerOrders')
        for item in customerOrdersCurrent:
            if 'id' not in item:
                item['customer'] = instance
                s = CustomerOrderWritableSerializer(data=item)
                if s.is_valid():
                    s.create(item)
                else:
                    raise Exception(s.errors)
            else:
                CustomerOrder.objects.update(**item)

    def updateCustomerReviews(self, instance, validated_data):
        """Delete reviews absent from the payload, then rewrite the rest."""
        if 'customerReviews' not in validated_data:
            return
        customerReviewsCurrent = validated_data.pop('customerReviews')
        ids = [item['id'] for item in customerReviewsCurrent if 'id' in item]
        for item in instance.customerReviews.all():
            if item.id not in ids:
                item.delete()
        for item in customerReviewsCurrent:
            CustomerReview(customer=instance, **item).save()

    class Meta:
        model = Customer
class EmployeeWritableSerializer(serializers.ModelSerializer):
    """Writable Employee serializer; the linked app user is shown read-only."""
    displayName = serializers.ReadOnlyField()
    appUser_displayName = serializers.ReadOnlyField(source='appUserDisplayName')
    class Meta:
        model = Employee
# "Full" variants: subclass the read serializers, inheriting their Meta so
# detail views can extend them without touching the base definitions.
class FullProductSerializer(ProductSerializer):
    class Meta(ProductSerializer.Meta):
        model = Product
class FullCategorySerializer(CategorySerializer):
    class Meta(CategorySerializer.Meta):
        model = Category
class FullCustomerSerializer(CustomerSerializer):
    class Meta(CustomerSerializer.Meta):
        model = Customer
class FullCustomerOrderSerializer(CustomerOrderSerializer):
    class Meta(CustomerOrderSerializer.Meta):
        model = CustomerOrder
class FullOrderItemSerializer(OrderItemSerializer):
    class Meta(OrderItemSerializer.Meta):
        model = OrderItem
class FullEmployeeSerializer(EmployeeSerializer):
    class Meta(EmployeeSerializer.Meta):
        model = Employee
class FullCustomerReviewSerializer(CustomerReviewSerializer):
    class Meta(CustomerReviewSerializer.Meta):
        model = CustomerReview
| true | true |
f7f50f52b6d3518e10c5321694f6c4aec8a31d4c | 1,767 | py | Python | ee/api/chalicelib/core/authorizers.py | champkeh/openreplay | 440634dca1e78464e96f9397105d04b5c3cecdfd | [
"MIT"
] | 1 | 2021-09-28T15:24:31.000Z | 2021-09-28T15:24:31.000Z | ee/api/chalicelib/core/authorizers.py | aayushgautam/openreplay | 3298230c3a04fe537794bf396bdaf695c81301c6 | [
"MIT"
] | 2 | 2022-02-15T00:07:39.000Z | 2022-02-27T22:54:49.000Z | ee/api/chalicelib/core/authorizers.py | aayushgautam/openreplay | 3298230c3a04fe537794bf396bdaf695c81301c6 | [
"MIT"
] | null | null | null | from chalicelib.utils.helper import environ
import jwt
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.core import tenants
from chalicelib.core import users
def jwt_authorizer(token):
    """Validate an ``Authorization: Bearer <jwt>`` header value.

    Returns the decoded JWT payload dict, or None when the header is
    malformed, the token has expired, or decoding fails for any reason.
    """
    token = token.split(" ")
    if len(token) != 2 or token[0].lower() != "bearer":
        return None
    try:
        payload = jwt.decode(
            token[1],
            environ["jwt_secret"],
            # NOTE(review): PyJWT documents `algorithms` as a *list*; this
            # passes the raw env string -- confirm the configured value.
            algorithms=environ["jwt_algorithm"],
            audience=[f"plugin:{helper.get_stage_name()}", f"front:{helper.get_stage_name()}"]
        )
    except jwt.ExpiredSignatureError:
        print("! JWT Expired signature")
        return None
    except Exception as e:
        # BUG FIX: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception and the actual
        # error is now included in the log line.
        print("! JWT Base Exception", e)
        return None
    return payload
def jwt_context(context):
    """Resolve a decoded JWT payload into a full user context dict.

    Returns None when the referenced user no longer exists; otherwise the
    tenant/user ids merged with the user record (user fields win on clash).
    """
    user = users.get(user_id=context["userId"], tenant_id=context["tenantId"])
    if user is None:
        return None
    result = {
        "tenantId": context["tenantId"],
        "userId": context["userId"],
    }
    result.update(user)
    return result
def generate_jwt(id, tenant_id, iat, aud, exp=None):
    """Build a signed JWT for the given user/tenant.

    Arguments:
    id -- user id (name shadows the builtin; kept for caller compatibility).
    tenant_id -- tenant id embedded in the payload.
    iat -- issued-at time in *milliseconds* (divided by 1000 below).
    aud -- audience claim.
    exp -- explicit expiry (epoch seconds); when None it is derived from the
        jwt_exp_delta_seconds env var plus the local UTC offset.
    """
    token = jwt.encode(
        payload={
            "userId": id,
            "tenantId": tenant_id,
            "exp": iat // 1000 + int(environ["jwt_exp_delta_seconds"]) + TimeUTC.get_utc_offset() // 1000 \
                if exp is None else exp,
            "iss": environ["jwt_issuer"],
            "iat": iat // 1000,
            "aud": aud
        },
        key=environ["jwt_secret"],
        algorithm=environ["jwt_algorithm"]
    )
    # NOTE(review): jwt.encode returns bytes on PyJWT < 2 (hence .decode);
    # PyJWT >= 2 returns str and this line would raise AttributeError --
    # confirm the pinned PyJWT version before upgrading.
    return token.decode("utf-8")
def api_key_authorizer(token):
    """Look up the tenant owning *token*.

    Returns the tenant dict with `createdAt` normalized to a timestamp, or
    None when no tenant matches the API key.
    """
    tenant = tenants.get_by_api_key(token)
    if tenant is None:
        return None
    tenant["createdAt"] = TimeUTC.datetime_to_timestamp(tenant["createdAt"])
    return tenant
| 28.047619 | 107 | 0.601585 | from chalicelib.utils.helper import environ
import jwt
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.core import tenants
from chalicelib.core import users
def jwt_authorizer(token):
token = token.split(" ")
if len(token) != 2 or token[0].lower() != "bearer":
return None
try:
payload = jwt.decode(
token[1],
environ["jwt_secret"],
algorithms=environ["jwt_algorithm"],
audience=[f"plugin:{helper.get_stage_name()}", f"front:{helper.get_stage_name()}"]
)
except jwt.ExpiredSignatureError:
print("! JWT Expired signature")
return None
except BaseException as e:
print("! JWT Base Exception")
return None
return payload
def jwt_context(context):
user = users.get(user_id=context["userId"], tenant_id=context["tenantId"])
if user is None:
return None
return {
"tenantId": context["tenantId"],
"userId": context["userId"],
**user
}
def generate_jwt(id, tenant_id, iat, aud, exp=None):
token = jwt.encode(
payload={
"userId": id,
"tenantId": tenant_id,
"exp": iat // 1000 + int(environ["jwt_exp_delta_seconds"]) + TimeUTC.get_utc_offset() // 1000 \
if exp is None else exp,
"iss": environ["jwt_issuer"],
"iat": iat // 1000,
"aud": aud
},
key=environ["jwt_secret"],
algorithm=environ["jwt_algorithm"]
)
return token.decode("utf-8")
def api_key_authorizer(token):
t = tenants.get_by_api_key(token)
if t is not None:
t["createdAt"] = TimeUTC.datetime_to_timestamp(t["createdAt"])
return t
| true | true |
f7f511afa62f0c28a2ccdcf23413a24fb55f76f9 | 7,307 | py | Python | adwords_video_csv.py | Gulshan016/vogon | ef2048c57d43fdfb1ac5af0ad7b4a23bf16cb1c4 | [
"Apache-2.0"
] | 98 | 2015-01-20T05:57:01.000Z | 2022-01-04T12:03:39.000Z | adwords_video_csv.py | Gulshan016/vogon | ef2048c57d43fdfb1ac5af0ad7b4a23bf16cb1c4 | [
"Apache-2.0"
] | 13 | 2016-08-28T05:58:59.000Z | 2021-01-14T10:55:01.000Z | adwords_video_csv.py | Gulshan016/vogon | ef2048c57d43fdfb1ac5af0ad7b4a23bf16cb1c4 | [
"Apache-2.0"
] | 57 | 2015-01-24T09:23:34.000Z | 2021-12-09T06:18:27.000Z | # vim: set fileencoding=utf-8 :
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
# Human-readable section banners the AdWords for Video bulk upload expects
# above each block of rows in the CSV, keyed by entity type.
SECTION_TYPE_HEADERS = {
    'campaign': "Input campaigns here",
    'ad': "Input ads here",
    'targeting_group': "Input targeting groups here",
    'campaign_target': "Input targets here (campaign settings)",
    'target': "Input targets here"
}
# Every emitted row is padded with None up to this many columns.
CSV_COLUMNS = 15
class AwvCsv():
    """Assembles an AdWords for Video bulk-upload CSV.

    Arguments:
    campaigns -- dict of campaign dicts, keyed by campaign name.
    ads -- dict of ad dicts, keyed by ad name.
    targets -- maps a campaign name to a list of target dicts with keys
        'type', 'value' and optionally 'level' and 'max_cpv'; a 'value'
        may hold several comma-separated targets.
    """
    def __init__(self, campaigns, ads, targets):
        self.sections = []
        campaign_values = []
        targeting_group_values = []
        campaign_target_values = []
        target_values = []
        # campaign name -> list of its targeting group names; consumed later
        # so every ad can be enabled for all groups of its campaign.
        targeting_groups_by_campaign = {}
        for c in campaigns.values():
            values = {
                'Action': "Add",
                'Status': c['Status'],
                'Campaign': c['name'],
                'Budget': c['Budget'],
                'Budget ID': "#n/a",
                'Network': c['Network'],
                'Delivery method': c['Delivery method'],
                'Start date': c['Start date'],
                'End date': c['End date'],
                'Ad rotation': c['Ad rotation'],
                'Frequency cap': c['Frequency cap'],
                'Mobile bid modifier': c['Mobile bid modifier']
            }
            campaign_values.append(values)
            # Target types here: https://support.google.com/adwords/answer/3344649?hl=en
            # A campaign may carry several targetings, numbered _001, _002, ...
            i = 1
            for t in targets[c['name']]:
                istring = "%03d" % i
                # One targeting group per target definition.
                targeting_group_name = c['name'] + " Targeting Group_" + istring
                values = {
                    'Action': "Add",
                    'Status': c['Status'],
                    'Targeting group': targeting_group_name,
                    'Campaign': c['name'],
                    'Max CPV': c['Max CPV']
                }
                targeting_group_values.append(values)
                # Remember the group so the ads can refer to it later on.
                campaign_targeting_groups = targeting_groups_by_campaign.setdefault(c['name'], [])
                campaign_targeting_groups.append(targeting_group_name)
                # Targeting comes in two flavors: campaign-level and targeting
                # group (the default when no 'level' is specified).
                target_level = t.get('level', None)
                # Comma-separated values expand to one row per target.
                target_array = t['value'].split(",")
                for tgv in target_array:
                    if target_level is not None and target_level == 'Campaign':
                        values = {
                            'Action': "Add",
                            'Type': t['type'],
                            'Campaign target': tgv,
                            'Campaign': c['name']
                        }
                        campaign_target_values.append(values)
                    else:
                        values = {
                            'Action': "Add",
                            'Type': t['type'],
                            'Status': 'Enabled',
                            'Target': tgv,
                            'Targeting group': targeting_group_name,
                            'Max CPV': t.get('max_cpv', '#n/a')
                        }
                        target_values.append(values)
                i += 1
        self.add_section(campaign_values, 'campaign')
        self.add_section(targeting_group_values, 'targeting_group')
        self.add_section(campaign_target_values, 'campaign_target')
        self.add_section(target_values, 'target')
        ad_values = []
        # BUG FIX (py3 forward-compat): dict.values() is not subscriptable on
        # Python 3; snapshot the first campaign once outside the loop.  As
        # before, every ad inherits the first campaign's Status.
        if ads:
            first_campaign = list(campaigns.values())[0]
        for ad in ads.values():
            target_groups_for_this_ad = ", ".join(targeting_groups_by_campaign[ad['Campaign']])
            values = {
                'Action': "Add",
                'Status': first_campaign['Status'],
                'Ad': ad['name'],
                'Video id': ad['Video id'],
                'Thumbnail': ad['Thumbnail'],
                'Headline': ad['Headline'],
                'Description line one': ad['Description line one'],
                'Description line two': ad['Description line two'],
                'Display Url': ad['Display Url'],
                'Destination Url': ad['Destination Url'],
                'YouTube destination': ad['YouTube destination'],
                'Showing on': ad['Showing on'],
                'Companion banner': ad['Companion banner'],
                'Enable ad for': target_groups_for_this_ad,
                'Campaign': ad['Campaign']
            }
            ad_values.append(values)
        self.add_section(ad_values, 'ad')

    def add_section(self, values, type):
        """Wrap *values* in an AwvCsvSection of the given *type* and store it."""
        section = AwvCsvSection(values, type)
        self.sections.append(section)

    def get_csv(self):
        """Return all CSV rows; each non-empty section is followed by two
        blank (padded) separator rows."""
        retval = []
        for s in self.sections:
            s_csv = s.get_csv()
            if s_csv is not None and len(s_csv) > 0:
                retval += s_csv
                for i in range(0, 2):
                    retval.append(pad_line([]))
        return retval

    def write_to_file(self, file_name):
        """Write the assembled CSV to *file_name*.

        NOTE(review): Python 2 era I/O -- on Python 3 the csv module wants
        open(..., newline='') and str (not utf-8 encoded bytes) cells;
        confirm the target interpreter before reusing.
        """
        with open(file_name, 'w') as f:
            csvwriter = csv.writer(
                f, delimiter=',', quotechar='"',
                quoting=csv.QUOTE_MINIMAL)
            csvwriter.writerows(self.get_csv())
class AwvCsvSection():
    """One section of the bulk-upload CSV (all rows for one entity type)."""
    def __init__(self, values, section_type, locale='en_US'):
        """Create a new AdWords for Video CSV section.

        Arguments:
        values -- array of dictionaries, each dict represents one line in the
            CSV; the keys of the first value are used for column headers.
        section_type -- the type of entity represented in the section, such
            as ad or campaign (must be a key of SECTION_TYPE_HEADERS).
        locale -- ISO 639 language code.
        """
        self.values = values
        self.section_type = section_type
        self.locale = locale
    def get_csv(self):
        """Return the section as a list of padded rows (empty if no values).

        NOTE(review): the header uses the first dict's keys() while each data
        row uses that line's values(); this assumes every dict was built with
        the same insertion order -- confirm on interpreters without ordered
        dicts.  The .encode('utf-8') calls yield `str` on Python 2 but would
        yield bytes on Python 3 (this module is Python 2 era code).
        """
        retval = []
        if len(self.values) > 0:
            # Section banner + locale marker row.
            retval.append(pad_line([SECTION_TYPE_HEADERS[self.section_type],
                                    "locale=%s" % (self.locale)]))
            column_header = self.values[0].keys()
            retval.append(pad_line([c.encode('utf-8') for c in column_header]))
            for line in self.values:
                retval.append(pad_line([c.encode('utf-8') for c in line.values()]))
        return retval
def pad_line(arr):
    """Return a new row: *arr* extended with None up to CSV_COLUMNS cells.

    Rows already at or beyond CSV_COLUMNS are returned unchanged (as a copy).
    """
    missing = CSV_COLUMNS - len(arr)
    return arr + [None] * missing
| 40.594444 | 114 | 0.517449 |
import csv
SECTION_TYPE_HEADERS = {
'campaign': "Input campaigns here",
'ad': "Input ads here",
'targeting_group': "Input targeting groups here",
'campaign_target': "Input targets here (campaign settings)",
'target': "Input targets here"
}
CSV_COLUMNS = 15
class AwvCsv():
def __init__(self, campaigns, ads, targets):
self.sections = []
campaign_values = []
targeting_group_values = []
campaign_target_values = []
target_values = []
targeting_groups_by_campaign = {}
for c in campaigns.values():
values = {
'Action': "Add",
'Status': c['Status'],
'Campaign': c['name'],
'Budget': c['Budget'],
'Budget ID': "#n/a",
'Network': c['Network'],
'Delivery method': c['Delivery method'],
'Start date': c['Start date'],
'End date': c['End date'],
'Ad rotation': c['Ad rotation'],
'Frequency cap': c['Frequency cap'],
'Mobile bid modifier': c['Mobile bid modifier']
}
campaign_values.append(values)
i = 1;
for t in targets[c['name']]:
istring = "%03d" % i
targeting_group_name = c['name'] + " Targeting Group_" + istring
values = {
'Action': "Add",
'Status': c['Status'],
'Targeting group': targeting_group_name,
'Campaign': c['name'],
'Max CPV': c['Max CPV']
}
targeting_group_values.append(values)
campaign_targeting_groups = targeting_groups_by_campaign.setdefault(c['name'], [])
campaign_targeting_groups.append(targeting_group_name)
target_level = t.get('level', None)
target_array = t['value'].split(",")
for tgv in target_array:
if target_level is not None and target_level == 'Campaign':
values = {
'Action': "Add",
'Type': t['type'],
'Campaign target': tgv,
'Campaign': c['name']
}
campaign_target_values.append(values)
else:
values = {
'Action': "Add",
'Type': t['type'],
'Status': 'Enabled',
'Target': tgv,
'Targeting group': targeting_group_name,
'Max CPV' : t.get('max_cpv', '#n/a')
}
target_values.append(values)
i += 1
self.add_section(campaign_values, 'campaign')
self.add_section(targeting_group_values, 'targeting_group')
self.add_section(campaign_target_values, 'campaign_target')
self.add_section(target_values, 'target')
ad_values = []
for ad in ads.values():
target_groups_for_this_ad = ", ".join(targeting_groups_by_campaign[ad['Campaign']])
values = {
'Action': "Add",
'Status': campaigns.values()[0]['Status'],
'Ad': ad['name'],
'Video id': ad['Video id'],
'Thumbnail': ad['Thumbnail'],
'Headline': ad['Headline'],
'Description line one': ad['Description line one'],
'Description line two': ad['Description line two'],
'Display Url': ad['Display Url'],
'Destination Url': ad['Destination Url'],
'YouTube destination': ad['YouTube destination'],
'Showing on': ad['Showing on'],
'Companion banner': ad['Companion banner'],
'Enable ad for': target_groups_for_this_ad,
'Campaign': ad['Campaign']
}
ad_values.append(values)
self.add_section(ad_values, 'ad')
def add_section(self, values, type):
section = AwvCsvSection(values, type)
self.sections.append(section)
def get_csv(self):
retval = []
for s in self.sections:
s_csv = s.get_csv()
if s_csv is not None and len(s_csv) > 0:
retval += s_csv
for i in range(0, 2):
retval.append(pad_line([]))
return retval
def write_to_file(self, file_name):
with open(file_name, 'w') as f:
csvwriter = csv.writer(
f, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerows(self.get_csv())
class AwvCsvSection():
def __init__(self, values, section_type, locale='en_US'):
self.values = values
self.section_type = section_type
self.locale = locale
def get_csv(self):
retval = []
if len(self.values) > 0:
retval.append(pad_line([SECTION_TYPE_HEADERS[self.section_type],
"locale=%s" % (self.locale)]))
column_header = self.values[0].keys()
retval.append(pad_line([c.encode('utf-8') for c in column_header]))
for line in self.values:
retval.append(pad_line([c.encode('utf-8') for c in line.values()]))
return retval
def pad_line(arr):
return arr + ([None] * (CSV_COLUMNS - len(arr)))
| true | true |
f7f511bd961d8d0f7c4276bbddbfe33b8b7bed08 | 1,375 | py | Python | api/migrations/0009_auto_20180607_1905.py | BehindLoader/bandcamp-parser | bb1d2278d8275bd29888ce9a4fd5627400543cd0 | [
"MIT"
] | null | null | null | api/migrations/0009_auto_20180607_1905.py | BehindLoader/bandcamp-parser | bb1d2278d8275bd29888ce9a4fd5627400543cd0 | [
"MIT"
] | null | null | null | api/migrations/0009_auto_20180607_1905.py | BehindLoader/bandcamp-parser | bb1d2278d8275bd29888ce9a4fd5627400543cd0 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-06-07 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the SysQueue model (queued add/edit/
    # delete actions against the listed models, with an optional payload and
    # schedule time) and refreshes SysActions' verbose names.  Applied
    # migrations must not be hand-edited -- the field definitions below are
    # kept verbatim.

    dependencies = [
        ('api', '0008_sysactions'),
    ]
    operations = [
        migrations.CreateModel(
            name='SysQueue',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('model', models.CharField(choices=[('TAG', 'Теги'), ('RELEASE', 'Релиз'), ('ARTIST', 'Исполнитель'), ('TRACK', 'Композиция')], max_length=128, verbose_name='Модель')),
                ('act', models.CharField(choices=[('ADD', 'Добавление'), ('EDIT', 'Редактирование'), ('DELETE', 'Удаление')], max_length=128, verbose_name='Действие')),
                ('value', models.CharField(default='{}', max_length=128, verbose_name='Значение (Опционально)')),
                ('scheduled', models.DateTimeField(null=True, verbose_name='Назначено на')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Создано')),
            ],
            options={
                'verbose_name': 'Участник очереди',
                'verbose_name_plural': 'Участники очереди',
            },
        ),
        migrations.AlterModelOptions(
            name='sysactions',
            options={'verbose_name': 'Действие', 'verbose_name_plural': 'Действия'},
        ),
    ]
| 41.666667 | 184 | 0.576727 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0008_sysactions'),
]
operations = [
migrations.CreateModel(
name='SysQueue',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('model', models.CharField(choices=[('TAG', 'Теги'), ('RELEASE', 'Релиз'), ('ARTIST', 'Исполнитель'), ('TRACK', 'Композиция')], max_length=128, verbose_name='Модель')),
('act', models.CharField(choices=[('ADD', 'Добавление'), ('EDIT', 'Редактирование'), ('DELETE', 'Удаление')], max_length=128, verbose_name='Действие')),
('value', models.CharField(default='{}', max_length=128, verbose_name='Значение (Опционально)')),
('scheduled', models.DateTimeField(null=True, verbose_name='Назначено на')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Создано')),
],
options={
'verbose_name': 'Участник очереди',
'verbose_name_plural': 'Участники очереди',
},
),
migrations.AlterModelOptions(
name='sysactions',
options={'verbose_name': 'Действие', 'verbose_name_plural': 'Действия'},
),
]
| true | true |
f7f51238767faf70ff1946429ce87c7f838b4fce | 304 | py | Python | mmis/config/docs.py | KofiDark/frappe_mmis | 977ebe7851819b2b710ace949ed72809604bad92 | [
"MIT"
] | null | null | null | mmis/config/docs.py | KofiDark/frappe_mmis | 977ebe7851819b2b710ace949ed72809604bad92 | [
"MIT"
] | null | null | null | mmis/config/docs.py | KofiDark/frappe_mmis | 977ebe7851819b2b710ace949ed72809604bad92 | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/mmis"
# docs_base_url = "https://[org_name].github.io/mmis"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Docs-build hook: brand the rendered documentation pages as MMIS."""
    setattr(context, "brand_html", "MMIS")
| 25.333333 | 68 | 0.713816 |
def get_context(context):
context.brand_html = "MMIS"
| true | true |
f7f5129fba09e1a8c6193213121ed1bbcc7951df | 3,310 | py | Python | corepy/arch/spu/isa/__init__.py | matthiaskramm/corepy | b2aad4e86adca10420e825fb65dcbd031cf44bb1 | [
"BSD-3-Clause"
] | 8 | 2016-02-20T03:52:58.000Z | 2022-01-24T15:04:14.000Z | corepy/arch/spu/isa/__init__.py | matthiaskramm/corepy | b2aad4e86adca10420e825fb65dcbd031cf44bb1 | [
"BSD-3-Clause"
] | null | null | null | corepy/arch/spu/isa/__init__.py | matthiaskramm/corepy | b2aad4e86adca10420e825fb65dcbd031cf44bb1 | [
"BSD-3-Clause"
] | 6 | 2015-12-11T05:21:15.000Z | 2020-12-11T10:59:15.000Z | # Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from spu_isa import *
# Nothing to see here, move along... ;)
# Module-level slot holding the code object currently collecting generated
# instructions (None when no code object is active).
__active_code = None

def set_active_code(code):
    """Install *code* as the active code object, detaching the previous one.

    The previous code object (if any) has its callback cleared first; the
    new one is registered with this function as its callback so it can hand
    the active slot over later.
    """
    global __active_code

    current = __active_code
    if current is not None:
        current.set_active_callback(None)

    __active_code = code

    if code is not None:
        code.set_active_callback(set_active_code)
    return
# Property version: attached as the `active_code` property on Instruction
# classes below, so `inst.active_code` resolves the module-level slot.
def __get_active_code(self):
    """Property getter returning the module's active code object (or None)."""
    return __active_code
# Free function version
def get_active_code():
    """Return the currently active code object, or None when unset."""
    return __active_code
# Build the instructions
#for inst in spu_isa.SPU_ISA:
# name = inst[0]
# machine_inst = getattr(machine, name)
# asm_order = inst[1]['asm']
# members = {}
# for key in inst[1].keys():
# members[key] = inst[1][key]
# members['asm_order'] = members['asm']
# members['machine_inst'] = machine_inst
# members['active_code'] = property(__get_active_code)
# globals()[inst[0]] = type(name, (spe.Instruction,), members)
# Attach `active_code` as a read-only property on every Instruction /
# DispatchInstruction subclass defined above, so each instruction can look
# up the currently active code stream.
# BUG FIX (py3 forward-compat): snapshot locals() with list() -- binding the
# loop variable inserts a new name into this very namespace, which raises
# "dictionary changed size during iteration" on Python 3.  Harmless on
# Python 2, where .values() already returned a list.
for l in list(locals().values()):
    if isinstance(l, type):
        if issubclass(l, Instruction) or issubclass(l, DispatchInstruction):
            l.active_code = property(__get_active_code)
| 42.435897 | 80 | 0.610876 |
from spu_isa import *
__active_code = None
def set_active_code(code):
global __active_code
if __active_code is not None:
__active_code.set_active_callback(None)
__active_code = code
if code is not None:
code.set_active_callback(set_active_code)
return
def __get_active_code(self):
global __active_code
return __active_code
def get_active_code():
global __active_code
return __active_code
for l in locals().values():
if isinstance(l, type):
if issubclass(l, Instruction) or issubclass(l, DispatchInstruction):
l.active_code = property(__get_active_code)
| true | true |
f7f5138073aa479e4a455b881e1b672c3a2c1fb9 | 815 | py | Python | modules/signatures/windows/rat_swrort.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 24 | 2021-06-21T07:35:37.000Z | 2022-03-22T03:33:59.000Z | modules/signatures/windows/rat_swrort.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 3 | 2021-07-01T08:09:05.000Z | 2022-01-28T03:38:36.000Z | modules/signatures/windows/rat_swrort.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 6 | 2021-06-22T05:32:57.000Z | 2022-02-11T02:05:45.000Z | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Swrort(Signature):
    """Cuckoo signature flagging file paths linked to the Swrort backdoor.

    The framework evaluates `on_complete` after analysis; any file whose
    path matches one of the `files_re` patterns is marked as an IOC.
    """
    name = "rat_swrort"
    description = "Creates known Swrort Backdoor files, registry keys and/or mutexes"
    severity = 3
    categories = ["rat"]
    families = ["swrort"]
    authors = ["RedSocks"]
    minimum = "2.0"
    # Regex patterns over file paths considered indicators of this family.
    files_re = [
        ".*torchat"
    ]
    def on_complete(self):
        # Delegate matching to Signature.check_file (regex mode); mark every
        # hit and report whether at least one IOC was recorded.
        for indicator in self.files_re:
            match = self.check_file(pattern=indicator, regex=True)
            if match:
                self.mark_ioc("file", match)
        return self.has_marks()
| 29.107143 | 85 | 0.651534 |
from lib.cuckoo.common.abstracts import Signature
class Swrort(Signature):
name = "rat_swrort"
description = "Creates known Swrort Backdoor files, registry keys and/or mutexes"
severity = 3
categories = ["rat"]
families = ["swrort"]
authors = ["RedSocks"]
minimum = "2.0"
files_re = [
".*torchat"
]
def on_complete(self):
for indicator in self.files_re:
match = self.check_file(pattern=indicator, regex=True)
if match:
self.mark_ioc("file", match)
return self.has_marks()
| true | true |
f7f5139568ceadf10cb0023def6924f7a7da4377 | 5,732 | py | Python | docs/conf.py | bcanyelles/django-object-authority | a3c5a3f511a04251af9a5bc8631a9f0a9fe87ca0 | [
"BSD-2-Clause"
] | 5 | 2017-06-16T15:32:46.000Z | 2018-02-20T14:41:08.000Z | docs/conf.py | APSL/django-object-authority | a3c5a3f511a04251af9a5bc8631a9f0a9fe87ca0 | [
"BSD-2-Clause"
] | null | null | null | docs/conf.py | APSL/django-object-authority | a3c5a3f511a04251af9a5bc8631a9f0a9fe87ca0 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# django-object-authority documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 1 11:27:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
def get_version():
    """
    Return the package version as listed in `__version__` in `__init__.py`
    of the source package, falling back to 'latest' when it cannot be read
    (e.g. on a docs-only checkout).
    """
    import inspect
    import os
    import re

    try:
        # conf.py lives in <repo>/docs/, so the package root is two levels
        # up from this file.
        this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))
        repo_root = os.path.dirname(os.path.dirname(this_file))
        init_path = os.path.join(repo_root, 'django_object_authority', '__init__.py')
        # BUG FIX: the original leaked the file handle (open(...).read())
        # and split the path on '/' (POSIX-only); use a context manager and
        # os.path so the lookup also works on Windows.
        with open(init_path) as init_file:
            init_py = init_file.read()
        return re.search(r"__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    except Exception:
        # Deliberately broad: any failure (missing file, no match) yields a
        # placeholder version rather than breaking the docs build.
        return 'latest'
# Sphinx build configuration for the django-object-authority docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-object-authority'
copyright = u'2017, Tomeu Canyelles'
author = u'Tomeu Canyelles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# NOTE: imported here (not at the top) so the theme is only required when
# building HTML output; sphinx_rtd_theme is a docs-only dependency.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'collapse_navigation': False,
    'display_version': True,
    'navigation_depth': 3,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-object-authoritydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'django-object-authority.tex', u'django-object-authority Documentation',
     u'Tomeu Canyelles', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'django-object-authority', u'django-object-authority Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'django-object-authority', u'django-object-authority Documentation',
     author, 'django-object-authority', 'One line description of project.',
     'Miscellaneous'),
]
| 31.152174 | 111 | 0.682833 |
def get_version():
    """Return the package version string for the docs build.

    Parses ``__version__`` out of ``django_object_authority/__init__.py``
    (located one directory above this docs directory). Falls back to the
    placeholder ``'latest'`` on any failure so the docs build never breaks
    on versioning problems.

    Returns:
        str. The version string, or 'latest' if it cannot be determined.
    """
    import os
    import re
    import inspect
    try:
        # Derive the project root portably (os.path.dirname twice) instead
        # of splitting the absolute path on '/', which breaks on Windows.
        docs_dir = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe())))
        project_root = os.path.dirname(docs_dir)
        init_path = os.path.join(
            project_root, 'django_object_authority', '__init__.py')
        # Use a context manager so the file handle is always closed
        # (the original version leaked it).
        with open(init_path) as init_file:
            init_py = init_file.read()
        return re.search(
            "__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    except Exception:
        # Missing file, unreadable file or absent __version__ line: fall
        # back to a harmless placeholder.
        return 'latest'
# Sphinx build configuration for the django-object-authority docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-object-authority'
copyright = u'2017, Tomeu Canyelles'
author = u'Tomeu Canyelles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# NOTE: imported here (not at the top) so the theme is only required when
# building HTML output; sphinx_rtd_theme is a docs-only dependency.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'collapse_navigation': False,
    'display_version': True,
    'navigation_depth': 3,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-object-authoritydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'django-object-authority.tex', u'django-object-authority Documentation',
     u'Tomeu Canyelles', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'django-object-authority', u'django-object-authority Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'django-object-authority', u'django-object-authority Documentation',
     author, 'django-object-authority', 'One line description of project.',
     'Miscellaneous'),
]
| true | true |
f7f514de588eca23ef71fa4e94b0201402438ae0 | 47,843 | py | Python | core/controllers/admin.py | Rijuta-s/oppia | f4f3cd71f90285abee3b0f74062586aaafadce7d | [
"Apache-2.0"
] | null | null | null | core/controllers/admin.py | Rijuta-s/oppia | f4f3cd71f90285abee3b0f74062586aaafadce7d | [
"Apache-2.0"
] | 3 | 2021-02-13T08:35:34.000Z | 2021-05-18T12:17:06.000Z | core/controllers/admin.py | Rijuta-s/oppia | f4f3cd71f90285abee3b0f74062586aaafadce7d | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the admin view."""
from __future__ import annotations
import io
import logging
import random
from core import feconf
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.controllers import domain_objects_validator as validation_method
from core.domain import auth_services
from core.domain import blog_services
from core.domain import collection_services
from core.domain import config_domain
from core.domain import config_services
from core.domain import email_manager
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import opportunity_services
from core.domain import platform_feature_services as feature_services
from core.domain import platform_parameter_domain as parameter_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import role_services
from core.domain import search_services
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_service
class AdminPage(base.BaseHandler):
    """Serves the admin page (surfaced via the App Engine admin console)."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {},
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Renders the admin page template in response to GET requests."""
        template_name = 'admin-page.mainpage.html'
        self.render_template(template_name)
class AdminHandler(base.BaseHandler):
    """Handler for the admin page.

    Serves the data the admin page frontend renders (GET) and dispatches
    the one-off maintenance/dev actions triggered from that page (POST).
    """
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    # Every POST field other than 'action' is optional; which ones are
    # required depends on the chosen action (see post() below).
    HANDLER_ARGS_SCHEMAS = {
        'GET': {},
        'POST': {
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        'reload_exploration', 'reload_collection',
                        'generate_dummy_explorations', 'clear_search_index',
                        'generate_dummy_new_structures_data',
                        'generate_dummy_new_skill_data',
                        'save_config_properties', 'revert_config_property',
                        'upload_topic_similarities',
                        'regenerate_topic_related_opportunities',
                        'update_feature_flag_rules'
                    ]
                },
                # TODO(#13331): Remove default_value when it is confirmed that,
                # for clearing the search indices of exploration & collection
                # 'action' field must be provided in the payload.
                'default_value': None
            },
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'collection_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'num_dummy_exps_to_generate': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'num_dummy_exps_to_publish': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'new_config_property_values': {
                'schema': {
                    'type': 'object_dict',
                    'validation_method': (
                        validation_method.validate_new_config_property_values)
                },
                'default_value': None
            },
            'config_property_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'data': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'feature_name': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'commit_message': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'new_rules': {
                'schema': {
                    'type': 'list',
                    'items': {
                        'type': 'object_dict',
                        'object_class': parameter_domain.PlatformParameterRule
                    }
                },
                'default_value': None
            }
        }
    }
    @acl_decorators.can_access_admin_page
    def get(self):
        """Handles GET requests."""
        demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys())
        topic_summaries = topic_fetchers.get_all_topic_summaries()
        topic_summary_dicts = [
            summary.to_dict() for summary in topic_summaries]
        feature_flag_dicts = feature_services.get_all_feature_flag_dicts()
        config_properties = config_domain.Registry.get_config_property_schemas()
        # Removes promo-bar related configs as promo-bar is handled by
        # release coordinators in /release-coordinator page.
        del config_properties['promo_bar_enabled']
        del config_properties['promo_bar_message']
        # Remove blog related configs as they will be handled by 'blog admins'
        # on blog admin page.
        del config_properties['max_number_of_tags_assigned_to_blog_post']
        del config_properties['list_of_default_tags_for_blog_post']
        self.render_json({
            'config_properties': config_properties,
            'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()),
            'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()),
            'demo_exploration_ids': demo_exploration_ids,
            'updatable_roles': role_services.UPDATABLE_ROLES,
            'viewable_roles': role_services.VIEWABLE_ROLES,
            'human_readable_roles': role_services.HUMAN_READABLE_ROLES,
            'role_to_actions': role_services.get_role_actions(),
            'topic_summaries': topic_summary_dicts,
            'feature_flags': feature_flag_dicts,
        })
    @acl_decorators.can_access_admin_page
    def post(self):
        """Handles POST requests.

        Dispatches on the 'action' payload field. On failure, the error is
        rendered back to the caller as JSON and then re-raised so it is
        also logged/handled upstream.
        """
        action = self.normalized_payload.get('action')
        try:
            result = {}
            if action == 'reload_exploration':
                exploration_id = self.normalized_payload.get('exploration_id')
                self._reload_exploration(exploration_id)
            elif action == 'reload_collection':
                collection_id = self.normalized_payload.get('collection_id')
                self._reload_collection(collection_id)
            elif action == 'generate_dummy_explorations':
                num_dummy_exps_to_generate = self.normalized_payload.get(
                    'num_dummy_exps_to_generate')
                num_dummy_exps_to_publish = self.normalized_payload.get(
                    'num_dummy_exps_to_publish')
                if num_dummy_exps_to_generate < num_dummy_exps_to_publish:
                    raise self.InvalidInputException(
                        'Generate count cannot be less than publish count')
                else:
                    self._generate_dummy_explorations(
                        num_dummy_exps_to_generate, num_dummy_exps_to_publish)
            elif action == 'clear_search_index':
                search_services.clear_collection_search_index()
                search_services.clear_exploration_search_index()
            elif action == 'generate_dummy_new_structures_data':
                self._load_dummy_new_structures_data()
            elif action == 'generate_dummy_new_skill_data':
                self._generate_dummy_skill_and_questions()
            elif action == 'save_config_properties':
                new_config_property_values = self.normalized_payload.get(
                    'new_config_property_values')
                logging.info(
                    '[ADMIN] %s saved config property values: %s' %
                    (self.user_id, new_config_property_values))
                for (name, value) in new_config_property_values.items():
                    config_services.set_property(self.user_id, name, value)
            elif action == 'revert_config_property':
                config_property_id = self.normalized_payload.get(
                    'config_property_id')
                logging.info(
                    '[ADMIN] %s reverted config property: %s' %
                    (self.user_id, config_property_id))
                config_services.revert_property(
                    self.user_id, config_property_id)
            elif action == 'upload_topic_similarities':
                data = self.normalized_payload.get('data')
                recommendations_services.update_topic_similarities(data)
            elif action == 'regenerate_topic_related_opportunities':
                topic_id = self.normalized_payload.get('topic_id')
                opportunities_count = (
                    opportunity_services
                    .regenerate_opportunities_related_to_topic(
                        topic_id, delete_existing_opportunities=True))
                result = {
                    'opportunities_count': opportunities_count
                }
            elif action == 'update_feature_flag_rules':
                feature_name = self.normalized_payload.get('feature_name')
                new_rule_dicts = self.normalized_payload.get('new_rules')
                commit_message = self.normalized_payload.get('commit_message')
                try:
                    feature_services.update_feature_flag_rules(
                        feature_name, self.user_id, commit_message,
                        new_rule_dicts)
                except (
                        utils.ValidationError,
                        feature_services.FeatureFlagNotFoundException) as e:
                    raise self.InvalidInputException(e)
                logging.info(
                    '[ADMIN] %s updated feature %s with new rules: '
                    '%s.' % (self.user_id, feature_name, new_rule_dicts))
            self.render_json(result)
        except Exception as e:
            # Surface the error to the caller as JSON, then re-raise so the
            # framework's error handling/logging also sees it.
            logging.exception('[ADMIN] %s', e)
            self.render_json({'error': str(e)})
            raise e
    def _reload_exploration(self, exploration_id):
        """Reloads the exploration in dev_mode corresponding to the given
        exploration id.
        Args:
            exploration_id: str. The exploration id.
        Raises:
            Exception. Cannot reload an exploration in production.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded exploration %s' %
                (self.user_id, exploration_id))
            exp_services.load_demo(exploration_id)
            rights_manager.release_ownership_of_exploration(
                user_services.get_system_user(), exploration_id)
        else:
            raise Exception('Cannot reload an exploration in production.')
    def _create_dummy_question(
            self, question_id, question_content, linked_skill_ids):
        """Creates a dummy question object with the given question ID.
        Args:
            question_id: str. The ID of the question to be created.
            question_content: str. The question content.
            linked_skill_ids: list(str). The IDs of the skills to which the
                question is linked to.
        Returns:
            Question. The dummy question with given values.
        """
        state = state_domain.State.create_default_state(
            'ABC', is_initial_state=True)
        state.update_interaction_id('TextInput')
        state.update_interaction_customization_args({
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder_0',
                    'unicode_str': ''
                }
            },
            'rows': {'value': 1}
        })
        state.update_next_content_id_index(1)
        state.update_linked_skill_id(None)
        state.update_content(state_domain.SubtitledHtml('1', question_content))
        recorded_voiceovers = state_domain.RecordedVoiceovers({})
        written_translations = state_domain.WrittenTranslations({})
        # Register every content id used by this state with the voiceover
        # and translation containers so the state validates.
        recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0')
        recorded_voiceovers.add_content_id_for_voiceover('1')
        recorded_voiceovers.add_content_id_for_voiceover('default_outcome')
        written_translations.add_content_id_for_translation('ca_placeholder_0')
        written_translations.add_content_id_for_translation('1')
        written_translations.add_content_id_for_translation('default_outcome')
        state.update_recorded_voiceovers(recorded_voiceovers)
        state.update_written_translations(written_translations)
        solution = state_domain.Solution(
            'TextInput', False, 'Solution', state_domain.SubtitledHtml(
                'solution', '<p>This is a solution.</p>'))
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')
            )
        ]
        state.update_interaction_solution(solution)
        state.update_interaction_hints(hints_list)
        state.update_interaction_default_outcome(
            state_domain.Outcome(
                None, state_domain.SubtitledHtml(
                    'feedback_id', '<p>Dummy Feedback</p>'),
                True, [], None, None
            )
        )
        question = question_domain.Question(
            question_id, state,
            feconf.CURRENT_STATE_SCHEMA_VERSION,
            constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, [])
        return question
    def _create_dummy_skill(self, skill_id, skill_description, explanation):
        """Creates a dummy skill object with the given values.
        Args:
            skill_id: str. The ID of the skill to be created.
            skill_description: str. The description of the skill.
            explanation: str. The review material for the skill.
        Returns:
            Skill. The dummy skill with given values.
        """
        # One rubric per difficulty level, as required by the skill domain.
        rubrics = [
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])]
        skill = skill_domain.Skill.create_default_skill(
            skill_id, skill_description, rubrics)
        skill.update_explanation(state_domain.SubtitledHtml('1', explanation))
        return skill
    def _load_dummy_new_structures_data(self):
        """Loads the database with two topics (one of which is empty), a story
        and three skills in the topic (two of them in a subtopic) and a question
        attached to each skill.
        Raises:
            Exception. Cannot load new structures data in production mode.
            Exception. User does not have enough rights to generate data.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            topic_id_1 = topic_fetchers.get_new_topic_id()
            topic_id_2 = topic_fetchers.get_new_topic_id()
            story_id = story_services.get_new_story_id()
            skill_id_1 = skill_services.get_new_skill_id()
            skill_id_2 = skill_services.get_new_skill_id()
            skill_id_3 = skill_services.get_new_skill_id()
            question_id_1 = question_services.get_new_question_id()
            question_id_2 = question_services.get_new_question_id()
            question_id_3 = question_services.get_new_question_id()
            skill_1 = self._create_dummy_skill(
                skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>')
            skill_2 = self._create_dummy_skill(
                skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>')
            skill_3 = self._create_dummy_skill(
                skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>')
            question_1 = self._create_dummy_question(
                question_id_1, 'Question 1', [skill_id_1])
            question_2 = self._create_dummy_question(
                question_id_2, 'Question 2', [skill_id_2])
            question_3 = self._create_dummy_question(
                question_id_3, 'Question 3', [skill_id_3])
            question_services.add_question(self.user_id, question_1)
            question_services.add_question(self.user_id, question_2)
            question_services.add_question(self.user_id, question_3)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_1, skill_id_1, 0.3)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_2, skill_id_2, 0.5)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_3, skill_id_3, 0.7)
            topic_1 = topic_domain.Topic.create_default_topic(
                topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description')
            topic_2 = topic_domain.Topic.create_default_topic(
                topic_id_2, 'Empty Topic', 'empty-topic', 'description')
            topic_1.add_canonical_story(story_id)
            topic_1.add_uncategorized_skill_id(skill_id_1)
            topic_1.add_uncategorized_skill_id(skill_id_2)
            topic_1.add_uncategorized_skill_id(skill_id_3)
            topic_1.add_subtopic(1, 'Dummy Subtopic Title')
            # Skills 2 and 3 live in the subtopic; skill 1 stays
            # uncategorized.
            topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2)
            topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3)
            subtopic_page = (
                subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
                    1, topic_id_1))
            # These explorations were chosen since they pass the validations
            # for published stories.
            self._reload_exploration('15')
            self._reload_exploration('25')
            self._reload_exploration('13')
            exp_services.update_exploration(
                self.user_id, '15', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            exp_services.update_exploration(
                self.user_id, '25', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            exp_services.update_exploration(
                self.user_id, '13', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            story = story_domain.Story.create_default_story(
                story_id, 'Help Jaime win the Arcade', 'Description',
                topic_id_1, 'help-jamie-win-arcade')
            story_node_dicts = [{
                'exp_id': '15',
                'title': 'What are the place values?',
                'description': 'Jaime learns the place value of each digit ' +
                               'in a big number.'
            }, {
                'exp_id': '25',
                'title': 'Finding the value of a number',
                'description': 'Jaime understands the value of his ' +
                               'arcade score.'
            }, {
                'exp_id': '13',
                'title': 'Comparing Numbers',
                'description': 'Jaime learns if a number is smaller or ' +
                               'greater than another number.'
            }]
            def generate_dummy_story_nodes(node_id, exp_id, title, description):
                """Generates and connects sequential story nodes.
                Args:
                    node_id: int. The node id.
                    exp_id: str. The exploration id.
                    title: str. The title of the story node.
                    description: str. The description of the story node.
                """
                story.add_node(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                    title)
                story.update_node_description(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                    description)
                story.update_node_exploration_id(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id)
                # Chain each node to the next one, except for the last node.
                if node_id != len(story_node_dicts):
                    story.update_node_destination_node_ids(
                        '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                        ['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)])
                exp_services.update_exploration(
                    self.user_id, exp_id, [exp_domain.ExplorationChange({
                        'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                        'property_name': 'category',
                        'new_value': 'Astronomy'
                    })], 'Change category')
            for i, story_node_dict in enumerate(story_node_dicts):
                generate_dummy_story_nodes(i + 1, **story_node_dict)
            skill_services.save_new_skill(self.user_id, skill_1)
            skill_services.save_new_skill(self.user_id, skill_2)
            skill_services.save_new_skill(self.user_id, skill_3)
            story_services.save_new_story(self.user_id, story)
            topic_services.save_new_topic(self.user_id, topic_1)
            topic_services.save_new_topic(self.user_id, topic_2)
            subtopic_page_services.save_subtopic_page(
                self.user_id, subtopic_page, 'Added subtopic',
                [topic_domain.TopicChange({
                    'cmd': topic_domain.CMD_ADD_SUBTOPIC,
                    'subtopic_id': 1,
                    'title': 'Dummy Subtopic Title'
                })]
            )
            # Generates translation opportunities for the Contributor Dashboard.
            exp_ids_in_story = story.story_contents.get_all_linked_exp_ids()
            opportunity_services.add_new_exploration_opportunities(
                story_id, exp_ids_in_story)
            topic_services.publish_story(topic_id_1, story_id, self.user_id)
        else:
            raise Exception('Cannot load new structures data in production.')
    def _generate_dummy_skill_and_questions(self):
        """Generate and loads the database with a skill and 15 questions
        linked to the skill.
        Raises:
            Exception. Cannot load new structures data in production mode.
            Exception. User does not have enough rights to generate data.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            skill_id = skill_services.get_new_skill_id()
            # Random suffix keeps repeated invocations from colliding on
            # the skill name.
            skill_name = 'Dummy Skill %s' % str(random.getrandbits(32))
            skill = self._create_dummy_skill(
                skill_id, skill_name, '<p>Dummy Explanation 1</p>')
            skill_services.save_new_skill(self.user_id, skill)
            for i in range(15):
                question_id = question_services.get_new_question_id()
                question_name = 'Question number %s %s' % (str(i), skill_name)
                question = self._create_dummy_question(
                    question_id, question_name, [skill_id])
                question_services.add_question(self.user_id, question)
                question_difficulty = list(
                    constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values())
                random_difficulty = random.choice(question_difficulty)
                question_services.create_new_question_skill_link(
                    self.user_id, question_id, skill_id, random_difficulty)
        else:
            raise Exception('Cannot generate dummy skills in production.')
    def _reload_collection(self, collection_id):
        """Reloads the collection in dev_mode corresponding to the given
        collection id.
        Args:
            collection_id: str. The collection id.
        Raises:
            Exception. Cannot reload a collection in production.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded collection %s' %
                (self.user_id, collection_id))
            collection_services.load_demo(collection_id)
            rights_manager.release_ownership_of_collection(
                user_services.get_system_user(), collection_id)
        else:
            raise Exception('Cannot reload a collection in production.')
    def _generate_dummy_explorations(
            self, num_dummy_exps_to_generate, num_dummy_exps_to_publish):
        """Generates and publishes the given number of dummy explorations.
        Args:
            num_dummy_exps_to_generate: int. Count of dummy explorations to
                be generated.
            num_dummy_exps_to_publish: int. Count of explorations to
                be published.
        Raises:
            Exception. Environment is not DEVMODE.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s generated %s number of dummy explorations' %
                (self.user_id, num_dummy_exps_to_generate))
            possible_titles = ['Hulk Neuroscience', 'Quantum Starks',
                               'Wonder Anatomy',
                               'Elvish, language of "Lord of the Rings',
                               'The Science of Superheroes']
            exploration_ids_to_publish = []
            for i in range(num_dummy_exps_to_generate):
                title = random.choice(possible_titles)
                category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES)
                new_exploration_id = exp_fetchers.get_new_exploration_id()
                exploration = exp_domain.Exploration.create_default_exploration(
                    new_exploration_id, title=title, category=category,
                    objective='Dummy Objective')
                exp_services.save_new_exploration(self.user_id, exploration)
                # The first num_dummy_exps_to_publish generated explorations
                # are the ones that get published.
                if i <= num_dummy_exps_to_publish - 1:
                    exploration_ids_to_publish.append(new_exploration_id)
                    rights_manager.publish_exploration(
                        self.user, new_exploration_id)
            exp_services.index_explorations_given_ids(
                exploration_ids_to_publish)
        else:
            raise Exception('Cannot generate dummy explorations in production.')
class AdminRoleHandler(base.BaseHandler):
    """Handler for roles tab of admin page. Used to view and update roles."""
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'filter_criterion': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        feconf.USER_FILTER_CRITERION_ROLE,
                        feconf.USER_FILTER_CRITERION_USERNAME
                    ]
                }
            },
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': role_services.VIEWABLE_ROLES
                },
                'default_value': None
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            }
        },
        'PUT': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }
    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns users filtered by role, or the roles of a given user."""
        filter_criterion = self.normalized_request.get(
            'filter_criterion')
        if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE:
            role = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_ROLE)
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE,
                role=role)
            self.render_json({
                'usernames': user_services.get_usernames_by_role(role)
            })
        elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME:
            username = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_USERNAME)
            user_id = user_services.get_user_id_from_username(username)
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME,
                username=username)
            if user_id is None:
                raise self.InvalidInputException(
                    'User with given username does not exist.')
            user_settings = user_services.get_user_settings(user_id)
            user_roles = user_settings.roles
            managed_topic_ids = []
            if feconf.ROLE_ID_TOPIC_MANAGER in user_roles:
                managed_topic_ids = [
                    rights.id for rights in
                    topic_fetchers.get_topic_rights_with_user(user_id)]
            user_roles_dict = {
                'roles': user_roles,
                'managed_topic_ids': managed_topic_ids,
                'banned': user_settings.banned
            }
            self.render_json(user_roles_dict)
    @acl_decorators.can_access_admin_page
    def put(self):
        """Adds the given role to the given user."""
        # Use the schema-validated payload for consistency with the other
        # handlers in this file (the raw self.payload bypassed the declared
        # HANDLER_ARGS_SCHEMAS).
        username = self.normalized_payload.get('username')
        role = self.normalized_payload.get('role')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # The Topic manager role assignment is handled via
            # TopicManagerRoleHandler.
            raise self.InvalidInputException(
                'Unsupported role for this handler.')
        user_services.add_user_role(user_settings.user_id, role)
        self.render_json({})
    @acl_decorators.can_access_admin_page
    def delete(self):
        """Removes the given role from the given user."""
        # Use the schema-validated request args rather than the raw request
        # so the DELETE schema above is actually enforced.
        username = self.normalized_request.get('username')
        role = self.normalized_request.get('role')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # Losing the topic-manager role also revokes all per-topic
            # manager assignments.
            topic_services.deassign_user_from_all_topics(self.user, user_id)
        user_services.remove_user_role(user_id, role)
        self.render_json({})
class TopicManagerRoleHandler(base.BaseHandler):
    """Handler to assign or deassign a manager to/from a topic."""
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': ['assign', 'deassign']
                }
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }
    @acl_decorators.can_access_admin_page
    def put(self):
        """Assigns or deassigns the topic-manager role for one topic.

        Raises:
            InvalidInputException. The given username does not exist.
        """
        username = self.normalized_payload.get('username')
        action = self.normalized_payload.get('action')
        topic_id = self.normalized_payload.get('topic_id')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        user_id = user_settings.user_id
        if action == 'assign':
            # Grant the global topic-manager role first if the user does
            # not already have it. (Idiomatic 'not in' replaces the
            # original 'not X in Y'.)
            if feconf.ROLE_ID_TOPIC_MANAGER not in user_settings.roles:
                user_services.add_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
            topic_manager = user_services.get_user_actions_info(user_id)
            topic_services.assign_role(
                user_services.get_system_user(),
                topic_manager, topic_domain.ROLE_MANAGER, topic_id)
        elif action == 'deassign':
            topic_services.deassign_manager_role_from_topic(
                user_services.get_system_user(), user_id, topic_id)
            # Drop the global role once the user manages no topics at all.
            if not topic_fetchers.get_topic_rights_with_user(user_id):
                user_services.remove_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
        self.render_json({})
class BannedUsersHandler(base.BaseHandler):
    """Handler to ban and unban users."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Marks a user as banned, removing any topic assignments first.

        Raises:
            InvalidInputException. No user exists with the given username.
        """
        username = self.normalized_payload.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        # A banned user must not retain topic-management rights.
        topic_services.deassign_user_from_all_topics(self.user, user_id)
        user_services.mark_user_banned(user_id)
        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Removes the banned mark from a user.

        Raises:
            InvalidInputException. No user exists with the given username.
        """
        username = self.normalized_request.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        user_services.unmark_user_banned(user_id)
        self.render_json({})
class AdminSuperAdminPrivilegesHandler(base.BaseHandler):
    """Handler for granting a user super admin privileges."""

    PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Grants super admin privileges to a user.

        Only the default system admin account may call this.

        Raises:
            UnauthorizedUserException. The caller is not the default
                system admin.
            InvalidInputException. No user exists with the given username.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        username = self.normalized_payload.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException('No such user exists')
        auth_services.grant_super_admin_privileges(user_id)
        self.render_json(self.values)

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Revokes super admin privileges from a user.

        Only the default system admin account may call this, and the
        default super admin account itself can never be revoked.

        Raises:
            UnauthorizedUserException. The caller is not the default
                system admin.
            InvalidInputException. No user exists with the given username,
                or the target is the default super admin account.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        username = self.normalized_request.get('username')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException('No such user exists')
        if user_settings.email == feconf.ADMIN_EMAIL_ADDRESS:
            raise self.InvalidInputException(
                'Cannot revoke privileges from the default super admin account')
        auth_services.revoke_super_admin_privileges(user_settings.user_id)
        self.render_json(self.values)
class AdminTopicsCsvFileDownloader(base.BaseHandler):
    """Retrieves topic similarity data for download."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Serves the topic-similarity matrix as a downloadable CSV."""
        topic_similarities = (
            recommendations_services.get_topic_similarities_as_csv()
        )
        # Downloadable file accepts only bytes, so we need to encode
        # topic_similarities to bytes.
        self.render_downloadable_file(
            io.BytesIO(topic_similarities.encode('utf-8')),
            'topic_similarities.csv',
            'text/csv'
        )
class DataExtractionQueryHandler(base.BaseHandler):
    """Handler for data extraction query."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'exp_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'exp_version': {
                'schema': {
                    'type': 'int'
                }
            },
            'state_name': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'num_answers': {
                'schema': {
                    'type': 'int'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns submitted answers for one state of an exploration.

        Raises:
            InvalidInputException. The exploration/version pair does not
                exist, or the exploration has no state with the given name.
        """
        exp_id = self.normalized_request.get('exp_id')
        exp_version = self.normalized_request.get('exp_version')
        exploration = exp_fetchers.get_exploration_by_id(
            exp_id, strict=False, version=exp_version)
        if exploration is None:
            raise self.InvalidInputException(
                'Entity for exploration with id %s and version %s not found.'
                % (exp_id, exp_version))
        state_name = self.normalized_request.get('state_name')
        num_answers = self.normalized_request.get('num_answers')
        if state_name not in exploration.states:
            raise self.InvalidInputException(
                'Exploration \'%s\' does not have \'%s\' state.'
                % (exp_id, state_name))
        state_answers = stats_services.get_state_answers(
            exp_id, exp_version, state_name)
        extracted_answers = state_answers.get_submitted_answer_dict_list()
        # A non-positive num_answers means "return all answers".
        if num_answers > 0:
            extracted_answers = extracted_answers[:num_answers]
        response = {
            'data': extracted_answers
        }
        self.render_json(response)
class SendDummyMailToAdminHandler(base.BaseHandler):
    """Sends a test email to the requesting admin, provided email sending
    is enabled for this deployment.
    """

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'POST': {}}

    @acl_decorators.can_access_admin_page
    def post(self):
        """Dispatches a dummy email to the current admin user.

        Raises:
            InvalidInputException. Email sending is disabled on this app.
        """
        username = self.username
        if not feconf.CAN_SEND_EMAILS:
            raise self.InvalidInputException('This app cannot send emails.')
        email_manager.send_dummy_mail_to_admin(username)
        self.render_json({})
class UpdateUsernameHandler(base.BaseHandler):
    """Handler for renaming usernames."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'old_username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'new_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Renames a user and records the change in the audit log.

        Raises:
            InvalidInputException. The old username does not exist, or the
                new username is already taken.
        """
        old_username = self.normalized_payload.get('old_username')
        new_username = self.normalized_payload.get('new_username')
        user_id = user_services.get_user_id_from_username(old_username)
        if user_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % old_username)
        if user_services.is_username_taken(new_username):
            raise self.InvalidInputException('Username already taken.')
        user_services.set_username(user_id, new_username)
        # Keep an audit trail of admin-initiated renames.
        user_services.log_username_change(
            self.user_id, old_username, new_username)
        self.render_json({})
class NumberOfDeletionRequestsHandler(base.BaseHandler):
    """Reports, for the admin page, how many account-deletion requests are
    still pending.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns the count of pending deletion requests as JSON."""
        pending_count = (
            wipeout_service.get_number_of_pending_deletion_requests())
        self.render_json(
            {'number_of_pending_deletion_models': pending_count})
class VerifyUserModelsDeletedHandler(base.BaseHandler):
    """Handler for getting whether any models exist for specific user ID."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'user_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Reports whether any storage models still reference the user."""
        user_id = self.normalized_request.get('user_id')
        user_is_deleted = wipeout_service.verify_user_deleted(
            user_id, include_delete_at_end_models=True)
        # The response is phrased in terms of remaining models, hence the
        # negation of the 'fully deleted' check.
        self.render_json({'related_models_exist': not user_is_deleted})
class DeleteUserHandler(base.BaseHandler):
    """Handler for deleting a user with specific ID."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'DELETE': {
            'user_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_delete_any_user
    def delete(self):
        """Initiates wipeout for the given user.

        Both the user id and the username must be supplied and must refer
        to the same account; this guards against deleting the wrong user.

        Raises:
            InvalidInputException. The username does not exist, or does
                not resolve to the supplied user id.
        """
        user_id = self.normalized_request.get('user_id')
        username = self.normalized_request.get('username')
        user_id_from_username = (
            user_services.get_user_id_from_username(username))
        if user_id_from_username is None:
            raise self.InvalidInputException(
                'The username doesn\'t belong to any user'
            )
        if user_id_from_username != user_id:
            raise self.InvalidInputException(
                'The user ID retrieved from the username and '
                'the user ID provided by admin differ.'
            )
        wipeout_service.pre_delete_user(user_id)
        self.render_json({'success': True})
class UpdateBlogPostHandler(base.BaseHandler):
    """Handler for changing author ids and published on date in
    blog posts."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'blog_post_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'author_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            },
            'published_on': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Reassigns a blog post's author and its publication date.

        Raises:
            InvalidInputException. The author username does not exist, or
                the named user lacks blog-dashboard access rights.
            PageNotFoundException. No blog post exists with the given id.
        """
        blog_post_id = self.normalized_payload.get('blog_post_id')
        author_username = self.normalized_payload.get('author_username')
        published_on = self.normalized_payload.get('published_on')
        author_id = user_services.get_user_id_from_username(author_username)
        if author_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % author_username)
        # The new author must already be allowed to use the blog dashboard.
        user_actions = user_services.get_user_actions_info(author_id).actions
        if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in user_actions:
            raise self.InvalidInputException(
                'User does not have enough rights to be blog post author.')
        blog_post = (
            blog_services.get_blog_post_by_id(blog_post_id, strict=False))
        if blog_post is None:
            raise self.PageNotFoundException(
                Exception(
                    'The blog post with the given id or url doesn\'t exist.'))
        blog_services.update_blog_models_author_and_published_on_date(
            blog_post_id, author_id, published_on)
        self.render_json({})
| 38.770665 | 80 | 0.587212 |
from __future__ import annotations
import io
import logging
import random
from core import feconf
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.controllers import domain_objects_validator as validation_method
from core.domain import auth_services
from core.domain import blog_services
from core.domain import collection_services
from core.domain import config_domain
from core.domain import config_services
from core.domain import email_manager
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import opportunity_services
from core.domain import platform_feature_services as feature_services
from core.domain import platform_parameter_domain as parameter_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import role_services
from core.domain import search_services
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_service
class AdminPage(base.BaseHandler):
    """Renders the HTML shell of the admin page."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Serves the admin page template."""
        self.render_template('admin-page.mainpage.html')
class AdminHandler(base.BaseHandler):
    """Handler for the admin page's JSON actions."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {},
        'POST': {
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        'reload_exploration', 'reload_collection',
                        'generate_dummy_explorations', 'clear_search_index',
                        'generate_dummy_new_structures_data',
                        'generate_dummy_new_skill_data',
                        'save_config_properties', 'revert_config_property',
                        'upload_topic_similarities',
                        'regenerate_topic_related_opportunities',
                        'update_feature_flag_rules'
                    ]
                },
                # Restored from the garbled text "ult_value': None": every
                # optional arg in this schema declares a None default, and
                # 'action' is absent on GET requests.
                'default_value': None
            },
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'collection_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'num_dummy_exps_to_generate': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'num_dummy_exps_to_publish': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'new_config_property_values': {
                'schema': {
                    'type': 'object_dict',
                    'validation_method': (
                        validation_method.validate_new_config_property_values)
                },
                'default_value': None
            },
            'config_property_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'data': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'feature_name': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'commit_message': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'new_rules': {
                'schema': {
                    'type': 'list',
                    'items': {
                        'type': 'object_dict',
                        'object_class': parameter_domain.PlatformParameterRule
                    }
                },
                'default_value': None
            }
        }
    }
    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns the data needed to populate the admin page UI."""
        demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys())
        topic_summaries = topic_fetchers.get_all_topic_summaries()
        topic_summary_dicts = [
            summary.to_dict() for summary in topic_summaries]
        feature_flag_dicts = feature_services.get_all_feature_flag_dicts()
        config_properties = config_domain.Registry.get_config_property_schemas()
        # These config properties are excluded from the admin page;
        # presumably they are managed from other pages — TODO confirm.
        del config_properties['promo_bar_enabled']
        del config_properties['promo_bar_message']
        del config_properties['max_number_of_tags_assigned_to_blog_post']
        del config_properties['list_of_default_tags_for_blog_post']
        self.render_json({
            'config_properties': config_properties,
            'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()),
            'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()),
            'demo_exploration_ids': demo_exploration_ids,
            'updatable_roles': role_services.UPDATABLE_ROLES,
            'viewable_roles': role_services.VIEWABLE_ROLES,
            'human_readable_roles': role_services.HUMAN_READABLE_ROLES,
            'role_to_actions': role_services.get_role_actions(),
            'topic_summaries': topic_summary_dicts,
            'feature_flags': feature_flag_dicts,
        })
@acl_decorators.can_access_admin_page
def post(self):
action = self.normalized_payload.get('action')
try:
result = {}
if action == 'reload_exploration':
exploration_id = self.normalized_payload.get('exploration_id')
self._reload_exploration(exploration_id)
elif action == 'reload_collection':
collection_id = self.normalized_payload.get('collection_id')
self._reload_collection(collection_id)
elif action == 'generate_dummy_explorations':
num_dummy_exps_to_generate = self.normalized_payload.get(
'num_dummy_exps_to_generate')
num_dummy_exps_to_publish = self.normalized_payload.get(
'num_dummy_exps_to_publish')
if num_dummy_exps_to_generate < num_dummy_exps_to_publish:
raise self.InvalidInputException(
'Generate count cannot be less than publish count')
else:
self._generate_dummy_explorations(
num_dummy_exps_to_generate, num_dummy_exps_to_publish)
elif action == 'clear_search_index':
search_services.clear_collection_search_index()
search_services.clear_exploration_search_index()
elif action == 'generate_dummy_new_structures_data':
self._load_dummy_new_structures_data()
elif action == 'generate_dummy_new_skill_data':
self._generate_dummy_skill_and_questions()
elif action == 'save_config_properties':
new_config_property_values = self.normalized_payload.get(
'new_config_property_values')
logging.info(
'[ADMIN] %s saved config property values: %s' %
(self.user_id, new_config_property_values))
for (name, value) in new_config_property_values.items():
config_services.set_property(self.user_id, name, value)
elif action == 'revert_config_property':
config_property_id = self.normalized_payload.get(
'config_property_id')
logging.info(
'[ADMIN] %s reverted config property: %s' %
(self.user_id, config_property_id))
config_services.revert_property(
self.user_id, config_property_id)
elif action == 'upload_topic_similarities':
data = self.normalized_payload.get('data')
recommendations_services.update_topic_similarities(data)
elif action == 'regenerate_topic_related_opportunities':
topic_id = self.normalized_payload.get('topic_id')
opportunities_count = (
opportunity_services
.regenerate_opportunities_related_to_topic(
topic_id, delete_existing_opportunities=True))
result = {
'opportunities_count': opportunities_count
}
elif action == 'update_feature_flag_rules':
feature_name = self.normalized_payload.get('feature_name')
new_rule_dicts = self.normalized_payload.get('new_rules')
commit_message = self.normalized_payload.get('commit_message')
try:
feature_services.update_feature_flag_rules(
feature_name, self.user_id, commit_message,
new_rule_dicts)
except (
utils.ValidationError,
feature_services.FeatureFlagNotFoundException) as e:
raise self.InvalidInputException(e)
logging.info(
'[ADMIN] %s updated feature %s with new rules: '
'%s.' % (self.user_id, feature_name, new_rule_dicts))
self.render_json(result)
except Exception as e:
logging.exception('[ADMIN] %s', e)
self.render_json({'error': str(e)})
raise e
    def _reload_exploration(self, exploration_id):
        """Reloads a demo exploration from its source files.

        Only permitted in DEV_MODE, so demo data can never overwrite
        production content.

        Args:
            exploration_id: str. The id of the demo exploration to reload.

        Raises:
            Exception. The server is running in production mode.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded exploration %s' %
                (self.user_id, exploration_id))
            exp_services.load_demo(exploration_id)
            # Demo explorations are community-owned: release ownership
            # after reloading.
            rights_manager.release_ownership_of_exploration(
                user_services.get_system_user(), exploration_id)
        else:
            raise Exception('Cannot reload an exploration in production.')
def _create_dummy_question(
self, question_id, question_content, linked_skill_ids):
state = state_domain.State.create_default_state(
'ABC', is_initial_state=True)
state.update_interaction_id('TextInput')
state.update_interaction_customization_args({
'placeholder': {
'value': {
'content_id': 'ca_placeholder_0',
'unicode_str': ''
}
},
'rows': {'value': 1}
})
state.update_next_content_id_index(1)
state.update_linked_skill_id(None)
state.update_content(state_domain.SubtitledHtml('1', question_content))
recorded_voiceovers = state_domain.RecordedVoiceovers({})
written_translations = state_domain.WrittenTranslations({})
recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0')
recorded_voiceovers.add_content_id_for_voiceover('1')
recorded_voiceovers.add_content_id_for_voiceover('default_outcome')
written_translations.add_content_id_for_translation('ca_placeholder_0')
written_translations.add_content_id_for_translation('1')
written_translations.add_content_id_for_translation('default_outcome')
state.update_recorded_voiceovers(recorded_voiceovers)
state.update_written_translations(written_translations)
solution = state_domain.Solution(
'TextInput', False, 'Solution', state_domain.SubtitledHtml(
'solution', '<p>This is a solution.</p>'))
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')
)
]
state.update_interaction_solution(solution)
state.update_interaction_hints(hints_list)
state.update_interaction_default_outcome(
state_domain.Outcome(
None, state_domain.SubtitledHtml(
'feedback_id', '<p>Dummy Feedback</p>'),
True, [], None, None
)
)
question = question_domain.Question(
question_id, state,
feconf.CURRENT_STATE_SCHEMA_VERSION,
constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, [])
return question
    def _create_dummy_skill(self, skill_id, skill_description, explanation):
        """Builds an in-memory dummy skill with one rubric per difficulty.

        Args:
            skill_id: str. The id for the new skill.
            skill_description: str. The skill's description.
            explanation: str. HTML explanation for the skill.

        Returns:
            Skill. The newly constructed (unsaved) skill domain object.
        """
        rubrics = [
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])]
        skill = skill_domain.Skill.create_default_skill(
            skill_id, skill_description, rubrics)
        skill.update_explanation(state_domain.SubtitledHtml('1', explanation))
        return skill
def _load_dummy_new_structures_data(self):
if constants.DEV_MODE:
if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
raise Exception(
'User does not have enough rights to generate data.')
topic_id_1 = topic_fetchers.get_new_topic_id()
topic_id_2 = topic_fetchers.get_new_topic_id()
story_id = story_services.get_new_story_id()
skill_id_1 = skill_services.get_new_skill_id()
skill_id_2 = skill_services.get_new_skill_id()
skill_id_3 = skill_services.get_new_skill_id()
question_id_1 = question_services.get_new_question_id()
question_id_2 = question_services.get_new_question_id()
question_id_3 = question_services.get_new_question_id()
skill_1 = self._create_dummy_skill(
skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>')
skill_2 = self._create_dummy_skill(
skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>')
skill_3 = self._create_dummy_skill(
skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>')
question_1 = self._create_dummy_question(
question_id_1, 'Question 1', [skill_id_1])
question_2 = self._create_dummy_question(
question_id_2, 'Question 2', [skill_id_2])
question_3 = self._create_dummy_question(
question_id_3, 'Question 3', [skill_id_3])
question_services.add_question(self.user_id, question_1)
question_services.add_question(self.user_id, question_2)
question_services.add_question(self.user_id, question_3)
question_services.create_new_question_skill_link(
self.user_id, question_id_1, skill_id_1, 0.3)
question_services.create_new_question_skill_link(
self.user_id, question_id_2, skill_id_2, 0.5)
question_services.create_new_question_skill_link(
self.user_id, question_id_3, skill_id_3, 0.7)
topic_1 = topic_domain.Topic.create_default_topic(
topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description')
topic_2 = topic_domain.Topic.create_default_topic(
topic_id_2, 'Empty Topic', 'empty-topic', 'description')
topic_1.add_canonical_story(story_id)
topic_1.add_uncategorized_skill_id(skill_id_1)
topic_1.add_uncategorized_skill_id(skill_id_2)
topic_1.add_uncategorized_skill_id(skill_id_3)
topic_1.add_subtopic(1, 'Dummy Subtopic Title')
topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2)
topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3)
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
1, topic_id_1))
self._reload_exploration('15')
self._reload_exploration('25')
self._reload_exploration('13')
exp_services.update_exploration(
self.user_id, '15', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'correctness_feedback_enabled',
'new_value': True
})], 'Changed correctness_feedback_enabled.')
exp_services.update_exploration(
self.user_id, '25', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'correctness_feedback_enabled',
'new_value': True
})], 'Changed correctness_feedback_enabled.')
exp_services.update_exploration(
self.user_id, '13', [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'correctness_feedback_enabled',
'new_value': True
})], 'Changed correctness_feedback_enabled.')
story = story_domain.Story.create_default_story(
story_id, 'Help Jaime win the Arcade', 'Description',
topic_id_1, 'help-jamie-win-arcade')
story_node_dicts = [{
'exp_id': '15',
'title': 'What are the place values?',
'description': 'Jaime learns the place value of each digit ' +
'in a big number.'
}, {
'exp_id': '25',
'title': 'Finding the value of a number',
'description': 'Jaime understands the value of his ' +
'arcade score.'
}, {
'exp_id': '13',
'title': 'Comparing Numbers',
'description': 'Jaime learns if a number is smaller or ' +
'greater than another number.'
}]
def generate_dummy_story_nodes(node_id, exp_id, title, description):
story.add_node(
'%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
title)
story.update_node_description(
'%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
description)
story.update_node_exploration_id(
'%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id)
if node_id != len(story_node_dicts):
story.update_node_destination_node_ids(
'%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)])
exp_services.update_exploration(
self.user_id, exp_id, [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'category',
'new_value': 'Astronomy'
})], 'Change category')
for i, story_node_dict in enumerate(story_node_dicts):
generate_dummy_story_nodes(i + 1, **story_node_dict)
skill_services.save_new_skill(self.user_id, skill_1)
skill_services.save_new_skill(self.user_id, skill_2)
skill_services.save_new_skill(self.user_id, skill_3)
story_services.save_new_story(self.user_id, story)
topic_services.save_new_topic(self.user_id, topic_1)
topic_services.save_new_topic(self.user_id, topic_2)
subtopic_page_services.save_subtopic_page(
self.user_id, subtopic_page, 'Added subtopic',
[topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'subtopic_id': 1,
'title': 'Dummy Subtopic Title'
})]
)
exp_ids_in_story = story.story_contents.get_all_linked_exp_ids()
opportunity_services.add_new_exploration_opportunities(
story_id, exp_ids_in_story)
topic_services.publish_story(topic_id_1, story_id, self.user_id)
else:
raise Exception('Cannot load new structures data in production.')
def _generate_dummy_skill_and_questions(self):
if constants.DEV_MODE:
if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
raise Exception(
'User does not have enough rights to generate data.')
skill_id = skill_services.get_new_skill_id()
skill_name = 'Dummy Skill %s' % str(random.getrandbits(32))
skill = self._create_dummy_skill(
skill_id, skill_name, '<p>Dummy Explanation 1</p>')
skill_services.save_new_skill(self.user_id, skill)
for i in range(15):
question_id = question_services.get_new_question_id()
question_name = 'Question number %s %s' % (str(i), skill_name)
question = self._create_dummy_question(
question_id, question_name, [skill_id])
question_services.add_question(self.user_id, question)
question_difficulty = list(
constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values())
random_difficulty = random.choice(question_difficulty)
question_services.create_new_question_skill_link(
self.user_id, question_id, skill_id, random_difficulty)
else:
raise Exception('Cannot generate dummy skills in production.')
    def _reload_collection(self, collection_id):
        """Reloads a demo collection from its source files.

        Only permitted in DEV_MODE, so demo data can never overwrite
        production content.

        Args:
            collection_id: str. The id of the demo collection to reload.

        Raises:
            Exception. The server is running in production mode.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded collection %s' %
                (self.user_id, collection_id))
            collection_services.load_demo(collection_id)
            # Demo collections are community-owned: release ownership
            # after reloading.
            rights_manager.release_ownership_of_collection(
                user_services.get_system_user(), collection_id)
        else:
            raise Exception('Cannot reload a collection in production.')
def _generate_dummy_explorations(
self, num_dummy_exps_to_generate, num_dummy_exps_to_publish):
if constants.DEV_MODE:
logging.info(
'[ADMIN] %s generated %s number of dummy explorations' %
(self.user_id, num_dummy_exps_to_generate))
possible_titles = ['Hulk Neuroscience', 'Quantum Starks',
'Wonder Anatomy',
'Elvish, language of "Lord of the Rings',
'The Science of Superheroes']
exploration_ids_to_publish = []
for i in range(num_dummy_exps_to_generate):
title = random.choice(possible_titles)
category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES)
new_exploration_id = exp_fetchers.get_new_exploration_id()
exploration = exp_domain.Exploration.create_default_exploration(
new_exploration_id, title=title, category=category,
objective='Dummy Objective')
exp_services.save_new_exploration(self.user_id, exploration)
if i <= num_dummy_exps_to_publish - 1:
exploration_ids_to_publish.append(new_exploration_id)
rights_manager.publish_exploration(
self.user, new_exploration_id)
exp_services.index_explorations_given_ids(
exploration_ids_to_publish)
else:
raise Exception('Cannot generate dummy explorations in production.')
class AdminRoleHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'GET': {
'filter_criterion': {
'schema': {
'type': 'basestring',
'choices': [
feconf.USER_FILTER_CRITERION_ROLE,
feconf.USER_FILTER_CRITERION_USERNAME
]
}
},
'role': {
'schema': {
'type': 'basestring',
'choices': role_services.VIEWABLE_ROLES
},
'default_value': None
},
'username': {
'schema': {
'type': 'basestring'
},
'default_value': None
}
},
'PUT': {
'role': {
'schema': {
'type': 'basestring',
'choices': feconf.ALLOWED_USER_ROLES
}
},
'username': {
'schema': {
'type': 'basestring'
}
}
},
'DELETE': {
'role': {
'schema': {
'type': 'basestring',
'choices': feconf.ALLOWED_USER_ROLES
}
},
'username': {
'schema': {
'type': 'basestring'
}
}
}
}
@acl_decorators.can_access_admin_page
def get(self):
filter_criterion = self.normalized_request.get(
'filter_criterion')
if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE:
role = self.normalized_request.get(
feconf.USER_FILTER_CRITERION_ROLE)
role_services.log_role_query(
self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE,
role=role)
self.render_json({
'usernames': user_services.get_usernames_by_role(role)
})
elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME:
username = self.normalized_request.get(
feconf.USER_FILTER_CRITERION_USERNAME)
user_id = user_services.get_user_id_from_username(username)
role_services.log_role_query(
self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME,
username=username)
if user_id is None:
raise self.InvalidInputException(
'User with given username does not exist.')
user_settings = user_services.get_user_settings(user_id)
user_roles = user_settings.roles
managed_topic_ids = []
if feconf.ROLE_ID_TOPIC_MANAGER in user_roles:
managed_topic_ids = [
rights.id for rights in
topic_fetchers.get_topic_rights_with_user(user_id)]
user_roles_dict = {
'roles': user_roles,
'managed_topic_ids': managed_topic_ids,
'banned': user_settings.banned
}
self.render_json(user_roles_dict)
@acl_decorators.can_access_admin_page
def put(self):
username = self.payload.get('username')
role = self.payload.get('role')
user_settings = user_services.get_user_settings_from_username(username)
if user_settings is None:
raise self.InvalidInputException(
'User with given username does not exist.')
if role == feconf.ROLE_ID_TOPIC_MANAGER:
# The Topic manager role assignment is handled via
# TopicManagerRoleHandler.
raise self.InvalidInputException(
'Unsupported role for this handler.')
user_services.add_user_role(user_settings.user_id, role)
self.render_json({})
@acl_decorators.can_access_admin_page
def delete(self):
username = self.request.get('username')
role = self.request.get('role')
user_id = user_services.get_user_id_from_username(username)
if user_id is None:
raise self.InvalidInputException(
'User with given username does not exist.')
if role == feconf.ROLE_ID_TOPIC_MANAGER:
topic_services.deassign_user_from_all_topics(self.user, user_id)
user_services.remove_user_role(user_id, role)
self.render_json({})
class TopicManagerRoleHandler(base.BaseHandler):
    """Handler to assign or deassign the topic-manager role for a topic."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': ['assign', 'deassign']
                }
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Assigns or deassigns topic-manager rights on a topic for a user.

        Raises:
            InvalidInputException. The username does not belong to any user.
        """
        username = self.normalized_payload.get('username')
        action = self.normalized_payload.get('action')
        topic_id = self.normalized_payload.get('topic_id')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')

        user_id = user_settings.user_id
        if action == 'assign':
            # Idiom fix: `not X in Y` -> `X not in Y` (same semantics).
            if feconf.ROLE_ID_TOPIC_MANAGER not in user_settings.roles:
                user_services.add_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
            topic_manager = user_services.get_user_actions_info(user_id)
            topic_services.assign_role(
                user_services.get_system_user(),
                topic_manager, topic_domain.ROLE_MANAGER, topic_id)
        elif action == 'deassign':
            topic_services.deassign_manager_role_from_topic(
                user_services.get_system_user(), user_id, topic_id)
            # Drop the global topic-manager role once the user no longer
            # manages any topic at all.
            if not topic_fetchers.get_topic_rights_with_user(user_id):
                user_services.remove_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
        self.render_json({})
class BannedUsersHandler(base.BaseHandler):
    """Handler that bans (PUT) and unbans (DELETE) users."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {'username': {'schema': {'type': 'basestring'}}},
        'DELETE': {'username': {'schema': {'type': 'basestring'}}}
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Marks a user as banned, removing all their topic assignments.

        Raises:
            InvalidInputException. The username does not belong to any user.
        """
        banned_user_id = user_services.get_user_id_from_username(
            self.normalized_payload.get('username'))
        if banned_user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        topic_services.deassign_user_from_all_topics(self.user, banned_user_id)
        user_services.mark_user_banned(banned_user_id)
        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Lifts a ban from a user.

        Raises:
            InvalidInputException. The username does not belong to any user.
        """
        banned_user_id = user_services.get_user_id_from_username(
            self.normalized_request.get('username'))
        if banned_user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        user_services.unmark_user_banned(banned_user_id)
        self.render_json({})
class AdminSuperAdminPrivilegesHandler(base.BaseHandler):
    """Grants (PUT) and revokes (DELETE) super-admin privileges.

    Only the default system admin account may use this handler.
    """

    PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {'username': {'schema': {'type': 'basestring'}}},
        'DELETE': {'username': {'schema': {'type': 'basestring'}}}
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Grants super-admin privileges to the named user.

        Raises:
            UnauthorizedUserException. The caller is not the default admin.
            InvalidInputException. The username does not belong to any user.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        grantee_id = user_services.get_user_id_from_username(
            self.normalized_payload.get('username'))
        if grantee_id is None:
            raise self.InvalidInputException('No such user exists')
        auth_services.grant_super_admin_privileges(grantee_id)
        self.render_json(self.values)

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Revokes super-admin privileges from the named user.

        Raises:
            UnauthorizedUserException. The caller is not the default admin.
            InvalidInputException. The username is unknown, or names the
                default super admin account itself.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        target_settings = user_services.get_user_settings_from_username(
            self.normalized_request.get('username'))
        if target_settings is None:
            raise self.InvalidInputException('No such user exists')
        if target_settings.email == feconf.ADMIN_EMAIL_ADDRESS:
            raise self.InvalidInputException(
                'Cannot revoke privileges from the default super admin account')
        auth_services.revoke_super_admin_privileges(target_settings.user_id)
        self.render_json(self.values)
class AdminTopicsCsvFileDownloader(base.BaseHandler):
    """Streams the topic-similarity matrix to the admin as a CSV download."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Serves the similarity data as an attached CSV file."""
        csv_text = recommendations_services.get_topic_similarities_as_csv()
        # The downloadable-file renderer accepts only bytes, so the CSV
        # text is UTF-8 encoded first.
        self.render_downloadable_file(
            io.BytesIO(csv_text.encode('utf-8')),
            'topic_similarities.csv',
            'text/csv'
        )
class DataExtractionQueryHandler(base.BaseHandler):
    """Returns submitted answers for one state of an exploration version."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'exp_id': {'schema': {'type': 'basestring'}},
            'exp_version': {'schema': {'type': 'int'}},
            'state_name': {'schema': {'type': 'basestring'}},
            'num_answers': {'schema': {'type': 'int'}}
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Fetches up to num_answers submitted answers for the given state.

        Raises:
            InvalidInputException. The exploration version or the state
                does not exist.
        """
        exp_id = self.normalized_request.get('exp_id')
        exp_version = self.normalized_request.get('exp_version')
        exploration = exp_fetchers.get_exploration_by_id(
            exp_id, strict=False, version=exp_version)
        if exploration is None:
            raise self.InvalidInputException(
                'Entity for exploration with id %s and version %s not found.'
                % (exp_id, exp_version))

        state_name = self.normalized_request.get('state_name')
        if state_name not in exploration.states:
            raise self.InvalidInputException(
                'Exploration \'%s\' does not have \'%s\' state.'
                % (exp_id, state_name))

        state_answers = stats_services.get_state_answers(
            exp_id, exp_version, state_name)
        answer_dicts = state_answers.get_submitted_answer_dict_list()
        num_answers = self.normalized_request.get('num_answers')
        # A non-positive num_answers returns every submitted answer.
        if num_answers > 0:
            answer_dicts = answer_dicts[:num_answers]
        self.render_json({'data': answer_dicts})
class SendDummyMailToAdminHandler(base.BaseHandler):
    """Sends a test email to the admin to verify email configuration."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'POST': {}}

    @acl_decorators.can_access_admin_page
    def post(self):
        """Dispatches the dummy mail, if this deployment can send email.

        Raises:
            InvalidInputException. The app is not configured to send emails.
        """
        # Guard clause: bail out early when email sending is disabled.
        if not feconf.CAN_SEND_EMAILS:
            raise self.InvalidInputException('This app cannot send emails.')
        email_manager.send_dummy_mail_to_admin(self.username)
        self.render_json({})
class UpdateUsernameHandler(base.BaseHandler):
    """Handler for renaming a user from the admin page."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'old_username': {'schema': {'type': 'basestring'}},
            'new_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Renames old_username to new_username and logs the change.

        Raises:
            InvalidInputException. The old username is unknown, or the new
                username is already taken.
        """
        old_username = self.normalized_payload.get('old_username')
        new_username = self.normalized_payload.get('new_username')
        renamed_user_id = user_services.get_user_id_from_username(old_username)
        if renamed_user_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % old_username)
        if user_services.is_username_taken(new_username):
            raise self.InvalidInputException('Username already taken.')
        user_services.set_username(renamed_user_id, new_username)
        # Record who performed the rename, for auditability.
        user_services.log_username_change(
            self.user_id, old_username, new_username)
        self.render_json({})
class NumberOfDeletionRequestsHandler(base.BaseHandler):
    """Reports how many account-deletion requests are still pending."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Renders the current count of pending deletion models."""
        pending_count = wipeout_service.get_number_of_pending_deletion_requests()
        self.render_json({'number_of_pending_deletion_models': pending_count})
class VerifyUserModelsDeletedHandler(base.BaseHandler):
    """Checks whether any models still exist for a given user id."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {'user_id': {'schema': {'type': 'basestring'}}}
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Reports whether models related to the user remain undeleted."""
        target_user_id = self.normalized_request.get('user_id')
        fully_deleted = wipeout_service.verify_user_deleted(
            target_user_id, include_delete_at_end_models=True)
        # The response is the inverse of "deleted": True means some
        # related models still exist.
        self.render_json({'related_models_exist': not fully_deleted})
class DeleteUserHandler(base.BaseHandler):
    """Initiates the wipeout (pre-deletion) process for a user."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'DELETE': {
            'user_id': {'schema': {'type': 'basestring'}},
            'username': {'schema': {'type': 'basestring'}}
        }
    }

    @acl_decorators.can_delete_any_user
    def delete(self):
        """Pre-deletes the user after cross-checking id and username.

        Raises:
            InvalidInputException. The username is unknown, or it resolves
                to a different user id than the one supplied.
        """
        user_id = self.normalized_request.get('user_id')
        username = self.normalized_request.get('username')
        resolved_user_id = user_services.get_user_id_from_username(username)
        if resolved_user_id is None:
            raise self.InvalidInputException(
                'The username doesn\'t belong to any user'
            )
        # Both identifiers must refer to the same account, as a safeguard
        # against deleting the wrong user.
        if resolved_user_id != user_id:
            raise self.InvalidInputException(
                'The user ID retrieved from the username and '
                'the user ID provided by admin differ.'
            )
        wipeout_service.pre_delete_user(user_id)
        self.render_json({'success': True})
class UpdateBlogPostHandler(base.BaseHandler):
    """Updates the author and publication date of a blog post."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'blog_post_id': {'schema': {'type': 'basestring'}},
            'author_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            },
            'published_on': {'schema': {'type': 'basestring'}}
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Reassigns a blog post's author and published-on date.

        Raises:
            InvalidInputException. The author username is unknown, or the
                user lacks blog-dashboard access rights.
            PageNotFoundException. No blog post exists with the given id.
        """
        blog_post_id = self.normalized_payload.get('blog_post_id')
        author_username = self.normalized_payload.get('author_username')
        published_on = self.normalized_payload.get('published_on')

        author_id = user_services.get_user_id_from_username(author_username)
        if author_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % author_username)

        author_actions = user_services.get_user_actions_info(author_id).actions
        if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in author_actions:
            raise self.InvalidInputException(
                'User does not have enough rights to be blog post author.')

        blog_post = blog_services.get_blog_post_by_id(
            blog_post_id, strict=False)
        if blog_post is None:
            raise self.PageNotFoundException(
                Exception(
                    'The blog post with the given id or url doesn\'t exist.'))

        blog_services.update_blog_models_author_and_published_on_date(
            blog_post_id, author_id, published_on)
        self.render_json({})
| true | true |
f7f5160771f046651d966352e176bcb8d8968d41 | 156 | py | Python | Python/Curos_Python_curemvid/Uteis/__init__.py | Jhonattan-rocha/Meus-primeiros-programas | f5971b66c0afd049b5d0493e8b7a116b391d058e | [
"MIT"
] | null | null | null | Python/Curos_Python_curemvid/Uteis/__init__.py | Jhonattan-rocha/Meus-primeiros-programas | f5971b66c0afd049b5d0493e8b7a116b391d058e | [
"MIT"
] | null | null | null | Python/Curos_Python_curemvid/Uteis/__init__.py | Jhonattan-rocha/Meus-primeiros-programas | f5971b66c0afd049b5d0493e8b7a116b391d058e | [
"MIT"
def fatorial(n):
    """Return n! (factorial of n) computed iteratively.

    For n <= 0 the loop body never runs and 1 is returned, matching the
    original behavior. (The original `def` line was fused into a dataset
    metadata row; this restores a valid definition.)
    """
    f = 1
    for c in range(1, n + 1):
        f *= c
    return f
def dobro(n2):
    """Return twice the given number."""
    return 2 * n2
def triplo(n3):
    """Return three times the given number."""
    return 3 * n3
def fatorial(n):
    """Return n! (factorial of n) computed iteratively.

    For n <= 0 the loop body never runs and 1 is returned, matching the
    original behavior. (The original `def` line was fused into a dataset
    metadata row; this restores a valid definition.)
    """
    f = 1
    for c in range(1, n + 1):
        f *= c
    return f
def dobro(n2):
    """Return twice the given number."""
    return 2 * n2
def triplo(n3):
    """Return three times the given number."""
    return 3 * n3
| true | true |
f7f517eb41b22582e3d7ec8e137ac64c4d7ab25b | 309 | py | Python | Kooleposhti/commands/views.py | ParizanTeam/Kooleposhti-Backend | e7d819b3e93836f1a893cc51541056cbf681d1c6 | [
"MIT"
] | null | null | null | Kooleposhti/commands/views.py | ParizanTeam/Kooleposhti-Backend | e7d819b3e93836f1a893cc51541056cbf681d1c6 | [
"MIT"
] | null | null | null | Kooleposhti/commands/views.py | ParizanTeam/Kooleposhti-Backend | e7d819b3e93836f1a893cc51541056cbf681d1c6 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from rest_framework.decorators import api_view
import os
import datetime
@api_view(['GET'])
def push_to_repo(request):
    """Stub endpoint intended to commit and push the repository via git.

    NOTE(review): the git commands below are only constructed, never
    executed -- presumably a subprocess call is still to be added; the
    endpoint currently returns nothing. Confirm intent before relying
    on it.
    """
    bash_cmds = [
        'git add .',
        # NOTE(review): the commit message is interpolated unquoted; a
        # timestamp contains spaces and would split into several shell
        # words if this is ever run through a shell. Verify quoting.
        f'git commit -m {datetime.datetime.now()}',
        'git push'
    ]
    pass
| 18.176471 | 51 | 0.656958 | from django.shortcuts import render
from rest_framework.decorators import api_view
import os
import datetime
@api_view(['GET'])
def push_to_repo(request):
    """Stub endpoint intended to commit and push the repository via git.

    NOTE(review): the git commands below are only constructed, never
    executed -- presumably a subprocess call is still to be added; the
    endpoint currently returns nothing. Confirm intent before relying
    on it.
    """
    bash_cmds = [
        'git add .',
        # NOTE(review): the commit message is interpolated unquoted; a
        # timestamp contains spaces and would split into several shell
        # words if this is ever run through a shell. Verify quoting.
        f'git commit -m {datetime.datetime.now()}',
        'git push'
    ]
    pass
| true | true |
f7f519b3e6b8488a8315a597be4bb57144b77bb4 | 91,347 | py | Python | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/tools/gyp/pylib/gyp/generator/make.py | wterkaj/ApertusVR | 424ec5515ae08780542f33cc4841a8f9a96337b3 | [
"MIT"
] | 158 | 2016-11-17T19:37:51.000Z | 2022-03-21T19:57:55.000Z | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/tools/gyp/pylib/gyp/generator/make.py | wterkaj/ApertusVR | 424ec5515ae08780542f33cc4841a8f9a96337b3 | [
"MIT"
] | 94 | 2016-11-18T09:55:57.000Z | 2021-01-14T08:50:40.000Z | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/tools/gyp/pylib/gyp/generator/make.py | wterkaj/ApertusVR | 424ec5515ae08780542f33cc4841a8f9a96337b3 | [
"MIT"
] | 51 | 2017-05-24T10:20:25.000Z | 2022-03-17T15:07:02.000Z | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last beween different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
import hashlib
# Default expansions for gyp generator variables. Most values are GNU make
# syntax ($(...)), evaluated at build time inside the generated Makefiles.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False

# Placates pylint.
# These three lists may be overwritten from the Xcode generator inside
# CalculateVariables() when building for mac.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Populated by CalculateGeneratorInputInfo() below.
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Mutates `default_variables` in place with OS and shared-library defaults
  for the detected flavor. On mac it additionally copies generator
  configuration from the Xcode generator into this module's globals and
  extends COMPILABLE_EXTENSIONS with the Objective-C extensions.
  """
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    if flavor == 'aix':
      # AIX uses .a for shared libraries as well as static archives.
      default_variables.setdefault('SHARED_LIB_SUFFIX', '.a')
    else:
      default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp).

  Side effects: may set the module-level generator_wants_sorted_dependencies
  flag, and always rebinds the generator_filelist_paths global.
  """
  flags = params.get('generator_flags', {})

  # Android NDK requires a strict link order.
  if flags.get('android_ndk_version', None):
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True

  options = params['options']
  output_dir = options.generator_output or options.toplevel_dir
  builddir_name = flags.get('output_dir', 'out')

  global generator_filelist_paths
  generator_filelist_paths = {
    'toplevel': options.toplevel_dir,
    'qualified_out_dir': os.path.normpath(
        os.path.join(output_dir, builddir_name, 'gypfiles')),
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) $(LIBS) -Wl,--end-group
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Write root-level make suffix rules for every compilable extension.

  Three sets of pattern rules are emitted, one per possible location of a
  source file: the source tree, the per-toolset object dir (generated
  sources), and the plain object dir.
  """
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  def _emit_rules(source_root):
    # One pattern rule plus its do_cmd invocation per compilable extension.
    for ext in extensions:
      writer.write('$(obj).$(TOOLSET)/%.o: ' + source_root + '/%' + ext +
                   ' FORCE_DO_CMD\n')
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])

  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  _emit_rules('$(srcdir)')
  writer.write('\n# Try building from generated source, too.\n')
  _emit_rules('$(obj).$(TOOLSET)')
  writer.write('\n')
  _emit_rules('$(obj)')
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}


def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  # str.endswith accepts a tuple of suffixes, which replaces the original
  # hand-written generator loop with a single C-level check.
  return filename.endswith(tuple(COMPILABLE_EXTENSIONS))
def Linkable(filename):
  """Return true if the file is linkable (should be on the link line)."""
  # Only already-built object files qualify.
  return filename.endswith('.o')
def Target(filename):
  """Translate a compilable filename to its .o target."""
  root, _unused_ext = os.path.splitext(filename)
  return root + '.o'
def EscapeShellArgument(s):
  """Quotes an argument so that it will be interpreted literally by a POSIX
  shell. Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Close the quote, emit an escaped literal quote, reopen the quote.
  return "'%s'" % s.replace("'", "'\\''")
def EscapeMakeVariableExpansion(s):
  """Make has its own variable expansion syntax using $. We must escape it for
  string to be interpreted literally."""
  # Doubling the dollar sign is make's escape for a literal '$'.
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Shell-quote first, then escape make's '$' expansion.
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  # '#' characters must be escaped even embedded in a string, else Make will
  # treat it as the start of a comment.
  return escaped.replace('#', r'\#')
def QuoteIfNecessary(string):
  """TODO: Should this ideally be replaced with one or more of the above
  functions?"""
  if '"' not in string:
    return string
  # Wrap in double quotes, backslash-escaping any embedded quotes.
  return '"%s"' % string.replace('"', '\\"')
def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  # Anything outside [a-zA-Z0-9_] is collapsed to an underscore.
  return re.sub(r'[^a-zA-Z0-9_]', '_', string)
# Prefix prepended by Sourceify(); reassigned elsewhere when the generated
# makefiles do not live in the source root.
srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form."""
  # Make-variable references and absolute paths pass through untouched.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Replace each space in *s* with *quote* (default: backslash-space, the
  make escape for a literal space in a filename)."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
# Populated by MakefileWriter.Write() as each target is generated.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
# Only static_library and shared_library targets are recorded here.
target_link_deps = {}
class MakefileWriter(object):
  """MakefileWriter packages up the writing of one target-specific foobar.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """
  def __init__(self, generator_flags, flavor):
    # generator_flags: dict of generator options (e.g. android_ndk_version).
    # flavor: platform flavor string ('mac', 'aix', ...).
    self.generator_flags = generator_flags
    self.flavor = flavor

    # Per-extension suffix-rule text, keyed by extension, for the three
    # places a source file can live (see Write(), which emits only the
    # rules for extensions actually present in the target).
    self.suffix_rules_srcdir = {}
    self.suffix_rules_objdir1 = {}
    self.suffix_rules_objdir2 = {}

    # Generate suffix rules for all compilable extensions.
    for ext in COMPILABLE_EXTENSIONS.keys():
      # Suffix rules for source folder.
      # NOTE: the command line below starts with a tab, as required by make.
      self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

      # Suffix rules for generated source files.
      self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)
    # All of the Write* helpers below emit through self.fp (via WriteLn etc.).
    self.fp = open(output_filename, 'w')
    self.fp.write(header)

    # Stash per-target state on self for use by the helper methods.
    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']
    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []

    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)

    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    # Installable targets get a short alias (their basename) and an install
    # path; everything else is addressed by its full output path.
    if (self.is_standalone_static_library or
        self.type in self._INSTALLABLE_TARGETS):
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output

    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)

    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)

    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      if self.flavor == 'mac':
        # libtool on OS X generates warnings for duplicate basenames in the same
        # target.
        _ValidateSourcesForOSX(spec, all_sources)
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        # Emit only the suffix rules for extensions this target actually uses.
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')

    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)

    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path

    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary

    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)

    self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      # Unique make-variable-safe name for this action's cmd_/quiet_cmd_ vars.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs

      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                           for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      # Ensure output directories exist before the action runs.
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # command and cd_action get written to a toplevel variable called
      # cmd_foo. Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      outputs = map(self.Absolutify, outputs)
      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the obj
      # variable for the action rule with an absolute version so that the output
      # goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())

      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)

      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()
    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for rule in rules:
      # Make-variable-safe name shared by all per-source instances of the rule.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 rule['rule_name']))
      # Per-source counter; each rule_source gets its own cmd_<name>_<count>.
      count = 0
      self.WriteLn('### Generated for rule %s:' % name)

      all_outputs = []

      for rule_source in rule.get('rule_sources', []):
        dirs = set()
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        for out in outputs:
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                    rule.get('inputs', [])))
        actions = ['$(call do_cmd,%s_%d)' % (name, count)]

        if name == 'resources_grit':
          # HACK: This is ugly. Grit intentionally doesn't touch the
          # timestamp of its output file when the file doesn't change,
          # which is fine in hash-based dependency systems like scons
          # and forge, but not kosher in the make world. After some
          # discussion, hacking around it here seems like the least
          # amount of pain.
          actions += ['@touch --no-create $@']

        # See the comment in WriteCopies about expanding env vars.
        outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
        inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

        outputs = map(self.Absolutify, outputs)
        all_outputs += outputs
        # Only write the 'obj' and 'builddir' rules for the "primary" output
        # (:1); it's superfluous for the "extra outputs", and this avoids
        # accidentally writing duplicate dummy rules for those outputs.
        self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
        self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
        self.WriteMakeRule(outputs, inputs, actions,
                           command="%s_%d" % (name, count))
        # Spaces in rule filenames are not supported, but rule variables have
        # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
        # The spaces within the variables are valid, so remove the variables
        # before checking.
        variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
        for output in outputs:
          output = re.sub(variables_with_spaces, '', output)
          assert ' ' not in output, (
              "Spaces in rule filenames not yet supported (%s)" % output)
        self.WriteLn('all_deps += %s' % ' '.join(outputs))

        action = [self.ExpandInputRoot(ac, rule_source_root,
                                       rule_source_dirname)
                  for ac in rule['action']]
        mkdirs = ''
        if len(dirs) > 0:
          mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
        cd_action = 'cd %s; ' % Sourceify(self.path or '.')

        # action, cd_action, and mkdirs get written to a toplevel variable
        # called cmd_foo. Toplevel variables can't handle things that change
        # per makefile like $(TARGET), so hardcode the target.
        if self.flavor == 'mac':
          action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                    for command in action]
        action = gyp.common.EncodePOSIXShellList(action)
        action = action.replace('$(TARGET)', self.target)
        cd_action = cd_action.replace('$(TARGET)', self.target)
        mkdirs = mkdirs.replace('$(TARGET)', self.target)

        # Set LD_LIBRARY_PATH in case the rule runs an executable from this
        # build which links to shared libs from this build.
        # rules run on the host, so they should in theory only use host
        # libraries, but until everything is made cross-compile safe, also use
        # target libraries.
        # TODO(piman): when everything is cross-compile safe, remove lib.target
        self.WriteLn(
            "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
            "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
            "export LD_LIBRARY_PATH; "
            "%(cd_action)s%(mkdirs)s%(action)s" % {
              'action': action,
              'cd_action': cd_action,
              'count': count,
              'mkdirs': mkdirs,
              'name': name,
            })
        self.WriteLn(
            'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
              'count': count,
              'name': name,
            })
        self.WriteLn()
        count += 1

      outputs_variable = 'rule_%s_outputs' % name
      self.WriteList(all_outputs, outputs_variable)
      extra_outputs.append('$(%s)' % outputs_variable)

      self.WriteLn('### Finished generating for rule: %s' % name)
      self.WriteLn()
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
  def WriteCopies(self, copies, extra_outputs, part_of_all):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this action
                   (used to make other pieces dependent on this action)
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Generated for copy rule.')

    variable = StringToMakefileVariable(self.qualified_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # Absolutify() may call normpath, and will strip trailing slashes.
        path = Sourceify(self.Absolutify(path))
        filename = os.path.split(path)[1]
        # The destination keeps only the source basename.
        output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
                                                        filename)))

        # If the output path has variables in it, which happens in practice for
        # 'copies', writing the environment as target-local doesn't work,
        # because the variables are already needed for the target name.
        # Copying the environment variables into global make variables doesn't
        # work either, because then the .d files will potentially contain spaces
        # after variable expansion, and .d file handling cannot handle spaces.
        # As a workaround, manually expand variables at gyp time. Since 'copies'
        # can't run scripts, there's no need to write the env then.
        # WriteDoCmd() will escape spaces for .d files.
        env = self.GetSortedXcodeEnv()
        output = gyp.xcode_emulation.ExpandEnvVars(output, env)
        path = gyp.xcode_emulation.ExpandEnvVars(path, env)
        self.WriteDoCmd([output], [path], 'copy', part_of_all)
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
  def WriteMacBundleResources(self, resources, bundle_deps):
    """Writes Makefile code for 'mac_bundle_resources'.

    bundle_deps: list to be filled with the copied-resource outputs so the
                 bundle target can depend on them.
    """
    self.WriteLn('### Generated for mac_bundle_resources')

    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        map(Sourceify, map(self.Absolutify, resources))):
      _, ext = os.path.splitext(output)
      if ext != '.xcassets':
        # Make does not supports '.xcassets' emulation.
        self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
                        part_of_all=True)
        bundle_deps.append(output)
  def WriteMacInfoPlist(self, bundle_deps):
    """Write Makefile code for bundle Info.plist files.

    bundle_deps: list to be filled with the processed plist output so the
                 bundle target can depend on it.
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        lambda p: Sourceify(self.Absolutify(p)))
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
                            os.path.basename(info_plist))
      self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
                     quoter=EscapeCppDefine)
      self.WriteMakeRule([intermediate_plist], [info_plist],
          ['$(call do_cmd,infoplist)',
           # "Convert" the plist so that any weird whitespace changes from the
           # preprocessor do not affect the XML parser in mac_tool.
           '@plutil -convert xml1 $@ $@'])
      info_plist = intermediate_plist
    # plists can contain envvars and substitute them into the file.
    self.WriteSortedXcodeEnv(
        out, self.GetSortedXcodeEnv(additional_settings=extra_env))
    self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                    part_of_all=True)
    bundle_deps.append(out)
  def WriteSources(self, configs, deps, sources,
                   extra_outputs, extra_link_deps,
                   part_of_all, precompiled_header):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.

    configs, deps, sources: input from gyp.
    extra_outputs: a list of extra outputs this action should be dependent on;
                   used to serialize action/rules before compilation
    extra_link_deps: a list that will be filled in with any outputs of
                     compilation (to be used in link lines)
    part_of_all: flag indicating this target is part of 'all'
    precompiled_header: a MacPrefixHeader object describing prefix-header
                        compilation for this target
    """
    # Write configuration-specific variables for CFLAGS, etc.
    for configname in sorted(configs.keys()):
      config = configs[configname]
      self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
                     quoter=EscapeCppDefine)

      # On mac the flags come from xcode_settings; elsewhere straight from
      # the config dict (which then has no ObjC/ObjC++ variants).
      if self.flavor == 'mac':
        cflags = self.xcode_settings.GetCflags(configname)
        cflags_c = self.xcode_settings.GetCflagsC(configname)
        cflags_cc = self.xcode_settings.GetCflagsCC(configname)
        cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
        cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
      else:
        cflags = config.get('cflags')
        cflags_c = config.get('cflags_c')
        cflags_cc = config.get('cflags_cc')

      self.WriteLn("# Flags passed to all source files.");
      self.WriteList(cflags, 'CFLAGS_%s' % configname)
      self.WriteLn("# Flags passed to only C files.");
      self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
      self.WriteLn("# Flags passed to only C++ files.");
      self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
      if self.flavor == 'mac':
        self.WriteLn("# Flags passed to only ObjC files.");
        self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
        self.WriteLn("# Flags passed to only ObjC++ files.");
        self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
      includes = config.get('include_dirs')
      if includes:
        includes = map(Sourceify, map(self.Absolutify, includes))
      self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')

    compilable = filter(Compilable, sources)
    objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
    self.WriteList(objs, 'OBJS')

    for obj in objs:
      assert ' ' not in obj, (
          "Spaces in object filenames not supported (%s)" % obj)
    self.WriteLn('# Add to the list of files we specially track '
                 'dependencies for.')
    self.WriteLn('all_deps += $(OBJS)')
    self.WriteLn()

    # Make sure our dependencies are built first.
    if deps:
      self.WriteMakeRule(['$(OBJS)'], deps,
                         comment = 'Make sure our dependencies are built '
                                   'before any of us.',
                         order_only = True)

    # Make sure the actions and rules run first.
    # If they generate any extra headers etc., the per-.o file dep tracking
    # will catch the proper rebuilds, so order only is still ok here.
    if extra_outputs:
      self.WriteMakeRule(['$(OBJS)'], extra_outputs,
                         comment = 'Make sure our actions/rules run '
                                   'before any of us.',
                         order_only = True)

    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
    if pchdeps:
      self.WriteLn('# Dependencies from obj files to their precompiled headers')
      for source, obj, gch in pchdeps:
        self.WriteLn('%s: %s' % (obj, gch))
      self.WriteLn('# End precompiled header dependencies')

    if objs:
      extra_link_deps.append('$(OBJS)')
      self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
      self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
      self.WriteLn("$(OBJS): GYP_CFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('c') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_C_$(BUILDTYPE))")
      self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('cc') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_CC_$(BUILDTYPE))")
      if self.flavor == 'mac':
        self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
                     "$(DEFS_$(BUILDTYPE)) "
                     "$(INCS_$(BUILDTYPE)) "
                     "%s " % precompiled_header.GetInclude('m') +
                     "$(CFLAGS_$(BUILDTYPE)) "
                     "$(CFLAGS_C_$(BUILDTYPE)) "
                     "$(CFLAGS_OBJC_$(BUILDTYPE))")
        self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
                     "$(DEFS_$(BUILDTYPE)) "
                     "$(INCS_$(BUILDTYPE)) "
                     "%s " % precompiled_header.GetInclude('mm') +
                     "$(CFLAGS_$(BUILDTYPE)) "
                     "$(CFLAGS_CC_$(BUILDTYPE)) "
                     "$(CFLAGS_OBJCC_$(BUILDTYPE))")

    self.WritePchTargets(precompiled_header.GetPchBuildCommands())

    # If there are any object files in our input file list, link them into our
    # output.
    extra_link_deps += filter(Linkable, sources)

    self.WriteLn()
  def WritePchTargets(self, pch_commands):
    """Writes make rules to compile prefix headers.

    pch_commands: iterable of (gch, lang_flag, lang, input) tuples as produced
                  by MacPrefixHeader.GetPchBuildCommands().
    """
    if not pch_commands:
      return

    for gch, lang_flag, lang, input in pch_commands:
      # Pick the per-language extra flags and flag-variable name.
      extra_flags = {
        'c': '$(CFLAGS_C_$(BUILDTYPE))',
        'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
        'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
        'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
      }[lang]
      var_name = {
        'c': 'GYP_PCH_CFLAGS',
        'cc': 'GYP_PCH_CXXFLAGS',
        'm': 'GYP_PCH_OBJCFLAGS',
        'mm': 'GYP_PCH_OBJCXXFLAGS',
      }[lang]
      self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "$(CFLAGS_$(BUILDTYPE)) " +
                   extra_flags)

      self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
      self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
      self.WriteLn('')
      assert ' ' not in gch, (
          "Spaces in gch filenames not supported (%s)" % gch)
      self.WriteLn('all_deps += %s' % gch)
      self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
if self.flavor == 'aix':
target_ext = '.a'
else:
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
    # Emit an order-only rule so extra outputs (from actions/rules) are built
    # before the final binary.
    # NOTE(review): the `target` parameter is unused; the rule is written
    # against self.output_binary.
    self.WriteMakeRule([self.output_binary], extra_outputs,
                       comment = 'Build our special outputs first.',
                       order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not already after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), expliclity
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have an do-nothing recipe.
# Hash the target name to avoid generating overlong filenames.
cmddigest = hashlib.sha1(command if command else self.target).hexdigest()
intermediate = "%s.intermediate" % (cmddigest)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:');
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
  def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
    """Write a set of LOCAL_XXX definitions for Android NDK.

    These variable definitions will be used by Android NDK but do nothing for
    non-Android applications.

    Arguments:
      module_name: Android NDK module name, which must be unique among all
        module names.
      all_sources: A list of source files (will be filtered by Compilable).
      link_deps: A list of link dependencies, which must be sorted in
        the order from dependencies to dependents.
    """
    # Only real linkable/executable targets map onto NDK module kinds.
    if self.type not in ('executable', 'shared_library', 'static_library'):
      return

    self.WriteLn('# Variable definitions for Android applications')
    self.WriteLn('include $(CLEAR_VARS)')
    self.WriteLn('LOCAL_MODULE := ' + module_name)
    self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
                 '$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both of C and C++. There is
                 # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
                 # sources.
                 '$(CFLAGS_C_$(BUILDTYPE)) '
                 # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
                 # LOCAL_C_INCLUDES does not expect it. So put it in
                 # LOCAL_CFLAGS.
                 '$(INCS_$(BUILDTYPE))')
    # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
    self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
    self.WriteLn('LOCAL_C_INCLUDES :=')
    self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

    # Detect the C++ extension: pick the extension that occurs most often
    # among the sources ('.cpp' wins ties, being the initial default).
    cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
    default_cpp_ext = '.cpp'
    for filename in all_sources:
      ext = os.path.splitext(filename)[1]
      if ext in cpp_ext:
        cpp_ext[ext] += 1
        if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
          default_cpp_ext = ext
    self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

    self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                   'LOCAL_SRC_FILES')

    # Filter out those which do not match prefix and suffix and produce
    # the resulting list without prefix and suffix.
    def DepsToModules(deps, prefix, suffix):
      # e.g. ('libfoo.so', 'lib', '.so') -> 'foo'
      modules = []
      for filepath in deps:
        filename = os.path.basename(filepath)
        if filename.startswith(prefix) and filename.endswith(suffix):
          modules.append(filename[len(prefix):-len(suffix)])
      return modules

    # Retrieve the default value of 'SHARED_LIB_SUFFIX'
    # (use a plain 'linux' flavor so the suffix is the generic '.so').
    params = {'flavor': 'linux'}
    default_variables = {}
    CalculateVariables(default_variables, params)

    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['SHARED_LIB_PREFIX'],
                      default_variables['SHARED_LIB_SUFFIX']),
        'LOCAL_SHARED_LIBRARIES')
    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['STATIC_LIB_PREFIX'],
                      generator_default_variables['STATIC_LIB_SUFFIX']),
        'LOCAL_STATIC_LIBRARIES')

    if self.type == 'executable':
      self.WriteLn('include $(BUILD_EXECUTABLE)')
    elif self.type == 'shared_library':
      self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
    elif self.type == 'static_library':
      self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
    self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the make target that re-runs gyp when any input gyp file changes."""
  options = params['options']
  toplevel = options.toplevel_dir
  build_files_args = [gyp.common.RelativePath(name, toplevel)
                      for name in params['build_files_arg']]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'], toplevel)
  if not gyp_binary.startswith(os.sep):
    # Ensure a relative binary is invoked as a path, not via $PATH lookup.
    gyp_binary = os.path.join('.', gyp_binary)
  regen_command = gyp.common.EncodePOSIXShellList(
      [gyp_binary, '-fmake'] +
      gyp.RegenerateFlags(options) +
      build_files_args)
  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': ' '.join(map(Sourceify, build_files)),
          'cmd': regen_command})
def PerformBuild(data, configurations, params):
  """Run 'make BUILDTYPE=<config>' once per requested configuration."""
  options = params['options']
  toplevel = options.toplevel_dir
  for config in configurations:
    command = ['make']
    if toplevel and toplevel != '.':
      command.extend(['-C', toplevel])
    command.append('BUILDTYPE=' + config)
    print('Building [%s]: %s' % (config, command))
    subprocess.check_call(command)
def GenerateOutput(target_list, target_dicts, data, params):
  """Main generator entry point (called by gyp): write the root Makefile,
  one .mk per target, and one sub-Makefile per gyp file with targets."""
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  # Baseline (linux) header parameters; flavor branches below override them.
  flock_command= 'flock'
  copy_archive_arguments = '-af'
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
      'copy_archive_args': copy_archive_arguments,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'openbsd':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
    })
  elif flavor == 'aix':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })

  # Toolchain binaries: environment variables win over built-in defaults.
  header_params.update({
    'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
    'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
    'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
    'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
    'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
    'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
    'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
    'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
  })

  # Translate 'make_global_settings' from the first gyp file into makefile
  # variable assignments; *_wrapper entries are prefixed onto their tool.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += '  %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.

  header_params['make_global_settings'] = make_global_settings

  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
    WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  # Write one .mk per qualified target and collect them for inclusion.
  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        "    $(findstring $(join ^,$(prefix)),\\\n"
        "                 $(join ^," + include_file + ")))),)\n")
    root_makefile.write("  include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| 40.96278 | 180 | 0.64868 |
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
import hashlib
# gyp expansion variables, mapped onto the make variables ($(obj), $(builddir),
# $(BUILDTYPE), automatic variables like $<) defined by the generated Makefile
# header.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False

# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Filled in by CalculateGeneratorInputInfo below.
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    # NOTE: imported lazily here so non-mac runs never pull in the xcode
    # generator module.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    # Objective-C(++) sources are compilable on mac.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    if flavor == 'aix':
      default_variables.setdefault('SHARED_LIB_SUFFIX', '.a')
    else:
      default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  ndk_version = flags.get('android_ndk_version', None)
  # Android NDK requires a strict link order.
  if ndk_version:
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True

  options = params['options']
  output_dir = options.generator_output or options.toplevel_dir
  builddir_name = flags.get('output_dir', 'out')
  qualified_out_dir = os.path.normpath(
      os.path.join(output_dir, builddir_name, 'gypfiles'))

  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': options.toplevel_dir,
      'qualified_out_dir': qualified_out_dir,
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# SPACE_REPLACEMENT is written into .d-related paths in place of spaces and
# mapped back by the replace_spaces/unreplace_spaces make helpers defined in
# SHARED_HEADER below.
SPACE_REPLACEMENT = '?'
# Per-flavor makefile command templates for the link steps (static archive,
# executable link, shared object, loadable module).  One of these is
# interpolated into SHARED_HEADER via %(link_commands)s.
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) $(LIBS) -Wl,--end-group
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""

# Mac uses libtool for static archives and -bundle for loadable modules.
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""

# Android additionally defines *_host variants for host-toolset links.
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""

# AIX's ar needs -X32_64 to handle both 32- and 64-bit objects.
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
# Extra command templates for mac builds (Objective-C/C++ compiles, precompiled
# headers, and gyp-mac-tool invocations); interpolated via %(extra_commands)s.
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Write the root-level compilation suffix rules to *writer*.

  Emits three groups of pattern rules (sources under $(srcdir), generated
  sources under $(obj).$(TOOLSET), and generated sources under $(obj)),
  one rule per compilable extension.

  Arguments:
    writer: a file-like object with a write() method.
  """
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  # The two writes below were truncated in this copy of the file; restored
  # to emit the section-header comments into the generated Makefile.
  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  for ext in extensions:
    writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
    writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])

  writer.write('\n# Try building from generated source, too.\n')
  for ext in extensions:
    writer.write(
        '$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
    writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('\n')

  for ext in extensions:
    writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
    writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
# ('.m'/'.mm' are added for the mac flavor by CalculateVariables.)
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}
def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  # any() over the known extensions replaces the original manual
  # generator-and-loop form; behavior is unchanged.
  return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
  """True when *filename* is an object file that belongs on the link line."""
  return filename[-2:] == '.o'
def Target(filename):
  """Translate a compilable filename to its .o target."""
  root, _unused_ext = os.path.splitext(filename)
  return root + '.o'
def EscapeShellArgument(s):
  """Quote *s* so that a POSIX shell interprets it literally.

  Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Close the quote, emit an escaped quote, reopen: ' -> '\''
  quoted = s.replace("'", "'\\''")
  return "'{0}'".format(quoted)
def EscapeMakeVariableExpansion(s):
  """Double every '$' so make passes the string through without expanding it."""
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  s = EscapeShellArgument(s)
  s = EscapeMakeVariableExpansion(s)
  # '#' characters must be escaped on input to recipes, so that make does not
  # treat it as the start of a comment.  (This return statement was truncated
  # in this copy of the file; restored.)
  return s.replace('#', r'\#')


def QuoteIfNecessary(string):
  """TODO: Should this ideally be replaced with one or more of the above
  functions?"""
  if '"' in string:
    string = '"' + string.replace('"', '\\"') + '"'
  return string
def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  # Anything outside [a-zA-Z0-9_] is squashed to an underscore.
  word_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
  return ''.join(c if c in word_chars else '_' for c in string)
srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form."""
  # Make-variable references and absolute paths pass through untouched; only
  # plain relative paths are rebased onto the source directory prefix.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Return *s* with every space replaced by *quote* (default: escaped space)."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
# Populated by MakefileWriter.Write as each target's makefile is generated.
target_outputs = {}

# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
  """MakefileWriter packages up the writing of one target-specific foobar.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """

  def __init__(self, generator_flags, flavor):
    # generator_flags: dict of generator options (e.g. android_ndk_version).
    # flavor: platform string, compared against 'mac' elsewhere in the class.
    self.generator_flags = generator_flags
    self.flavor = flavor

    self.suffix_rules_srcdir = {}
    self.suffix_rules_objdir1 = {}
    self.suffix_rules_objdir2 = {}

    # Generate suffix rules for all compilable extensions.
    # NOTE(review): the second line of each template is a make recipe and must
    # begin with a literal tab character.
    for ext in COMPILABLE_EXTENSIONS.keys():
      # Suffix rules for source folder.
      self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

      # Suffix rules for generated source files.
      self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []

    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)

    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    if (self.is_standalone_static_library or
        self.type in self._INSTALLABLE_TARGETS):
      # Installable targets are referred to by basename; install_path is where
      # the alias ends up (recorded in target_outputs below).
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output

    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)

    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)

    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      if self.flavor == 'mac':
        # libtool on OS X generates warnings for duplicate basenames in the same
        # target.
        _ValidateSourcesForOSX(spec, all_sources)
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      # NOTE(review): `filter` used as a truthy list is a Python-2 idiom; this
      # file relies on py2 semantics for map/filter throughout.
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')

    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)

    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path

    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary

    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)

    self.fp.close()
  def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
    """Write a "sub-project" Makefile.

    This is a small, wrapper Makefile that calls the top-level Makefile to build
    the targets from a single gyp file (i.e. a sub-project).

    Arguments:
      output_filename: sub-project Makefile name to write
      makefile_path: path to the top-level Makefile; may be empty, in which
                     case make is invoked in the current directory
      targets: list of "all" targets for this sub-project
      build_dir: build output directory, relative to the sub-project
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')
    self.fp.write(header)
    # For consistency with other builders, put sub-project build output in the
    # sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
    self.WriteLn('export builddir_name ?= %s' %
                 os.path.join(os.path.dirname(output_filename), build_dir))
    self.WriteLn('.PHONY: all')
    self.WriteLn('all:')
    if makefile_path:
      # Run make from the top-level Makefile's directory.
      makefile_path = ' -C ' + makefile_path
    self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
    self.fp.close()
  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    Arguments:
      actions: list of gyp action dicts ('action_name', 'inputs', 'outputs',
               'action', optional 'message' and process_outputs_as_* flags)
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     actions (used to make other pieces dependent on these
                     actions)
      part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs

      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                          for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      if len(dirs) > 0:
        # Ensure all output directories exist before the action runs.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # command and cd_action get written to a toplevel variable called
      # cmd_foo. Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      # NOTE(review): py2 map() returns a list here; outputs[0] below relies
      # on that.
      outputs = map(self.Absolutify, outputs)

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the obj
      # variable for the action rule with an absolute version so that the output
      # goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())

      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)

      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for rule in rules:
      # A unique makefile variable / command namespace per (target, rule).
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 rule['rule_name']))
      count = 0
      self.WriteLn('### Generated for rule %s:' % name)

      all_outputs = []

      # One do_cmd invocation (cmd_<name>_<count>) is emitted per rule source.
      for rule_source in rule.get('rule_sources', []):
        # Output directories that must exist before the command runs.
        dirs = set()
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        # Expand RULE_INPUT_* placeholders in each declared output.
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        for out in outputs:
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        # gyp allows these flags to be given as ints or strings; int() copes
        # with both.
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                    rule.get('inputs', [])))
        actions = ['$(call do_cmd,%s_%d)' % (name, count)]

        if name == 'resources_grit':
          # HACK: This is ugly.  Grit intentionally doesn't touch the
          # timestamp of its output file when the file doesn't change,
          # which is fine in hash-based dependency systems like scons
          # and forge, but not kosher in the make world.  After some
          # discussion, hacking around it here seems like the least
          # amount of pain.
          actions += ['@touch --no-create $@']

        # See the comment in WriteCopies about expanding env vars.
        outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
        inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

        outputs = map(self.Absolutify, outputs)
        all_outputs += outputs
        # Only write the 'obj' and 'builddir' rules for the "primary" output
        # (:1); it's superfluous for the "extra outputs", and this avoids
        # accidentally writing duplicate dummy rules for those outputs.
        self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
        self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
        self.WriteMakeRule(outputs, inputs, actions,
                           command="%s_%d" % (name, count))
        # Spaces in rule filenames are not supported, but rule variables have
        # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
        # The spaces within the variables are valid, so remove the variables
        # before checking.
        variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
        for output in outputs:
          output = re.sub(variables_with_spaces, '', output)
          assert ' ' not in output, (
              "Spaces in rule filenames not yet supported (%s)" % output)
        self.WriteLn('all_deps += %s' % ' '.join(outputs))

        action = [self.ExpandInputRoot(ac, rule_source_root,
                                       rule_source_dirname)
                  for ac in rule['action']]
        mkdirs = ''
        if len(dirs) > 0:
          mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
        cd_action = 'cd %s; ' % Sourceify(self.path or '.')

        # action, cd_action, and mkdirs get written to a toplevel variable
        # called cmd_foo. Toplevel variables can't handle things that change
        # per makefile like $(TARGET), so hardcode the target.
        if self.flavor == 'mac':
          action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                    for command in action]
        action = gyp.common.EncodePOSIXShellList(action)
        action = action.replace('$(TARGET)', self.target)
        cd_action = cd_action.replace('$(TARGET)', self.target)
        mkdirs = mkdirs.replace('$(TARGET)', self.target)

        # Set LD_LIBRARY_PATH in case the rule runs an executable from this
        # build which links to shared libs from this build.
        # rules run on the host, so they should in theory only use host
        # libraries, but until everything is made cross-compile safe, also use
        # target libraries.
        # TODO(piman): when everything is cross-compile safe, remove lib.target
        self.WriteLn(
            "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
            "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
            "export LD_LIBRARY_PATH; "
            "%(cd_action)s%(mkdirs)s%(action)s" % {
          'action': action,
          'cd_action': cd_action,
          'count': count,
          'mkdirs': mkdirs,
          'name': name,
        })
        self.WriteLn(
            'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
          'count': count,
          'name': name,
        })
        self.WriteLn()
        count += 1

      # Collect every output of this rule into one variable so other pieces
      # (e.g. compilation) can depend on the whole rule at once.
      outputs_variable = 'rule_%s_outputs' % name
      self.WriteList(all_outputs, outputs_variable)
      extra_outputs.append('$(%s)' % outputs_variable)

      self.WriteLn('### Finished generating for rule: %s' % name)
      self.WriteLn()
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not supports '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
  def WriteMacInfoPlist(self, bundle_deps):
    """Write Makefile code for bundle Info.plist files."""
    # out: destination path inside the bundle; defines: INFOPLIST_PREPROCESS
    # -D flags; extra_env: plist-specific env vars (per GetMacInfoPlist).
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
        lambda p: Sourceify(self.Absolutify(p)))
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
          os.path.basename(info_plist))
      self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
          quoter=EscapeCppDefine)
      self.WriteMakeRule([intermediate_plist], [info_plist],
          ['$(call do_cmd,infoplist)',
           # "Convert" the plist so that any weird whitespace changes from the
           # preprocessor do not affect the XML parser in mac_tool.
           '@plutil -convert xml1 $@ $@'])
      # From here on, the copy step consumes the preprocessed file.
      info_plist = intermediate_plist
    # plists can contain envvars and substitute them into the file.
    self.WriteSortedXcodeEnv(
        out, self.GetSortedXcodeEnv(additional_settings=extra_env))
    self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                    part_of_all=True)
    bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
if self.flavor == 'aix':
target_ext = '.a'
else:
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not already after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), expliclity
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have an do-nothing recipe.
# Hash the target name to avoid generating overlong filenames.
cmddigest = hashlib.sha1(command if command else self.target).hexdigest()
intermediate = "%s.intermediate" % (cmddigest)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:');
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
  def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
    """Write a set of LOCAL_XXX definitions for Android NDK.

    These variable definitions will be used by Android NDK but do nothing for
    non-Android applications.

    Arguments:
      module_name: Android NDK module name, which must be unique among all
                   module names.
      all_sources: A list of source files (will be filtered by Compilable).
      link_deps: A list of link dependencies, which must be sorted in
                 the order from dependencies to dependents.
    """
    # Only linkable target types map onto NDK build modules.
    if self.type not in ('executable', 'shared_library', 'static_library'):
      return

    self.WriteLn('# Variable definitions for Android applications')
    self.WriteLn('include $(CLEAR_VARS)')
    self.WriteLn('LOCAL_MODULE := ' + module_name)
    self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
                 '$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both of C and C++.  There is
                 # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
                 # sources.
                 '$(CFLAGS_C_$(BUILDTYPE)) '
                 # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
                 # LOCAL_C_INCLUDES does not expect it.  So put it in
                 # LOCAL_CFLAGS.
                 '$(INCS_$(BUILDTYPE))')
    # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
    self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
    self.WriteLn('LOCAL_C_INCLUDES :=')
    self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

    # Detect the C++ extension: pick the one most common among the sources.
    cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
    default_cpp_ext = '.cpp'
    for filename in all_sources:
      ext = os.path.splitext(filename)[1]
      if ext in cpp_ext:
        cpp_ext[ext] += 1
        if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
          default_cpp_ext = ext
    self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

    self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                   'LOCAL_SRC_FILES')

    # Filter out those which do not match prefix and suffix and produce
    # the resulting list without prefix and suffix.
    def DepsToModules(deps, prefix, suffix):
      modules = []
      for filepath in deps:
        filename = os.path.basename(filepath)
        if filename.startswith(prefix) and filename.endswith(suffix):
          modules.append(filename[len(prefix):-len(suffix)])
      return modules

    # Retrieve the default value of 'SHARED_LIB_SUFFIX'
    params = {'flavor': 'linux'}
    default_variables = {}
    CalculateVariables(default_variables, params)

    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['SHARED_LIB_PREFIX'],
                      default_variables['SHARED_LIB_SUFFIX']),
        'LOCAL_SHARED_LIBRARIES')
    self.WriteList(
        DepsToModules(link_deps,
                      generator_default_variables['STATIC_LIB_PREFIX'],
                      generator_default_variables['STATIC_LIB_SUFFIX']),
        'LOCAL_STATIC_LIBRARIES')

    if self.type == 'executable':
      self.WriteLn('include $(BUILD_EXECUTABLE)')
    elif self.type == 'shared_library':
      self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
    elif self.type == 'static_library':
      self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
    self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
  def GetSortedXcodeEnv(self, additional_settings=None):
    """Return the Xcode build environment for this target as a sorted list
    of (name, value) pairs, resolved against make-time variables such as
    $(abs_builddir) and $(BUILDTYPE)."""
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, "$(abs_builddir)",
        os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
        additional_settings)
  def GetSortedXcodePostbuildEnv(self):
    """Return the Xcode env for postbuild steps, always including
    CHROMIUM_STRIP_SAVE_FILE (possibly empty)."""
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
    strip_save_file = self.xcode_settings.GetPerTargetSetting(
        'CHROMIUM_STRIP_SAVE_FILE', '')
    # Even if strip_save_file is empty, explicitly write it. Else a postbuild
    # might pick up an export from an earlier target.
    return self.GetSortedXcodeEnv(
        additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
  def WriteSortedXcodeEnv(self, target, env):
    """Emit one target-specific `export NAME := value` make line per
    (name, value) pair in `env`, scoped to `target`."""
    for k, v in env:
      # For
      #   foo := a\ b
      # the escaped space does the right thing. For
      #   export foo := a\ b
      # it does not -- the backslash is written to the env as literal character.
      # So don't escape spaces in |env[k]|.
      self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
  def Pchify(self, path, lang):
    """Convert a prefix header path to its output directory form."""
    path = self.Absolutify(path)
    if '$(' in path:
      # Variable-bearing paths: rewrite the $(obj)/ prefix in place so it
      # points at the per-language pch output directory, and return early.
      path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
                          (self.toolset, lang))
      return path
    # Plain paths: nest them under the per-language pch output directory.
    return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
  def Absolutify(self, path):
    """Convert a subdirectory-relative path into a base-relative path.
    Skips over paths that contain variables."""
    if '$(' in path:
      # Don't call normpath in this case, as it might collapse the
      # path too aggressively if it features '..'. However it's still
      # important to strip trailing slashes.
      return path.rstrip('/')
    # Ordinary path: rebase onto this target's directory and normalize.
    return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
  def _InstallableTargetInstallPath(self):
    """Returns the location of the final output for an installable target."""
    # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
    # rely on this. Emulate this behavior for mac.
    if (self.type == 'shared_library' and
        (self.flavor != 'mac' or self.toolset != 'target')):
      # Install all shared libs into a common directory (per toolset) for
      # convenient access with LD_LIBRARY_PATH.
      return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
    # Everything else lands directly in the build directory.
    return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the target to regenerate the Makefile."""
  options = params['options']
  # Re-express the gyp input files relative to the top-level dir so the
  # regeneration command works from the makefile's own directory.
  build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                      for filename in params['build_files_arg']]
  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  # A relative binary path must be prefixed with './' to be executable
  # from the shell without a PATH lookup.
  if not gyp_binary.startswith(os.sep):
    gyp_binary = os.path.join('.', gyp_binary)
  # Emit a make rule that reruns gyp (with the original flags) whenever any
  # of the .gyp/.gypi inputs is newer than the generated makefile.
  root_makefile.write(
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n" % {
          'makefile_name': makefile_name,
          'deps': ' '.join(map(Sourceify, build_files)),
          'cmd': gyp.common.EncodePOSIXShellList(
                     [gyp_binary, '-fmake'] +
                     gyp.RegenerateFlags(options) +
                     build_files_args)})
def PerformBuild(data, configurations, params):
  """Invoke `make` once per configuration (gyp's post-generation build hook).

  Args:
    data: parsed gyp data (unused here, part of the generator hook API).
    configurations: iterable of configuration names (e.g. 'Debug').
    params: generator params; `params['options'].toplevel_dir`, when set and
        not '.', is passed to make via -C.

  Raises:
    subprocess.CalledProcessError: if any make invocation fails.
  """
  options = params['options']
  for config in configurations:
    arguments = ['make']
    if options.toplevel_dir and options.toplevel_dir != '.':
      arguments += '-C', options.toplevel_dir
    arguments.append('BUILDTYPE=' + config)
    # print() with a single pre-formatted string behaves identically on
    # Python 2 and 3; the original `print x` statement is a SyntaxError
    # under Python 3.
    print('Building [%s]: %s' % (config, arguments))
    subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: write the root Makefile, one sub-makefile per
  target (and per gyp file), and optionally the auto-regeneration rule.

  Args:
    target_list: qualified target names in build order.
    target_dicts: mapping of qualified target name -> target spec dict.
    data: per-build-file parsed gyp data (includes 'included_files' and
        'make_global_settings').
    params: generator parameters ('options', 'generator_flags', 'flavor',
        'build_files', ...).
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')
  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file
  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'
  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'
  flock_command= 'flock'
  copy_archive_arguments = '-af'
  # Baseline (Linux) header parameters; per-flavor branches below override
  # the pieces that differ on each platform.
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
      'copy_archive_args': copy_archive_arguments,
    }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'openbsd':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
    })
  elif flavor == 'aix':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  # Tool settings: environment variables win over built-in defaults.
  header_params.update({
    'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
    'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
    'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
    'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
    'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
    'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
    'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
    'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
  })
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += '  %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.
  header_params['make_global_settings'] = make_global_settings
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
    WriteRootHeaderSuffixRules(root_makefile)
  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)
  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)
  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))
    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)
    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')
    spec = target_dicts[qualified_target]
    configs = spec['configurations']
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)
    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)
  # Write out per-gyp (sub-project) Makefiles.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)
  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        "    $(findstring $(join ^,$(prefix)),\\\n"
        "                 $(join ^," + include_file + ")))),)\n")
    root_makefile.write("  include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')
  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
  root_makefile.write(SHARED_FOOTER)
  root_makefile.close()
| false | true |
f7f519d97459caf4f0fe25c34e9def15c1d06e1a | 1,943 | py | Python | analytics/purchases/urls.py | EDario333/minegocito | 5dd0869fa2510bb8152f4a117f33b2a30bb6d69c | [
"MIT"
] | null | null | null | analytics/purchases/urls.py | EDario333/minegocito | 5dd0869fa2510bb8152f4a117f33b2a30bb6d69c | [
"MIT"
] | null | null | null | analytics/purchases/urls.py | EDario333/minegocito | 5dd0869fa2510bb8152f4a117f33b2a30bb6d69c | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from . views import \
index, by_range, by_provider, by_user, \
by_product, by_brand, by_shop, \
advanced_cfg_by_range, \
advanced_cfg_by_providers, \
advanced_cfg_by_users, \
advanced_cfg_by_products, \
advanced_cfg_by_brands, \
advanced_cfg_by_shops, \
all_providers, all_users, all_products, all_brands, all_shops
urlpatterns = [
    # Section landing page.
    url(r'^$', index, name='index-analytics-purchases'),
    # Purchase analytics filtered by one dimension ("by-*"), plus the
    # "all-*" listing endpoints that feed the corresponding filter widgets.
    url(r'^by-range/', by_range, name='analytics-purchases-by-range'),
    url(r'^by-provider/', by_provider, name='analytics-purchases-by-provider'),
    url(r'^all-providers/', all_providers, name='analytics-purchases-all-providers'),
    url(r'^by-user/', by_user, name='analytics-purchases-by-user'),
    url(r'^all-users/', all_users, name='analytics-purchases-all-users'),
    url(r'^by-product/', by_product, name='analytics-purchases-by-product'),
    url(r'^all-products/', all_products, name='analytics-purchases-all-products'),
    url(r'^by-brand/', by_brand, name='analytics-purchases-by-brand'),
    url(r'^all-brands/', all_brands, name='analytics-purchases-all-brands'),
    url(r'^by-shop/', by_shop, name='analytics-purchases-by-shop'),
    url(r'^all-shops/', all_shops, name='analytics-purchases-all-shops'),
    # Advanced-configuration forms, one per dimension.
    url(r'^advanced-cfg-by-range/', advanced_cfg_by_range, name='analytics-purchases-advanced-cfg-by-range'),
    url(r'^advanced-cfg-by-providers/', advanced_cfg_by_providers, name='analytics-purchases-advanced-cfg-by-providers'),
    url(r'^advanced-cfg-by-users/', advanced_cfg_by_users, name='analytics-purchases-advanced-cfg-by-users'),
    url(r'^advanced-cfg-by-products/', advanced_cfg_by_products, name='analytics-purchases-advanced-cfg-by-products'),
    url(r'^advanced-cfg-by-brands/', advanced_cfg_by_brands, name='analytics-purchases-advanced-cfg-by-brands'),
    url(r'^advanced-cfg-by-shops/', advanced_cfg_by_shops, name='analytics-purchases-advanced-cfg-by-shops'),
] | 58.878788 | 120 | 0.739578 | from django.conf.urls import url, include
from . views import \
index, by_range, by_provider, by_user, \
by_product, by_brand, by_shop, \
advanced_cfg_by_range, \
advanced_cfg_by_providers, \
advanced_cfg_by_users, \
advanced_cfg_by_products, \
advanced_cfg_by_brands, \
advanced_cfg_by_shops, \
all_providers, all_users, all_products, all_brands, all_shops
# (pattern, view, route name) table; urlpatterns is derived from it below so
# each route is declared exactly once and stays easy to scan.
_ROUTES = (
    (r'^$', index, 'index-analytics-purchases'),
    (r'^by-range/', by_range, 'analytics-purchases-by-range'),
    (r'^by-provider/', by_provider, 'analytics-purchases-by-provider'),
    (r'^all-providers/', all_providers, 'analytics-purchases-all-providers'),
    (r'^by-user/', by_user, 'analytics-purchases-by-user'),
    (r'^all-users/', all_users, 'analytics-purchases-all-users'),
    (r'^by-product/', by_product, 'analytics-purchases-by-product'),
    (r'^all-products/', all_products, 'analytics-purchases-all-products'),
    (r'^by-brand/', by_brand, 'analytics-purchases-by-brand'),
    (r'^all-brands/', all_brands, 'analytics-purchases-all-brands'),
    (r'^by-shop/', by_shop, 'analytics-purchases-by-shop'),
    (r'^all-shops/', all_shops, 'analytics-purchases-all-shops'),
    (r'^advanced-cfg-by-range/', advanced_cfg_by_range, 'analytics-purchases-advanced-cfg-by-range'),
    (r'^advanced-cfg-by-providers/', advanced_cfg_by_providers, 'analytics-purchases-advanced-cfg-by-providers'),
    (r'^advanced-cfg-by-users/', advanced_cfg_by_users, 'analytics-purchases-advanced-cfg-by-users'),
    (r'^advanced-cfg-by-products/', advanced_cfg_by_products, 'analytics-purchases-advanced-cfg-by-products'),
    (r'^advanced-cfg-by-brands/', advanced_cfg_by_brands, 'analytics-purchases-advanced-cfg-by-brands'),
    (r'^advanced-cfg-by-shops/', advanced_cfg_by_shops, 'analytics-purchases-advanced-cfg-by-shops'),
)
urlpatterns = [url(pattern, view, name=name) for pattern, view, name in _ROUTES]
f7f51a6ffd5f73d3c3f5b88b393bc9296bb0bb62 | 1,207 | py | Python | benchmarking/reporters/reporters.py | jspark1105/FAI-PEP | 3cb14506bb7751d32e4507924d121357895af7d9 | [
"Apache-2.0"
] | null | null | null | benchmarking/reporters/reporters.py | jspark1105/FAI-PEP | 3cb14506bb7751d32e4507924d121357895af7d9 | [
"Apache-2.0"
] | null | null | null | benchmarking/reporters/reporters.py | jspark1105/FAI-PEP | 3cb14506bb7751d32e4507924d121357895af7d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from utils.arg_parse import getArgs
from .local_reporter.local_reporter import LocalReporter
from .remote_reporter.remote_reporter import RemoteReporter
from .simple_local_reporter.simple_local_reporter import SimpleLocalReporter
from .screen_reporter.screen_reporter import ScreenReporter
from .simple_screen_reporter.simple_screen_reporter import SimpleScreenReporter
def getReporters():
    """Instantiate every reporter enabled via the command-line flags.

    Returns:
        list: reporter instances in a fixed order (local, simple local,
        remote, screen, simple screen); empty when no reporter flag is set.
    """
    # Parse the command-line arguments once instead of on every check.
    args = getArgs()
    reporters = []
    if args.local_reporter:
        reporters.append(LocalReporter())
    if args.simple_local_reporter:
        reporters.append(SimpleLocalReporter())
    if args.remote_reporter:
        reporters.append(RemoteReporter())
    if args.screen_reporter:
        reporters.append(ScreenReporter())
    if args.simple_screen_reporter:
        reporters.append(SimpleScreenReporter())
    return reporters
| 37.71875 | 79 | 0.677713 | true | true | |
f7f51aebbb12fac4c9b808a0c65fc0223382adba | 1,304 | py | Python | aulas/cores no terminal.py | EduardoPessanha/Git-Python | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | [
"MIT"
] | null | null | null | aulas/cores no terminal.py | EduardoPessanha/Git-Python | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | [
"MIT"
] | null | null | null | aulas/cores no terminal.py | EduardoPessanha/Git-Python | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | [
"MIT"
] | null | null | null | def cor(tipo):
if tipo == 0:
tipo = '\033[m'
elif tipo == 'br':
tipo = '\033[1;30m'
elif tipo == 'vm':
tipo = '\033[1;31m'
elif tipo == 'vd':
tipo = '\033[1;32m'
elif tipo == 'am':
tipo = '\033[1;33m'
elif tipo == 'az':
tipo = '\033[1;34m'
elif tipo == 'mg':
tipo = '\033[1;35m'
elif tipo == 'ci':
tipo = '\033[1;36m'
elif tipo == 'cz':
tipo = '\033[1;37m'
return tipo
# Demo output: blank line, then a banner and an ANSI colour reference table
# rendered via cor() (see the f-string block that follows).
print('\n')
print('\033[1;30;41m-=-' * 10, ' CORES NO TERMINAL ', '-=-' * 10, '\033[m')
print(f""" \nPadrão ANSI - escape sequence
\n \033[1mSTYLE TEXT BACKGROUND\033[m
0 = none 30 40 => {cor('br')}Branca{cor(0)}
1 = Bold 31 41 => {cor('vm')}Vermelho{cor(0)}
3 = Itálico 32 42 => {cor('vd')}Verde{cor(0)}
4 = Underline 33 43 => {cor('am')}Amarelo{cor(0)}
7 = Negative 34 44 => {cor('az')}Azul{cor(0)}
35 45 => {cor('mg')}Magenta (Roxo){cor(0)}
36 46 => {cor('ci')}Ciânico{cor(0)}
37 47 => {cor('cz')}Cinza{cor(0)}
\033[1;7;30m sintaxe ->\O33[STYLE;TEXT;BACKGROUNDm -> Ex.: \O33[1;33;46m \033[m
""")
def cor(tipo):
    """Return the ANSI escape sequence for a short colour tag.

    Unknown tags come back unchanged; 0 yields the reset sequence.
    """
    if tipo == 0:
        return '\033[m'
    if tipo == 'br':
        return '\033[1;30m'
    if tipo == 'vm':
        return '\033[1;31m'
    if tipo == 'vd':
        return '\033[1;32m'
    if tipo == 'am':
        return '\033[1;33m'
    if tipo == 'az':
        return '\033[1;34m'
    if tipo == 'mg':
        return '\033[1;35m'
    if tipo == 'ci':
        return '\033[1;36m'
    if tipo == 'cz':
        return '\033[1;37m'
    # Fall through: anything unrecognised is handed back untouched.
    return tipo
print('\n')
print('\033[1;30;41m-=-' * 10, ' CORES NO TERMINAL ', '-=-' * 10, '\033[m')
print(f""" \nPadrão ANSI - escape sequence
\n \033[1mSTYLE TEXT BACKGROUND\033[m
0 = none 30 40 => {cor('br')}Branca{cor(0)}
1 = Bold 31 41 => {cor('vm')}Vermelho{cor(0)}
3 = Itálico 32 42 => {cor('vd')}Verde{cor(0)}
4 = Underline 33 43 => {cor('am')}Amarelo{cor(0)}
7 = Negative 34 44 => {cor('az')}Azul{cor(0)}
35 45 => {cor('mg')}Magenta (Roxo){cor(0)}
36 46 => {cor('ci')}Ciânico{cor(0)}
37 47 => {cor('cz')}Cinza{cor(0)}
\033[1;7;30m sintaxe ->\O33[STYLE;TEXT;BACKGROUNDm -> Ex.: \O33[1;33;46m \033[m
""")
| true | true |
f7f51d302e8028306249529141bb85502874acec | 1,530 | py | Python | openstack_dashboard/dashboards/project/trunks/panel.py | nicozhang/horizon | 49df5cffd84b6d9da4e5926afd12e0a92737d740 | [
"Apache-2.0"
] | 1 | 2019-08-07T08:46:03.000Z | 2019-08-07T08:46:03.000Z | openstack_dashboard/dashboards/project/trunks/panel.py | nicozhang/horizon | 49df5cffd84b6d9da4e5926afd12e0a92737d740 | [
"Apache-2.0"
] | 7 | 2017-06-26T14:34:33.000Z | 2020-06-30T22:10:50.000Z | openstack_dashboard/dashboards/project/trunks/panel.py | yi-cloud/horizon-xg | 827365753886025dc62fbfbed179ef719d313711 | [
"Apache-2.0"
] | 6 | 2015-05-25T00:31:26.000Z | 2022-03-21T22:36:25.000Z | # Copyright 2017 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import neutron
LOG = logging.getLogger(__name__)
class Trunks(horizon.Panel):
    """Dashboard panel for Neutron trunk ports."""

    name = _("Trunks")
    slug = "trunks"
    permissions = ('openstack.services.network',)

    def allowed(self, context):
        """Show the panel only when the base checks pass, the user has the
        network permissions and Neutron exposes the 'trunk' extension."""
        request = context['request']
        try:
            # Guard clauses replace the single chained boolean expression.
            if not super(Trunks, self).allowed(context):
                return False
            if not request.user.has_perms(self.permissions):
                return False
            return neutron.is_extension_supported(request,
                                                  extension_alias='trunk')
        except Exception:
            LOG.error("Call to list enabled services failed. This is likely "
                      "due to a problem communicating with the Neutron "
                      "endpoint. Trunks panel will not be displayed.")
            return False
| 34 | 78 | 0.645098 |
import logging
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import neutron
LOG = logging.getLogger(__name__)
class Trunks(horizon.Panel):
name = _("Trunks")
slug = "trunks"
permissions = ('openstack.services.network',)
def allowed(self, context):
request = context['request']
try:
return (
super(Trunks, self).allowed(context) and
request.user.has_perms(self.permissions) and
neutron.is_extension_supported(request,
extension_alias='trunk')
)
except Exception:
LOG.error("Call to list enabled services failed. This is likely "
"due to a problem communicating with the Neutron "
"endpoint. Trunks panel will not be displayed.")
return False
| true | true |
f7f51dbddc4c746ce42cf8ce862580469596489f | 1,325 | py | Python | libs/traffic.py | adityashinde1506/deauth_sniper | 37c5981abea9c2347c89ac4baae6f4f8b2a7a029 | [
"MIT"
] | null | null | null | libs/traffic.py | adityashinde1506/deauth_sniper | 37c5981abea9c2347c89ac4baae6f4f8b2a7a029 | [
"MIT"
] | null | null | null | libs/traffic.py | adityashinde1506/deauth_sniper | 37c5981abea9c2347c89ac4baae6f4f8b2a7a029 | [
"MIT"
] | null | null | null | from scapy.all import *
class Traffic:
    """Helpers for inspecting captured 802.11 packets (scapy layers)."""

    def __init__(self):
        # Stateless helper; nothing to initialise.
        pass

    def get_traffic_hexdump(self, packets):
        """Print a hexdump of the innermost payload of each packet."""
        for packet in packets:
            # Walk down the layer stack to the innermost payload.
            while packet.payload:
                packet = packet.payload
            print(hexdump(packet))

    def get_SSIDs(self, packets):
        """Return unique (SSID, BSSID) pairs observed in `packets`.

        Returns:
            list of (ssid, addr) tuples in first-seen order.
        """
        aps = []
        for packet in packets:
            ssid = None
            addr = None
            layer = packet
            while layer:
                # `type(x) is Cls` keeps the original exact-type semantics
                # but avoids constructing throwaway Dot11()/Dot11Elt()
                # instances on every iteration.
                if type(layer) is Dot11:
                    addr = layer.addr3  # BSSID field of the 802.11 header
                elif type(layer) is Dot11Elt:
                    if layer.ID == 0:  # information element 0 carries the SSID
                        ssid = layer.info
                if addr is not None and ssid is not None \
                        and (ssid, addr) not in aps:
                    aps.append((ssid, addr))
                layer = layer.payload
        # Fix: return after scanning *all* packets, not after the first one.
        return aps

    def get_connected_devices(self, packets, APmac):
        """Return MAC addresses seen exchanging frames with `APmac`.

        Excludes the AP itself, the broadcast address, and (fix) None
        placeholders from unused address fields (e.g. addr4).
        """
        devices = []
        for packet in packets:
            if packet.haslayer(Dot11):
                addrs = [packet.addr1, packet.addr2,
                         packet.addr3, packet.addr4]
                if APmac in addrs:
                    for addr in addrs:
                        if (addr is not None and addr not in devices
                                and addr != APmac
                                and addr != "ff:ff:ff:ff:ff:ff"):
                            devices.append(addr)
        return devices
def main():
    # Placeholder entry point; Traffic is intended to be used as a library.
    pass
# Allow running the module directly without side effects on import.
if __name__=="__main__":
    main()
| 28.804348 | 93 | 0.513962 | from scapy.all import *
class Traffic:
def __init__(self):
pass
def get_traffic_hexdump(self,packets):
for packet in packets:
while packet.payload:
packet=packet.payload
print(hexdump(packet))
def get_SSIDs(self,packets):
APs=list()
for packet in packets:
SSID=None
addr=None
while packet:
if type(packet)==type(Dot11()):
addr=packet.addr3
elif type(packet)==type(Dot11Elt()):
if packet.ID==0:
SSID=packet.info
if addr!=None and SSID!=None and (SSID,addr) not in APs:
APs.append((SSID,addr))
packet=packet.payload
return APs
def get_connected_devices(self,packets,APmac):
devices=list()
for packet in packets:
if packet.haslayer(Dot11):
addrs=[packet.addr1,packet.addr2,packet.addr3,packet.addr4]
if APmac in addrs:
for addr in addrs:
if addr not in devices and addr!=APmac and addr!="ff:ff:ff:ff:ff:ff":
devices.append(addr)
return devices
def main():
pass
if __name__=="__main__":
main()
| true | true |
f7f51e914b94bab9be97ddaa8dbab867ae0dd0ce | 12,898 | py | Python | pythalesians_examples/markets/lighttimeseriesfactory_examples.py | qheuristics/pythalesians | fb017e0ff2305d64aab50c3c0ea349e0412559b1 | [
"Apache-2.0"
] | 5 | 2017-04-10T06:54:04.000Z | 2021-11-22T00:11:23.000Z | pythalesians_examples/markets/lighttimeseriesfactory_examples.py | aopore/pythalesians | 4c8ccd57ccf311a205a3a9a4c13c30c9e748636d | [
"Apache-2.0"
] | null | null | null | pythalesians_examples/markets/lighttimeseriesfactory_examples.py | aopore/pythalesians | 4c8ccd57ccf311a205a3a9a4c13c30c9e748636d | [
"Apache-2.0"
] | 6 | 2017-03-12T11:14:37.000Z | 2021-01-26T10:50:43.000Z | __author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
lightimeseriesfactory_examples
Gives several examples of how to download market data from external sources like Bloomberg using LightTimeSeriesFactory.
Also uses PlotFactory to do basic plots.
"""
# for logging
from pythalesians.util.loggermanager import LoggerManager
# to download market data
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
# for plotting graphs
from pythalesians.graphics.graphs.plotfactory import PlotFactory
from pythalesians.graphics.graphs.graphproperties import GraphProperties
# for making elementary calculations on the time series
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
if True:
logger = LoggerManager().getLogger(__name__)
import datetime
# just change "False" to "True" to run any of the below examples
###### download daily data from Bloomberg for EUR/USD and GBP/USD spot and then plot
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 2014", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['EURUSD', # ticker (Thalesians)
'GBPUSD'],
fields = ['close', 'high', 'low'], # which fields to download
vendor_tickers = ['EURUSD BGN Curncy', # ticker (Bloomberg)
'GBPUSD BGN Curncy'],
vendor_fields = ['PX_LAST', 'PX_HIGH', 'PX_LOW'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = None
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
###### download event dates for non farm payrolls and then print
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 2014", # start date
finish_date = datetime.date.today(), # finish date
category = "events",
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['FOMC', 'NFP'],
fields = ['release-date-time-full', 'release-dt', 'actual-release'], # which fields to download
vendor_tickers = ['FDTR Index', 'NFP TCH Index'], # ticker (Bloomberg)
vendor_fields = ['ECO_FUTURE_RELEASE_DATE_LIST', 'ECO_RELEASE_DT', 'ACTUAL_RELEASE'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = None
df = ltsf.harvest_time_series(time_series_request)
print(df)
###### download daily data from Bloomberg for 30Y DE bonds and then plot
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 1990", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['DE 30Y Bond'], # ticker (Thalesians)
fields = ['close', 'high', 'low', 'open'], # which fields to download
vendor_tickers = ['UB1 Comdty'], # ticker (Bloomberg)
vendor_fields = ['PX_LAST', 'PX_HIGH', 'PX_LOW', 'PX_OPEN'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = None
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
###### download intraday data from Bloomberg for EUR/USD and GBP/USD spot and then plot
if False:
from datetime import timedelta
start_date = datetime.datetime.utcnow() - timedelta(days=1)
time_series_request = TimeSeriesRequest(
start_date = start_date, # start date
finish_date = datetime.datetime.utcnow(), # finish date
freq = 'intraday', # intraday data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = ['EURUSD', # ticker (Thalesians)
'GBPUSD',
'JPYUSD',
'AUDUSD'],
fields = ['close'], # which fields to download
vendor_tickers = ['EURUSD BGN Curncy', # ticker (Bloomberg)
'GBPUSD BGN Curncy',
'JPYUSD BGN Curncy',
'AUDUSD BGN Curncy'],
vendor_fields = ['close'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
df.columns = [x.replace('.close', '') for x in df.columns.values]
gp = GraphProperties()
pf = PlotFactory()
gp.source = 'Thalesians/BBG (created with PyThalesians Python library)'
tsc = TimeSeriesCalcs()
df = tsc.create_mult_index_from_prices(df)
pf.plot_line_graph(df, adapter = 'pythalesians', gp = gp)
###### download daily data from Quandl (via FRED) for EUR/USD and GBP/USD spot and then plot
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 1970", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'quandl', # use Quandl as data source
tickers = ['EURUSD', # ticker (Thalesians)
'GBPUSD'],
fields = ['close'], # which fields to download
vendor_tickers = ['FRED/DEXUSEU', 'FRED/DEXUSUK'], # ticker (Quandl)
vendor_fields = ['close'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
###### download CPI data from FRED
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 1970", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'fred', # use FRED as data source
tickers = ['US CPI YoY', 'EZ CPI YoY'], # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = ['CPIAUCSL', 'CP0000EZ17M086NEST'], # ticker (Yahoo)
vendor_fields = ['Close'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
# calculate YoY data
df = df / df.shift(12) - 1
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
###### download daily data from Yahoo for Apple and Citigroup stock and then plot
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 1970", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'yahoo', # use Bloomberg as data source
tickers = ['Apple', 'Citigroup'], # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = ['aapl', 'c'], # ticker (Yahoo)
vendor_fields = ['Close'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
# downloading historical tick data from Dukascopy broker for EUR/USD
# (past month of data cannot be downloaded, hence cannot be used for live trading)
# Pull historical tick data for EUR/USD and GBP/USD from the Dukascopy
# retail feed (the most recent month is not served, so this source is
# unsuitable for live trading), then plot bid/ask with GBP/USD on a
# secondary y-axis.
if True:
    time_series_request = TimeSeriesRequest(
        start_date="01 Jun 2015",                # start date
        finish_date="02 Jun 2015",               # finish date
        freq="tick",                             # tick frequency
        data_source="dukascopy",                 # use Dukascopy as data source
        tickers=["EURUSD", "GBPUSD"],            # tickers (Thalesians)
        fields=["bid", "ask"],                   # which fields to download
        vendor_tickers=["EURUSD", "GBPUSD"],     # tickers (Dukascopy)
        vendor_fields=["bid", "ask"],            # which Dukascopy fields to download
        cache_algo="internet_load_return",       # how to return data
    )

    ltsf = LightTimeSeriesFactory()
    df = ltsf.harvest_time_series(time_series_request)

    gp = GraphProperties()
    gp.y_axis_2_series = ["GBPUSD.bid", "GBPUSD.ask"]

    pf = PlotFactory()
    pf.plot_line_graph(df, adapter="pythalesians", gp=gp)
###### download daily data from Google for Apple and S&P500 ETF (and then rebase, before plotting)
if False:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 1970", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'google', # use Bloomberg as data source
tickers = ['Apple', 'S&P500 ETF'], # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = ['aapl', 'spy'], # ticker (Google)
vendor_fields = ['Close'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
tsc = TimeSeriesCalcs()
df = tsc.create_mult_index_from_prices(ltsf.harvest_time_series(time_series_request))
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
| 49.417625 | 140 | 0.530702 | __author__ = 'saeedamen'
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
from pythalesians.graphics.graphs.plotfactory import PlotFactory
from pythalesians.graphics.graphs.graphproperties import GraphProperties
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
if True:
logger = LoggerManager().getLogger(__name__)
import datetime
'GBPUSD'],
fields = ['close', 'high', 'low'],
vendor_tickers = ['EURUSD BGN Curncy',
'GBPUSD BGN Curncy'],
vendor_fields = ['PX_LAST', 'PX_HIGH', 'PX_LOW'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = None
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
data_source = 'bloomberg',
tickers = ['FOMC', 'NFP'],
fields = ['release-date-time-full', 'release-dt', 'actual-release'],
vendor_tickers = ['FDTR Index', 'NFP TCH Index'],
vendor_fields = ['ECO_FUTURE_RELEASE_DATE_LIST', 'ECO_RELEASE_DT', 'ACTUAL_RELEASE'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = None
df = ltsf.harvest_time_series(time_series_request)
print(df)
ers = ['DE 30Y Bond'],
fields = ['close', 'high', 'low', 'open'],
vendor_tickers = ['UB1 Comdty'],
vendor_fields = ['PX_LAST', 'PX_HIGH', 'PX_LOW', 'PX_OPEN'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = None
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
berg',
tickers = ['EURUSD',
'GBPUSD',
'JPYUSD',
'AUDUSD'],
fields = ['close'],
vendor_tickers = ['EURUSD BGN Curncy',
'GBPUSD BGN Curncy',
'JPYUSD BGN Curncy',
'AUDUSD BGN Curncy'],
vendor_fields = ['close'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
df.columns = [x.replace('.close', '') for x in df.columns.values]
gp = GraphProperties()
pf = PlotFactory()
gp.source = 'Thalesians/BBG (created with PyThalesians Python library)'
tsc = TimeSeriesCalcs()
df = tsc.create_mult_index_from_prices(df)
pf.plot_line_graph(df, adapter = 'pythalesians', gp = gp)
ields = ['close'],
vendor_tickers = ['FRED/DEXUSEU', 'FRED/DEXUSUK'],
vendor_fields = ['close'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
te = datetime.date.today(),
freq = 'daily',
data_source = 'fred',
tickers = ['US CPI YoY', 'EZ CPI YoY'],
fields = ['close'],
vendor_tickers = ['CPIAUCSL', 'CP0000EZ17M086NEST'],
vendor_fields = ['Close'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
df = df / df.shift(12) - 1
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
fields = ['close'],
vendor_tickers = ['aapl', 'c'],
vendor_fields = ['Close'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
if True:
time_series_request = TimeSeriesRequest(
start_date = "01 Jun 2015",
finish_date = "02 Jun 2015",
freq = 'tick',
data_source = 'dukascopy',
tickers = ['EURUSD', 'GBPUSD'],
fields = ['bid', 'ask'],
vendor_tickers = ['EURUSD', 'GBPUSD'],
vendor_fields = ['bid', 'ask'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
df = ltsf.harvest_time_series(time_series_request)
gp = GraphProperties()
gp.y_axis_2_series = ['GBPUSD.bid', 'GBPUSD.ask']
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians', gp = gp)
vendor_tickers = ['aapl', 'spy'],
vendor_fields = ['Close'],
cache_algo = 'internet_load_return')
ltsf = LightTimeSeriesFactory()
tsc = TimeSeriesCalcs()
df = tsc.create_mult_index_from_prices(ltsf.harvest_time_series(time_series_request))
pf = PlotFactory()
pf.plot_line_graph(df, adapter = 'pythalesians')
| true | true |
f7f520cafff80c11f4efc972bffb38390142fe48 | 498 | py | Python | cryomem/test/BIVdata_old/convert.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | 1 | 2018-09-16T12:29:04.000Z | 2018-09-16T12:29:04.000Z | cryomem/test/BIVdata_old/convert.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | null | null | null | cryomem/test/BIVdata_old/convert.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | null | null | null | """Convert all cmtools oscilloscope datafiles to zip datafiles."""
from cryomem.common.datafile import conv_tdsbin
from glob import glob
import os.path
import os
datadir = "data"
datafiles = sorted(glob(datadir + "/*.dat"))
for df in datafiles:
    # Convert the legacy .dat file to the new zip datafile format.
    # The extension from splitext is not needed, only the root path;
    # the previous code bound it to `ext` and then shadowed it below.
    root, _ = os.path.splitext(df)
    print("Datafiles converted to:", conv_tdsbin(df))
    # Remove the old-format companion files now that conversion succeeded.
    # NOTE(review): this assumes all three companions exist for every .dat;
    # a missing one raises FileNotFoundError and aborts the run — confirm
    # that is the intended behavior.
    for old_ext in [".dat", ".txt", ".osccfg"]:
        os.remove(root + old_ext)
        print("Removed", root + old_ext)
| 27.666667 | 66 | 0.668675 | from cryomem.common.datafile import conv_tdsbin
from glob import glob
import os.path
import os
datadir = "data"
datafiles = sorted(glob(datadir + "/*.dat"))
for df in datafiles:
root, ext = os.path.splitext(df)
print("Datafiles converted to:", conv_tdsbin(df))
for ext in [".dat", ".txt", ".osccfg"]:
os.remove(root + ext)
print("Removed", root + ext)
| true | true |
f7f5226523ecf52dbde7b108382ac3a32d968028 | 22,523 | py | Python | inverse_graphics/direct_pose_and_param_estimation/pose_head.py | gizatt/scene_generation | cd978b4fe8ac58983894db3fb93d625c85578dd6 | [
"MIT"
] | 5 | 2018-11-27T18:46:01.000Z | 2020-09-06T19:59:12.000Z | inverse_graphics/direct_pose_and_param_estimation/pose_head.py | gizatt/scene_generation | cd978b4fe8ac58983894db3fb93d625c85578dd6 | [
"MIT"
] | null | null | null | inverse_graphics/direct_pose_and_param_estimation/pose_head.py | gizatt/scene_generation | cd978b4fe8ac58983894db3fb93d625c85578dd6 | [
"MIT"
] | null | null | null | import fvcore.nn.weight_init as weight_init
import numpy as np
import torch
from detectron2.layers import Conv2d, ConvTranspose2d, cat, get_norm
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from torch import nn
from torch.nn import functional as F
from scene_generation.utils.torch_quaternion import (
quat2mat
)
ROI_POSE_HEAD_REGISTRY = Registry("ROI_POSE_HEAD")
@ROI_POSE_HEAD_REGISTRY.register()
class RCNNPoseXyzHead(nn.Module):
    """
    Takes an ROI and spits out estimates of the object XYZ pose.

    Operates by applying a number of convolutional + FC layers with
    a final soft classification output over a discretization of the
    pose xyz components.

    Layout is:
        conv layers --> FC layers --> N pose estimate bins --> final regression
                                           |                       |
                                           v                       v
                                    cross-entropy loss          L1 loss
    """

    def __init__(self, cfg, input_shape):
        """
        Args:
            cfg: config node; reads MODEL.ROI_POSE_XYZ_HEAD.{NUM_CONV, CONV_DIM,
                NUM_FC, FC_DIM, NORM, NUM_BINS, XYZ_BIN_RANGES}.
            input_shape: ShapeSpec-like object with .channels/.height/.width of
                the incoming ROI feature map.
        """
        super().__init__()

        # fmt: off
        num_conv = cfg.MODEL.ROI_POSE_XYZ_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_POSE_XYZ_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_POSE_XYZ_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_POSE_XYZ_HEAD.FC_DIM
        norm = cfg.MODEL.ROI_POSE_XYZ_HEAD.NORM
        self.num_bins = cfg.MODEL.ROI_POSE_XYZ_HEAD.NUM_BINS
        self.xyz_bin_ranges = cfg.MODEL.ROI_POSE_XYZ_HEAD.XYZ_BIN_RANGES
        # fmt: on
        assert num_conv + num_fc > 0

        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
        num_output_params = self.num_bins * 3

        self.conv_norm_relus = []
        for k in range(num_conv):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not norm,
                norm=get_norm(norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        self.fcs = []
        for k in range(num_fc):
            if k == 0:
                # Takes 3x3 calibrations, rotations, and Hinfs (3 * 9 = 27
                # extra scalars) as input as well.
                fc = nn.Linear(np.prod(self._output_size) + 27, fc_dim)
                self._output_size = fc_dim
            elif k < num_fc - 1:
                fc = nn.Linear(np.prod(self._output_size), fc_dim)
                self._output_size = fc_dim
            else:
                # Last FC layer emits the per-axis bin logits.
                fc = nn.Linear(np.prod(self._output_size), num_output_params)
                self._output_size = num_output_params
            self.add_module("fc{}".format(k + 1), fc)
            self.fcs.append(fc)
        self._output_size = fc_dim

        # Temperature for softmax (gets exponentiated in the
        # forward pass to ensure it's always positive).
        self.log_T = torch.nn.Parameter(torch.log(torch.tensor([0.5])))
        self.log_T.requires_grad = True

        # Pre-compute xyz bin centers and bin widths per axis.
        xyz_bin_corners = []
        xyz_bin_widths = []
        for k in range(3):
            bottom, top = self.xyz_bin_ranges[k]
            xyz_bin_widths.append((top - bottom) / (self.num_bins - 1))
            xyz_bin_corners.append(
                torch.linspace(bottom, top, steps=self.num_bins))
        self.register_buffer("xyz_bin_corners", torch.stack(xyz_bin_corners))
        self.register_buffer("xyz_bin_widths", torch.tensor(xyz_bin_widths))

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    def forward(self, x, Kcs, rotations, Hinfs):
        """
        Args:
            x: ROI features of shape (B, C, H, W).
            Kcs, rotations, Hinfs: per-ROI 3x3 matrices, flattened and
                concatenated onto the conv features before the FC stack.

        Returns:
            (xyz_estimate, log_P): the soft-argmax xyz estimate (B, 3) and the
            per-axis log bin probabilities (B, 3, num_bins).
        """
        for layer in self.conv_norm_relus:
            x = layer(x)
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        Kcs = torch.flatten(Kcs, start_dim=1)
        rotations = torch.flatten(rotations, start_dim=1)
        Hinfs = torch.flatten(Hinfs, start_dim=1)
        x = torch.cat([x, Kcs, rotations, Hinfs], dim=-1)
        if len(self.fcs):
            for layer in self.fcs:
                x = F.relu(layer(x))
        x = x.reshape(x.shape[0], 3, self.num_bins)
        log_P = F.log_softmax(x / torch.exp(self.log_T), dim=-1)
        # Expectation of the bin centers under the predicted distribution.
        xyz_estimate = torch.sum(torch.exp(log_P) * self.xyz_bin_corners, dim=2)
        if self.training:
            get_event_storage().put_scalar("pose_xyz_log_T", self.log_T)
        return xyz_estimate, log_P

    def pose_xyz_rcnn_loss(self, pose_xyz_estimate, log_P,
                           instances, loss_weight=1.0, loss_type="l1"):
        """
        Compute the error between the estimated and actual pose.

        Args:
            pose_xyz_estimate (Tensor): A tensor of shape (B, 3) for batch size B.
            log_P (Tensor): A tensor of shape (B, 3, N_bins) for batch size B,
                and # of xyz bins N_bins.
            instances (list[Instances]): A list of N Instances, where N is the
                number of images in the batch, in 1:1 correspondence with the
                pose estimates; ground-truth labels are stored in fields.
            loss_weight (float): A float to multiply the loss with.
            loss_type (string): 'l1' or 'l2'

        Returns:
            xyz_pose_loss (Tensor): A scalar tensor containing the loss
            (cross-entropy over bins plus the regression term), or 0. if
            there are no ground-truth instances.
        """
        total_num_pose_estimates = pose_xyz_estimate.size(0)
        assert pose_xyz_estimate.size(1) == 3
        assert log_P.size(0) == total_num_pose_estimates
        assert log_P.size(1) == 3
        assert log_P.size(2) == self.num_bins

        # Gather up gt xyz poses from the list of Instances objects.
        all_gt_pose_xyz = []
        for instances_per_image in instances:
            if len(instances_per_image) == 0:
                continue
            all_gt_pose_xyz.append(
                instances_per_image.gt_pose_quatxyz[:, -3:].to(device=pose_xyz_estimate.device))
        if len(all_gt_pose_xyz) == 0:
            return 0.
        all_gt_pose_xyz = cat(all_gt_pose_xyz, dim=0)
        assert all_gt_pose_xyz.numel() > 0, all_gt_pose_xyz.shape

        # Compute the bin index in which the ground truth xyz poses fall
        # by subtracting off the bin left boundaries and dividing by the
        # bin widths.
        distance_into_bins = all_gt_pose_xyz.detach() - self.xyz_bin_corners[:, 0]
        bin_indices = (distance_into_bins / self.xyz_bin_widths).floor()
        # BUGFIX: the upper clamp bound must be num_bins - 1. Clamping to
        # num_bins let ground-truth poses past the last bin edge produce an
        # out-of-range index into log_P (whose last dim has size num_bins).
        bin_indices = torch.clamp(bin_indices, 0, self.num_bins - 1).long()
        active_log_probs = torch.stack(
            [log_P[k, range(3), bin_indices[k, :]]
             for k in range(total_num_pose_estimates)])
        pose_loss = torch.mean(-active_log_probs)

        if loss_type == "l1":
            pose_loss = pose_loss + F.l1_loss(
                pose_xyz_estimate, all_gt_pose_xyz, reduction="mean"
            )
        elif loss_type == "l2":
            pose_loss = pose_loss + F.mse_loss(
                pose_xyz_estimate, all_gt_pose_xyz, reduction="mean"
            )
        else:
            raise NotImplementedError("Unrecognized loss type: ", loss_type)

        pose_loss = pose_loss * loss_weight
        return pose_loss
@ROI_POSE_HEAD_REGISTRY.register()
class RCNNPoseRpyHead(nn.Module):
    """
    Takes an ROI and spits out estimates of the object pose RPY components.

    Operates by applying a number of convolutional + FC layers with
    a final soft classification output over a discretization of the
    pose rpy components.

    Layout is:
        conv layers --> FC layers --> N pose estimate bins --> final regression
                                           |                       |
                                           v                       v
                                    cross-entropy loss          L1 loss

    RPY is treated differently than XYZ, as in the 3dRCNN paper (Kundu et al):
    XYZ is discretized over some range with loss taken directly, whereas RPY is
    discretized over the full angle range [-pi, pi] with a complex-expectation
    estimate and a loss that wraps around at +/- pi.
    """

    def __init__(self, cfg, input_shape):
        """
        Args:
            cfg: config node; reads MODEL.ROI_POSE_RPY_HEAD.{NUM_CONV, CONV_DIM,
                NUM_FC, FC_DIM, NORM, NUM_BINS}.
            input_shape: ShapeSpec-like object with .channels/.height/.width of
                the incoming ROI feature map.
        """
        super().__init__()

        # fmt: off
        num_conv = cfg.MODEL.ROI_POSE_RPY_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_POSE_RPY_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_POSE_RPY_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_POSE_RPY_HEAD.FC_DIM
        norm = cfg.MODEL.ROI_POSE_RPY_HEAD.NORM
        self.num_bins = cfg.MODEL.ROI_POSE_RPY_HEAD.NUM_BINS
        # fmt: on
        assert num_conv + num_fc > 0

        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
        num_output_params = self.num_bins * 3

        self.conv_norm_relus = []
        for k in range(num_conv):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not norm,
                norm=get_norm(norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        self.fcs = []
        for k in range(num_fc):
            if k == 0:
                # Takes 3x3 calibrations + Hinfs + rotmats (3 * 9 = 27 extra
                # scalars) as input as well.
                fc = nn.Linear(np.prod(self._output_size) + 27, fc_dim)
                self._output_size = fc_dim
            elif k < num_fc - 1:
                fc = nn.Linear(np.prod(self._output_size), fc_dim)
                self._output_size = fc_dim
            else:
                # Last FC layer emits the per-angle bin logits.
                fc = nn.Linear(np.prod(self._output_size), num_output_params)
                self._output_size = num_output_params
            self.add_module("fc{}".format(k + 1), fc)
            self.fcs.append(fc)
        self._output_size = fc_dim

        # Temperature for softmax (gets exponentiated in the
        # forward pass to ensure it's always positive).
        self.log_T = torch.nn.Parameter(torch.log(torch.tensor([0.5])))
        self.log_T.requires_grad = True

        # Pre-compute rpy bin centers -- to make computing the complex
        # expectation easier, prepare the real + imaginary component of
        # the complex exponential of each bin center.
        rpy_bin_corners = []
        rpy_bin_widths = []
        for k in range(3):
            bottom, top = -np.pi, np.pi
            rpy_bin_widths.append((top - bottom) / (self.num_bins - 1))
            rpy_bin_corners.append(
                torch.linspace(bottom, top, steps=self.num_bins))
        rpy_bin_corners = torch.stack(rpy_bin_corners)
        self.register_buffer("rpy_bin_corners",
                             rpy_bin_corners)
        self.register_buffer("rpy_bin_corners_real",
                             torch.cos(rpy_bin_corners))
        self.register_buffer("rpy_bin_corners_imag",
                             torch.sin(rpy_bin_corners))
        self.register_buffer("rpy_bin_widths", torch.tensor(rpy_bin_widths))

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    def forward(self, x, Kcs, rotations, Hinfs):
        """
        Args:
            x: ROI features of shape (B, C, H, W).
            Kcs, rotations, Hinfs: per-ROI 3x3 matrices, flattened and
                concatenated onto the conv features before the FC stack.

        Returns:
            (rpy_estimate, log_P): the circular-mean rpy estimate (B, 3) in
            [-pi, pi] and per-angle log bin probabilities (B, 3, num_bins).
        """
        for layer in self.conv_norm_relus:
            x = layer(x)
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        Kcs = torch.flatten(Kcs, start_dim=1)
        rotations = torch.flatten(rotations, start_dim=1)
        Hinfs = torch.flatten(Hinfs, start_dim=1)
        x = torch.cat([x, Kcs, rotations, Hinfs], dim=-1)
        if len(self.fcs):
            for layer in self.fcs:
                x = F.relu(layer(x))
        x = x.reshape(x.shape[0], 3, self.num_bins)
        log_P = F.log_softmax(x / torch.exp(self.log_T), dim=-1)
        # To get the estimate, take the *complex* expectation -- see
        # eq. (2) in the 3dRCNN paper.
        P = torch.exp(log_P)
        real_total = torch.sum(P * self.rpy_bin_corners_real, dim=2)
        imag_total = torch.sum(P * self.rpy_bin_corners_imag, dim=2)
        rpy_estimate = torch.atan2(imag_total, real_total)
        if self.training:
            get_event_storage().put_scalar("pose_rpy_log_T", self.log_T)
        return rpy_estimate, log_P

    def pose_rpy_rcnn_loss(self, pose_rpy_estimate, log_P,
                           instances, loss_weight=1.0, loss_type="l1"):
        """
        Compute the error between the estimated and actual pose.

        Args:
            pose_rpy_estimate (Tensor): A tensor of shape (B, 3) for batch size B.
            log_P (Tensor): A tensor of shape (B, 3, N_bins) for batch size B,
                and # of rpy bins N_bins.
            instances (list[Instances]): A list of N Instances, where N is the
                number of images in the batch, in 1:1 correspondence with the
                pose estimates; ground-truth labels are stored in fields.
            loss_weight (float): A float to multiply the loss with.
            loss_type (string): 'l1' or 'l2'

        Returns:
            rpy_pose_loss (Tensor): A scalar tensor containing the loss
            (cross-entropy over bins plus the wrap-aware regression term),
            or 0. if there are no ground-truth instances.
        """
        total_num_pose_estimates = pose_rpy_estimate.size(0)
        assert pose_rpy_estimate.size(1) == 3
        assert log_P.size(0) == total_num_pose_estimates
        assert log_P.size(1) == 3
        assert log_P.size(2) == self.num_bins

        # Gather up gt rpy poses from the list of Instances objects.
        all_gt_pose_rpy = []
        for instances_per_image in instances:
            if len(instances_per_image) == 0:
                continue
            all_gt_pose_rpy.append(
                instances_per_image.gt_pose_rpy.to(device=pose_rpy_estimate.device))
        if len(all_gt_pose_rpy) == 0:
            return 0.
        all_gt_pose_rpy = cat(all_gt_pose_rpy, dim=0)
        assert all_gt_pose_rpy.numel() > 0, all_gt_pose_rpy.shape

        # Compute the bin index in which the ground truth rpy poses fall
        # by subtracting off the bin left boundaries and dividing by the
        # bin widths.
        distance_into_bins = all_gt_pose_rpy.detach() - self.rpy_bin_corners[:, 0]
        bin_indices = (distance_into_bins / self.rpy_bin_widths).floor()
        # BUGFIX: the upper clamp bound must be num_bins - 1. Clamping to
        # num_bins let ground-truth angles at/past the last bin edge produce
        # an out-of-range index into log_P (last dim has size num_bins).
        bin_indices = torch.clamp(bin_indices, 0, self.num_bins - 1).long()
        active_log_probs = torch.stack(
            [log_P[k, range(3), bin_indices[k, :]]
             for k in range(total_num_pose_estimates)])
        pose_loss = torch.mean(-active_log_probs)

        # In either loss case, collapse among the minimum (elementwise) loss
        # among the original angle estimate, as well as the angle estimate
        # rotated left and right by 2pi (wrap-around handling).
        pose_loss_0 = torch.abs(pose_rpy_estimate - all_gt_pose_rpy)
        pose_loss_1 = torch.abs(pose_rpy_estimate + np.pi*2. - all_gt_pose_rpy)
        pose_loss_2 = torch.abs(pose_rpy_estimate - np.pi*2. - all_gt_pose_rpy)
        pose_loss_min, _ = torch.min(torch.stack([pose_loss_0, pose_loss_1, pose_loss_2], dim=0), dim=0)
        if loss_type == "l1":
            pose_loss = pose_loss + torch.mean(pose_loss_min)
        elif loss_type == "l2":
            pose_loss = pose_loss + torch.mean(pose_loss_min**2.)
        else:
            raise NotImplementedError("Unrecognized loss type: ", loss_type)

        pose_loss = pose_loss * loss_weight
        return pose_loss
@ROI_POSE_HEAD_REGISTRY.register()
class RCNNPose6DOFRotHead(nn.Module):
    """
    Takes an ROI and spits out estimates of the object pose rotation estimate
    (in rotation matrix form).

    Operates by applying a number of convolutional + FC layers that spit out
    a 6DOF overparameterized rotation format (following
    https://arxiv.org/pdf/1812.07035.pdf).

    Layout is:
        conv layers --> FC layers --> 6DOF representation -> postprocess into rotmat
    """

    def __init__(self, cfg, input_shape):
        """
        Args:
            cfg: config node; reads MODEL.ROI_POSE_6DOF_ROT_HEAD.{NUM_CONV,
                CONV_DIM, NUM_FC, FC_DIM, NORM}.
            input_shape: ShapeSpec-like object with .channels/.height/.width of
                the incoming ROI feature map.
        """
        super().__init__()

        # fmt: off
        num_conv = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.FC_DIM
        norm = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NORM
        # fmt: on
        assert num_conv + num_fc > 0

        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
        num_output_params = 6

        self.conv_norm_relus = []
        for k in range(num_conv):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not norm,
                norm=get_norm(norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        self.fcs = []
        for k in range(num_fc):
            if k == 0:
                # Takes 3x3 calibrations + Hinfs + rotmats (3 * 9 = 27 extra
                # scalars) as input as well.
                fc = nn.Linear(np.prod(self._output_size) + 27, fc_dim)
                self._output_size = fc_dim
            elif k < num_fc - 1:
                fc = nn.Linear(np.prod(self._output_size), fc_dim)
                self._output_size = fc_dim
            else:
                # Last FC layer emits the 6 rotation parameters.
                fc = nn.Linear(np.prod(self._output_size), num_output_params)
                self._output_size = num_output_params
            self.add_module("fc{}".format(k + 1), fc)
            self.fcs.append(fc)
        self._output_size = fc_dim

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    def forward(self, x, Kcs, rotations, Hinfs):
        """
        Args:
            x: ROI features of shape (B, C, H, W).
            Kcs, rotations, Hinfs: per-ROI 3x3 matrices, flattened and
                concatenated onto the conv features before the FC stack.

        Returns:
            R (Tensor): (B, 3, 3) rotation matrices assembled from the 6DOF
            prediction via Gram-Schmidt (Appendix B of
            https://arxiv.org/pdf/1812.07035.pdf).
        """
        for layer in self.conv_norm_relus:
            x = layer(x)
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        Kcs = torch.flatten(Kcs, start_dim=1)
        rotations = torch.flatten(rotations, start_dim=1)
        Hinfs = torch.flatten(Hinfs, start_dim=1)
        x = torch.cat([x, Kcs, rotations, Hinfs], dim=-1)
        if len(self.fcs):
            for layer in self.fcs:
                x = F.relu(layer(x))
        x = x.reshape(x.shape[0], 2, 3)
        # We've predicted two 3-vectors -- normalize them and form a rotation
        # matrix from them referencing Appendix B in
        # https://arxiv.org/pdf/1812.07035.pdf
        a1 = x[:, 0, :]
        a2 = x[:, 1, :]
        b1 = F.normalize(a1, p=2, dim=1)
        # Sum is repeated out to [batch x 3] from [batch] so it
        # broadcast-multiplies with [batch x 3] b1 happily.
        b2 = F.normalize(a2 - (torch.sum(b1*a2, dim=-1).view(-1, 1).expand(-1, 3)*b1), dim=1)
        # Pass dim explicitly: relying on torch.cross's implicit "first dim of
        # size 3" selection is deprecated; dim=1 is the same axis it picked.
        b3 = torch.cross(b1, b2, dim=1)
        R = torch.stack([b1, b2, b3], dim=-1)
        return R

    def pose_6DOF_rot_rcnn_loss(self, R_estimate,
                                instances, loss_weight=1.0, loss_type="l1"):
        """
        Compute the error between the estimated and actual pose.

        Args:
            R_estimate (Tensor): A tensor of shape (B, 3, 3) for batch size B.
            instances (list[Instances]): A list of N Instances, where N is the
                number of images in the batch, in 1:1 correspondence with the
                pose estimates; ground-truth labels are stored in fields.
            loss_weight (float): A float to multiply the loss with.
            loss_type (string): 'l1' or 'l2'

        Returns:
            loss (Tensor): A scalar tensor containing the geodesic-angle loss,
            or 0. if there are no ground-truth instances.
        """
        total_num_pose_estimates = R_estimate.size(0)
        assert R_estimate.shape[-2:] == (3, 3)

        # Gather up gt rotation matrices from the list of Instances objects.
        all_gt_pose_quat = []
        for instances_per_image in instances:
            if len(instances_per_image) == 0:
                continue
            all_gt_pose_quat.append(
                instances_per_image.gt_pose_quatxyz[:, :4].detach().to(device=R_estimate.device))
        if len(all_gt_pose_quat) == 0:
            return 0.
        all_gt_pose_quat = cat(all_gt_pose_quat, dim=0)
        assert all_gt_pose_quat.numel() > 0, all_gt_pose_quat.shape
        all_gt_pose_rotmat = quat2mat(all_gt_pose_quat)

        # Get rotation difference between predicted and target.
        diff_rotations = torch.matmul(all_gt_pose_rotmat, torch.transpose(R_estimate, 1, 2)).contiguous()
        # Batch trace implementation from torch_quaternion.py
        rotation_matrix_vec = diff_rotations.reshape(*diff_rotations.shape[:-2], 9)
        m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk(
            rotation_matrix_vec, chunks=9, dim=-1)
        trace = m00 + m11 + m22
        # Clamp keeps acos away from the +/-1 endpoints, where its gradient
        # is infinite.
        angle_errors = torch.acos(torch.clamp((trace - 1.)/2., -0.9999, 0.9999))
        if loss_type == "l1":
            pose_loss = torch.mean(angle_errors)
        elif loss_type == "l2":
            pose_loss = torch.mean(angle_errors**2.)
        else:
            raise NotImplementedError("Unrecognized loss type: ", loss_type)

        pose_loss = pose_loss * loss_weight
        return pose_loss
def build_pose_xyz_head(cfg, input_shape):
    """Look up the configured pose-xyz head class in the registry and build it."""
    head_cls = ROI_POSE_HEAD_REGISTRY.get(cfg.MODEL.ROI_POSE_XYZ_HEAD.NAME)
    return head_cls(cfg, input_shape)
def build_pose_rpy_head(cfg, input_shape):
    """Look up the configured pose-rpy head class in the registry and build it."""
    head_cls = ROI_POSE_HEAD_REGISTRY.get(cfg.MODEL.ROI_POSE_RPY_HEAD.NAME)
    return head_cls(cfg, input_shape)
def build_pose_6DOF_rot_head(cfg, input_shape):
    """Look up the configured 6DOF rotation head class in the registry and build it."""
    # NOTE: the original return line was fused with dataset-dump residue
    # (stats columns); reconstructed to match the sibling build_* helpers.
    name = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NAME
    return ROI_POSE_HEAD_REGISTRY.get(name)(cfg, input_shape)
import numpy as np
import torch
from detectron2.layers import Conv2d, ConvTranspose2d, cat, get_norm
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from torch import nn
from torch.nn import functional as F
from scene_generation.utils.torch_quaternion import (
quat2mat
)
ROI_POSE_HEAD_REGISTRY = Registry("ROI_POSE_HEAD")
@ROI_POSE_HEAD_REGISTRY.register()
class RCNNPoseXyzHead(nn.Module):
def __init__(self, cfg, input_shape):
super().__init__()
num_conv = cfg.MODEL.ROI_POSE_XYZ_HEAD.NUM_CONV
conv_dim = cfg.MODEL.ROI_POSE_XYZ_HEAD.CONV_DIM
num_fc = cfg.MODEL.ROI_POSE_XYZ_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_POSE_XYZ_HEAD.FC_DIM
norm = cfg.MODEL.ROI_POSE_XYZ_HEAD.NORM
self.num_bins = cfg.MODEL.ROI_POSE_XYZ_HEAD.NUM_BINS
self.xyz_bin_ranges = cfg.MODEL.ROI_POSE_XYZ_HEAD.XYZ_BIN_RANGES
assert num_conv + num_fc > 0
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
num_output_params = self.num_bins * 3
self.conv_norm_relus = []
for k in range(num_conv):
conv = Conv2d(
self._output_size[0],
conv_dim,
kernel_size=3,
padding=1,
bias=not norm,
norm=get_norm(norm, conv_dim),
activation=F.relu,
)
self.add_module("conv{}".format(k + 1), conv)
self.conv_norm_relus.append(conv)
self._output_size = (conv_dim, self._output_size[1], self._output_size[2])
self.fcs = []
for k in range(num_fc):
if k == 0:
fc = nn.Linear(np.prod(self._output_size) + 27, fc_dim)
self._output_size = fc_dim
elif k < num_fc - 1:
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self._output_size = fc_dim
else:
fc = nn.Linear(np.prod(self._output_size), num_output_params)
self._output_size = num_output_params
self.add_module("fc{}".format(k + 1), fc)
self.fcs.append(fc)
self._output_size = fc_dim
self.log_T = torch.nn.Parameter(torch.log(torch.tensor([0.5])))
self.log_T.requires_grad = True
# Pre-compute xyz bin centers
xyz_bin_corners = []
xyz_bin_widths = []
for k in range(3):
bottom, top = self.xyz_bin_ranges[k]
xyz_bin_widths.append( (top - bottom) / (self.num_bins - 1) )
xyz_bin_corners.append(
torch.linspace(bottom, top, steps=self.num_bins))
self.register_buffer("xyz_bin_corners", torch.stack(xyz_bin_corners))
self.register_buffer("xyz_bin_widths", torch.tensor(xyz_bin_widths))
for layer in self.conv_norm_relus:
weight_init.c2_msra_fill(layer)
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
def forward(self, x, Kcs, rotations, Hinfs):
for layer in self.conv_norm_relus:
x = layer(x)
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
Kcs = torch.flatten(Kcs, start_dim=1)
rotations = torch.flatten(rotations, start_dim=1)
Hinfs = torch.flatten(Hinfs, start_dim=1)
x = torch.cat([x, Kcs, rotations, Hinfs], dim=-1)
if len(self.fcs):
for layer in self.fcs:
x = F.relu(layer(x))
x = x.reshape(x.shape[0], 3, self.num_bins)
log_P = F.log_softmax(x / torch.exp(self.log_T), dim=-1)
xyz_estimate = torch.sum(torch.exp(log_P) * self.xyz_bin_corners, dim=2)
if self.training:
get_event_storage().put_scalar("pose_xyz_log_T", self.log_T)
return xyz_estimate, log_P
def pose_xyz_rcnn_loss(self, pose_xyz_estimate, log_P,
instances, loss_weight=1.0, loss_type="l1"):
total_num_pose_estimates = pose_xyz_estimate.size(0)
assert(pose_xyz_estimate.size(1) == 3)
assert(log_P.size(0) == total_num_pose_estimates)
assert(log_P.size(1) == 3)
assert(log_P.size(2) == self.num_bins)
# Gather up gt xyz poses from the list of Instances objects
all_gt_pose_xyz = []
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
all_gt_pose_xyz.append(instances_per_image.gt_pose_quatxyz[:, -3:].to(device=pose_xyz_estimate.device))
if len(all_gt_pose_xyz) == 0:
return 0.
all_gt_pose_xyz = cat(all_gt_pose_xyz, dim=0)
assert all_gt_pose_xyz.numel() > 0, all_gt_pose_xyz.shape
# Compute the bin index in which the ground truth xyz poses fall
# by subtracting off the bin left boundaries and dividing by the bin widths
distance_into_bins = all_gt_pose_xyz.detach() - self.xyz_bin_corners[:, 0]
bin_indices = (distance_into_bins / self.xyz_bin_widths).floor()
bin_indices = torch.clamp(bin_indices, 0, self.num_bins).long()
active_log_probs = torch.stack(
[log_P[k, range(3), bin_indices[k, :]]
for k in range(total_num_pose_estimates)])
pose_loss = torch.mean(-active_log_probs)
if loss_type == "l1":
pose_loss = pose_loss + F.l1_loss(
pose_xyz_estimate, all_gt_pose_xyz, reduction="mean"
)
elif loss_type == "l2":
pose_loss = pose_loss + F.mse_loss(
pose_xyz_estimate, all_gt_pose_xyz, reduction="mean"
)
else:
raise NotImplementedError("Unrecognized loss type: ", loss_type)
pose_loss = pose_loss * loss_weight
return pose_loss
@ROI_POSE_HEAD_REGISTRY.register()
class RCNNPoseRpyHead(nn.Module):
    """Pose head that predicts roll/pitch/yaw as distributions over angle bins.

    Each of the three angles is classified into ``NUM_BINS`` bins spanning
    [-pi, pi].  The point estimate is a *circular* soft-argmax: the bin
    probabilities weight the cos/sin of the bin centers and atan2 recovers
    the angle, so the estimate has no discontinuity at the +/-pi wrap-around.
    """

    def __init__(self, cfg, input_shape):
        super().__init__()
        # fmt: off
        num_conv = cfg.MODEL.ROI_POSE_RPY_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_POSE_RPY_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_POSE_RPY_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_POSE_RPY_HEAD.FC_DIM
        norm = cfg.MODEL.ROI_POSE_RPY_HEAD.NORM
        self.num_bins = cfg.MODEL.ROI_POSE_RPY_HEAD.NUM_BINS
        # fmt: on
        assert num_conv + num_fc > 0
        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
        num_output_params = self.num_bins * 3

        # Conv tower over the pooled ROI features.
        self.conv_norm_relus = []
        for k in range(num_conv):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not norm,
                norm=get_norm(norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        # FC tower; the final layer emits the 3 * num_bins bin logits.
        self.fcs = []
        for k in range(num_fc):
            if k == 0:
                # Takes 3x3 calibrations + Hinfs + rotmats (27 extra values) as input as well
                fc = nn.Linear(np.prod(self._output_size) + 27, fc_dim)
                self._output_size = fc_dim
            elif k < num_fc - 1:
                fc = nn.Linear(np.prod(self._output_size), fc_dim)
                self._output_size = fc_dim
            else:
                fc = nn.Linear(np.prod(self._output_size), num_output_params)
                self._output_size = num_output_params
            self.add_module("fc{}".format(k + 1), fc)
            self.fcs.append(fc)
        self._output_size = fc_dim

        # Temperature for softmax (gets exponentiated in the
        # forward pass to ensure it's always positive).
        self.log_T = torch.nn.Parameter(torch.log(torch.tensor([0.5])))
        self.log_T.requires_grad = True

        # Pre-compute the per-axis bin centers over [-pi, pi] and their
        # cos/sin, used by the circular soft-argmax in forward().
        rpy_bin_corners = []
        rpy_bin_widths = []
        for k in range(3):
            bottom, top = -np.pi, np.pi
            rpy_bin_widths.append((top - bottom) / (self.num_bins - 1))
            rpy_bin_corners.append(
                torch.linspace(bottom, top, steps=self.num_bins))
        rpy_bin_corners = torch.stack(rpy_bin_corners)
        self.register_buffer("rpy_bin_corners",
                             rpy_bin_corners)
        self.register_buffer("rpy_bin_corners_real",
                             torch.cos(rpy_bin_corners))
        self.register_buffer("rpy_bin_corners_imag",
                             torch.sin(rpy_bin_corners))
        self.register_buffer("rpy_bin_widths", torch.tensor(rpy_bin_widths))

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    def forward(self, x, Kcs, rotations, Hinfs):
        """Return (rpy_estimate [N, 3], log_P [N, 3, num_bins])."""
        for layer in self.conv_norm_relus:
            x = layer(x)
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        Kcs = torch.flatten(Kcs, start_dim=1)
        rotations = torch.flatten(rotations, start_dim=1)
        Hinfs = torch.flatten(Hinfs, start_dim=1)
        x = torch.cat([x, Kcs, rotations, Hinfs], dim=-1)
        if len(self.fcs):
            for layer in self.fcs:
                x = F.relu(layer(x))
        x = x.reshape(x.shape[0], 3, self.num_bins)
        log_P = F.log_softmax(x / torch.exp(self.log_T), dim=-1)
        # Circular soft-argmax: average the unit vectors of the bin centers
        # under P and take the angle of the resulting mean vector.
        P = torch.exp(log_P)
        real_total = torch.sum(P * self.rpy_bin_corners_real, dim=2)
        imag_total = torch.sum(P * self.rpy_bin_corners_imag, dim=2)
        rpy_estimate = torch.atan2(imag_total, real_total)
        if self.training:
            get_event_storage().put_scalar("pose_rpy_log_T", self.log_T)
        return rpy_estimate, log_P

    def pose_rpy_rcnn_loss(self, pose_rpy_estimate, log_P,
                           instances, loss_weight=1.0, loss_type="l1"):
        """Bin cross-entropy plus wrap-aware l1/l2 regression loss on rpy.

        Returns a scalar loss tensor (0. when there are no gt instances).
        """
        total_num_pose_estimates = pose_rpy_estimate.size(0)
        assert(pose_rpy_estimate.size(1) == 3)
        assert(log_P.size(0) == total_num_pose_estimates)
        assert(log_P.size(1) == 3)
        assert(log_P.size(2) == self.num_bins)
        # Gather the gt rpy angles from the list of Instances objects.
        all_gt_pose_rpy = []
        for instances_per_image in instances:
            if len(instances_per_image) == 0:
                continue
            all_gt_pose_rpy.append(
                instances_per_image.gt_pose_rpy.to(device=pose_rpy_estimate.device))
        if len(all_gt_pose_rpy) == 0:
            return 0.
        all_gt_pose_rpy = cat(all_gt_pose_rpy, dim=0)
        assert all_gt_pose_rpy.numel() > 0, all_gt_pose_rpy.shape
        # Bin index of each gt angle: offset from the left bin edge over the width.
        distance_into_bins = all_gt_pose_rpy.detach() - self.rpy_bin_corners[:, 0]
        bin_indices = (distance_into_bins / self.rpy_bin_widths).floor()
        # BUGFIX: clamp to num_bins - 1 (not num_bins), otherwise an angle at
        # or above the top edge indexes out of range in log_P below.
        bin_indices = torch.clamp(bin_indices, 0, self.num_bins - 1).long()
        active_log_probs = torch.stack(
            [log_P[k, range(3), bin_indices[k, :]]
             for k in range(total_num_pose_estimates)])
        pose_loss = torch.mean(-active_log_probs)
        # Wrap-aware angular error: take the smallest of the three candidate
        # differences (estimate shifted by -2pi, 0, +2pi).
        pose_loss_0 = torch.abs(pose_rpy_estimate - all_gt_pose_rpy)
        pose_loss_1 = torch.abs(pose_rpy_estimate + np.pi*2. - all_gt_pose_rpy)
        pose_loss_2 = torch.abs(pose_rpy_estimate - np.pi*2. - all_gt_pose_rpy)
        pose_loss_min, _ = torch.min(
            torch.stack([pose_loss_0, pose_loss_1, pose_loss_2], dim=0), dim=0)
        if loss_type == "l1":
            pose_loss = pose_loss + torch.mean(pose_loss_min)
        elif loss_type == "l2":
            pose_loss = pose_loss + torch.mean(pose_loss_min**2.)
        else:
            raise NotImplementedError("Unrecognized loss type: ", loss_type)
        pose_loss = pose_loss * loss_weight
        return pose_loss
@ROI_POSE_HEAD_REGISTRY.register()
class RCNNPose6DOFRotHead(nn.Module):
    """Pose head that regresses a full 3D rotation via the 6D representation.

    The network outputs two 3-vectors that are Gram-Schmidt orthonormalized
    into a rotation matrix (Zhou et al., "On the Continuity of Rotation
    Representations in Neural Networks", Appendix B).
    """

    def __init__(self, cfg, input_shape):
        super().__init__()
        num_conv = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NUM_CONV
        conv_dim = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.CONV_DIM
        num_fc = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NUM_FC
        fc_dim = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.FC_DIM
        norm = cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NORM
        assert num_conv + num_fc > 0
        self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
        # 6 outputs: two 3-vectors of the 6D rotation representation.
        num_output_params = 6

        # Conv tower over the pooled ROI features.
        self.conv_norm_relus = []
        for k in range(num_conv):
            conv = Conv2d(
                self._output_size[0],
                conv_dim,
                kernel_size=3,
                padding=1,
                bias=not norm,
                norm=get_norm(norm, conv_dim),
                activation=F.relu,
            )
            self.add_module("conv{}".format(k + 1), conv)
            self.conv_norm_relus.append(conv)
            self._output_size = (conv_dim, self._output_size[1], self._output_size[2])

        # FC tower; the first layer also ingests the flattened 3x3
        # calibrations + Hinfs + rotmats (27 extra values).
        self.fcs = []
        for k in range(num_fc):
            if k == 0:
                fc = nn.Linear(np.prod(self._output_size) + 27, fc_dim)
                self._output_size = fc_dim
            elif k < num_fc - 1:
                fc = nn.Linear(np.prod(self._output_size), fc_dim)
                self._output_size = fc_dim
            else:
                fc = nn.Linear(np.prod(self._output_size), num_output_params)
                self._output_size = num_output_params
            self.add_module("fc{}".format(k + 1), fc)
            self.fcs.append(fc)
        self._output_size = fc_dim

        for layer in self.conv_norm_relus:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)

    def forward(self, x, Kcs, rotations, Hinfs):
        """Return a batch of rotation matrices R with shape (N, 3, 3)."""
        for layer in self.conv_norm_relus:
            x = layer(x)
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        Kcs = torch.flatten(Kcs, start_dim=1)
        rotations = torch.flatten(rotations, start_dim=1)
        Hinfs = torch.flatten(Hinfs, start_dim=1)
        x = torch.cat([x, Kcs, rotations, Hinfs], dim=-1)
        if len(self.fcs):
            for layer in self.fcs:
                x = F.relu(layer(x))
        x = x.reshape(x.shape[0], 2, 3)
        # Interpret the 6 outputs as two 3-vectors and Gram-Schmidt them into
        # a rotation matrix, referencing Appendix B
        # in https://arxiv.org/pdf/1812.07035.pdf
        a1 = x[:, 0, :]
        a2 = x[:, 1, :]
        b1 = F.normalize(a1, p=2, dim=1)
        # Sum is repeated out to [batch x 3] from [batch] so it
        # broadcast-multiplies with [batch x 3] b1 happily
        b2 = F.normalize(a2 - (torch.sum(b1*a2, dim=-1).view(-1, 1).expand(-1, 3)*b1), dim=1)
        # BUGFIX: pass dim=1 explicitly; torch.cross's default picks the
        # *first* dimension of size 3, which is the batch dim when N == 3.
        b3 = torch.cross(b1, b2, dim=1)
        R = torch.stack([b1, b2, b3], dim=-1)
        return R

    def pose_6DOF_rot_rcnn_loss(self, R_estimate,
                                instances, loss_weight=1.0, loss_type="l1"):
        """Geodesic rotation loss between R_estimate and the gt quaternions.

        Returns a scalar loss tensor (0. when there are no gt instances).
        """
        total_num_pose_estimates = R_estimate.size(0)
        assert(R_estimate.shape[-2:] == (3, 3))
        # Gather up gt rotation matrices from the list of Instances objects
        all_gt_pose_quat = []
        for instances_per_image in instances:
            if len(instances_per_image) == 0:
                continue
            all_gt_pose_quat.append(
                instances_per_image.gt_pose_quatxyz[:, :4].detach().to(device=R_estimate.device))
        if len(all_gt_pose_quat) == 0:
            return 0.
        all_gt_pose_quat = cat(all_gt_pose_quat, dim=0)
        assert all_gt_pose_quat.numel() > 0, all_gt_pose_quat.shape
        all_gt_pose_rotmat = quat2mat(all_gt_pose_quat)
        # Get rotation difference between predicted and target
        diff_rotations = torch.matmul(
            all_gt_pose_rotmat, torch.transpose(R_estimate, 1, 2)).contiguous()
        # Batch trace implementation from torch_quaternion.py
        rotation_matrix_vec = diff_rotations.reshape(*diff_rotations.shape[:-2], 9)
        m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk(
            rotation_matrix_vec, chunks=9, dim=-1)
        trace = m00 + m11 + m22
        # Clamp keeps acos differentiable and finite at the +/-1 boundaries.
        angle_errors = torch.acos(torch.clamp((trace - 1.)/2., -0.9999, 0.9999))
        if loss_type == "l1":
            pose_loss = torch.mean(angle_errors)
        elif loss_type == "l2":
            pose_loss = torch.mean(angle_errors**2.)
        else:
            raise NotImplementedError("Unrecognized loss type: ", loss_type)
        pose_loss = pose_loss * loss_weight
        return pose_loss
def build_pose_xyz_head(cfg, input_shape):
    """Instantiate the xyz pose head named by cfg.MODEL.ROI_POSE_XYZ_HEAD.NAME."""
    head_cls = ROI_POSE_HEAD_REGISTRY.get(cfg.MODEL.ROI_POSE_XYZ_HEAD.NAME)
    return head_cls(cfg, input_shape)
def build_pose_rpy_head(cfg, input_shape):
    """Instantiate the rpy pose head named by cfg.MODEL.ROI_POSE_RPY_HEAD.NAME."""
    head_cls = ROI_POSE_HEAD_REGISTRY.get(cfg.MODEL.ROI_POSE_RPY_HEAD.NAME)
    return head_cls(cfg, input_shape)
def build_pose_6DOF_rot_head(cfg, input_shape):
    """Instantiate the 6DOF rotation head named by cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NAME."""
    head_cls = ROI_POSE_HEAD_REGISTRY.get(cfg.MODEL.ROI_POSE_6DOF_ROT_HEAD.NAME)
    return head_cls(cfg, input_shape)
f7f522e7d04219b8aa9187b6b62f2db427ff3a4f | 659 | py | Python | Chapter02/read_xlsx.py | appleDev20/Automate-it | 0660507c40fe08914beb893ec4b4990086a74238 | [
"MIT"
] | 28 | 2017-02-07T23:06:31.000Z | 2021-12-22T17:43:39.000Z | Chapter02/read_xlsx.py | appleDev20/Automate-it | 0660507c40fe08914beb893ec4b4990086a74238 | [
"MIT"
] | null | null | null | Chapter02/read_xlsx.py | appleDev20/Automate-it | 0660507c40fe08914beb893ec4b4990086a74238 | [
"MIT"
] | 20 | 2017-03-16T11:55:15.000Z | 2022-02-08T10:00:46.000Z | #Get all the sheets from given workbook
# NOTE: Python 2 example script (print statements, legacy openpyxl API).
# Get all the sheet names from the given workbook.
import openpyxl
workbook = openpyxl.load_workbook('myxls.xlsx')
print "Workbook Object:", workbook.get_sheet_names()
# Get a particular sheet of the workbook by its name.
people = workbook.get_sheet_by_name('People')
print "People sheet object:", people
# Get individual cell objects, either by cell name ('A1') or by a
# row/column coordinate pair (both are 1-indexed).
import openpyxl
workbook = openpyxl.load_workbook('myxls.xlsx')
people = workbook.get_sheet_by_name('People')
print "First cell Object:", people['A1']
print "Other Cell Object:", people.cell(row=3, column=2)
# Read the stored values from cell objects via the .value attribute.
print "First Name:", people['B2'].value, people['C2'].value
| 31.380952 | 59 | 0.770865 |
import openpyxl
workbook = openpyxl.load_workbook('myxls.xlsx')
print "Workbook Object:", workbook.get_sheet_names()
people = workbook.get_sheet_by_name('People')
print "People sheet object:", people
import openpyxl
workbook = openpyxl.load_workbook('myxls.xlsx')
people = workbook.get_sheet_by_name('People')
print "First cell Object:", people['A1']
print "Other Cell Object:", people.cell(row=3, column=2)
print "First Name:", people['B2'].value, people['C2'].value
| false | true |
f7f52318236841ed73bc75c4a26fa18362c0ed30 | 7,004 | py | Python | fairness_teaching/rl/data.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | fairness_teaching/rl/data.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 7 | 2021-11-10T19:44:38.000Z | 2022-02-10T06:48:39.000Z | fairness_teaching/rl/data.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
# pylint: skip-file
# Mapping from CelebA attribute name to its column index in the
# 40-column attribute label file.
ATT_ID = {'5_o_Clock_Shadow': 0, 'Arched_Eyebrows': 1, 'Attractive': 2,
          'Bags_Under_Eyes': 3, 'Bald': 4, 'Bangs': 5, 'Big_Lips': 6,
          'Big_Nose': 7, 'Black_Hair': 8, 'Blond_Hair': 9, 'Blurry': 10,
          'Brown_Hair': 11, 'Bushy_Eyebrows': 12, 'Chubby': 13,
          'Double_Chin': 14, 'Eyeglasses': 15, 'Goatee': 16,
          'Gray_Hair': 17, 'Heavy_Makeup': 18, 'High_Cheekbones': 19,
          'Male': 20, 'Mouth_Slightly_Open': 21, 'Mustache': 22,
          'Narrow_Eyes': 23, 'No_Beard': 24, 'Oval_Face': 25,
          'Pale_Skin': 26, 'Pointy_Nose': 27, 'Receding_Hairline': 28,
          'Rosy_Cheeks': 29, 'Sideburns': 30, 'Smiling': 31,
          'Straight_Hair': 32, 'Wavy_Hair': 33, 'Wearing_Earrings': 34,
          'Wearing_Hat': 35, 'Wearing_Lipstick': 36,
          'Wearing_Necklace': 37, 'Wearing_Necktie': 38, 'Young': 39}
# Reverse lookup: column index -> attribute name.
ID_ATT = {v: k for k, v in ATT_ID.items()}
# Fraction of the image kept by the central crop at test time.
CENTRAL_FRACTION = 0.89
LOAD_SIZE = 142  # resize target in pixels (286 for the high-res variant)
CROP_SIZE = 128  # final crop size in pixels (256 for the high-res variant)
def cal_eo(a, y_label, y_pred):
    """Equalized-odds gap between groups a == 0 and a == 1.

    Returns (d00, d01, d10, d11, eo) where d<A><Y> is the accuracy of
    y_pred on the subset with group A and true label Y (0.5 when that
    subset is empty), and eo = |d00 - d10| + |d01 - d11|.
    """
    a = np.array(a)
    y_label = np.array(y_label)
    y_pred = np.array(y_pred)

    rates = {}
    for grp in (0, 1):
        for lab in (0, 1):
            sel = y_pred[np.logical_and(a == grp, y_label == lab)]
            if sel.shape[0] == 0:
                # Undefined rate for an empty cell; use chance level.
                rates[(grp, lab)] = 0.5
            elif lab == 0:
                # Accuracy on negatives: fraction predicted 0.
                rates[(grp, lab)] = 1 - np.sum(sel) / sel.shape[0]
            else:
                # Accuracy on positives: fraction predicted 1.
                rates[(grp, lab)] = np.sum(sel) / sel.shape[0]

    d00, d01 = rates[(0, 0)], rates[(0, 1)]
    d10, d11 = rates[(1, 0)], rates[(1, 1)]
    eo = np.abs(d00 - d10) + np.abs(d01 - d11)
    return (d00, d01, d10, d11, eo)
def reorg(label_path, af, bf):
    """Subsample the CelebA label file so the (af, bf) cells are rebalanced.

    Rows are split by the binary attributes af (group) and bf (label) into
    four cells; the output keeps m rows from each concordant cell and 3*m
    rows from each discordant cell, where m is the smallest cell size.
    Returns an array of rows: image name followed by the 40 attributes.
    """
    img_names = np.genfromtxt(label_path, dtype=str, usecols=0)
    labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))
    entry = np.concatenate((img_names[:, np.newaxis], labels), axis=1)
    # Map the {-1, +1} attribute encoding to {0, 1}.
    a = np.asarray((labels[:, ATT_ID[af]] + 1) // 2)
    b = np.asarray((labels[:, ATT_ID[bf]] + 1) // 2)

    cells = {(0, 0): [], (0, 1): [], (1, 0): [], (1, 1): []}
    for i in range(labels.shape[0]):
        cells[(int(a[i]), int(b[i]))].append(entry[i])

    m = min(len(rows) for rows in cells.values())
    rebalanced = (cells[(0, 0)][:m] + cells[(0, 1)][:3 * m]
                  + cells[(1, 0)][:3 * m] + cells[(1, 1)][:m])
    return np.array(rebalanced)
def load_train(image_path, label, att):
    """Decode and augment one training image; binarize its label/attribute."""
    raw = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(raw)
    image = tf.image.resize(image, [LOAD_SIZE, LOAD_SIZE])
    # Train-time augmentation: random horizontal flip + random crop.
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_crop(image, [CROP_SIZE, CROP_SIZE, 3])
    # Scale pixel values from [0, 255] into [-1, 1].
    image = tf.cast(tf.clip_by_value(image, 0, 255) / 127.5 - 1, tf.float32)
    # Map labels from the {-1, +1} encoding to {0, 1}.
    label = tf.cast((label + 1) // 2, tf.int32)
    att = tf.cast((att + 1) // 2, tf.int32)
    return (image, label, att)
def load_test(image_path, label, att):
    """Decode one eval image (deterministic crop); binarize label/attribute."""
    raw = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(raw)
    image = tf.image.resize(image, [LOAD_SIZE, LOAD_SIZE])
    # Deterministic central crop instead of the training-time random crop.
    image = tf.image.central_crop(image, CENTRAL_FRACTION)
    # Scale pixel values from [0, 255] into [-1, 1].
    image = tf.cast(tf.clip_by_value(image, 0, 255) / 127.5 - 1, tf.float32)
    # Map labels from the {-1, +1} encoding to {0, 1}.
    label = tf.cast((label + 1) // 2, tf.int32)
    att = tf.cast((att + 1) // 2, tf.int32)
    return (image, label, att)
# load partial training dataset
def data_train(image_path, label_path, batch_size):
    """One-shot iterator over a rebalanced subset of the training set.

    Returns (batch, steps_per_epoch) where batch yields (image, label, att).
    """
    # Rebalance the dataset on the (Male, Arched_Eyebrows) cells.
    new_entry = reorg(label_path, 'Male', 'Arched_Eyebrows')
    n_examples = new_entry.shape[0]
    img_paths = np.array(
        [os.path.join(image_path, name) for name in new_entry[:, 0]])
    attr_table = new_entry[:, 1:]
    labels = attr_table[:, ATT_ID['Arched_Eyebrows']].astype(int)
    att = attr_table[:, ATT_ID['Male']].astype(int)

    ds = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))
    ds = ds.map(load_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.shuffle(n_examples, seed=0)
    ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.repeat().prefetch(1)
    batch = ds.make_one_shot_iterator().get_next()
    return batch, int(np.ceil(n_examples / batch_size))
# load entire training dataset
# def data_train(image_path, label_path, batch_size):
# img_names = np.genfromtxt(label_path, dtype=str, usecols=0)
# img_paths = np.array([os.path.join(image_path, img_name) for img_name in img_names])
# img_labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))
# n_examples = img_names.shape[0]
# labels = img_labels[:,ATT_ID['Arched_Eyebrows']]
# att = img_labels[:,ATT_ID['Male']]
# train_dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))
# train_dataset = train_dataset.map(load_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# train_dataset = train_dataset.shuffle(n_examples, seed=0)
# train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
# train_dataset = train_dataset.repeat().prefetch(1)
# train_iter = train_dataset.make_one_shot_iterator()
# batch = train_iter.get_next()
# return batch, int(np.ceil(n_examples/batch_size))
def data_test(image_path, label_path, batch_size):
    """One-shot iterator over the full (unbalanced) evaluation set.

    Returns (batch, num_batches) where batch yields (image, label, att).
    """
    img_names = np.genfromtxt(label_path, dtype=str, usecols=0)
    img_paths = np.array(
        [os.path.join(image_path, name) for name in img_names])
    img_labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))
    n_examples = img_names.shape[0]
    labels = img_labels[:, ATT_ID['Arched_Eyebrows']]
    att = img_labels[:, ATT_ID['Male']]

    ds = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))
    ds = ds.map(load_test, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Keep the remainder batch so every example is evaluated.
    ds = ds.batch(batch_size, drop_remainder=False)
    ds = ds.repeat().prefetch(1)
    batch = ds.make_one_shot_iterator().get_next()
    return batch, int(np.ceil(n_examples / batch_size))
| 36.863158 | 101 | 0.692461 |
import os
import numpy as np
import tensorflow as tf
ATT_ID = {'5_o_Clock_Shadow': 0, 'Arched_Eyebrows': 1, 'Attractive': 2,
'Bags_Under_Eyes': 3, 'Bald': 4, 'Bangs': 5, 'Big_Lips': 6,
'Big_Nose': 7, 'Black_Hair': 8, 'Blond_Hair': 9, 'Blurry': 10,
'Brown_Hair': 11, 'Bushy_Eyebrows': 12, 'Chubby': 13,
'Double_Chin': 14, 'Eyeglasses': 15, 'Goatee': 16,
'Gray_Hair': 17, 'Heavy_Makeup': 18, 'High_Cheekbones': 19,
'Male': 20, 'Mouth_Slightly_Open': 21, 'Mustache': 22,
'Narrow_Eyes': 23, 'No_Beard': 24, 'Oval_Face': 25,
'Pale_Skin': 26, 'Pointy_Nose': 27, 'Receding_Hairline': 28,
'Rosy_Cheeks': 29, 'Sideburns': 30, 'Smiling': 31,
'Straight_Hair': 32, 'Wavy_Hair': 33, 'Wearing_Earrings': 34,
'Wearing_Hat': 35, 'Wearing_Lipstick': 36,
'Wearing_Necklace': 37, 'Wearing_Necktie': 38, 'Young': 39}
ID_ATT = {v: k for k, v in ATT_ID.items()}
CENTRAL_FRACTION = 0.89
LOAD_SIZE = 142
CROP_SIZE = 128
def cal_eo(a, y_label, y_pred):
a = np.array(a)
y_label = np.array(y_label)
y_pred = np.array(y_pred)
idx00 = np.logical_and(a==0,y_label==0)
idx01 = np.logical_and(a==0,y_label==1)
idx10 = np.logical_and(a==1,y_label==0)
idx11 = np.logical_and(a==1,y_label==1)
if y_pred[idx00].shape[0] ==0:
d00=0.5
else:
d00 = 1 - np.sum(y_pred[idx00])/y_pred[idx00].shape[0]
if y_pred[idx01].shape[0] ==0:
d01=0.5
else:
d01 = np.sum(y_pred[idx01])/y_pred[idx01].shape[0]
if y_pred[idx10].shape[0] ==0:
d10=0.5
else:
d10 = 1 - np.sum(y_pred[idx10])/y_pred[idx10].shape[0]
if y_pred[idx11].shape[0] ==0:
d11=0.5
else:
d11 = np.sum(y_pred[idx11])/y_pred[idx11].shape[0]
eo = np.abs(d00-d10)+np.abs(d01-d11)
return (d00,d01,d10,d11,eo)
def reorg(label_path,af,bf):
img_names = np.genfromtxt(label_path, dtype=str, usecols=0)
labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))
entry = np.concatenate((img_names[:, np.newaxis], labels), axis=1)
a = np.asarray((labels[:,ATT_ID[af]]+1)//2)
b = np.asarray((labels[:,ATT_ID[bf]]+1)//2)
d00 = []
d01 = []
d10 = []
d11 = []
for i in range(labels.shape[0]):
if a[i]==0:
if b[i]==0: d00.append(entry[i])
elif b[i]==1: d01.append(entry[i])
elif a[i]==1:
if b[i]==0: d10.append(entry[i])
elif b[i]==1: d11.append(entry[i])
min_leng = np.min([len(d00),len(d01),len(d10),len(d11)])
new_list = d00[:min_leng]+d01[:3*min_leng]+d10[:3*min_leng]+d11[:min_leng]
return np.array(new_list)
def load_train(image_path, label, att):
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image)
image = tf.image.resize(image, [LOAD_SIZE, LOAD_SIZE])
image = tf.image.random_flip_left_right(image)
image = tf.image.random_crop(image, [CROP_SIZE, CROP_SIZE, 3])
image = tf.clip_by_value(image, 0, 255) / 127.5 - 1
label = (label + 1) // 2
att = (att + 1) // 2
image = tf.cast(image, tf.float32)
label = tf.cast(label, tf.int32)
att = tf.cast(att, tf.int32)
return (image, label, att)
def load_test(image_path, label, att):
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image)
image = tf.image.resize(image, [LOAD_SIZE, LOAD_SIZE])
image = tf.image.central_crop(image, CENTRAL_FRACTION)
image = tf.clip_by_value(image, 0, 255) / 127.5 - 1
label = (label + 1) // 2
att = (att + 1) // 2
image = tf.cast(image, tf.float32)
label = tf.cast(label, tf.int32)
att = tf.cast(att, tf.int32)
return (image, label, att)
def data_train(image_path, label_path, batch_size):
a = 'Male'
b = 'Arched_Eyebrows'
new_entry = reorg(label_path,a,b)
n_examples = new_entry.shape[0]
img_names = new_entry[:,0]
img_paths = np.array([os.path.join(image_path, img_name) for img_name in img_names])
img_labels = new_entry[:,1:]
labels = img_labels[:,ATT_ID['Arched_Eyebrows']].astype(int)
att = img_labels[:,ATT_ID['Male']].astype(int)
train_dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))
train_dataset = train_dataset.map(load_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_dataset = train_dataset.shuffle(n_examples, seed=0)
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.repeat().prefetch(1)
train_iter = train_dataset.make_one_shot_iterator()
batch = train_iter.get_next()
return batch, int(np.ceil(n_examples/batch_size))
def data_test(image_path, label_path, batch_size):
img_names = np.genfromtxt(label_path, dtype=str, usecols=0)
img_paths = np.array([os.path.join(image_path, img_name) for img_name in img_names])
img_labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 41))
n_examples = img_names.shape[0]
labels = img_labels[:,ATT_ID['Arched_Eyebrows']]
att = img_labels[:,ATT_ID['Male']]
test_dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels, att))
test_dataset = test_dataset.map(load_test, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.batch(batch_size, drop_remainder=False)
test_dataset = test_dataset.repeat().prefetch(1)
test_iter = test_dataset.make_one_shot_iterator()
batch = test_iter.get_next()
return batch, int(np.ceil(n_examples/batch_size))
| true | true |
f7f523ac9f1afa44aabf532e7d1a87a7d213d0f8 | 15,418 | py | Python | recording/record_calib_npy.py | chrelli/3DDD_social_mouse_tracker | 291d2ed90029628dd65db0ce3e8972b721159a15 | [
"Apache-2.0"
] | 1 | 2022-02-10T07:26:09.000Z | 2022-02-10T07:26:09.000Z | recording/record_calib_npy.py | chrelli/3DDD_social_mouse_tracker | 291d2ed90029628dd65db0ce3e8972b721159a15 | [
"Apache-2.0"
] | 1 | 2022-02-11T06:55:29.000Z | 2022-02-12T22:26:44.000Z | recording/record_calib_npy.py | chrelli/3DDD_social_mouse_tracker | 291d2ed90029628dd65db0ce3e8972b721159a15 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 15:36:15 2018
@author: chrelli
added unix time stamps to the first camera!
Ways to slim down the data:
no unix time stamps?
no color frame showing? - yes, helps a lot!
no png compression? Totally fine at 30 fps!
Major to-do list:
- use arduino to synchronize? Yes, could send out synchronization time code to another unit: Problem: doesn't account for delay of arriving frames
- use depth roi to slim down writing footprint
- use LED roi to get blinking time stamps
-
## with connected device cam
from pyrealsense import offline
offline.save_depth_intrinsics(dev)
"""
#%% Import the necessary stuff
# basic OS stuff
import time, os, sys, shutil
import json
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# small utilities
import csv
from colour import Color
from itertools import compress # for list selection with logical
from tqdm import tqdm
# for image manipulation
import cv2
# for recording and connecting to the intel realsense librar
#import pyrealsense as pyrs
# add the realsense library
sys.path.append(r'/usr/local/lib')
# and load it!
import pyrealsense2 as rs
#import multiprocessing
from multiprocessing import Process
# import handy Functions
from utils.common_utils import *
from utils.recording_utils import *
#%% Parse some inputs
import argparse
parser = argparse.ArgumentParser(description='Records cad and d images with no roi cut to disk. Also records timestamps and led traces using the auto LED mask. Currently, with no ROI, the program maxes out disk write speed around 45 fps.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--ncams', type=int, default = 4 , choices=[1,2,3,4],
                    help='number of cameras to stream')
parser.add_argument('--fps',type=int, default = 30 , choices=[30,60],
                    help='select fps to stream')
#parser.add_argument("--singlecore", help="disables mult.proc. for debugging on macbook, overrides ncams to 1",
#                    action="store_true")
parser.add_argument("--plots", help="shows the live video while recording",
                    action="store_true")
args = parser.parse_args()
#%% Constants
# Stream resolution; 848x480 is also supported by the cameras:
# frame_width,frame_height = 848,480
frame_width,frame_height = 640,480
fps_choice = args.fps
# number of zero-padding digits used in the per-frame file names
n_padding_digits = 8
print('# cameras: '+str(args.ncams))
print('Frame size is '+str(frame_width)+'x'+str(frame_height)+' pixels.')
print('Grabbing frames at '+str(fps_choice)+' fps')
# time string used to tag this recording's output folders
timestr = time.strftime("%Y%m%d-%H%M%S")
# old single-folder layout, kept for reference:
#data_folder = '/media/chrelli/Data0'
#top_folder = data_folder + '/calibration_' + timestr
#reset_folder_if_present(top_folder)
#
#top_folder_0 = top_folder
#top_folder_1 = top_folder
# reset (wipe and recreate) one output folder per data disk
# NOTE(review): output paths are hard-coded to /media/chrelli/Data{0,1}
top_folder_0 = '/media/chrelli/Data0' + '/calibration_' + timestr
top_folder_1 = '/media/chrelli/Data1' + '/calibration_' + timestr
reset_folder_if_present(top_folder_0)
reset_folder_if_present(top_folder_1)
# also make the folders that will hold the raw .npy frames
npy_folder_0 = top_folder_0+'/npy_raw'
npy_folder_1 = top_folder_1+'/npy_raw'
reset_folder_if_present(npy_folder_0)
reset_folder_if_present(npy_folder_1)
#%% 8 bit color setup for the cv2 overlay text
fps_color = (Color('White').rgb)
ts_color = (Color('Peru').rgb)
# colour returns rgb in [0, 1]; scale to 8-bit [0, 255] for cv2
fps_color=tuple(255*x for x in fps_color)
ts_color=tuple(255*x for x in ts_color)
#%% Block for running
# open the pyrealsense server (legacy pyrealsense v1 API, kept for reference)
#serv = pyrs.Service()
# reference time for the unix time stamps written alongside each frame
start_time = time.time()
# open up a realsense context and grab the first ncams connected devices
ctx = rs.context()
devices = [ctx.devices[i] for i in range(args.ncams)]
# sort the devices by their serial numbers so the camera order is stable
serials = [devices[i].get_info(rs.camera_info.serial_number) for i in range(args.ncams)]
devices = [x for _,x in sorted(zip(serials,devices))]
def sub_function_trick(which_device,top_folder):
show_frames = args.plots
####################
#
# DEVICE SETUP BLOCK
#
#####################
# get the serial of that device
device = devices[which_device]
device_serial = device.get_info(rs.camera_info.serial_number)
#set the preset
advnc_mode = rs.rs400_advanced_mode(device)
print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
# run like
# advnc_mode.load_json(json_string)
# load the preset here!
preset_folder = '/home/chrelli/git/3d_sandbox/mycetrack0p8/presets/'
if device_serial[:3] == '740':
preset_name = 'master60pp'
else:
preset_name = 'slave60pp'
jsonFile = preset_folder+preset_name+'.json'
jsonObj = json.load(open(jsonFile))
json_string = str(jsonObj).replace("'", '\"')
print("Configuration " + jsonFile + " loaded");
time.sleep(1.)
advnc_mode.load_json(json_string)
print("Configuration " + jsonFile + " applied!");
if device_serial[:3] == '740':
# master
targetSyncMode = 1
else:
# slave
targetSyncMode = 2
device.first_depth_sensor().set_option(rs.option.inter_cam_sync_mode, targetSyncMode)
# first, open up a config
config = rs.config()
# then open a pipeline
pipeline = rs.pipeline()
# enable the selected device and streams # RGB SPACE HERE
config.enable_device(device_serial);
config.enable_stream(rs.stream.depth, frame_width,frame_height, rs.format.z16, fps_choice)
# config.enable_stream(rs.stream.color, frame_width,frame_height, rs.format.rgb8, fps_choice)
config.enable_stream(rs.stream.color, frame_width,frame_height, rs.format.rgb8, fps_choice)
config.enable_stream(rs.stream.infrared,1, frame_width,frame_height, rs.format.y8, fps_choice)
print("PING after enabling the sync mode is {}".format(device.first_depth_sensor().get_option(rs.option.inter_cam_sync_mode)))
# Start streaming, call the stream 'cfg' for some reason, as pr example
cfg = pipeline.start(config)
# create an align object
# alternative is to align to color, faster but less precise: align_to = rs.stream.color
align_to = rs.stream.depth
align = rs.align(align_to)
print('dev '+str(which_device)+' serial is ' + device_serial)
# Use the first three digits of the serial as a string to tag the device:
device_tag = device_serial[0:3]
if show_frames:
# open a window for cv2
window_title = "dev"+str(which_device)+"(#" + device_tag + ")"
cv2.namedWindow(window_title+'cad')
# block for setting up a low-level fps estimation,
cnt = 0 # a counter
last = time.time() # start_time
fps = 0 # initial fps value
# save the camera intrinsics
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = cfg.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print ("Depth Scale is: " , depth_scale)
# this is how to get the intrinsics
profile = cfg.get_stream(rs.stream.depth) # Fetch stream profile for depth stream
intr = profile.as_video_stream_profile().get_intrinsics() # Downcast to video_stream_profile
#% now make file and save time stamps and depth scaling and intrinsics etc
# use the old naming convention
parameternames = np.array(['cam_params.fx',
'cam_params.fy',
'cam_params.ppx',
'cam_params.ppy',
'd_scale',
'fps_choice',
'frame_width',
'frame_height'])
parameters = np.array([intr.fx,
intr.fy,
intr.ppx,
intr.ppy,
depth_scale,
fps_choice,
intr.width,
intr.height])
# open a file for writint the parameters
with open(top_folder+'/parameters_'+str(which_device)+'.csv','w') as intrfile:
writer = csv.writer(intrfile, delimiter=',')
writer.writerow(parameternames)
writer.writerow(parameters)
# load the automatic led mask from the constants folder!
led_mask,led_logic,led_centroid = load_auto_roi(which_device)
# open a file for time stamps
tsfile = open(top_folder+'/timestamps_'+str(which_device)+'.csv','w')
# ## HF try to open an HF file
# import h5py
# #TODO input from somewhere
# hf = h5py.File(top_folder+'/dev'+str(which_device)+'_d_'+'.h5', 'w')
# # also open one for the cad
# hf_cad = h5py.File(top_folder+'/dev'+str(which_device)+'_cad_'+'.h5', 'w')
# NPY ADDITION
npy_folder = top_folder+'/npy_raw'
# open a file for led stamps
# ledsfile = open(top_folder+'/ledstamps_'+str(which_device)+'.csv','w')
print('starting to stream from device '+str(which_device)+'!')
# wait for a bit for the cam to warm up
# and loop over 30 frames
warmup_time = 2 # seconds
warmup = 0
while warmup < fps_choice*warmup_time:
frames = pipeline.wait_for_frames()
warmup += 1
print('device '+str(which_device)+' is warmed up!')
# START A CLOCK FOR THE FRAMES!
FRAME_CLOCK = 0
try:
while True:
if show_frames:
# for counting frame rate
cnt += 1
if (cnt % 10) == 0:
now = time.time() # after 10 frames
dt = now - last # how long did it take?
fps = 10/dt # calculate frame rate
last = now # assign a new value to the 'last time'
#################################
#
# R E A D B L O C K
#
#################################
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
# get the frame numbers and time stamps
# ts = round(frames.get,2)
ts = frames.get_timestamp()
fn = frames.get_frame_number()
# get the unix time stamp
ts_unix = time.time()-start_time
# run the alignment process
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
cad_frame = aligned_frames.get_color_frame()
# also get one for the LED
# depth_frame = frames.get_depth_frame()
# color_frame = frames.get_color_frame()
infrared_frame = frames.get_infrared_frame()
# Convert images to numpy arrays
depth = np.asanyarray(depth_frame.get_data())
cad = np.asanyarray(cad_frame.get_data())
c = np.asanyarray(infrared_frame.get_data())
# get the LED value, round it a bit, could be profiled
led_stamp = c[led_centroid[1],led_centroid[0]]
# this is the writing block for the csv file, frame number and time stamp!
# tsfile.write(str(FRAME_CLOCK)+','+str(fn)+','+str(ts)+','+str(ts_unix)+','+str(single_pixel_RGB2GRAY(led_stamp))+'\n')
tsfile.write(str(FRAME_CLOCK)+','+str(fn)+','+str(ts)+','+str(ts_unix)+','+str(led_stamp)+'\n')
# this is the writing block for the csv file, frame number and time stamp!
#TODO put led with the others in same file?
# ledsfile.write(str(single_pixel_RGB2GRAY(led_stamp))+'\n')
# write the depth frames to tiff (replace: send to queue)
# cv2.imwrite(top_folder+'/dev'+str(which_device)+'_d_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.png', depth)
# cv2.imwrite(top_folder+'/dev'+str(which_device)+'_cad_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.png', cad)
# hf.create_dataset(str(FRAME_CLOCK), data=depth)
# hf_cad.create_dataset(str(FRAME_CLOCK), data=cad)
np.save(npy_folder+'/dev'+str(which_device)+'_d_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.npy',depth, allow_pickle = False)
np.save(npy_folder+'/dev'+str(which_device)+'_cad_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.npy',cad, allow_pickle = False)
# UPDATE CLOCK
FRAME_CLOCK += 1
#
if show_frames:
# add text and show the CAD frames
cv2.putText(cad, window_title+', fps: '+str(fps)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, fps_color)
cv2.putText(cad, str(round(ts)), (0, frame_height-20), cv2.FONT_HERSHEY_SIMPLEX, 1, ts_color)
cv2.imshow(window_title+'cad', cad)
if cv2.waitKey(1) & 0xFF == ord('q'):
# looks for a small q to nbe pressed
# close the time stamp file
tsfile.close
# close the hf file
# hf.close()
# hf_cad.close()
# ledsfile.close
# stop the device
pipeline.stop()
print('pipeline from device '+str(which_device)+' is now closed!')
break
finally:
tsfile.close
# close the hf file
# hf.close()
# hf_cad.close()
# stop the device
pipeline.stop()
print('pipeline from device '+str(which_device)+' is now closed!')
#%% define helping funtions for the multiprocessing
# these functions have to not be iterable.
def read_device_0():
    """Stream camera 1 (device index 0) into the first data folder."""
    # multiprocessing targets must be top-level zero-argument callables,
    # hence one wrapper per device instead of a parameterized function.
    print('starting camera 1!')
    sub_function_trick(0, top_folder_0)
def read_device_1():
    """Stream camera 2 (device index 1) into the first data folder."""
    print('starting camera 2!')
    sub_function_trick(1, top_folder_0)
def read_device_2():
    """Stream camera 3 (device index 2) into the second data folder."""
    print('starting camera 3!')
    sub_function_trick(2, top_folder_1)
def read_device_3():
    """Stream camera 4 (device index 3) into the second data folder."""
    print('starting camera 4!')
    sub_function_trick(3, top_folder_1)
#%% run the processes on independent cores
# Launch one OS process per camera so each realsense pipeline gets its own
# core; an extra process drives the sync LED blinker.
from multiprocessing import Process
if __name__ == '__main__':
    if args.ncams == 4:
        print('starting 4 cams, with multiprocessing!')
        # start 4 worker processes
        Process(target=read_device_0).start()
        # stagger the first camera before the rest (only in the 4-cam case)
        time.sleep(3.)
        Process(target=read_device_1).start()
        Process(target=read_device_2).start()
        Process(target=read_device_3).start()
        # NOTE(review): blink_using_firmata_random is not defined in this
        # chunk — presumably imported via utils.recording_utils; confirm.
        Process(target=blink_using_firmata_random).start()
    elif args.ncams == 3:
        print('starting 3 cams, with multiprocessing!')
        Process(target=read_device_0).start()
        Process(target=read_device_1).start()
        Process(target=read_device_2).start()
        Process(target=blink_using_firmata).start()
    elif args.ncams == 2:
        print('starting 2 cams, with multiprocessing!')
        Process(target=read_device_0).start()
        Process(target=read_device_1).start()
        Process(target=blink_using_firmata).start()
    elif args.ncams == 1:
        print('starting 1 cam, with multiprocessing!')
        Process(target=read_device_0).start()
        Process(target=blink_using_firmata).start()
| 32.527426 | 294 | 0.642626 |
import time, os, sys, shutil
import json
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import csv
from colour import Color
from itertools import compress
from tqdm import tqdm
import cv2
sys.path.append(r'/usr/local/lib')
import pyrealsense2 as rs
from multiprocessing import Process
from utils.common_utils import *
from utils.recording_utils import *
import argparse
parser = argparse.ArgumentParser(description='Records cad and d images with no roi cut to disk. Also records timestamps and led traces using the auto LED mask. Currently, with no ROI, the program maxes out disk write speed around 45 fps.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--ncams', type=int, default = 4 , choices=[1,2,3,4],
help='number of cameras to stream')
parser.add_argument('--fps',type=int, default = 30 , choices=[30,60],
help='select fps to stream')
parser.add_argument("--plots", help="shows the live video while recording",
action="store_true")
args = parser.parse_args()
frame_width,frame_height = 640,480
fps_choice = args.fps
n_padding_digits = 8
print('# cameras: '+str(args.ncams))
print('Frame size is '+str(frame_width)+'x'+str(frame_height)+' pixels.')
print('Grabbing frames at '+str(fps_choice)+' fps')
timestr = time.strftime("%Y%m%d-%H%M%S")
top_folder_0 = '/media/chrelli/Data0' + '/calibration_' + timestr
top_folder_1 = '/media/chrelli/Data1' + '/calibration_' + timestr
reset_folder_if_present(top_folder_0)
reset_folder_if_present(top_folder_1)
npy_folder_0 = top_folder_0+'/npy_raw'
npy_folder_1 = top_folder_1+'/npy_raw'
reset_folder_if_present(npy_folder_0)
reset_folder_if_present(npy_folder_1)
fps_color = (Color('White').rgb)
ts_color = (Color('Peru').rgb)
fps_color=tuple(255*x for x in fps_color)
ts_color=tuple(255*x for x in ts_color)
start_time = time.time()
ctx = rs.context()
devices = [ctx.devices[i] for i in range(args.ncams)]
serials = [devices[i].get_info(rs.camera_info.serial_number) for i in range(args.ncams)]
devices = [x for _,x in sorted(zip(serials,devices))]
def sub_function_trick(which_device,top_folder):
show_frames = args.plots
reset_name = 'master60pp'
else:
preset_name = 'slave60pp'
jsonFile = preset_folder+preset_name+'.json'
jsonObj = json.load(open(jsonFile))
json_string = str(jsonObj).replace("'", '\"')
print("Configuration " + jsonFile + " loaded");
time.sleep(1.)
advnc_mode.load_json(json_string)
print("Configuration " + jsonFile + " applied!");
if device_serial[:3] == '740':
# master
targetSyncMode = 1
else:
# slave
targetSyncMode = 2
device.first_depth_sensor().set_option(rs.option.inter_cam_sync_mode, targetSyncMode)
# first, open up a config
config = rs.config()
# then open a pipeline
pipeline = rs.pipeline()
# enable the selected device and streams # RGB SPACE HERE
config.enable_device(device_serial);
config.enable_stream(rs.stream.depth, frame_width,frame_height, rs.format.z16, fps_choice)
# config.enable_stream(rs.stream.color, frame_width,frame_height, rs.format.rgb8, fps_choice)
config.enable_stream(rs.stream.color, frame_width,frame_height, rs.format.rgb8, fps_choice)
config.enable_stream(rs.stream.infrared,1, frame_width,frame_height, rs.format.y8, fps_choice)
print("PING after enabling the sync mode is {}".format(device.first_depth_sensor().get_option(rs.option.inter_cam_sync_mode)))
# Start streaming, call the stream 'cfg' for some reason, as pr example
cfg = pipeline.start(config)
# create an align object
# alternative is to align to color, faster but less precise: align_to = rs.stream.color
align_to = rs.stream.depth
align = rs.align(align_to)
print('dev '+str(which_device)+' serial is ' + device_serial)
# Use the first three digits of the serial as a string to tag the device:
device_tag = device_serial[0:3]
if show_frames:
# open a window for cv2
window_title = "dev"+str(which_device)+"(#" + device_tag + ")"
cv2.namedWindow(window_title+'cad')
# block for setting up a low-level fps estimation,
cnt = 0 # a counter
last = time.time() # start_time
fps = 0 # initial fps value
# save the camera intrinsics
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = cfg.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print ("Depth Scale is: " , depth_scale)
# this is how to get the intrinsics
profile = cfg.get_stream(rs.stream.depth) # Fetch stream profile for depth stream
intr = profile.as_video_stream_profile().get_intrinsics() # Downcast to video_stream_profile
#% now make file and save time stamps and depth scaling and intrinsics etc
# use the old naming convention
parameternames = np.array(['cam_params.fx',
'cam_params.fy',
'cam_params.ppx',
'cam_params.ppy',
'd_scale',
'fps_choice',
'frame_width',
'frame_height'])
parameters = np.array([intr.fx,
intr.fy,
intr.ppx,
intr.ppy,
depth_scale,
fps_choice,
intr.width,
intr.height])
# open a file for writint the parameters
with open(top_folder+'/parameters_'+str(which_device)+'.csv','w') as intrfile:
writer = csv.writer(intrfile, delimiter=',')
writer.writerow(parameternames)
writer.writerow(parameters)
# load the automatic led mask from the constants folder!
led_mask,led_logic,led_centroid = load_auto_roi(which_device)
# open a file for time stamps
tsfile = open(top_folder+'/timestamps_'+str(which_device)+'.csv','w')
# ## HF try to open an HF file
# import h5py
# #TODO input from somewhere
# hf = h5py.File(top_folder+'/dev'+str(which_device)+'_d_'+'.h5', 'w')
# # also open one for the cad
# hf_cad = h5py.File(top_folder+'/dev'+str(which_device)+'_cad_'+'.h5', 'w')
# NPY ADDITION
npy_folder = top_folder+'/npy_raw'
# open a file for led stamps
# ledsfile = open(top_folder+'/ledstamps_'+str(which_device)+'.csv','w')
print('starting to stream from device '+str(which_device)+'!')
# wait for a bit for the cam to warm up
# and loop over 30 frames
warmup_time = 2 # seconds
warmup = 0
while warmup < fps_choice*warmup_time:
frames = pipeline.wait_for_frames()
warmup += 1
print('device '+str(which_device)+' is warmed up!')
# START A CLOCK FOR THE FRAMES!
FRAME_CLOCK = 0
try:
while True:
if show_frames:
# for counting frame rate
cnt += 1
if (cnt % 10) == 0:
now = time.time() # after 10 frames
dt = now - last # how long did it take?
fps = 10/dt # calculate frame rate
last = now # assign a new value to the 'last time'
#################################
#
# R E A D B L O C K
#
#################################
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
# get the frame numbers and time stamps
# ts = round(frames.get,2)
ts = frames.get_timestamp()
fn = frames.get_frame_number()
# get the unix time stamp
ts_unix = time.time()-start_time
# run the alignment process
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
cad_frame = aligned_frames.get_color_frame()
# also get one for the LED
# depth_frame = frames.get_depth_frame()
# color_frame = frames.get_color_frame()
infrared_frame = frames.get_infrared_frame()
# Convert images to numpy arrays
depth = np.asanyarray(depth_frame.get_data())
cad = np.asanyarray(cad_frame.get_data())
c = np.asanyarray(infrared_frame.get_data())
# get the LED value, round it a bit, could be profiled
led_stamp = c[led_centroid[1],led_centroid[0]]
# this is the writing block for the csv file, frame number and time stamp!
# tsfile.write(str(FRAME_CLOCK)+','+str(fn)+','+str(ts)+','+str(ts_unix)+','+str(single_pixel_RGB2GRAY(led_stamp))+'\n')
tsfile.write(str(FRAME_CLOCK)+','+str(fn)+','+str(ts)+','+str(ts_unix)+','+str(led_stamp)+'\n')
# this is the writing block for the csv file, frame number and time stamp!
#TODO put led with the others in same file?
# ledsfile.write(str(single_pixel_RGB2GRAY(led_stamp))+'\n')
# write the depth frames to tiff (replace: send to queue)
# cv2.imwrite(top_folder+'/dev'+str(which_device)+'_d_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.png', depth)
# cv2.imwrite(top_folder+'/dev'+str(which_device)+'_cad_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.png', cad)
# hf.create_dataset(str(FRAME_CLOCK), data=depth)
# hf_cad.create_dataset(str(FRAME_CLOCK), data=cad)
np.save(npy_folder+'/dev'+str(which_device)+'_d_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.npy',depth, allow_pickle = False)
np.save(npy_folder+'/dev'+str(which_device)+'_cad_'+str(FRAME_CLOCK).rjust(n_padding_digits,'0')+'.npy',cad, allow_pickle = False)
# UPDATE CLOCK
FRAME_CLOCK += 1
#
if show_frames:
# add text and show the CAD frames
cv2.putText(cad, window_title+', fps: '+str(fps)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, fps_color)
cv2.putText(cad, str(round(ts)), (0, frame_height-20), cv2.FONT_HERSHEY_SIMPLEX, 1, ts_color)
cv2.imshow(window_title+'cad', cad)
if cv2.waitKey(1) & 0xFF == ord('q'):
# looks for a small q to nbe pressed
# close the time stamp file
tsfile.close
# close the hf file
# hf.close()
# hf_cad.close()
# ledsfile.close
# stop the device
pipeline.stop()
print('pipeline from device '+str(which_device)+' is now closed!')
break
finally:
tsfile.close
# close the hf file
# hf.close()
# hf_cad.close()
# stop the device
pipeline.stop()
print('pipeline from device '+str(which_device)+' is now closed!')
#%% define helping funtions for the multiprocessing
# these functions have to not be iterable.
def read_device_0():
print('starting camera 1!')
which_device = 0
top_folder = top_folder_0
sub_function_trick(which_device,top_folder)
def read_device_1():
print('starting camera 2!')
which_device = 1
top_folder = top_folder_0
sub_function_trick(which_device,top_folder)
def read_device_2():
print('starting camera 3!')
which_device = 2
top_folder = top_folder_1
sub_function_trick(which_device,top_folder)
def read_device_3():
print('starting camera 4!')
which_device = 3
top_folder = top_folder_1
sub_function_trick(which_device,top_folder)
#%% run the processes on independent cores
from multiprocessing import Process
if __name__ == '__main__':
if args.ncams == 4:
print('starting 4 cams, with multiprocessing!')
# start 4 worker processes
Process(target=read_device_0).start()
time.sleep(3.)
Process(target=read_device_1).start()
Process(target=read_device_2).start()
Process(target=read_device_3).start()
Process(target=blink_using_firmata_random).start()
elif args.ncams == 3:
print('starting 3 cams, with multiprocessing!')
Process(target=read_device_0).start()
Process(target=read_device_1).start()
Process(target=read_device_2).start()
Process(target=blink_using_firmata).start()
elif args.ncams == 2:
print('starting 2 cams, with multiprocessing!')
Process(target=read_device_0).start()
Process(target=read_device_1).start()
Process(target=blink_using_firmata).start()
elif args.ncams == 1:
print('starting 1 cam, with multiprocessing!')
Process(target=read_device_0).start()
Process(target=blink_using_firmata).start()
| true | true |
f7f52406b09fd253d3b6657d1872f60fa266def1 | 1,257 | py | Python | rpsp/rpspnets/nn_diags.py | ahefnycmu/rpsp | ff3aa3e89a91bb4afb7bad932d2c04691a727a63 | [
"Apache-2.0"
] | 4 | 2019-11-03T12:04:47.000Z | 2022-01-21T08:55:54.000Z | rpsp/rpspnets/nn_diags.py | ahefnycmu/rpsp | ff3aa3e89a91bb4afb7bad932d2c04691a727a63 | [
"Apache-2.0"
] | null | null | null | rpsp/rpspnets/nn_diags.py | ahefnycmu/rpsp | ff3aa3e89a91bb4afb7bad932d2c04691a727a63 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 8 18:56:28 2017
@author: ahefny
"""
from __future__ import print_function
import numpy as np
import rpsp.globalconfig as globalconfig
from rpsp.rpspnets.psr_lite.utils.nn import CallbackOp
class PredictionError(Exception):
    """Signals that the current update step should be skipped because a
    prediction-quality check failed.

    The ``value`` payload carries diagnostic information for the catcher
    (e.g. a zero-row count, or a sentinel such as 0 or -1).
    """

    def __init__(self, value):
        self.value = value  # diagnostic payload

    def __str__(self):
        return '%r' % (self.value,)
def dbg_nn_skip_update(X, condition, msg):
    """Wrap tensor ``X`` with a runtime check: whenever ``condition``
    evaluates False on the value, log ``msg`` and raise
    PredictionError(0) so the caller can skip the update."""
    def _check(value):
        if condition(value):
            return
        print('Skip update: ' + msg, sep='')
        raise PredictionError(0)
    return CallbackOp(_check)(X)
def dbg_nn_raise_PredictionError(X, msg):
    """Attach a check to ``X`` that counts rows whose entries sum to
    (numerically) zero; if more than 10 such rows are found, log ``msg``
    and raise PredictionError with the count."""
    def _check(value):
        row_sums = np.sum(value, axis=1)
        n_zero_rows = np.sum(np.abs(row_sums) < 1e-6)
        if n_zero_rows > 10:
            print('all zeros Error! Skip update: ' + msg, n_zero_rows, sep='')
            raise PredictionError(n_zero_rows)
    return CallbackOp(_check)(X)
def dbg_raise_BadPrediction(X, msg):
    """Attach a check to scalar ``X`` (a prediction cost): if it exceeds
    the configured ``dbg_prederror`` threshold, log ``msg`` and raise
    PredictionError(-1) to skip the update."""
    def _check(cost):
        threshold = globalconfig.vars.args.dbg_prederror
        if cost > threshold:
            print(msg + ' Skip update. high pred cost (>%f)' % threshold,
                  cost, sep='')
            raise PredictionError(-1)
    return CallbackOp(_check)(X)
| 29.232558 | 108 | 0.626889 |
from __future__ import print_function
import numpy as np
import rpsp.globalconfig as globalconfig
from rpsp.rpspnets.psr_lite.utils.nn import CallbackOp
class PredictionError(Exception):
    """Exception used to abort an update step when prediction checks fail;
    ``value`` is a diagnostic payload (e.g. error count or sentinel code)."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
def dbg_nn_skip_update(X, condition, msg):
    """Wrap X in a callback op that raises PredictionError(0) (after
    logging ``msg``) when ``condition`` is False on the runtime value."""
    def fn(x):
        if not condition(x):
            print ('Skip update: '+msg, sep='')
            raise PredictionError(0)
    return CallbackOp(fn)(X)
def dbg_nn_raise_PredictionError(X, msg):
    """Wrap X in a callback op that counts rows summing to ~0
    (|row sum| < 1e-6) and raises PredictionError(count) when more than
    10 such rows are found; ``msg`` is logged."""
    def fn(x):
        # count rows of x whose entries sum to (numerically) zero
        errors = np.sum(np.abs(np.sum(x,axis=1))<1e-6)
        if errors > 10:
            print ('all zeros Error! Skip update: '+msg, errors, sep='')
            raise PredictionError(errors)
    return CallbackOp(fn)(X)
def dbg_raise_BadPrediction(X, msg):
    """Wrap scalar X (a prediction cost) in a callback op that raises
    PredictionError(-1) when it exceeds the configured dbg_prederror
    threshold; ``msg`` is logged with the offending cost."""
    def fn(x):
        if x > globalconfig.vars.args.dbg_prederror:
            # %f binds to the string literal only; x is printed separately
            print (msg+' Skip update. high pred cost (>%f)'%globalconfig.vars.args.dbg_prederror, x, sep='')
            raise PredictionError(-1)
    return CallbackOp(fn)(X)
| true | true |
f7f5251fbdc4b78a7c78b3453a0816e9f24cb0aa | 566 | py | Python | chapter4/Spark SQL UDF.py | xli1110/Spark | 1a79c4e7ce39fb83581faeb5ced7c71495fe35f5 | [
"MIT"
] | null | null | null | chapter4/Spark SQL UDF.py | xli1110/Spark | 1a79c4e7ce39fb83581faeb5ced7c71495fe35f5 | [
"MIT"
] | null | null | null | chapter4/Spark SQL UDF.py | xli1110/Spark | 1a79c4e7ce39fb83581faeb5ced7c71495fe35f5 | [
"MIT"
] | null | null | null | from pyspark.sql import SparkSession
from pyspark.sql.types import LongType
spark = (SparkSession
.builder
.appName("Spark_SQL_UDF")
.getOrCreate())
# Create cubed function
def cubed(s):
    """Return the cube of ``s``."""
    return s ** 3
# Register UDF
spark.udf.register("cubed", cubed, LongType())
# Generate temporary view
spark.range(1, 9).createOrReplaceTempView("udf_test")
# Query
spark.sql(
"""
SELECT *
FROM udf_test
"""
).show()
spark.sql(
"""
SELECT id,
cubed(id) AS id_cubed
FROM udf_test
"""
).show()
| 15.722222 | 53 | 0.618375 | from pyspark.sql import SparkSession
from pyspark.sql.types import LongType
spark = (SparkSession
.builder
.appName("Spark_SQL_UDF")
.getOrCreate())
def cubed(s):
    # Cube of s; registered below as a Spark SQL UDF returning LongType.
    return s * s * s
spark.udf.register("cubed", cubed, LongType())
spark.range(1, 9).createOrReplaceTempView("udf_test")
spark.sql(
"""
SELECT *
FROM udf_test
"""
).show()
spark.sql(
"""
SELECT id,
cubed(id) AS id_cubed
FROM udf_test
"""
).show()
| true | true |
f7f5258a9e09f44f5e8b5e67a41ad597790af8c8 | 19,689 | py | Python | test/functional/feature_backwards_compatibility.py | Garlic-HM/garliccoin | 9eefb7a2a8c7cccfbc833756c7b16bc181473b6d | [
"MIT"
] | null | null | null | test/functional/feature_backwards_compatibility.py | Garlic-HM/garliccoin | 9eefb7a2a8c7cccfbc833756c7b16bc181473b6d | [
"MIT"
] | null | null | null | test/functional/feature_backwards_compatibility.py | Garlic-HM/garliccoin | 9eefb7a2a8c7cccfbc833756c7b16bc181473b6d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Garliccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Backwards compatibility functional test
Test various backwards compatibility scenarios. Download the previous node binaries:
test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
v0.15.2 is not required by this test, but it is used in wallet_upgradewallet.py.
Due to a hardfork in regtest, it can't be used to sync nodes.
Due to RPC changes introduced in various versions the below tests
won't work for older versions without some patches or workarounds.
Use only the latest patch version of each release, unless a test specifically
needs an older patch version.
"""
import os
import shutil
from test_framework.test_framework import GarliccoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class BackwardsCompatibilityTest(GarliccoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
# Add new version after each release:
self.extra_args = [
["-addresstype=bech32"], # Pre-release: use to mine blocks
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # Pre-release: use to receive coins, swap wallets, etc
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.19.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.18.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.17.2
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-wallet=wallet.dat"], # v0.16.3
]
self.wallet_names = [self.default_wallet_name]
    def skip_test_if_missing_module(self):
        """Skip unless wallet support and prior-release binaries are available."""
        self.skip_if_no_wallet()
        self.skip_if_no_previous_releases()
def setup_nodes(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
None,
None,
190100,
180100,
170200,
160300,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def run_test(self):
self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
self.sync_blocks()
# Sanity check the test framework:
res = self.nodes[self.num_nodes - 1].getblockchaininfo()
assert_equal(res['blocks'], 101)
node_master = self.nodes[self.num_nodes - 5]
node_v19 = self.nodes[self.num_nodes - 4]
node_v18 = self.nodes[self.num_nodes - 3]
node_v17 = self.nodes[self.num_nodes - 2]
node_v16 = self.nodes[self.num_nodes - 1]
self.log.info("Test wallet backwards compatibility...")
# Create a number of wallets and open them in older versions:
# w1: regular wallet, created on master: update this test when default
# wallets can no longer be opened by older versions.
node_master.createwallet(wallet_name="w1")
wallet = node_master.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
# Create a confirmed transaction, receiving coins
address = wallet.getnewaddress()
self.nodes[0].sendtoaddress(address, 10)
self.sync_mempools()
self.nodes[0].generate(1)
self.sync_blocks()
# Create a conflicting transaction using RBF
return_address = self.nodes[0].getnewaddress()
tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
# Confirm the transaction
self.sync_mempools()
self.nodes[0].generate(1)
self.sync_blocks()
# Create another conflicting transaction using RBF
tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
# Abandon transaction, but don't confirm
self.nodes[1].abandontransaction(tx3_id)
# w1_v19: regular wallet, created with v0.19
node_v19.rpc.createwallet(wallet_name="w1_v19")
wallet = node_v19.get_wallet_rpc("w1_v19")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
# Use addmultisigaddress (see #18075)
address_18075 = wallet.rpc.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"]
assert wallet.getaddressinfo(address_18075)["solvable"]
# w1_v18: regular wallet, created with v0.18
node_v18.rpc.createwallet(wallet_name="w1_v18")
wallet = node_v18.get_wallet_rpc("w1_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
# w2: wallet with private keys disabled, created on master: update this
# test when default wallets private keys disabled can no longer be
# opened by older versions.
node_master.createwallet(wallet_name="w2", disable_private_keys=True)
wallet = node_master.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# w2_v19: wallet with private keys disabled, created with v0.19
node_v19.rpc.createwallet(wallet_name="w2_v19", disable_private_keys=True)
wallet = node_v19.get_wallet_rpc("w2_v19")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# w2_v18: wallet with private keys disabled, created with v0.18
node_v18.rpc.createwallet(wallet_name="w2_v18", disable_private_keys=True)
wallet = node_v18.get_wallet_rpc("w2_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# w3: blank wallet, created on master: update this
# test when default blank wallets can no longer be opened by older versions.
node_master.createwallet(wallet_name="w3", blank=True)
wallet = node_master.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# w3_v19: blank wallet, created with v0.19
node_v19.rpc.createwallet(wallet_name="w3_v19", blank=True)
wallet = node_v19.get_wallet_rpc("w3_v19")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# w3_v18: blank wallet, created with v0.18
node_v18.rpc.createwallet(wallet_name="w3_v18", blank=True)
wallet = node_v18.get_wallet_rpc("w3_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# Copy the wallets to older nodes:
node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets")
node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets")
node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets")
node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets")
node_v16_wallets_dir = os.path.join(node_v16.datadir, "regtest")
node_master.unloadwallet("w1")
node_master.unloadwallet("w2")
node_v19.unloadwallet("w1_v19")
node_v19.unloadwallet("w2_v19")
node_v18.unloadwallet("w1_v18")
node_v18.unloadwallet("w2_v18")
# Copy wallets to v0.16
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v16_wallets_dir, wallet)
)
# Copy wallets to v0.17
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v17_wallets_dir, wallet)
)
for wallet in os.listdir(node_v18_wallets_dir):
shutil.copytree(
os.path.join(node_v18_wallets_dir, wallet),
os.path.join(node_v17_wallets_dir, wallet)
)
# Copy wallets to v0.18
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v18_wallets_dir, wallet)
)
# Copy wallets to v0.19
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v19_wallets_dir, wallet)
)
if not self.options.descriptors:
# Descriptor wallets break compatibility, only run this test for legacy wallet
# Open the wallets in v0.19
node_v19.loadwallet("w1")
wallet = node_v19.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
txs = wallet.listtransactions()
assert_equal(len(txs), 5)
assert_equal(txs[1]["txid"], tx1_id)
assert_equal(txs[2]["walletconflicts"], [tx1_id])
assert_equal(txs[1]["replaced_by_txid"], tx2_id)
assert not(txs[1]["abandoned"])
assert_equal(txs[1]["confirmations"], -1)
assert_equal(txs[2]["blockindex"], 1)
assert txs[3]["abandoned"]
assert_equal(txs[4]["walletconflicts"], [tx3_id])
assert_equal(txs[3]["replaced_by_txid"], tx4_id)
assert not(hasattr(txs[3], "blockindex"))
node_v19.loadwallet("w2")
wallet = node_v19.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
node_v19.loadwallet("w3")
wallet = node_v19.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# Open the wallets in v0.18
node_v18.loadwallet("w1")
wallet = node_v18.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
txs = wallet.listtransactions()
assert_equal(len(txs), 5)
assert_equal(txs[1]["txid"], tx1_id)
assert_equal(txs[2]["walletconflicts"], [tx1_id])
assert_equal(txs[1]["replaced_by_txid"], tx2_id)
assert not(txs[1]["abandoned"])
assert_equal(txs[1]["confirmations"], -1)
assert_equal(txs[2]["blockindex"], 1)
assert txs[3]["abandoned"]
assert_equal(txs[4]["walletconflicts"], [tx3_id])
assert_equal(txs[3]["replaced_by_txid"], tx4_id)
assert not(hasattr(txs[3], "blockindex"))
node_v18.loadwallet("w2")
wallet = node_v18.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
node_v18.loadwallet("w3")
wallet = node_v18.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
node_v17.loadwallet("w1")
wallet = node_v17.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
node_v17.loadwallet("w2")
wallet = node_v17.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
else:
# Descriptor wallets appear to be corrupted wallets to old software
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w1")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w2")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w3")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w1")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w2")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w3")
# Open the wallets in v0.17
node_v17.loadwallet("w1_v18")
wallet = node_v17.get_wallet_rpc("w1_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
node_v17.loadwallet("w2_v18")
wallet = node_v17.get_wallet_rpc("w2_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# RPC loadwallet failure causes garliccoind to exit, in addition to the RPC
# call failure, so the following test won't work:
# assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18')
# Instead, we stop node and try to launch it with the wallet:
self.stop_node(4)
node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Garliccoin Core")
if self.options.descriptors:
# Descriptor wallets appear to be corrupted wallets to old software
node_v17.assert_start_raises_init_error(["-wallet=w1"], "Error: wallet.dat corrupt, salvage failed")
node_v17.assert_start_raises_init_error(["-wallet=w2"], "Error: wallet.dat corrupt, salvage failed")
node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: wallet.dat corrupt, salvage failed")
else:
node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Garliccoin Core")
self.start_node(4)
if not self.options.descriptors:
# Descriptor wallets break compatibility, only run this test for legacy wallets
# Open most recent wallet in v0.16 (no loadwallet RPC)
self.restart_node(5, extra_args=["-wallet=w2"])
wallet = node_v16.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['keypoolsize'] == 1
# Create upgrade wallet in v0.16
self.restart_node(-1, extra_args=["-wallet=u1_v16"])
wallet = node_v16.get_wallet_rpc("u1_v16")
v16_addr = wallet.getnewaddress('', "bech32")
v16_info = wallet.validateaddress(v16_addr)
v16_pubkey = v16_info['pubkey']
self.stop_node(-1)
self.log.info("Test wallet upgrade path...")
# u1: regular wallet, created with v0.17
node_v17.rpc.createwallet(wallet_name="u1_v17")
wallet = node_v17.get_wallet_rpc("u1_v17")
address = wallet.getnewaddress("bech32")
v17_info = wallet.getaddressinfo(address)
hdkeypath = v17_info["hdkeypath"]
pubkey = v17_info["pubkey"]
if self.is_bdb_compiled():
# Old wallets are BDB and will only work if BDB is compiled
# Copy the 0.16 wallet to the last Garliccoin Core version and open it:
shutil.copyfile(
os.path.join(node_v16_wallets_dir, "wallets/u1_v16"),
os.path.join(node_master_wallets_dir, "u1_v16")
)
load_res = node_master.loadwallet("u1_v16")
# Make sure this wallet opens without warnings. See https://github.com/garliccoin/garliccoin/pull/19054
assert_equal(load_res['warning'], '')
wallet = node_master.get_wallet_rpc("u1_v16")
info = wallet.getaddressinfo(v16_addr)
descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")"
assert_equal(info["desc"], descsum_create(descriptor))
# Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it
os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16"))
shutil.copyfile(
os.path.join(node_master_wallets_dir, "u1_v16"),
os.path.join(node_v16_wallets_dir, "wallets/u1_v16")
)
self.start_node(-1, extra_args=["-wallet=u1_v16"])
wallet = node_v16.get_wallet_rpc("u1_v16")
info = wallet.validateaddress(v16_addr)
assert_equal(info, v16_info)
# Copy the 0.17 wallet to the last Garliccoin Core version and open it:
node_v17.unloadwallet("u1_v17")
shutil.copytree(
os.path.join(node_v17_wallets_dir, "u1_v17"),
os.path.join(node_master_wallets_dir, "u1_v17")
)
node_master.loadwallet("u1_v17")
wallet = node_master.get_wallet_rpc("u1_v17")
info = wallet.getaddressinfo(address)
descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")"
assert_equal(info["desc"], descsum_create(descriptor))
# Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it
node_master.unloadwallet("u1_v17")
shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17"))
shutil.copytree(
os.path.join(node_master_wallets_dir, "u1_v17"),
os.path.join(node_v17_wallets_dir, "u1_v17")
)
node_v17.loadwallet("u1_v17")
wallet = node_v17.get_wallet_rpc("u1_v17")
info = wallet.getaddressinfo(address)
assert_equal(info, v17_info)
# Copy the 0.19 wallet to the last Garliccoin Core version and open it:
shutil.copytree(
os.path.join(node_v19_wallets_dir, "w1_v19"),
os.path.join(node_master_wallets_dir, "w1_v19")
)
node_master.loadwallet("w1_v19")
wallet = node_master.get_wallet_rpc("w1_v19")
assert wallet.getaddressinfo(address_18075)["solvable"]
# Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it
node_master.unloadwallet("w1_v19")
shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19"))
shutil.copytree(
os.path.join(node_master_wallets_dir, "w1_v19"),
os.path.join(node_v19_wallets_dir, "w1_v19")
)
node_v19.loadwallet("w1_v19")
wallet = node_v19.get_wallet_rpc("w1_v19")
assert wallet.getaddressinfo(address_18075)["solvable"]
if __name__ == '__main__':
BackwardsCompatibilityTest().main()
| 45.895105 | 223 | 0.63599 |
import os
import shutil
from test_framework.test_framework import GarliccoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class BackwardsCompatibilityTest(GarliccoinTestFramework):
    """Check that wallets move cleanly between node versions.

    Creates wallets on current-build nodes and on historic releases
    (v0.19 / v0.18 / v0.17 / v0.16), copies the wallet files between the
    nodes' data directories, and verifies that each version can load --
    or deliberately refuses to load -- the others' wallets, including the
    explicit upgrade path from v0.16 and v0.17 wallets to the current
    version.
    """

    def set_test_params(self):
        # Node 0 mines; nodes 1..5 hold wallets.  The last node is v0.16,
        # which has no loadwallet RPC, hence the -wallet startup argument.
        self.setup_clean_chain = True
        self.num_nodes = 6
        # bech32 keeps address handling comparable across versions;
        # walletrbf enables the bumpfee/replacement scenarios below.
        self.extra_args = [
            ["-addresstype=bech32"],
            ["-nowallet", "-walletrbf=1", "-addresstype=bech32"],
            ["-nowallet", "-walletrbf=1", "-addresstype=bech32"],
            ["-nowallet", "-walletrbf=1", "-addresstype=bech32"],
            ["-nowallet", "-walletrbf=1", "-addresstype=bech32"],
            ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-wallet=wallet.dat"],
        ]
        self.wallet_names = [self.default_wallet_name]

    def skip_test_if_missing_module(self):
        # Requires wallet support and the downloaded release binaries.
        self.skip_if_no_wallet()
        self.skip_if_no_previous_releases()

    def setup_nodes(self):
        # None selects the current build; numeric entries select the
        # corresponding previous-release binaries.
        self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
            None,
            None,
            190100,
            180100,
            170200,
            160300,
        ])
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()

    def run_test(self):
        """Drive the full compatibility matrix; see the class docstring."""
        self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())

        self.sync_blocks()

        # Sanity check: the oldest node followed the chain.
        res = self.nodes[self.num_nodes - 1].getblockchaininfo()
        assert_equal(res['blocks'], 101)

        node_master = self.nodes[self.num_nodes - 5]
        node_v19 = self.nodes[self.num_nodes - 4]
        node_v18 = self.nodes[self.num_nodes - 3]
        node_v17 = self.nodes[self.num_nodes - 2]
        node_v16 = self.nodes[self.num_nodes - 1]

        self.log.info("Test wallet backwards compatibility...")
        # w1: regular wallet, created on the current build.
        node_master.createwallet(wallet_name="w1")
        wallet = node_master.get_wallet_rpc("w1")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        # Create a confirmed transaction, receiving coins.
        address = wallet.getnewaddress()
        self.nodes[0].sendtoaddress(address, 10)
        self.sync_mempools()
        self.nodes[0].generate(1)
        self.sync_blocks()
        # Create a conflicting (fee-bumped) transaction using RBF, then
        # confirm the replacement.
        return_address = self.nodes[0].getnewaddress()
        tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
        tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
        self.sync_mempools()
        self.nodes[0].generate(1)
        self.sync_blocks()
        # Another RBF pair; abandon the original without confirming.
        tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
        tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
        self.nodes[1].abandontransaction(tx3_id)
        # w1_v19: regular wallet, created with v0.19
        node_v19.rpc.createwallet(wallet_name="w1_v19")
        wallet = node_v19.get_wallet_rpc("w1_v19")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        # Use addmultisigaddress (see #18075)
        address_18075 = wallet.rpc.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"]
        assert wallet.getaddressinfo(address_18075)["solvable"]
        # w1_v18: regular wallet, created with v0.18
        node_v18.rpc.createwallet(wallet_name="w1_v18")
        wallet = node_v18.get_wallet_rpc("w1_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        # w2: wallet with private keys disabled, created on master: update this
        # test when default wallets private keys disabled can no longer be
        # opened by older versions.
        node_master.createwallet(wallet_name="w2", disable_private_keys=True)
        wallet = node_master.get_wallet_rpc("w2")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0
        # w2_v19: wallet with private keys disabled, created with v0.19
        node_v19.rpc.createwallet(wallet_name="w2_v19", disable_private_keys=True)
        wallet = node_v19.get_wallet_rpc("w2_v19")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0
        # w2_v18: wallet with private keys disabled, created with v0.18
        node_v18.rpc.createwallet(wallet_name="w2_v18", disable_private_keys=True)
        wallet = node_v18.get_wallet_rpc("w2_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0
        # w3: blank wallet, created on master: update this
        # test when default blank wallets can no longer be opened by older versions.
        node_master.createwallet(wallet_name="w3", blank=True)
        wallet = node_master.get_wallet_rpc("w3")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0
        # w3_v19: blank wallet, created with v0.19
        node_v19.rpc.createwallet(wallet_name="w3_v19", blank=True)
        wallet = node_v19.get_wallet_rpc("w3_v19")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0
        # w3_v18: blank wallet, created with v0.18
        node_v18.rpc.createwallet(wallet_name="w3_v18", blank=True)
        wallet = node_v18.get_wallet_rpc("w3_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0
        # Copy the wallets to older nodes:
        node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets")
        node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets")
        node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets")
        node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets")
        # v0.16 keeps wallet files directly under the regtest dir.
        node_v16_wallets_dir = os.path.join(node_v16.datadir, "regtest")
        # Unload before copying so the files are quiescent on disk.
        node_master.unloadwallet("w1")
        node_master.unloadwallet("w2")
        node_v19.unloadwallet("w1_v19")
        node_v19.unloadwallet("w2_v19")
        node_v18.unloadwallet("w1_v18")
        node_v18.unloadwallet("w2_v18")
        # Copy wallets to v0.16
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(
                os.path.join(node_master_wallets_dir, wallet),
                os.path.join(node_v16_wallets_dir, wallet)
            )
        # Copy wallets to v0.17
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(
                os.path.join(node_master_wallets_dir, wallet),
                os.path.join(node_v17_wallets_dir, wallet)
            )
        for wallet in os.listdir(node_v18_wallets_dir):
            shutil.copytree(
                os.path.join(node_v18_wallets_dir, wallet),
                os.path.join(node_v17_wallets_dir, wallet)
            )
        # Copy wallets to v0.18
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(
                os.path.join(node_master_wallets_dir, wallet),
                os.path.join(node_v18_wallets_dir, wallet)
            )
        # Copy wallets to v0.19
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(
                os.path.join(node_master_wallets_dir, wallet),
                os.path.join(node_v19_wallets_dir, wallet)
            )
        if not self.options.descriptors:
            # Descriptor wallets break compatibility, only run this test for legacy wallet
            # Open the wallets in v0.19
            node_v19.loadwallet("w1")
            wallet = node_v19.get_wallet_rpc("w1")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled']
            assert info['keypoolsize'] > 0
            # Check that RBF conflict/abandon metadata survived the downgrade.
            txs = wallet.listtransactions()
            assert_equal(len(txs), 5)
            assert_equal(txs[1]["txid"], tx1_id)
            assert_equal(txs[2]["walletconflicts"], [tx1_id])
            assert_equal(txs[1]["replaced_by_txid"], tx2_id)
            assert not(txs[1]["abandoned"])
            assert_equal(txs[1]["confirmations"], -1)
            assert_equal(txs[2]["blockindex"], 1)
            assert txs[3]["abandoned"]
            assert_equal(txs[4]["walletconflicts"], [tx3_id])
            assert_equal(txs[3]["replaced_by_txid"], tx4_id)
            assert not(hasattr(txs[3], "blockindex"))
            node_v19.loadwallet("w2")
            wallet = node_v19.get_wallet_rpc("w2")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled'] == False
            assert info['keypoolsize'] == 0
            node_v19.loadwallet("w3")
            wallet = node_v19.get_wallet_rpc("w3")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled']
            assert info['keypoolsize'] == 0
            # Open the wallets in v0.18
            node_v18.loadwallet("w1")
            wallet = node_v18.get_wallet_rpc("w1")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled']
            assert info['keypoolsize'] > 0
            txs = wallet.listtransactions()
            assert_equal(len(txs), 5)
            assert_equal(txs[1]["txid"], tx1_id)
            assert_equal(txs[2]["walletconflicts"], [tx1_id])
            assert_equal(txs[1]["replaced_by_txid"], tx2_id)
            assert not(txs[1]["abandoned"])
            assert_equal(txs[1]["confirmations"], -1)
            assert_equal(txs[2]["blockindex"], 1)
            assert txs[3]["abandoned"]
            assert_equal(txs[4]["walletconflicts"], [tx3_id])
            assert_equal(txs[3]["replaced_by_txid"], tx4_id)
            assert not(hasattr(txs[3], "blockindex"))
            node_v18.loadwallet("w2")
            wallet = node_v18.get_wallet_rpc("w2")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled'] == False
            assert info['keypoolsize'] == 0
            node_v18.loadwallet("w3")
            wallet = node_v18.get_wallet_rpc("w3")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled']
            assert info['keypoolsize'] == 0
            # Open the current wallets in v0.17 (blank w3 is tested below
            # via startup, since a loadwallet failure makes v0.17 exit).
            node_v17.loadwallet("w1")
            wallet = node_v17.get_wallet_rpc("w1")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled']
            assert info['keypoolsize'] > 0
            node_v17.loadwallet("w2")
            wallet = node_v17.get_wallet_rpc("w2")
            info = wallet.getwalletinfo()
            assert info['private_keys_enabled'] == False
            assert info['keypoolsize'] == 0
        else:
            # Descriptor wallets appear to be corrupted wallets to old software
            assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w1")
            assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w2")
            assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w3")
            assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w1")
            assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w2")
            assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w3")
        # Open the wallets in v0.17
        node_v17.loadwallet("w1_v18")
        wallet = node_v17.get_wallet_rpc("w1_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        node_v17.loadwallet("w2_v18")
        wallet = node_v17.get_wallet_rpc("w2_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0
        # RPC loadwallet failure causes garliccoind to exit, in addition to the RPC
        # call failure, so the following test won't work:
        # instead, stop the node and try to launch it with the wallet.
        self.stop_node(4)
        node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Garliccoin Core")
        if self.options.descriptors:
            node_v17.assert_start_raises_init_error(["-wallet=w1"], "Error: wallet.dat corrupt, salvage failed")
            node_v17.assert_start_raises_init_error(["-wallet=w2"], "Error: wallet.dat corrupt, salvage failed")
            node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: wallet.dat corrupt, salvage failed")
        else:
            node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Garliccoin Core")
        self.start_node(4)
        if not self.options.descriptors:
            # v0.16 has no loadwallet RPC, so restart it with the wallet arg.
            self.restart_node(5, extra_args=["-wallet=w2"])
            wallet = node_v16.get_wallet_rpc("w2")
            info = wallet.getwalletinfo()
            assert info['keypoolsize'] == 1
        # Create an upgrade wallet in v0.16 and remember its first address.
        self.restart_node(-1, extra_args=["-wallet=u1_v16"])
        wallet = node_v16.get_wallet_rpc("u1_v16")
        v16_addr = wallet.getnewaddress('', "bech32")
        v16_info = wallet.validateaddress(v16_addr)
        v16_pubkey = v16_info['pubkey']
        self.stop_node(-1)

        self.log.info("Test wallet upgrade path...")
        # u1_v17: regular wallet, created with v0.17
        node_v17.rpc.createwallet(wallet_name="u1_v17")
        wallet = node_v17.get_wallet_rpc("u1_v17")
        address = wallet.getnewaddress("bech32")
        v17_info = wallet.getaddressinfo(address)
        hdkeypath = v17_info["hdkeypath"]
        pubkey = v17_info["pubkey"]
        if self.is_bdb_compiled():
            # Copy the 0.16 wallet to the current version and open it:
            shutil.copyfile(
                os.path.join(node_v16_wallets_dir, "wallets/u1_v16"),
                os.path.join(node_master_wallets_dir, "u1_v16")
            )
            load_res = node_master.loadwallet("u1_v16")
            # The wallet must open without warnings.
            assert_equal(load_res['warning'], '')
            wallet = node_master.get_wallet_rpc("u1_v16")
            info = wallet.getaddressinfo(v16_addr)
            descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")"
            assert_equal(info["desc"], descsum_create(descriptor))
            # Now copy that same wallet back to 0.16 to make sure no automatic
            # upgrade broke it for the old version.
            os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16"))
            shutil.copyfile(
                os.path.join(node_master_wallets_dir, "u1_v16"),
                os.path.join(node_v16_wallets_dir, "wallets/u1_v16")
            )
            self.start_node(-1, extra_args=["-wallet=u1_v16"])
            wallet = node_v16.get_wallet_rpc("u1_v16")
            info = wallet.validateaddress(v16_addr)
            assert_equal(info, v16_info)
        # Copy the 0.17 wallet to the current version and open it:
        node_v17.unloadwallet("u1_v17")
        shutil.copytree(
            os.path.join(node_v17_wallets_dir, "u1_v17"),
            os.path.join(node_master_wallets_dir, "u1_v17")
        )
        node_master.loadwallet("u1_v17")
        wallet = node_master.get_wallet_rpc("u1_v17")
        info = wallet.getaddressinfo(address)
        descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")"
        assert_equal(info["desc"], descsum_create(descriptor))
        # Now copy that same wallet back to 0.17 to make sure no automatic
        # upgrade broke it for the old version.
        node_master.unloadwallet("u1_v17")
        shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17"))
        shutil.copytree(
            os.path.join(node_master_wallets_dir, "u1_v17"),
            os.path.join(node_v17_wallets_dir, "u1_v17")
        )
        node_v17.loadwallet("u1_v17")
        wallet = node_v17.get_wallet_rpc("u1_v17")
        info = wallet.getaddressinfo(address)
        assert_equal(info, v17_info)
        # Copy the 0.19 wallet to the current version and open it:
        shutil.copytree(
            os.path.join(node_v19_wallets_dir, "w1_v19"),
            os.path.join(node_master_wallets_dir, "w1_v19")
        )
        node_master.loadwallet("w1_v19")
        wallet = node_master.get_wallet_rpc("w1_v19")
        assert wallet.getaddressinfo(address_18075)["solvable"]
        # And back to 0.19 to make sure no automatic upgrade broke it.
        node_master.unloadwallet("w1_v19")
        shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19"))
        shutil.copytree(
            os.path.join(node_master_wallets_dir, "w1_v19"),
            os.path.join(node_v19_wallets_dir, "w1_v19")
        )
        node_v19.loadwallet("w1_v19")
        wallet = node_v19.get_wallet_rpc("w1_v19")
        assert wallet.getaddressinfo(address_18075)["solvable"]
if __name__ == '__main__':
    # Framework entry point: run the functional test when executed directly.
    BackwardsCompatibilityTest().main()
| true | true |
f7f525917964ede1f175cc8117f8a9fb6871d815 | 1,225 | py | Python | 340_easy_first_recurring/first_recurring.py | jacobmorzinski/dailyprogrammer | 9f52c93498c7c27d235da9bac50ab91ddcd19a71 | [
"MIT"
] | null | null | null | 340_easy_first_recurring/first_recurring.py | jacobmorzinski/dailyprogrammer | 9f52c93498c7c27d235da9bac50ab91ddcd19a71 | [
"MIT"
] | null | null | null | 340_easy_first_recurring/first_recurring.py | jacobmorzinski/dailyprogrammer | 9f52c93498c7c27d235da9bac50ab91ddcd19a71 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# https://www.reddit.com/r/dailyprogrammer/comments/7cnqtw/20171113_challenge_340_easy_first_recurring/
'''
a program that outputs the first recurring character in a string
'''
from __future__ import unicode_literals, print_function, division, absolute_import
import argparse
import sys
import logging
def determine_first_recurring(s):  # pylint: disable=C0103
    """Return the first recurring character in string ``s``, or None.

    "First recurring" means the character whose *second* occurrence comes
    earliest, e.g. ``"abba"`` -> ``"b"``.  Returns None when no character
    repeats (including for the empty string).
    """
    # A set of characters seen so far is all we need: the original dict of
    # counts only ever tested "count > 1", i.e. plain membership.
    seen = set()
    for char in s:
        if char in seen:
            logging.debug("first recurring char: %s", char)
            return char
        seen.add(char)
    return None
def main(args=None):
    """Parse options, then print the first recurring character of each
    stdin line (one result per line); returns 0 on success."""
    argv = sys.argv[1:] if args is None else args
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--debug', action="store_true")
    options = parser.parse_args(argv)
    # Only enable verbose logging when explicitly requested.
    if options.debug is True:
        logging.basicConfig(level=logging.DEBUG)
    logging.debug(options)
    for raw_line in sys.stdin:
        print(determine_first_recurring(raw_line.strip()))
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| 26.06383 | 103 | 0.677551 |
from __future__ import unicode_literals, print_function, division, absolute_import
import argparse
import sys
import logging
def determine_first_recurring(s):
    """Given string s, return the first character seen twice, else None."""
    counts = {}      # per-character occurrence counts so far
    answer = None
    for char in s:
        counts[char] = 1 + counts.get(char, 0)
        logging.debug("char: %s, count: %d", char, counts[char])
        if counts[char] > 1:
            # Second sighting: this is the first recurring character.
            answer = char
            break
    return answer
def main(args=None):
    """Main entry point: print the first recurring character per stdin line."""
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--debug', action="store_true")
    args_ns = parser.parse_args(args)
    # Verbose logging only when --debug was passed.
    if args_ns.debug is True:
        logging.basicConfig(level=logging.DEBUG)
    logging.debug(args_ns)
    for line in sys.stdin:
        answer = determine_first_recurring(line.strip())
        print(answer)
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| true | true |
f7f525f6f1a9ec675c49735e4979b6ee0c67ce64 | 23,929 | py | Python | concurrent/futures/_base.py | AppliedIntuition/pythonfutures | e8cd2a441804ffd3529b6ae342ad41983717668f | [
"PSF-2.0"
] | 244 | 2015-03-19T09:45:15.000Z | 2022-02-12T16:57:40.000Z | concurrent/futures/_base.py | AppliedIntuition/pythonfutures | e8cd2a441804ffd3529b6ae342ad41983717668f | [
"PSF-2.0"
] | 79 | 2015-03-24T18:11:37.000Z | 2022-01-11T15:23:30.000Z | concurrent/futures/_base.py | AppliedIntuition/pythonfutures | e8cd2a441804ffd3529b6ae342ad41983717668f | [
"PSF-2.0"
] | 66 | 2015-03-24T17:12:14.000Z | 2022-02-12T16:57:42.000Z | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import collections
import logging
import threading
import itertools
import time
import types
__author__ = 'Brian Quinlan (brian@sweetapp.com)'

# Values accepted by wait()'s return_when argument.
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
# Internal-only sentinel used by as_completed() when installing waiters.
_AS_COMPLETED = '_AS_COMPLETED'

# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'

_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]

# Human-readable state names used by Future.__repr__().
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}

# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
    """Base class for all future-related exceptions.

    Catching Error covers both CancelledError and TimeoutError.
    """
    pass
class CancelledError(Error):
    """The Future was cancelled before it could complete."""
    pass
class TimeoutError(Error):
    """The operation exceeded the given deadline."""
    pass
class _Waiter(object):
    """Base helper recording completed futures for wait()/as_completed().

    Holds the threading.Event that blocking callers sleep on, plus the
    list of futures that completed since the waiter was installed.  The
    base class records every kind of completion identically; subclasses
    decide *when* the event fires.
    """

    def __init__(self):
        self.event = threading.Event()
        self.finished_futures = []

    def _record(self, future):
        # Results, exceptions and cancellations are all recorded the same
        # way at this level.
        self.finished_futures.append(future)

    def add_result(self, future):
        self._record(future)

    def add_exception(self, future):
        self._record(future)

    def add_cancelled(self, future):
        self._record(future)
class _AsCompletedWaiter(_Waiter):
    """Waiter used by as_completed(): wakes the consumer on every event."""

    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        # Guards finished_futures against concurrent workers;
        # as_completed() also takes this lock when draining the list.
        self.lock = threading.Lock()

    def _notify(self, record, future):
        # Record under the lock, then wake any thread blocked on the event.
        with self.lock:
            record(self, future)
            self.event.set()

    def add_result(self, future):
        self._notify(_Waiter.add_result, future)

    def add_exception(self, future):
        self._notify(_Waiter.add_exception, future)

    def add_cancelled(self, future):
        self._notify(_Waiter.add_cancelled, future)
class _FirstCompletedWaiter(_Waiter):
    """Waiter used by wait(return_when=FIRST_COMPLETED).

    Any single completion -- result, exception or cancellation -- is
    enough to release the waiting thread.
    """

    def _record_and_wake(self, record, future):
        record(self, future)
        self.event.set()

    def add_result(self, future):
        self._record_and_wake(_Waiter.add_result, future)

    def add_exception(self, future):
        self._record_and_wake(_Waiter.add_exception, future)

    def add_cancelled(self, future):
        self._record_and_wake(_Waiter.add_cancelled, future)
class _AllCompletedWaiter(_Waiter):
    """Waiter used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).

    Counts down outstanding futures and fires the event at zero; with
    stop_on_exception=True the first raised exception fires it early.
    """

    def __init__(self, num_pending_calls, stop_on_exception):
        # Number of futures still expected to complete.
        self.num_pending_calls = num_pending_calls
        self.stop_on_exception = stop_on_exception
        # Protects the countdown from concurrent worker threads.
        self.lock = threading.Lock()
        super(_AllCompletedWaiter, self).__init__()

    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if not self.num_pending_calls:
                self.event.set()

    def add_result(self, future):
        _Waiter.add_result(self, future)
        self._decrement_pending_calls()

    def add_exception(self, future):
        _Waiter.add_exception(self, future)
        if not self.stop_on_exception:
            self._decrement_pending_calls()
        else:
            # FIRST_EXCEPTION: wake immediately, skip the countdown.
            self.event.set()

    def add_cancelled(self, future):
        _Waiter.add_cancelled(self, future)
        self._decrement_pending_calls()
class _AcquireFutures(object):
    """Context manager that acquires every future's condition, in order.

    Sorting by id() imposes one global lock order, so two threads locking
    overlapping sets of futures cannot deadlock each other.
    """

    def __init__(self, futures):
        self.futures = sorted(futures, key=id)

    def __enter__(self):
        for fut in self.futures:
            fut._condition.acquire()

    def __exit__(self, *args):
        for fut in self.futures:
            fut._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Build the appropriate _Waiter for *return_when* and attach it to
    every future in *fs*.

    The caller must already hold each future's _condition (see
    _AcquireFutures) so the installation races with nothing.
    """
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    elif return_when in (FIRST_EXCEPTION, ALL_COMPLETED):
        # The countdown must ignore futures that already completed.
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
        stop_early = (return_when == FIRST_EXCEPTION)
        waiter = _AllCompletedWaiter(pending_count, stop_on_exception=stop_early)
    else:
        raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter
def _yield_finished_futures(fs, waiter, ref_collect):
    """
    Iterate on the list *fs*, yielding finished futures one by one in
    reverse order.
    Before yielding a future, *waiter* is removed from its waiters
    and the future is removed from each set in the collection of sets
    *ref_collect*.
    The aim of this function is to avoid keeping stale references after
    the future is yielded and before the iterator resumes.
    """
    while fs:
        # Peek at the tail; the actual pop happens at the yield below.
        f = fs[-1]
        for futures_set in ref_collect:
            futures_set.remove(f)
        with f._condition:
            f._waiters.remove(waiter)
        # Drop our local reference before suspending at the yield, so this
        # frame does not keep the future alive while the consumer runs.
        del f
        # Careful not to keep a reference to the popped value
        yield fs.pop()
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete (finished or
        cancelled). If any given Futures are duplicated, they will be returned
        once.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()

    # Deduplicate: each future is yielded at most once.
    fs = set(fs)
    total_futures = len(fs)
    with _AcquireFutures(fs):
        # Snapshot done-vs-pending and install the waiter atomically, while
        # every future's condition is held.
        finished = set(
            f for f in fs
            if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    finished = list(finished)
    try:
        for f in _yield_finished_futures(finished, waiter,
                                         ref_collect=(fs,)):
            # Wrap in a one-element list so the local name can be cleared
            # before the yield suspends this frame (no stale reference).
            f = [f]
            yield f.pop()

        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError(
                        '%d (of %d) futures unfinished' % (
                            len(pending), total_futures))

            waiter.event.wait(wait_timeout)

            # Drain the batch of futures that completed while we slept.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            # reverse to keep finishing order
            finished.reverse()
            for f in _yield_finished_futures(finished, waiter,
                                             ref_collect=(fs, pending)):
                f = [f]
                yield f.pop()

    finally:
        # Remove waiter from unfinished futures
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
# Return type of wait(): 'done' and 'not_done' are sets of futures.
DoneAndNotDoneFutures = collections.namedtuple(
    'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED -   Return when all futures finish or are cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        # Snapshot completion state while every future's condition is held.
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        # Fast paths: the requested condition may already be satisfied.
        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    # Uninstall the waiter whether or not the event fired (timeout case).
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
    """Represents the result of an asynchronous computation.

    All state lives behind self._condition; legal transitions are
    PENDING -> RUNNING -> FINISHED and PENDING -> CANCELLED ->
    CANCELLED_AND_NOTIFIED.  This is the Python 2 backport, so exception
    re-raising uses the three-argument raise form and old-style exception
    classes are handled explicitly.
    """

    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        # Python 2 keeps the traceback separately; see set_exception_info().
        self._traceback = None
        # _Waiter objects installed by wait()/as_completed().
        self._waiters = []
        self._done_callbacks = []

    def _invoke_callbacks(self):
        # Run registered done-callbacks; a failing callback is logged but
        # never allowed to break the future itself.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)
            except BaseException:
                # Explicitly let all other new-style exceptions through so
                # that we can catch all old-style exceptions with a simple
                # "except:" clause below.
                #
                # All old-style exception objects are instances of
                # types.InstanceType, but "except types.InstanceType:" does
                # not catch old-style exceptions for some reason.  Thus, the
                # only way to catch all old-style exceptions without catching
                # any new-style exceptions is to filter out the new-style
                # exceptions, which all derive from BaseException.
                raise
            except:
                # Because of the BaseException clause above, this handler only
                # executes for old-style exception objects.
                LOGGER.exception('exception calling callback for %r', self)

    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<%s at %#x state=%s raised %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<%s at %#x state=%s returned %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<%s at %#x state=%s>' % (
                self.__class__.__name__,
                id(self),
                _STATE_TO_DESCRIPTION_MAP[self._state])

    def cancel(self):
        """Cancel the future if possible.

        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                # Already cancelled: idempotent success.
                return True

            self._state = CANCELLED
            self._condition.notify_all()

        # Callbacks run outside the lock.
        self._invoke_callbacks()
        return True

    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]

    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING

    def done(self):
        """Return True if the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]

    def __get_result(self):
        # Caller must hold self._condition and the state must be FINISHED.
        if self._exception:
            if isinstance(self._exception, types.InstanceType):
                # The exception is an instance of an old-style class, which
                # means type(self._exception) returns types.ClassType instead
                # of the exception's actual class type.
                exception_type = self._exception.__class__
            else:
                exception_type = type(self._exception)
            # Python 2 three-argument raise: re-raise with the original
            # traceback captured by set_exception_info().
            raise exception_type, self._exception, self._traceback
        else:
            return self._result

    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.

        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The callable
                will always be called by a thread in the same process in which
                it was added. If the future has already completed or been
                cancelled then the callable will be called immediately. These
                callables are called in the order that they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        # Already done: invoke immediately, outside the lock.
        fn(self)

    def result(self, timeout=None):
        """Return the result of the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the result if the future
                isn't done. If None, then there is no limit on the wait time.

        Returns:
            The result of the call that the future represents.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()

            self._condition.wait(timeout)

            # Re-check after the (possibly timed-out) wait.
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()

    def exception_info(self, timeout=None):
        """Return a tuple of (exception, traceback) raised by the call that the
        future represents.

        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.

        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception, self._traceback

            self._condition.wait(timeout)

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception, self._traceback
            else:
                raise TimeoutError()

    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.

        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        return self.exception_info(timeout)[0]

    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.

        Should only be used by Executor implementations and unit tests.

        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (though calls
        to as_completed() or wait()) are notified and False is returned.

        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.

        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.

        Returns:
            False if the Future was cancelled, True otherwise.

        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')

    def set_result(self, result):
        """Sets the return value of work associated with the future.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception_info(self, exception, traceback):
        """Sets the result of the future as being the given exception
        and traceback.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._traceback = traceback
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.

        Should only be used by Executor implementations and unit tests.
        """
        # No traceback available in this code path (Python 2 stores it
        # separately; see set_exception_info()).
        self.set_exception_info(exception, None)
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, **kwargs):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        # 'timeout' is pulled out of **kwargs because Python 2 has no
        # keyword-only argument syntax.
        timeout = kwargs.get('timeout')
        if timeout is not None:
            end_time = timeout + time.time()

        # zip() exists on both Python 2 and 3, unlike itertools.izip which
        # is Python 2 only.  Eagerness does not matter here: the list
        # comprehension consumes every element immediately anyway.
        fs = [self.submit(fn, *args) for args in zip(*iterables)]

        # Yield must be hidden in closure so that the futures are submitted
        # before the first iterator value is required.
        def result_iterator():
            try:
                # reverse to keep finishing order
                fs.reverse()
                while fs:
                    # Careful not to keep a reference to the popped future
                    if timeout is None:
                        yield fs.pop().result()
                    else:
                        yield fs.pop().result(end_time - time.time())
            finally:
                # Cancel whatever the caller did not consume (early exit,
                # timeout, or an exception from fn).
                for future in fs:
                    future.cancel()
        return result_iterator()

    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
class BrokenExecutor(RuntimeError):
    """
    Raised when an executor has become non-functional after a severe failure.
    """
| 35.502967 | 80 | 0.607589 |
import collections
import logging
import threading
import itertools
import time
import types
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Possible values for the return_when argument of wait().
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
# Internal waiter mode used only by as_completed().
_AS_COMPLETED = '_AS_COMPLETED'
# Possible states of a Future (internal to this package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and set_running_or_notify_cancel() acknowledged the cancellation.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]
# Human-readable state names used by Future.__repr__.
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}
# Module-level logger for callback errors and invalid state transitions.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
    """Root of the exception hierarchy used by the futures package."""
class CancelledError(Error):
    """The Future was cancelled."""
    # Raised by Future.result()/Future.exception() when the future was
    # cancelled before its call could complete.
    pass
class TimeoutError(Error):
    """The operation exceeded the given deadline."""
    # Raised by blocking accessors (result(), exception(), as_completed(),
    # ...) when the requested timeout elapses first.
    pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
    """Used by as_completed()."""
    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        # Guards finished_futures: as_completed() swaps the list out while
        # worker threads may be appending to it concurrently.
        self.lock = threading.Lock()
    def add_result(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_result(future)
            self.event.set()
    def add_exception(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_exception(future)
            self.event.set()
    def add_cancelled(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_cancelled(future)
            self.event.set()
class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED)."""
    # Any single completion, failure, or cancellation wakes the waiter.
    def add_result(self, future):
        super(_FirstCompletedWaiter, self).add_result(future)
        self.event.set()
    def add_exception(self, future):
        super(_FirstCompletedWaiter, self).add_exception(future)
        self.event.set()
    def add_cancelled(self, future):
        super(_FirstCompletedWaiter, self).add_cancelled(future)
        self.event.set()
class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
    def __init__(self, num_pending_calls, stop_on_exception):
        # Countdown of futures still outstanding; the event fires when it
        # reaches zero (or, with stop_on_exception, on the first failure).
        self.num_pending_calls = num_pending_calls
        self.stop_on_exception = stop_on_exception
        # Guards num_pending_calls against concurrent decrements.
        self.lock = threading.Lock()
        super(_AllCompletedWaiter, self).__init__()
    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if not self.num_pending_calls:
                self.event.set()
    def add_result(self, future):
        super(_AllCompletedWaiter, self).add_result(future)
        self._decrement_pending_calls()
    def add_exception(self, future):
        super(_AllCompletedWaiter, self).add_exception(future)
        if self.stop_on_exception:
            # FIRST_EXCEPTION mode: one failure wakes the waiter at once.
            self.event.set()
        else:
            self._decrement_pending_calls()
    def add_cancelled(self, future):
        super(_AllCompletedWaiter, self).add_cancelled(future)
        self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Create the _Waiter matching *return_when* and register it on *fs*.

    Helper for wait() and as_completed(); both deregister the waiter from
    each future's _waiters list themselves once they are done with it.
    """
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    else:
        # Both remaining modes count outstanding (not yet done) futures.
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
        if return_when == FIRST_EXCEPTION:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
        elif return_when == ALL_COMPLETED:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
        else:
            raise ValueError("Invalid return condition: %r" % return_when)
    for f in fs:
        f._waiters.append(waiter)
    return waiter
def _yield_finished_futures(fs, waiter, ref_collect):
    """
    Iterate on the list *fs*, yielding finished futures one by one in
    reverse order.
    Before yielding a future, *waiter* is removed from its waiters
    and the future is removed from each set in the collection of sets
    *ref_collect*.
    The aim of this function is to avoid keeping stale references after
    the future is yielded and before the iterator resumes.
    """
    while fs:
        f = fs[-1]
        for futures_set in ref_collect:
            futures_set.remove(f)
        with f._condition:
            f._waiters.remove(waiter)
        # Drop the local name before yielding: while this generator is
        # suspended, 'f' would otherwise keep the future alive.
        del f
        yield fs.pop()
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.
    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
    Returns:
        An iterator that yields the given Futures as they complete (finished or
        cancelled). If any given Futures are duplicated, they will be returned
        once.
    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()
    # De-duplicate so that each future is processed and yielded only once.
    fs = set(fs)
    total_futures = len(fs)
    with _AcquireFutures(fs):
        finished = set(
            f for f in fs
            if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    finished = list(finished)
    try:
        for f in _yield_finished_futures(finished, waiter,
                                         ref_collect=(fs,)):
            # The single-element list indirection avoids holding a strong
            # local reference to the future while the generator is suspended.
            f = [f]
            yield f.pop()
        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError(
                        '%d (of %d) futures unfinished' % (
                            len(pending), total_futures))
            waiter.event.wait(wait_timeout)
            # Swap out the waiter's batch under its lock, then reset the
            # event so the next wait blocks until new completions arrive.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()
            # reverse to keep finishing order
            finished.reverse()
            for f in _yield_finished_futures(finished, waiter,
                                             ref_collect=(fs, pending)):
                f = [f]
                yield f.pop()
    finally:
        # Always deregister our waiter from the remaining futures.
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
# Return type of wait(): 'done' holds the finished/cancelled futures,
# 'not_done' the rest.
DoneAndNotDoneFutures = collections.namedtuple(
    'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.
    Args:
        fs: The sequence of Futures (possibly created by different Executors) to
            wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:
            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED - Return when all futures finish or are cancelled.
    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done
        # Fast paths: the condition may already be satisfied, in which case
        # no waiter needs to be installed at all.
        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)
        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)
        waiter = _create_and_install_waiters(fs, return_when)
    # Block until the waiter's condition fires or the timeout elapses.
    waiter.event.wait(timeout)
    # Deregister the waiter from every future before reading its results.
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)
    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
    """Represents the result of an asynchronous computation."""
    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        self._traceback = None
        self._waiters = []
        self._done_callbacks = []
    def _invoke_callbacks(self):
        # Run every registered done-callback; a failure in one callback is
        # logged so it cannot prevent the remaining callbacks from running.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)
            except BaseException:
                # KeyboardInterrupt, SystemExit and the like must propagate.
                raise
            except:
                # Python 2 old-style/string exceptions do not derive from
                # BaseException; log them like ordinary failures.
                LOGGER.exception('exception calling callback for %r', self)
    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<%s at %#x state=%s raised %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<%s at %#x state=%s returned %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<%s at %#x state=%s>' % (
                self.__class__.__name__,
                id(self),
                _STATE_TO_DESCRIPTION_MAP[self._state])
    def cancel(self):
        """Cancel the future if possible.
        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                # Idempotent: a second cancel() still reports success.
                return True
            self._state = CANCELLED
            self._condition.notify_all()
        self._invoke_callbacks()
        return True
    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING
    def done(self):
        """Return True if the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
    def __get_result(self):
        # Caller must hold self._condition and have checked state FINISHED.
        if self._exception:
            if isinstance(self._exception, types.InstanceType):
                # Python 2 old-style class instance: recover its class.
                exception_type = self._exception.__class__
            else:
                exception_type = type(self._exception)
            # Python 2 three-argument raise: re-raise with the original
            # traceback.  (This line is a SyntaxError on Python 3.)
            raise exception_type, self._exception, self._traceback
        else:
            return self._result
    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.
        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The callable
                will always be called by a thread in the same process in which
                it was added. If the future has already completed or been
                cancelled then the callable will be called immediately. These
                callables are called in the order that they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        # Already done/cancelled: invoke immediately, outside the lock.
        fn(self)
    def result(self, timeout=None):
        """Return the result of the call that the future represents.
        Args:
            timeout: The number of seconds to wait for the result if the future
                isn't done. If None, then there is no limit on the wait time.
        Returns:
            The result of the call that the future represents.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            # One bounded wait suffices: every completion path (set_result,
            # set_exception_info, cancel) calls notify_all() on the condition.
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()
    def exception_info(self, timeout=None):
        """Return a tuple of (exception, traceback) raised by the call that the
        future represents.
        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.
        Returns:
            A (exception, traceback) tuple for the exception raised by the call
            that the future represents, or (None, None) if the call completed
            without raising.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception, self._traceback
            # See result(): a single wait is enough.
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception, self._traceback
            else:
                raise TimeoutError()
    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.
        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.
        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        return self.exception_info(timeout)[0]
    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.
        Should only be used by Executor implementations and unit tests.
        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (though calls
        to as_completed() or wait()) are notified and False is returned.
        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.
        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.
        Returns:
            False if the Future was cancelled, True otherwise.
        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                # A pending cancel request: acknowledge it and tell waiters.
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                # Called twice, or after set_result()/set_exception(): a bug
                # in the calling executor.
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')
    def set_result(self, result):
        """Sets the return value of work associated with the future.
        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()
    def set_exception_info(self, exception, traceback):
        """Sets the result of the future as being the given exception
        and traceback.
        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._traceback = traceback
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()
    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.
        Should only be used by Executor implementations and unit tests.
        """
        self.set_exception_info(exception, None)
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, **kwargs):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        # 'timeout' is pulled out of **kwargs because Python 2 has no
        # keyword-only argument syntax.
        timeout = kwargs.get('timeout')
        if timeout is not None:
            end_time = timeout + time.time()

        # zip() exists on both Python 2 and 3, unlike itertools.izip which
        # is Python 2 only.  Eagerness does not matter here: the list
        # comprehension consumes every element immediately anyway.
        fs = [self.submit(fn, *args) for args in zip(*iterables)]

        # Yield must be hidden in closure so that the futures are submitted
        # before the first iterator value is required.
        def result_iterator():
            try:
                # reverse to keep finishing order
                fs.reverse()
                while fs:
                    # Careful not to keep a reference to the popped future
                    if timeout is None:
                        yield fs.pop().result()
                    else:
                        yield fs.pop().result(end_time - time.time())
            finally:
                # Cancel whatever the caller did not consume (early exit,
                # timeout, or an exception from fn).
                for future in fs:
                    future.cancel()
        return result_iterator()

    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
class BrokenExecutor(RuntimeError):
    """
    Raised when an executor has become non-functional after a severe failure.
    """
| false | true |
f7f5263bea8c729f916374ee3fe5e16ec2785808 | 4,351 | py | Python | graph_compression/compression_lib/compression_wrapper_py2.py | muell-monster/google-research | 04d2024f4723bc4be3d639a668c19fb1f6a31478 | [
"Apache-2.0"
] | 1 | 2020-12-25T01:18:50.000Z | 2020-12-25T01:18:50.000Z | graph_compression/compression_lib/compression_wrapper_py2.py | thomascherickal/google-research | 294a888bbb6678ac255c6422fd703c325cbb0772 | [
"Apache-2.0"
] | null | null | null | graph_compression/compression_lib/compression_wrapper_py2.py | thomascherickal/google-research | 294a888bbb6678ac255c6422fd703c325cbb0772 | [
"Apache-2.0"
] | 1 | 2021-09-27T03:17:14.000Z | 2021-09-27T03:17:14.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class that wraps around multiple different compression operators.
This is the Python 2 only version of compression_wrapper.py.
This allows for easier testing of different operators. Rather than importing
each operator separately, this class can be used and different
compression_option values can be passed in to specify the operator type.
compression_option:
1 - LowRankDecompMatrixCompressor
2 - SimhashMatrixCompressor
3 - DLMatrixCompressor (currently unavailable for Python 2);
4 - KmeansMatrixCompressor
8 - KmeansAndPruningMatrixCompressor
9 - InputCompressor
"""
from __future__ import absolute_import
from absl import logging
from graph_compression.compression_lib import compression_op as comp_op
from graph_compression.compression_lib import simhash_compression_op as simhash_comp_op
# Valid values of compression_op_spec.compression_option; see the module
# docstring for the operator each number selects.
_COMPRESSION_OPTIONS = [1, 2, 4, 8, 9]
def get_apply_compression(compression_op_spec, global_step):
  """Returns apply_compression operation matching compression_option input.

  Args:
    compression_op_spec: hparams for the compression op; its
      `compression_option` field selects the operator (see module docstring).
      An unrecognized value falls back to low-rank decomposition (option 1),
      mutating `compression_op_spec.compression_option` accordingly.
    global_step: the global step, passed through to the compression op.

  Returns:
    An `ApplyCompression` (options 1, 9) or `SimhashApplyCompression`
    (options 2, 4, 8) instance configured per `compression_op_spec`.
  """
  compressor_spec = comp_op.LowRankDecompMatrixCompressor.get_default_hparams()
  compressor_spec.set_hparam('rank', compression_op_spec.rank)
  compressor_spec.set_hparam('block_size', compression_op_spec.block_size)
  logging.info('Compressor spec %s', compressor_spec.to_json())
  logging.info('Compression operator spec %s', compression_op_spec.to_json())

  if compression_op_spec.compression_option not in _COMPRESSION_OPTIONS:
    logging.info(
        'Compression_option %s not in expected options: %s. '
        'Will use low_rank decomp by default.',
        str(compression_op_spec.compression_option),
        ','.join([str(opt) for opt in _COMPRESSION_OPTIONS]))
    compression_op_spec.compression_option = 1

  option = compression_op_spec.compression_option
  if option in (1, 9):
    # Low-rank decomposition; option 9 additionally makes both factor
    # matrices trainable.
    if option == 9:
      compressor_spec.set_hparam('is_b_matrix_trainable', True)
      compressor_spec.set_hparam('is_c_matrix_trainable', True)
    compressor = comp_op.LowRankDecompMatrixCompressor(spec=compressor_spec)
    return comp_op.ApplyCompression(
        scope='default_scope',
        compression_spec=compression_op_spec,
        compressor=compressor,
        global_step=global_step)

  # Simhash-family operators (options 2, 4, 8).
  if option == 2:
    compressor_spec.set_hparam('is_b_matrix_trainable', False)
    compressor = simhash_comp_op.SimhashMatrixCompressor(spec=compressor_spec)
  else:
    # NOTE: the module docstring advertises option 8 as
    # KmeansAndPruningMatrixCompressor, but this Python 2 variant uses the
    # plain k-means compressor for both options 4 and 8.
    compressor_spec.set_hparam('is_b_matrix_trainable', True)
    compressor = simhash_comp_op.KmeansMatrixCompressor(spec=compressor_spec)
  return simhash_comp_op.SimhashApplyCompression(
      scope='default_scope',
      compression_spec=compression_op_spec,
      compressor=compressor,
      global_step=global_step)
| 43.079208 | 87 | 0.781659 |
from __future__ import absolute_import
from absl import logging
from graph_compression.compression_lib import compression_op as comp_op
from graph_compression.compression_lib import simhash_compression_op as simhash_comp_op
_COMPRESSION_OPTIONS = [1, 2, 4, 8, 9]
def get_apply_compression(compression_op_spec, global_step):
compressor_spec = comp_op.LowRankDecompMatrixCompressor.get_default_hparams()
compressor_spec.set_hparam('rank', compression_op_spec.rank)
compressor_spec.set_hparam('block_size', compression_op_spec.block_size)
logging.info('Compressor spec %s', compressor_spec.to_json())
logging.info('Compression operator spec %s', compression_op_spec.to_json())
if compression_op_spec.compression_option not in _COMPRESSION_OPTIONS:
logging.info(
'Compression_option %s not in expected options: %s. '
'Will use low_rank decomp by default.',
str(compression_op_spec.compression_option),
','.join([str(opt) for opt in _COMPRESSION_OPTIONS]))
compression_op_spec.compression_option = 1
apply_compression = None
if compression_op_spec.compression_option == 1:
compressor = comp_op.LowRankDecompMatrixCompressor(spec=compressor_spec)
apply_compression = comp_op.ApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 2:
compressor_spec.set_hparam('is_b_matrix_trainable', False)
compressor = simhash_comp_op.SimhashMatrixCompressor(spec=compressor_spec)
apply_compression = simhash_comp_op.SimhashApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 4:
compressor_spec.set_hparam('is_b_matrix_trainable', True)
compressor = simhash_comp_op.KmeansMatrixCompressor(spec=compressor_spec)
apply_compression = simhash_comp_op.SimhashApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 8:
compressor_spec.set_hparam('is_b_matrix_trainable', True)
compressor = simhash_comp_op.KmeansMatrixCompressor(spec=compressor_spec)
apply_compression = simhash_comp_op.SimhashApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
elif compression_op_spec.compression_option == 9:
compressor_spec.set_hparam('is_b_matrix_trainable', True)
compressor_spec.set_hparam('is_c_matrix_trainable', True)
compressor = comp_op.LowRankDecompMatrixCompressor(spec=compressor_spec)
apply_compression = comp_op.ApplyCompression(
scope='default_scope',
compression_spec=compression_op_spec,
compressor=compressor,
global_step=global_step)
return apply_compression
| true | true |
f7f526c0fc47613a272c254a30c9cf1db2eb479b | 8,142 | py | Python | datalad/support/tests/test_globbedpaths.py | soichih/datalad | 797dde3ab7497be170e2c4ea8824f33a4b38e5d8 | [
"MIT"
] | null | null | null | datalad/support/tests/test_globbedpaths.py | soichih/datalad | 797dde3ab7497be170e2c4ea8824f33a4b38e5d8 | [
"MIT"
] | null | null | null | datalad/support/tests/test_globbedpaths.py | soichih/datalad | 797dde3ab7497be170e2c4ea8824f33a4b38e5d8 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""test GlobbedPaths
"""
__docformat__ = 'restructuredtext'
import logging
import os.path as op
from itertools import product
from unittest.mock import patch
from datalad.tests.utils_pytest import (
OBSCURE_FILENAME,
assert_in,
eq_,
swallow_logs,
with_tree,
)
from ..globbedpaths import GlobbedPaths
def test_globbedpaths_get_sub_patterns():
    """_get_sub_patterns() derives leading-directory patterns, longest first."""
    globbed = GlobbedPaths([], "doesn't matter")
    cases = [
        # No magic in any directory component => no sub-patterns.
        ("", []),
        ("nodir", []),
        (op.join("nomagic", "path"), []),
        (op.join("nomagic", "path*"), []),
        # Globs in leading components: one sub-pattern per enclosing
        # directory, successively dropping the right-most component.
        (op.join("s*", "path"), ["s*" + op.sep]),
        (op.join("s", "ss*", "path"), [op.join("s", "ss*") + op.sep]),
        (op.join("s", "ss*", "path*"), [op.join("s", "ss*") + op.sep]),
        (op.join("s", "ss*" + op.sep), []),
        (op.join("s*", "ss", "path*"),
         [op.join("s*", "ss") + op.sep,
          "s*" + op.sep]),
        (op.join("s?", "ss", "sss*", "path*"),
         [op.join("s?", "ss", "sss*") + op.sep,
          op.join("s?", "ss") + op.sep,
          "s?" + op.sep]),
    ]
    for pattern, subpatterns in cases:
        eq_(globbed._get_sub_patterns(pattern), subpatterns)
# Wrap OBSCURE_FILENAME (presumably a deliberately hard-to-handle name from
# the test utils -- see datalad.tests.utils_pytest) so it carries a .dat
# suffix and is matched by the "*.dat" patterns in the tests below.
bOBSCURE_FILENAME = f"b{OBSCURE_FILENAME}.dat"
@with_tree(tree={"1.txt": "",
                 "2.dat": "",
                 "3.txt": "",
                 bOBSCURE_FILENAME: "",
                 "subdir": {"1.txt": "", "2.txt": "", "subsub": {"3.dat": ""}}})
def test_globbedpaths(path=None):
    dotdir = op.curdir + op.sep
    # Expansion relative to the tree root for literal, "./"-prefixed, glob,
    # and recursive-glob ("**") patterns.
    for patterns, expected in [
            (["1.txt", "2.dat"], {"1.txt", "2.dat"}),
            ([dotdir + "1.txt", "2.dat"], {dotdir + "1.txt", "2.dat"}),
            (["*.txt", "*.dat"], {"1.txt", "2.dat", bOBSCURE_FILENAME, "3.txt"}),
            ([dotdir + "*.txt", "*.dat"],
             {dotdir + "1.txt", "2.dat", bOBSCURE_FILENAME, dotdir + "3.txt"}),
            ([op.join("subdir", "*.txt")],
             {op.join("subdir", "1.txt"), op.join("subdir", "2.txt")}),
            (["subdir" + op.sep], {"subdir" + op.sep}),
            ([dotdir + op.join("subdir", "*.txt")],
             {dotdir + op.join(*ps)
              for ps in [("subdir", "1.txt"), ("subdir", "2.txt")]}),
            (["*.txt"], {"1.txt", "3.txt"}),
            ([op.join("subdir", "**")],
             {op.join(*ps)
              for ps in [("subdir" + op.sep,), ("subdir", "subsub"),
                         ("subdir", "1.txt"), ("subdir", "2.txt"),
                         ("subdir", "subsub", "3.dat")]}),
            ([dotdir + op.join("**", "*.dat")],
             {dotdir + op.join("2.dat"), dotdir + bOBSCURE_FILENAME,
              dotdir + op.join("subdir", "subsub", "3.dat")})]:
        gp = GlobbedPaths(patterns, pwd=path)
        eq_(set(gp.expand()), expected)
        eq_(set(gp.expand(full=True)),
            {op.join(path, p) for p in expected})
    pardir = op.pardir + op.sep
    subdir_path = op.join(path, "subdir")
    # Expansion relative to a subdirectory, including "../" patterns.
    for patterns, expected in [
            (["*.txt"], {"1.txt", "2.txt"}),
            ([dotdir + "*.txt"], {dotdir + p for p in ["1.txt", "2.txt"]}),
            ([pardir + "*.txt"], {pardir + p for p in ["1.txt", "3.txt"]}),
            ([dotdir + pardir + "*.txt"],
             {dotdir + pardir + p for p in ["1.txt", "3.txt"]}),
            # Patterns that don't match are retained by default.
            (["amiss"], {"amiss"})]:
        gp = GlobbedPaths(patterns, pwd=subdir_path)
        eq_(set(gp.expand()), expected)
        eq_(set(gp.expand(full=True)),
            {op.join(subdir_path, p) for p in expected})
    # Full patterns still get returned as relative to pwd.
    gp = GlobbedPaths([op.join(path, "*.dat")], pwd=path)
    eq_(gp.expand(), ["2.dat", bOBSCURE_FILENAME])
    # "." gets special treatment.
    gp = GlobbedPaths([".", "*.dat"], pwd=path)
    eq_(set(gp.expand()), {"2.dat", bOBSCURE_FILENAME, "."})
    eq_(gp.expand(dot=False), ["2.dat", bOBSCURE_FILENAME])
    gp = GlobbedPaths(["."], pwd=path, expand=False)
    eq_(gp.expand(), ["."])
    eq_(gp.paths, ["."])
    # We can mock the glob outputs; expand() sorts each pattern's matches.
    glob_results = {"z": "z",
                    "a": ["x", "d", "b"]}
    with patch('glob.glob', lambda k, **kwargs: glob_results[k]):
        gp = GlobbedPaths(["z", "a"])
        eq_(gp.expand(), ["z", "b", "d", "x"])
    # glob expansion for paths property is determined by expand argument.
    for expand, expected in [(True, ["2.dat", bOBSCURE_FILENAME]),
                             (False, ["*.dat"])]:
        gp = GlobbedPaths(["*.dat"], pwd=path, expand=expand)
        eq_(gp.paths, expected)
    # A pattern without any hit is reported at debug level.
    with swallow_logs(new_level=logging.DEBUG) as cml:
        GlobbedPaths(["not here"], pwd=path).expand()
        assert_in("No matching files found for 'not here'", cml.out)
@with_tree(tree={"1.txt": "", "2.dat": "", "3.txt": ""})
def test_globbedpaths_misses(path=None):
    """Unmatched patterns are recorded as misses and keep their position."""
    # A lone miss: strict expansion yields nothing, but the miss is tracked
    # and can be folded back into the expansion on request.
    gp = GlobbedPaths(["amiss"], pwd=path)
    eq_(gp.expand_strict(), [])
    eq_(gp.misses, ["amiss"])
    eq_(gp.expand(include_misses=True), ["amiss"])
    # A single miss mixed with hits stays where it appeared (beginning,
    # middle, or end of the pattern list).
    hits = ["1.txt", "3.txt", "2.dat"]
    for patterns, misses, with_misses in [
            (["amiss", "*.txt", "*.dat"], None,
             ["amiss", "1.txt", "3.txt", "2.dat"]),
            (["*.txt", "amiss", "*.dat"], ["amiss"],
             ["1.txt", "3.txt", "amiss", "2.dat"]),
            (["*.txt", "*.dat", "amiss"], ["amiss"],
             ["1.txt", "3.txt", "2.dat", "amiss"])]:
        gp = GlobbedPaths(patterns, pwd=path)
        eq_(gp.expand_strict(), hits)
        if misses is not None:
            eq_(gp.misses, misses)
        eq_(gp.expand(include_misses=True), with_misses)
    # Several misses scattered through the pattern list.
    gp = GlobbedPaths(["amiss1", "amiss2", "*.txt", "amiss3", "*.dat",
                       "amiss4"],
                      pwd=path)
    eq_(gp.expand_strict(), ["1.txt", "3.txt", "2.dat"])
    eq_(gp.misses, ["amiss1", "amiss2", "amiss3", "amiss4"])
    eq_(gp.expand(include_misses=True),
        ["amiss1", "amiss2", "1.txt", "3.txt", "amiss3", "2.dat", "amiss4"])
    # Accessing the property triggers expansion on its own.
    gp = GlobbedPaths(["amiss"], pwd=path)
    eq_(gp.misses, ["amiss"])
@with_tree(tree={"adir": {},
                 "bdir": {},
                 "other": {},
                 "1.txt": "", "2.dat": "", "3.txt": ""})
def test_globbedpaths_partial_matches(path=None):
    """Directories matching only a leading part of a pattern are tracked."""
    globbed = GlobbedPaths([op.join("?dir", "*.txt"), "*.txt"], pwd=path)
    eq_(globbed.expand_strict(), ["1.txt", "3.txt"])
    # "?dir" matches adir/ and bdir/ even though neither holds a .txt file.
    partial_dirs = ["adir" + op.sep, "bdir" + op.sep]
    eq_(globbed.partial_hits, partial_dirs)
    eq_(globbed.expand(include_partial=True),
        partial_dirs + ["1.txt", "3.txt"])
    # Accessing the property triggers expansion on its own.
    globbed = GlobbedPaths([op.join("?dir", "*.txt")], pwd=path)
    eq_(globbed.partial_hits, partial_dirs)
@with_tree(tree={"1.txt": "",
                 "2.dat": "",
                 "3.txt": "",
                 "foo.dat": ""})
def test_globbedpaths_cached(path=None):
    """Smoke test: re-expanding with identical flags goes through the cache."""
    gp = GlobbedPaths([op.join("?", ".dat"), "*.txt"], pwd=path)
    # Exercise every combination of the three expansion flags; calling
    # expand() twice with the same arguments must give identical results.
    for full_flag, partial_flag, miss_flag in product([False, True], repeat=3):
        kwargs = {"full": full_flag,
                  "include_misses": miss_flag,
                  "include_partial": partial_flag}
        eq_(gp.expand(**kwargs), gp.expand(**kwargs))
| 38.771429 | 106 | 0.498649 |
= GlobbedPaths(["*.dat"], pwd=path, expand=expand)
eq_(gp.paths, expected)
with swallow_logs(new_level=logging.DEBUG) as cml:
GlobbedPaths(["not here"], pwd=path).expand()
assert_in("No matching files found for 'not here'", cml.out)
@with_tree(tree={"1.txt": "", "2.dat": "", "3.txt": ""})
def test_globbedpaths_misses(path=None):
gp = GlobbedPaths(["amiss"], pwd=path)
eq_(gp.expand_strict(), [])
eq_(gp.misses, ["amiss"])
eq_(gp.expand(include_misses=True), ["amiss"])
gp = GlobbedPaths(["amiss", "*.txt", "*.dat"], pwd=path)
eq_(gp.expand_strict(), ["1.txt", "3.txt", "2.dat"])
eq_(gp.expand(include_misses=True),
["amiss", "1.txt", "3.txt", "2.dat"])
gp = GlobbedPaths(["*.txt", "amiss", "*.dat"], pwd=path)
eq_(gp.expand_strict(), ["1.txt", "3.txt", "2.dat"])
eq_(gp.misses, ["amiss"])
eq_(gp.expand(include_misses=True),
["1.txt", "3.txt", "amiss", "2.dat"])
gp = GlobbedPaths(["*.txt", "*.dat", "amiss"], pwd=path)
eq_(gp.expand_strict(), ["1.txt", "3.txt", "2.dat"])
eq_(gp.misses, ["amiss"])
eq_(gp.expand(include_misses=True),
["1.txt", "3.txt", "2.dat", "amiss"])
gp = GlobbedPaths(["amiss1", "amiss2", "*.txt", "amiss3", "*.dat",
"amiss4"],
pwd=path)
eq_(gp.expand_strict(), ["1.txt", "3.txt", "2.dat"])
eq_(gp.misses, ["amiss1", "amiss2", "amiss3", "amiss4"])
eq_(gp.expand(include_misses=True),
["amiss1", "amiss2", "1.txt", "3.txt", "amiss3", "2.dat", "amiss4"])
gp = GlobbedPaths(["amiss"], pwd=path)
eq_(gp.misses, ["amiss"])
@with_tree(tree={"adir": {},
"bdir": {},
"other": {},
"1.txt": "", "2.dat": "", "3.txt": ""})
def test_globbedpaths_partial_matches(path=None):
gp = GlobbedPaths([op.join("?dir", "*.txt"), "*.txt"], pwd=path)
eq_(gp.expand_strict(), ["1.txt", "3.txt"])
expected_partial = ["adir" + op.sep, "bdir" + op.sep]
eq_(gp.partial_hits, expected_partial)
eq_(gp.expand(include_partial=True),
expected_partial + ["1.txt", "3.txt"])
gp = GlobbedPaths([op.join("?dir", "*.txt")], pwd=path)
eq_(gp.partial_hits, expected_partial)
@with_tree(tree={"1.txt": "",
"2.dat": "",
"3.txt": "",
"foo.dat": ""})
def test_globbedpaths_cached(path=None):
gp = GlobbedPaths([op.join("?", ".dat"), "*.txt"], pwd=path)
for full, partial, misses in product([False, True], repeat=3):
eq_(gp.expand(full=full,
include_misses=misses,
include_partial=partial),
gp.expand(full=full,
include_misses=misses,
include_partial=partial))
| true | true |
f7f5272b1bae5fb19b57d5e116d5b380aab04320 | 1,161 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/basic/server_args.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 1 | 2015-04-05T21:21:26.000Z | 2015-04-05T21:21:26.000Z | nssrc/com/citrix/netscaler/nitro/resource/config/basic/server_args.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 1 | 2017-01-20T22:56:58.000Z | 2017-01-20T22:56:58.000Z | nssrc/com/citrix/netscaler/nitro/resource/config/basic/server_args.py | benfinke/ns_python | d651d7aa01d7dc63c1cd435c7b3314d7f5b26659 | [
"Apache-2.0"
] | 6 | 2015-04-21T13:14:08.000Z | 2020-12-03T07:27:52.000Z | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class server_args:
    """Provides additional arguments required for fetching the server resource.

    Note: the original auto-generated code used the ``ur"..."`` string prefix,
    which is a SyntaxError on Python 3; plain docstrings are used instead. The
    redundant try/except blocks that merely re-raised (truncating tracebacks
    on Python 2) have also been dropped — behavior is otherwise unchanged.
    """

    def __init__(self):
        # When True, fetch servers that were created for internal use.
        self._Internal = False

    @property
    def Internal(self):
        """Display names of the servers that have been created for internal use."""
        return self._Internal

    @Internal.setter
    def Internal(self, Internal):
        """Display names of the servers that have been created for internal use."""
        self._Internal = Internal
| 27.642857 | 79 | 0.720069 |
class server_args :
ur""" Provides additional arguments required for fetching the server resource.
"""
def __init__(self) :
self._Internal = False
@property
def Internal(self) :
ur"""Display names of the servers that have been created for internal use.
"""
try :
return self._Internal
except Exception as e:
raise e
@Internal.setter
def Internal(self, Internal) :
ur"""Display names of the servers that have been created for internal use.
"""
try :
self._Internal = Internal
except Exception as e:
raise e
| false | true |
f7f5273cfe45338906797e8f9796282479ab68a5 | 1,425 | py | Python | pyimagesearch gurus/Module 1/lesson_1_9/simple_thresholding.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | pyimagesearch gurus/Module 1/lesson_1_9/simple_thresholding.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | null | null | null | pyimagesearch gurus/Module 1/lesson_1_9/simple_thresholding.py | Ali-Parandeh/Data_Science_Playground | c529e9b3692381572de259e7c93938d6611d83da | [
"MIT"
] | 1 | 2021-03-10T09:40:05.000Z | 2021-03-10T09:40:05.000Z | # import the necessary packages
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image, convert it to grayscale and blur it slightly
# NOTE: Applying Gaussian blurring helps remove some of the high frequency edges
# in the image that we are not concerned with and allow us to obtain a more “clean” segmentation.
image = cv2.imread(args['image'])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
cv2.imshow('Image', image)
# apply basic thresholding -- the first parameter is the image
# we want to threshold, the second value is our threshold check
# if a pixel value is greater than our threshold (in this case,
# 200), we set it to be BLACK, otherwise it is WHITE.
(T, threshInv) = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('Threshold Binary Invserse', threshInv)
# apply basic thresholding (rather than inverse thresholding)
# we can change the last argument in the function to make the coins black rather than white
(T, thresh) = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('Threshold Binary', thresh)
# finally, we can visualise only the marked regions in the image
cv2.imshow('Output', cv2.bitwise_and(image, image, mask=threshInv))
cv2.waitKey(0)
| 41.911765 | 97 | 0.76 |
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
cv2.imshow('Image', image)
(T, threshInv) = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('Threshold Binary Invserse', threshInv)
(T, thresh) = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('Threshold Binary', thresh)
cv2.imshow('Output', cv2.bitwise_and(image, image, mask=threshInv))
cv2.waitKey(0)
| true | true |
f7f5276b215210aa75b0ea40c745c38dc81f8c0a | 16,013 | py | Python | tests/server.py | Akkowicz/synapse | 7859c4d079ab707520252d6210cf4bc3d4e54902 | [
"Apache-2.0"
] | null | null | null | tests/server.py | Akkowicz/synapse | 7859c4d079ab707520252d6210cf4bc3d4e54902 | [
"Apache-2.0"
] | null | null | null | tests/server.py | Akkowicz/synapse | 7859c4d079ab707520252d6210cf4bc3d4e54902 | [
"Apache-2.0"
] | null | null | null | import json
import logging
from io import SEEK_END, BytesIO
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IReactorPluggableNameResolver,
IReactorTCP,
IResolverSimple,
)
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http import unquote
from twisted.web.http_headers import Headers
from twisted.web.server import Site
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
logger = logging.getLogger(__name__)
class TimedOutException(Exception):
    """Raised when a web query fails to complete within its deadline."""
@attr.s
class FakeChannel:
    """
    A fake Twisted Web Channel (the part that interfaces with the
    wire).
    """
    site = attr.ib(type=Site)
    _reactor = attr.ib()
    # Accumulates the response as the server writes it. Keys observed in this
    # class: "version", "code", "reason", "headers", "body", "done".
    result = attr.ib(default=attr.Factory(dict))
    _producer = None
    @property
    def json_body(self):
        """Decode the accumulated response body as UTF-8 JSON."""
        if not self.result:
            raise Exception("No result yet.")
        return json.loads(self.result["body"].decode("utf8"))
    @property
    def code(self):
        """Return the HTTP status code of the response as an int."""
        if not self.result:
            raise Exception("No result yet.")
        return int(self.result["code"])
    @property
    def headers(self):
        """Return the response headers as a twisted ``Headers`` object."""
        if not self.result:
            raise Exception("No result yet.")
        h = Headers()
        for i in self.result["headers"]:
            h.addRawHeader(*i)
        return h
    def writeHeaders(self, version, code, reason, headers):
        # Called by twisted when the status line and headers are emitted.
        self.result["version"] = version
        self.result["code"] = code
        self.result["reason"] = reason
        self.result["headers"] = headers
    def write(self, content):
        # Called by twisted with each chunk of the response body.
        assert isinstance(content, bytes), "Should be bytes! " + repr(content)
        if "body" not in self.result:
            self.result["body"] = b""
        self.result["body"] += content
    def registerProducer(self, producer, streaming):
        self._producer = producer
        self.producerStreaming = streaming
        def _produce():
            # Keep pulling from the producer until it unregisters itself.
            if self._producer:
                self._producer.resumeProducing()
                self._reactor.callLater(0.1, _produce)
        if not streaming:
            self._reactor.callLater(0.0, _produce)
    def unregisterProducer(self):
        if self._producer is None:
            return
        self._producer = None
    def requestDone(self, _self):
        # Marks the request/response cycle as complete.
        self.result["done"] = True
    def getPeer(self):
        # We give an address so that getClientIP returns a non null entry,
        # causing us to record the MAU
        return address.IPv4Address("TCP", "127.0.0.1", 3423)
    def getHost(self):
        return None
    @property
    def transport(self):
        # The channel doubles as its own transport.
        return self
class FakeSite:
    """Minimal stand-in for a Twisted Web Site carrying the Synapse extras."""
    # Value Synapse reports in the Server response header.
    server_version_string = b"1"
    # Tag used to label this listener in logs/metrics.
    site_tag = "test"
    # Logger that access-log lines are routed to.
    access_logger = logging.getLogger("synapse.access.http.fake")
def make_request(
    reactor,
    method,
    path,
    content=b"",
    access_token=None,
    request=SynapseRequest,
    shorthand=True,
    federation_auth_origin=None,
    content_is_form=False,
):
    """
    Make a web request using the given method and path, feed it the
    content, and return the Request and the Channel underneath.
    Args:
        method (bytes/unicode): The HTTP request method ("verb").
        path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
        escaped UTF-8 & spaces and such).
        content (bytes or dict): The body of the request. JSON-encoded, if
        a dict.
        access_token: if set, an "Authorization: Bearer <token>" header is
            added to the request.
        request: the request class to instantiate (defaults to
            SynapseRequest).
        shorthand: Whether to try and be helpful and prefix the given URL
        with the usual REST API path, if it doesn't contain it.
        federation_auth_origin (bytes|None): if set to not-None, we will add a fake
            Authorization header pretending to be the given server name.
        content_is_form: Whether the content is URL encoded form data. Adds the
            'Content-Type': 'application/x-www-form-urlencoded' header.
    Returns:
        Tuple[synapse.http.site.SynapseRequest, channel]
    """
    # Normalise method and path to bytes.
    if not isinstance(method, bytes):
        method = method.encode("ascii")
    if not isinstance(path, bytes):
        path = path.encode("ascii")
    # Decorate it to be the full path, if we're using shorthand
    if (
        shorthand
        and not path.startswith(b"/_matrix")
        and not path.startswith(b"/_synapse")
    ):
        path = b"/_matrix/client/r0/" + path
        path = path.replace(b"//", b"/")
    if not path.startswith(b"/"):
        path = b"/" + path
    if isinstance(content, str):
        content = content.encode("utf8")
    site = FakeSite()
    channel = FakeChannel(site, reactor)
    req = request(channel)
    # Disable the real processing; the caller drives rendering explicitly.
    req.process = lambda: b""
    req.content = BytesIO(content)
    # Twisted expects to be at the end of the content when parsing the request.
    req.content.seek(SEEK_END)
    req.postpath = list(map(unquote, path[1:].split(b"/")))
    if access_token:
        req.requestHeaders.addRawHeader(
            b"Authorization", b"Bearer " + access_token.encode("ascii")
        )
    if federation_auth_origin is not None:
        req.requestHeaders.addRawHeader(
            b"Authorization",
            b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,),
        )
    if content:
        if content_is_form:
            req.requestHeaders.addRawHeader(
                b"Content-Type", b"application/x-www-form-urlencoded"
            )
        else:
            # Assume the body is JSON
            req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
    req.requestReceived(method, path, b"1.1")
    return req, channel
def wait_until_result(clock, request, timeout=100):
    """Advance *clock* until *request* finishes.

    Raises:
        TimedOutException: if the request is still unfinished after
            ``timeout`` iterations of 0.1s clock advances.
    """
    clock.run()

    ticks = 0
    while not request.finished:
        # A registered producer must be nudged or the body never arrives.
        producer = request._channel._producer
        if producer:
            producer.resumeProducing()

        ticks += 1
        if ticks > timeout:
            raise TimedOutException("Timed out waiting for request to finish.")

        clock.advance(0.1)
def render(request, resource, clock):
    """Render *resource* for *request*, spinning *clock* until it completes."""
    request.render(resource)
    wait_until_result(clock, request)
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
    """
    A MemoryReactorClock that supports callFromThread.
    """
    def __init__(self):
        self.threadpool = ThreadPool(self)
        # Callbacks keyed by (host, port), fired from connectTCP.
        self._tcp_callbacks = {}
        self._udp = []
        # name -> address entries consulted by the fake resolver below.
        lookups = self.lookups = {}
        @implementer(IResolverSimple)
        class FakeResolver:
            def getHostByName(self, name, timeout=None):
                # Resolve purely from the `lookups` dict; no real DNS.
                if name not in lookups:
                    return fail(DNSLookupError("OH NO: unknown %s" % (name,)))
                return succeed(lookups[name])
        self.nameResolver = SimpleResolverComplexifier(FakeResolver())
        super().__init__()
    def listenUDP(self, port, protocol, interface="", maxPacketSize=8196):
        # NOTE(review): 8196 looks like a typo for 8192 — confirm upstream
        # before changing, as twisted's default is 8192.
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        self._udp.append(p)
        return p
    def callFromThread(self, callback, *args, **kwargs):
        """
        Make the callback fire in the next reactor iteration.
        """
        d = Deferred()
        d.addCallback(lambda x: callback(*args, **kwargs))
        self.callLater(0, d.callback, True)
        return d
    def getThreadPool(self):
        return self.threadpool
    def add_tcp_client_callback(self, host, port, callback):
        """Add a callback that will be invoked when we receive a connection
        attempt to the given IP/port using `connectTCP`.
        Note that the callback gets run before we return the connection to the
        client, which means callbacks cannot block while waiting for writes.
        """
        self._tcp_callbacks[(host, port)] = callback
    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """Fake L{IReactorTCP.connectTCP}.
        """
        # NOTE(review): the caller's bindAddress is discarded (None is always
        # passed through) — presumably intentional for tests; confirm.
        conn = super().connectTCP(
            host, port, factory, timeout=timeout, bindAddress=None
        )
        # Fire any callback registered via add_tcp_client_callback.
        callback = self._tcp_callbacks.get((host, port))
        if callback:
            callback()
        return conn
class ThreadPool:
    """A fake thread pool that runs everything on the reactor thread."""

    def __init__(self, reactor):
        self._reactor = reactor

    def start(self):
        # Nothing to spin up: there are no real threads.
        pass

    def stop(self):
        # Nothing to tear down either.
        pass

    def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
        """Schedule *function* on the reactor, reporting via *onResult*."""

        def _report(outcome):
            # onResult's first argument signals success (not a Failure).
            success = not isinstance(outcome, Failure)
            onResult(success, outcome)

        d = Deferred()
        d.addCallback(lambda _ignored: function(*args, **kwargs))
        d.addBoth(_report)
        self._reactor.callLater(0, d.callback, True)
        return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
    """
    Set up a synchronous test server, driven by the reactor used by
    the homeserver.

    Args:
        cleanup_func: callable passed through so teardown can dispose of
            resources created here.
        *args, **kwargs: forwarded verbatim to
            ``tests.utils.setup_test_homeserver``.

    Returns:
        The homeserver, with every database pool patched to run its
        transactions synchronously on the reactor thread.
    """
    server = _sth(cleanup_func, *args, **kwargs)

    # NOTE(review): this result is unused (the name is shadowed by the loop
    # below); kept in case get_single_database() has side effects — confirm
    # and drop if it does not.
    database = server.config.database.get_single_database()

    # Make the thread pool synchronous.
    clock = server.get_clock()

    def _patch_pool(pool):
        # Bind `pool` per database. Previously these closures were defined
        # inside the loop body and captured the loop variable, so with more
        # than one database every patched pool dispatched to the *last*
        # pool's connection/interaction runners (classic late-binding bug).
        def runWithConnection(func, *args, **kwargs):
            return threads.deferToThreadPool(
                pool._reactor,
                pool.threadpool,
                pool._runWithConnection,
                func,
                *args,
                **kwargs
            )

        def runInteraction(interaction, *args, **kwargs):
            return threads.deferToThreadPool(
                pool._reactor,
                pool.threadpool,
                pool._runInteraction,
                interaction,
                *args,
                **kwargs
            )

        pool.runWithConnection = runWithConnection
        pool.runInteraction = runInteraction
        pool.threadpool = ThreadPool(clock._reactor)
        pool.running = True

    for database in server.get_datastores().databases:
        _patch_pool(database._db_pool)

    # We've just changed the Databases to run DB transactions on the same
    # thread, so we need to disable the dedicated thread behaviour.
    server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False

    return server
def get_clock():
    """Return a (fake reactor, synapse Clock) pair sharing one time source."""
    clock = ThreadedMemoryReactorClock()
    hs_clock = Clock(clock)
    return clock, hs_clock
@attr.s(cmp=False)
class FakeTransport:
    """
    A twisted.internet.interfaces.ITransport implementation which sends all its data
    straight into an IProtocol object: it exists to connect two IProtocols together.
    To use it, instantiate it with the receiving IProtocol, and then pass it to the
    sending IProtocol's makeConnection method:
        server = HTTPChannel()
        client.makeConnection(FakeTransport(server, self.reactor))
    If you want bidirectional communication, you'll need two instances.
    """
    other = attr.ib()
    """The Protocol object which will receive any data written to this transport.
    :type: twisted.internet.interfaces.IProtocol
    """
    _reactor = attr.ib()
    """Test reactor
    :type: twisted.internet.interfaces.IReactorTime
    """
    _protocol = attr.ib(default=None)
    """The Protocol which is producing data for this transport. Optional, but if set
    will get called back for connectionLost() notifications etc.
    """
    # Connection state flags mirroring twisted's transport semantics.
    disconnecting = False
    disconnected = False
    connected = True
    # Bytes written but not yet delivered to `other`.
    buffer = attr.ib(default=b"")
    producer = attr.ib(default=None)
    # When True, writes are flushed automatically via callLater(0).
    autoflush = attr.ib(default=True)
    def getPeer(self):
        return None
    def getHost(self):
        return None
    def loseConnection(self, reason=None):
        """Close the connection, flushing any buffered data first."""
        if not self.disconnecting:
            logger.info("FakeTransport: loseConnection(%s)", reason)
            self.disconnecting = True
            if self._protocol:
                self._protocol.connectionLost(reason)
            # if we still have data to write, delay until that is done
            if self.buffer:
                logger.info(
                    "FakeTransport: Delaying disconnect until buffer is flushed"
                )
            else:
                self.connected = False
                self.disconnected = True
    def abortConnection(self):
        """Close the connection immediately, discarding buffered data."""
        logger.info("FakeTransport: abortConnection()")
        if not self.disconnecting:
            self.disconnecting = True
            if self._protocol:
                self._protocol.connectionLost(None)
        self.disconnected = True
    def pauseProducing(self):
        if not self.producer:
            return
        self.producer.pauseProducing()
    def resumeProducing(self):
        if not self.producer:
            return
        self.producer.resumeProducing()
    def unregisterProducer(self):
        if not self.producer:
            return
        self.producer = None
    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producerStreaming = streaming
        def _produce():
            # Pull producers must be polled repeatedly.
            d = self.producer.resumeProducing()
            d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
        if not streaming:
            self._reactor.callLater(0.0, _produce)
    def write(self, byt):
        """Buffer *byt* for delivery to the peer protocol."""
        if self.disconnecting:
            raise Exception("Writing to disconnecting FakeTransport")
        self.buffer = self.buffer + byt
        # always actually do the write asynchronously. Some protocols (notably the
        # TLSMemoryBIOProtocol) get very confused if a read comes back while they are
        # still doing a write. Doing a callLater here breaks the cycle.
        if self.autoflush:
            self._reactor.callLater(0.0, self.flush)
    def writeSequence(self, seq):
        for x in seq:
            self.write(x)
    def flush(self, maxbytes=None):
        """Deliver up to *maxbytes* buffered bytes to the peer protocol."""
        if not self.buffer:
            # nothing to do. Don't write empty buffers: it upsets the
            # TLSMemoryBIOProtocol
            return
        if self.disconnected:
            return
        if getattr(self.other, "transport") is None:
            # the other has no transport yet; reschedule
            if self.autoflush:
                self._reactor.callLater(0.0, self.flush)
            return
        if maxbytes is not None:
            to_write = self.buffer[:maxbytes]
        else:
            to_write = self.buffer
        logger.info("%s->%s: %s", self._protocol, self.other, to_write)
        try:
            self.other.dataReceived(to_write)
        except Exception as e:
            # Swallow delivery errors so the (test) reactor keeps running.
            logger.exception("Exception writing to protocol: %s", e)
            return
        self.buffer = self.buffer[len(to_write) :]
        if self.buffer and self.autoflush:
            self._reactor.callLater(0.0, self.flush)
        # A pending loseConnection() completes once the buffer drains.
        if not self.buffer and self.disconnecting:
            logger.info("FakeTransport: Buffer now empty, completing disconnect")
            self.disconnected = True
def connect_client(reactor: IReactorTCP, client_id: int):
    """
    Complete a pending client connection against a fake TCP transport.

    The previous signature annotated the return type as a single
    ``AccumulatingProtocol`` and documented a nonexistent ``factory``
    argument; the function actually returns a pair.

    Args:
        reactor: The memory reactor holding pending ``tcpClients`` entries.
        client_id: Index into ``reactor.tcpClients`` of the connection to
            complete.

    Returns:
        A ``(client protocol, server AccumulatingProtocol)`` tuple, wired to
        each other through a pair of ``FakeTransport``s.
    """
    # The third element of a tcpClients entry is the connecting factory.
    factory = reactor.tcpClients[client_id][2]
    client = factory.buildProtocol(None)

    # Wire client and server together in both directions.
    server = AccumulatingProtocol()
    server.makeConnection(FakeTransport(client, reactor))
    client.makeConnection(FakeTransport(server, reactor))

    # The connection attempt has now been consumed.
    reactor.tcpClients.pop(client_id)

    return client, server
| 28.748654 | 85 | 0.627053 | import json
import logging
from io import SEEK_END, BytesIO
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IReactorPluggableNameResolver,
IReactorTCP,
IResolverSimple,
)
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http import unquote
from twisted.web.http_headers import Headers
from twisted.web.server import Site
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
logger = logging.getLogger(__name__)
class TimedOutException(Exception):
@attr.s
class FakeChannel:
site = attr.ib(type=Site)
_reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@property
def json_body(self):
if not self.result:
raise Exception("No result yet.")
return json.loads(self.result["body"].decode("utf8"))
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
@property
def headers(self):
if not self.result:
raise Exception("No result yet.")
h = Headers()
for i in self.result["headers"]:
h.addRawHeader(*i)
return h
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@property
def transport(self):
return self
class FakeSite:
server_version_string = b"1"
site_tag = "test"
access_logger = logging.getLogger("synapse.access.http.fake")
def make_request(
reactor,
method,
path,
content=b"",
access_token=None,
request=SynapseRequest,
shorthand=True,
federation_auth_origin=None,
content_is_form=False,
):
if not isinstance(method, bytes):
method = method.encode("ascii")
if not isinstance(path, bytes):
path = path.encode("ascii")
if (
shorthand
and not path.startswith(b"/_matrix")
and not path.startswith(b"/_synapse")
):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
if not path.startswith(b"/"):
path = b"/" + path
if isinstance(content, str):
content = content.encode("utf8")
site = FakeSite()
channel = FakeChannel(site, reactor)
req = request(channel)
req.process = lambda: b""
req.content = BytesIO(content)
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(SEEK_END)
req.postpath = list(map(unquote, path[1:].split(b"/")))
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode("ascii")
)
if federation_auth_origin is not None:
req.requestHeaders.addRawHeader(
b"Authorization",
b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,),
)
if content:
if content_is_form:
req.requestHeaders.addRawHeader(
b"Content-Type", b"application/x-www-form-urlencoded"
)
else:
# Assume the body is JSON
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
req.requestReceived(method, path, b"1.1")
return req, channel
def wait_until_result(clock, request, timeout=100):
clock.run()
x = 0
while not request.finished:
# If there's a producer, tell it to resume producing so we get content
if request._channel._producer:
request._channel._producer.resumeProducing()
x += 1
if x > timeout:
raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
def render(request, resource, clock):
request.render(resource)
wait_until_result(clock, request)
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
def __init__(self):
self.threadpool = ThreadPool(self)
self._tcp_callbacks = {}
self._udp = []
lookups = self.lookups = {}
@implementer(IResolverSimple)
class FakeResolver:
def getHostByName(self, name, timeout=None):
if name not in lookups:
return fail(DNSLookupError("OH NO: unknown %s" % (name,)))
return succeed(lookups[name])
self.nameResolver = SimpleResolverComplexifier(FakeResolver())
super().__init__()
def listenUDP(self, port, protocol, interface="", maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
d = Deferred()
d.addCallback(lambda x: callback(*args, **kwargs))
self.callLater(0, d.callback, True)
return d
def getThreadPool(self):
return self.threadpool
def add_tcp_client_callback(self, host, port, callback):
self._tcp_callbacks[(host, port)] = callback
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
conn = super().connectTCP(
host, port, factory, timeout=timeout, bindAddress=None
)
callback = self._tcp_callbacks.get((host, port))
if callback:
callback()
return conn
class ThreadPool:
def __init__(self, reactor):
self._reactor = reactor
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
self._reactor.callLater(0, d.callback, True)
return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
server = _sth(cleanup_func, *args, **kwargs)
database = server.config.database.get_single_database()
clock = server.get_clock()
for database in server.get_datastores().databases:
pool = database._db_pool
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
pool.threadpool = ThreadPool(clock._reactor)
pool.running = True
# thread, so we need to disable the dedicated thread behaviour.
server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False
return server
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return clock, hs_clock
@attr.s(cmp=False)
class FakeTransport:
    """A fake transport which delivers everything written to it straight into
    another protocol object, so that two protocols can be wired together in
    tests without real sockets.

    Writes are buffered and delivered asynchronously via ``_reactor.callLater``
    so that a read never re-enters a protocol that is in the middle of a write.
    """

    # the protocol object which will receive any data written to this transport
    # (via its ``dataReceived``).
    other = attr.ib()

    # the reactor used to schedule the asynchronous flushes.
    _reactor = attr.ib()

    # the protocol writing to this transport; notified via ``connectionLost``
    # when the connection is dropped.
    _protocol = attr.ib(default=None)

    # True once loseConnection()/abortConnection() has been requested.
    disconnecting = False
    # True once the disconnect has actually completed.
    disconnected = False
    connected = True

    # bytes written but not yet delivered to ``other``.
    buffer = attr.ib(default=b"")

    # producer registered via registerProducer(), if any.
    producer = attr.ib(default=None)

    # if True, every write() schedules a flush on the reactor automatically;
    # if False, tests must call flush() themselves.
    autoflush = attr.ib(default=True)

    def getPeer(self):
        # ITransport API; no meaningful address for a fake transport.
        return None

    def getHost(self):
        # ITransport API; no meaningful address for a fake transport.
        return None

    def loseConnection(self, reason=None):
        """Request a clean shutdown; completes once the buffer has drained."""
        if not self.disconnecting:
            logger.info("FakeTransport: loseConnection(%s)", reason)
            self.disconnecting = True
            if self._protocol:
                self._protocol.connectionLost(reason)

            # if we still have data to write, delay until that is done; the
            # tail of flush() finishes the disconnect once the buffer empties.
            if self.buffer:
                logger.info(
                    "FakeTransport: Delaying disconnect until buffer is flushed"
                )
            else:
                self.connected = False
                self.disconnected = True

    def abortConnection(self):
        """Hard shutdown: drop immediately without draining the buffer."""
        logger.info("FakeTransport: abortConnection()")

        if not self.disconnecting:
            self.disconnecting = True
            if self._protocol:
                self._protocol.connectionLost(None)

        self.disconnected = True

    def pauseProducing(self):
        if not self.producer:
            return

        self.producer.pauseProducing()

    def resumeProducing(self):
        if not self.producer:
            return

        self.producer.resumeProducing()

    def unregisterProducer(self):
        if not self.producer:
            return

        self.producer = None

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producerStreaming = streaming

        # for a pull (non-streaming) producer, poke it periodically: each
        # resumeProducing() is assumed to return a Deferred, after which we
        # schedule the next poke.
        def _produce():
            d = self.producer.resumeProducing()
            d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))

        if not streaming:
            self._reactor.callLater(0.0, _produce)

    def write(self, byt):
        if self.disconnecting:
            raise Exception("Writing to disconnecting FakeTransport")

        self.buffer = self.buffer + byt

        # always actually do the write asynchronously. Some protocols (notably the
        # TLSMemoryBIOProtocol) get very confused if a read comes back while they are
        # still doing a write. Doing a callLater here breaks the cycle.
        if self.autoflush:
            self._reactor.callLater(0.0, self.flush)

    def writeSequence(self, seq):
        for x in seq:
            self.write(x)

    def flush(self, maxbytes=None):
        """Deliver up to ``maxbytes`` (default: all) buffered bytes to ``other``."""
        if not self.buffer:
            # nothing to do. Don't deliver empty buffers: it upsets the
            # receiving protocol.
            return

        if self.disconnected:
            return

        # wait until the other side actually has a transport before delivering;
        # NOTE(review): getattr with no default will raise if ``other`` lacks
        # the attribute entirely — assumes every peer defines ``transport``.
        if getattr(self.other, "transport") is None:
            # the other has no transport yet; reschedule
            if self.autoflush:
                self._reactor.callLater(0.0, self.flush)
            return

        if maxbytes is not None:
            to_write = self.buffer[:maxbytes]
        else:
            to_write = self.buffer

        logger.info("%s->%s: %s", self._protocol, self.other, to_write)

        try:
            self.other.dataReceived(to_write)
        except Exception as e:
            # keep the undelivered bytes in the buffer and give up this round.
            logger.exception("Exception writing to protocol: %s", e)
            return

        self.buffer = self.buffer[len(to_write) :]
        if self.buffer and self.autoflush:
            self._reactor.callLater(0.0, self.flush)

        # a pending loseConnection() completes once the buffer is drained.
        if not self.buffer and self.disconnecting:
            logger.info("FakeTransport: Buffer now empty, completing disconnect")
            self.disconnected = True
def connect_client(reactor: IReactorTCP, client_id: int) -> "Tuple[IProtocol, AccumulatingProtocol]":
    """Connect the fake client queued at ``reactor.tcpClients[client_id]``.

    Builds the client protocol from the queued factory, wires it to a fresh
    AccumulatingProtocol via a pair of FakeTransports, and removes the entry
    from ``reactor.tcpClients``.

    Returns:
        ``(client, server)`` — the built client protocol and the server-side
        AccumulatingProtocol.  (The original annotation claimed a bare
        AccumulatingProtocol, but the function has always returned a tuple.)
    """
    factory = reactor.tcpClients[client_id][2]
    client = factory.buildProtocol(None)
    server = AccumulatingProtocol()
    server.makeConnection(FakeTransport(client, reactor))
    client.makeConnection(FakeTransport(server, reactor))
    reactor.tcpClients.pop(client_id)
    return client, server
| true | true |
f7f52810a4bb10daf2d3ebed24a802e24b2b4a10 | 36,743 | py | Python | all_cnn_bi_skippy_cifar100.py | christiaanlamers/sms-mip-ego | 3601efcb9cfe069e8543d3c29a6102ebbeb9a78c | [
"MIT"
] | null | null | null | all_cnn_bi_skippy_cifar100.py | christiaanlamers/sms-mip-ego | 3601efcb9cfe069e8543d3c29a6102ebbeb9a78c | [
"MIT"
] | null | null | null | all_cnn_bi_skippy_cifar100.py | christiaanlamers/sms-mip-ego | 3601efcb9cfe069e8543d3c29a6102ebbeb9a78c | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
#np.random.seed(43)
import tensorflow as tf
tf.set_random_seed(43)
import keras
#from keras.datasets import mnist
from keras.datasets import cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,UpSampling2D,ZeroPadding2D,Concatenate
from keras.layers import Layer#CHRIS to define a layer yourself
import os
import sys
import pandas as pd
import keras.backend as K
import math
from keras.callbacks import LearningRateScheduler
from keras.regularizers import l2
import time #CHRIS added to measure runtime of training
from pynvml import * #CHRIS needed to test gpu memory capacity
#from fractions import gcd #CHRIS needed for proper upscaling
import setproctitle
import json
#setproctitle.setproctitle('lamers c, do not use GPU 4-7, 9-15 please')
class TimedAccHistory(keras.callbacks.Callback):
    """Keras callback recording, per epoch, the validation accuracy and the
    wall-clock seconds elapsed since training started.

    Results are read externally via ``accuracy_log`` and ``timed``.
    """

    def on_train_begin(self, logs={}):
        # reset the logs at the start of every fit() call
        self.accuracy_log = []
        self.timed = []
        self.start_time = time.time()

    def on_epoch_end(self, batch, logs={}):
        # 'val_acc' is only present when fit() was given validation data
        self.accuracy_log.append(logs.get('val_acc'))
        self.timed.append(time.time() - self.start_time)
def inv_gray(num):  # TODO only for testing
    """Decode a Gray-coded integer back to its plain binary value.

    Inverse of ``b ^ (b >> 1)``: XOR-folds every right shift of ``num``.
    """
    decoded = 0
    while num:
        decoded ^= num
        num >>= 1
    return decoded
class Skip_manager(object):
    """Bookkeeper for skip connections while a Keras graph is being built.

    Each entry of ``skip_ints`` is a bit mask over layer indices: while layer
    ``layer_num`` is being built, bit ``layer_num`` of the mask decides whether
    a new skip connection starts there (see ``startpoint``).  The matching
    entry of ``skip_ints_count`` is the number of layers the connection spans
    before being merged back in by ``end_skip``.
    """

    def __init__(self,skip_ints,skip_ints_count):
        # bit masks marking the layers where skip connections start
        self.skip_ints= skip_ints
        # span, in layers, of each prospective skip connection
        self.skip_ints_count = skip_ints_count
        # active connections: [tensor, remaining_span, origin_layer, dropout]
        self.skip_connections = []
        self.layer_num = 0 #layer number of currently build layer

    def identity(self,num):
        # encoding used when interpreting the skip masks as plain binary
        return num

    def gray(self,num):
        # alternative Gray-code encoding of the skip masks (currently unused
        # by connect_skip, which passes ``identity``)
        return num ^ (num >> 1)

    def startpoint(self,func,num):
        # test bit ``layer_num`` of the (encoded) mask ``num``
        return (func(num) >> self.layer_num) & 1

    def set_dropout(self,dropout_val):
        """Record ``dropout_val`` on every currently-open skip connection."""
        for i in range(len(self.skip_connections)):
            self.skip_connections[i][3] = dropout_val
        return

    def pad_and_connect(self, layer, incoming_layer):
        """Concatenate ``incoming_layer`` onto ``layer`` along channels,
        zero-padding spatial dims so both tensors match.

        The mixed cases pad each tensor along a different axis; the final
        ``else`` handles ``incoming_layer`` being at least as large in both
        spatial dims, padding ``layer`` instead.
        """
        if K.int_shape(incoming_layer)[1] != K.int_shape(layer)[1] or K.int_shape(incoming_layer)[2] != K.int_shape(layer)[2]:
            # split any odd padding difference between the two sides
            pad_tpl1 = (int(np.floor(np.abs(K.int_shape(incoming_layer)[1]-K.int_shape(layer)[1])/2)),int(np.ceil(np.abs(K.int_shape(incoming_layer)[1]-K.int_shape(layer)[1])/2)))
            pad_tpl2 = (int(np.floor(np.abs(K.int_shape(incoming_layer)[2]-K.int_shape(layer)[2])/2)),int(np.ceil(np.abs(K.int_shape(incoming_layer)[2]-K.int_shape(layer)[2])/2)))
            #print(pad_tpl)
            if K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
                # incoming tensor smaller in both dims: pad it up to ``layer``
                padded = ZeroPadding2D(padding=(pad_tpl1, pad_tpl2))(incoming_layer)
                layer = Concatenate()([layer, padded])
            elif K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] >= K.int_shape(layer)[2]:
                # mixed: pad incoming along rows, ``layer`` along columns
                padded1 = ZeroPadding2D(padding=(pad_tpl1, 0))(incoming_layer)
                padded2 = ZeroPadding2D(padding=(0, pad_tpl2))(layer)
                layer = Concatenate()([padded1, padded2])
            elif K.int_shape(incoming_layer)[1] >= K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
                # mixed: pad incoming along columns, ``layer`` along rows
                padded1 = ZeroPadding2D(padding=(0, pad_tpl2))(incoming_layer)
                padded2 = ZeroPadding2D(padding=(pad_tpl1, 0))(layer)
                layer= Concatenate()([padded1, padded2])
            else:
                # incoming at least as large in both dims: pad ``layer`` up
                #print(layer.shape)
                padded = ZeroPadding2D(padding=(pad_tpl1, pad_tpl2))(layer)
                #print(padded.shape)
                #print(incoming_layer.shape)
                layer= Concatenate()([padded, incoming_layer])
        else:
            layer= Concatenate()([layer, incoming_layer])
        return layer

    def pool_pad_connect(self, layer, incoming_layer,dropout_val):
        """Shrink ``incoming_layer`` by max-pooling where it exceeds ``layer``,
        apply the recorded dropout, then pad-and-concatenate the two tensors.
        """
        if K.int_shape(incoming_layer)[1] != K.int_shape(layer)[1] or K.int_shape(incoming_layer)[2] != K.int_shape(layer)[2]:
            #print('layer dimensions:')
            #print(K.int_shape(layer)[1], K.int_shape(layer)[2])
            #print('incoming_layer dimensions:')
            #print(K.int_shape(incoming_layer)[1], K.int_shape(incoming_layer)[2])
            if K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
                # incoming already smaller in both dims: padding alone suffices
                pass
            elif K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] >= K.int_shape(layer)[2]:
                scalar = int(np.ceil(K.int_shape(incoming_layer)[2] / K.int_shape(layer)[2]))
                incoming_layer = MaxPooling2D(pool_size=(1, scalar), strides=(1, scalar), padding='same')(incoming_layer)
                print('warning: code used that is not tested, see: all_cnn_bi_skippy.py --> pool_pad_connect()')
            elif K.int_shape(incoming_layer)[1] >= K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
                scalar = int(np.ceil(K.int_shape(incoming_layer)[1] / K.int_shape(layer)[1]))
                incoming_layer = MaxPooling2D(pool_size=(scalar, 1), strides=(scalar, 1), padding='same')(incoming_layer)
                print('warning: code used that is not tested, see: all_cnn_bi_skippy.py --> pool_pad_connect()')
            else: #K.int_shape(incoming_layer)[1] > K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] > K.int_shape(layer)[2]
                scalar_1 = int(np.ceil(K.int_shape(incoming_layer)[1] / K.int_shape(layer)[1]))
                scalar_2 = int(np.ceil(K.int_shape(incoming_layer)[2] / K.int_shape(layer)[2]))
                incoming_layer = MaxPooling2D(pool_size=(scalar_1, scalar_2), strides=(scalar_1, scalar_2), padding='same')(incoming_layer)
                #print('Did a max pool')
        if dropout_val is not None:
            incoming_layer = Dropout(dropout_val)(incoming_layer)
        return self.pad_and_connect(layer, incoming_layer)

    def start_skip(self,layer):
        """Open a skip connection at the current layer for every mask whose
        current bit is set.  Spans < 2 mean "no skip connection".
        """
        for j in range(len(self.skip_ints)):
            if self.skip_ints_count[j] > 1 and self.startpoint(self.identity,self.skip_ints[j]):#CHRIS skip connections smaller than 2 are not made, thus mean no skip connection.
                self.skip_connections.append([layer,self.skip_ints_count[j],self.layer_num,None])#save layer output, skip counter, layer this skip connection starts (to remove duplicates)
        return layer

    def end_skip(self,layer,filters,kernel,regulizer,act):
        """Merge every expired skip connection into ``layer``.

        Duplicate connections originating from the same layer are merged only
        once, and when a merge raised the channel count above ``filters`` a
        1x1 convolution projects it back down.
        """
        for j in range(len(self.skip_connections)):
            self.skip_connections[j][1] -= 1 #decrease skip connection counters
        j = 0
        prev_skip = -1
        connected = False #CHRIS check if an end skip connection is made
        while j < len(self.skip_connections):
            if self.skip_connections[j][1] <= 0:
                #print(prev_skip,self.skip_connections[j][2])
                if prev_skip != self.skip_connections[j][2]:#this removes skip connection duplicates (works because same skip connections are next to eachother) TODO maybe better to make more robust
                    #CHRIS TODO add pooling, because this becomes too complex to train
                    #layer = self.pad_and_connect(layer, self.skip_connections[j][0])#CHRIS warning! pad_and_connect does not do dropout!
                    layer = self.pool_pad_connect(layer, self.skip_connections[j][0],self.skip_connections[j][3])
                    connected = True#CHRIS an end skip connection is made
                    #if upscaling is desired: (can result in enormous tensors though)
                    #shape1 = K.int_shape(layer)
                    #shape2 = K.int_shape(self.skip_connections[j][0])
                    #gcd_x = gcd(shape1[1], shape2[1])
                    #gcd_y = gcd(shape1[2], shape2[2])
                    #scale1 =shape2[1] // gcd_x, shape2[2] // gcd_y
                    #scale2 =shape1[1] // gcd_x, shape1[2] // gcd_y
                    #upscaled1 = UpSampling2D(size=scale1, interpolation='nearest')(layer)
                    #upscaled2 = UpSampling2D(size=scale2, interpolation='nearest')(self.skip_connections[j][0])
                    #layer = keras.layers.Concatenate()([upscaled1, upscaled2])
                prev_skip = self.skip_connections[j][2]
                del self.skip_connections[j]
            else:
                j += 1
        if connected and K.int_shape(layer)[3] > filters:#CHRIS we only want projection if an end skip connection is made, hence: ''connected''
            #CHRIS convolution to bound amount of features
            #CHRIS can funcion as addition, or projection followed by addition
            layer = Conv2D(filters, (1,1), padding='same', kernel_regularizer=l2(regulizer), bias_regularizer=l2(regulizer))(layer)#CHRIS kernel value set to (1,1) in order to simply act as projection
            #layer = Activation(act)(layer)
        # compensate the decrement above: connect_skip decrements again itself
        for j in range(len(self.skip_connections)):#CHRIS TODO this is a bit hacky
            self.skip_connections[j][1] += 1 #decrease skip connection counters
        return layer

    def connect_skip(self,layer,filters,kernel,regulizer,act):
        """Per-layer hook: close expired skips, age the rest, open new ones,
        and advance the layer counter.  Returns the (possibly merged) tensor.
        """
        #end skip connections
        layer = self.end_skip(layer,filters,kernel,regulizer,act)
        for j in range(len(self.skip_connections)):#CHRIS TODO this is a bit hacky
            self.skip_connections[j][1] -= 1 #decrease skip connection counters
        #start skip connections
        layer = self.start_skip(layer)
        self.layer_num +=1 #increase layer number where currently building takes place
        return layer
def _skip_mask(step, start, network_depth):
    """Build the bit mask of layer indices at which skip connections start.

    The mask has a 1 every ``step`` bits, enough to cover ``network_depth``
    layers, shifted left by ``start``.  ``step`` <= 1 means "no skip
    connections" and yields 0.
    """
    if step <= 1:
        return 0
    mask = 1
    cnt = 0
    while cnt <= network_depth:
        mask = (mask << step) | 1  # low bit is 0 after the shift, so | == +
        cnt += step
    return mask << start


def CNN_conf(cfg,epochs=1,test=False,gpu_no=0,verbose=0,save_name='skippy_test_train_hist',data_augmentation=False):
    """Build and (optionally) train an all-convolutional net with skip
    connections on CIFAR-100, as described by the hyper-parameter dict ``cfg``.

    The network has up to seven conv "stacks"; stack ``i`` repeats a
    ``filters_{2i}``/``k_{2i}`` convolution ``stack_i`` times, downsamples
    with either a strided ``filters_{2i+1}``/``k_{2i+1}`` convolution or max
    pooling (``cfg['max_pooling']``), and applies ``dropout_{i+1}``.  Skip
    connections are managed by ``Skip_manager`` from the ``skstart_j`` /
    ``skstep_j`` parameters.

    Parameters
    ----------
    cfg : dict
        Hyper-parameter configuration (mutated: ``cfg['decay']`` is set).
    epochs : int
        Number of training epochs.
    test : bool
        If True, return the uncompiled-for-training ``keras.Model`` instead
        of training.
    gpu_no : int or str
        GPU index; only used by the (removed) capacity check, kept for
        interface compatibility.
    verbose : int
        Keras fit verbosity.
    save_name : str
        Prefix for the JSON training-history log.
    data_augmentation : bool
        Train on an ``ImageDataGenerator`` stream configured from ``cfg``.

    Returns
    -------
    (timer, loss) : (float, float)
        Training wall-clock seconds and ``-log(best val accuracy)``
        (minimizing the latter maximizes accuracy); or the model if ``test``.
    """
    batch_size = 100
    num_classes = 100
    logfile = 'mnist-cnn.log'
    savemodel = False

    # The data, shuffled and split between train and test sets:
    (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')

    cfg_df = pd.DataFrame(cfg, index=[0])

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train.flatten(), num_classes)
    y_test = keras.utils.to_categorical(y_test.flatten(), num_classes)

    # total depth: all stack bodies plus one downsampling layer per stack
    network_depth = sum(cfg['stack_%d' % i] for i in range(7)) + 7

    skip_ints = [_skip_mask(cfg['skstep_%d' % j], cfg['skstart_%d' % j], network_depth)
                 for j in range(5)]
    skip_manager = Skip_manager(skip_ints, [cfg['skstep_%d' % j] for j in range(5)])

    input1 = keras.layers.Input(shape=(x_train.shape[1], x_train.shape[2], x_train.shape[3]))
    layer = input1
    # the filter count for skip bookkeeping lags one layer behind the stack
    filter_amount = x_train.shape[3]
    layer = skip_manager.connect_skip(layer, filter_amount, cfg['k_0'], cfg['l2'], cfg['activation'])
    layer = Dropout(cfg['dropout_0'], input_shape=x_train.shape[1:])(layer)
    skip_manager.set_dropout(cfg['dropout_0'])

    # seven conv stacks; stack i: body uses filters_{2i}/k_{2i}, the
    # downsampling step uses filters_{2i+1}/k_{2i+1} with stride s_i, then
    # dropout_{i+1}.  (This replaces seven copy-pasted sections.)
    for i in range(7):
        f_body = cfg['filters_%d' % (2 * i)]
        f_down = cfg['filters_%d' % (2 * i + 1)]
        k_body = cfg['k_%d' % (2 * i)]
        k_down = cfg['k_%d' % (2 * i + 1)]
        stride = cfg['s_%d' % i]
        for _ in range(cfg['stack_%d' % i]):
            filter_amount = f_body
            layer = Conv2D(f_body, (k_body, k_body), padding='same',
                           kernel_regularizer=l2(cfg['l2']),
                           bias_regularizer=l2(cfg['l2']))(layer)
            layer = skip_manager.connect_skip(layer, filter_amount, k_body, cfg['l2'], cfg['activation'])
            layer = Activation(cfg['activation'])(layer)
        if cfg['stack_%d' % i] > 0:
            if not cfg['max_pooling']:
                # strided convolution acting as the pooling layer
                filter_amount = f_down
                layer = Conv2D(f_down, (k_down, k_down), strides=(stride, stride), padding='same',
                               kernel_regularizer=l2(cfg['l2']),
                               bias_regularizer=l2(cfg['l2']))(layer)
                # NOTE: the body kernel size is passed on, matching the original
                layer = skip_manager.connect_skip(layer, filter_amount, k_body, cfg['l2'], cfg['activation'])
                layer = Activation(cfg['activation'])(layer)
            else:
                layer = MaxPooling2D(pool_size=(k_down, k_down), strides=(stride, stride), padding='same')(layer)
        layer = Dropout(cfg['dropout_%d' % (i + 1)])(layer)
        skip_manager.set_dropout(cfg['dropout_%d' % (i + 1)])

    # head: global average pooling or flatten, then up to two dense layers
    if cfg['global_pooling']:
        layer = GlobalAveragePooling2D()(layer)
        layer = Dropout(cfg['dropout_7'])(layer)
    else:
        layer = Flatten()(layer)

    if cfg['dense_size_0'] > 0:
        layer = Dense(cfg['dense_size_0'], kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_8'])(layer)
    if cfg['dense_size_1'] > 0:
        layer = Dense(cfg['dense_size_1'], kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
        layer = Activation(cfg['activation'])(layer)
        layer = Dropout(cfg['dropout_9'])(layer)
    layer = Dense(num_classes, kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
    out = Activation(cfg['activ_dense'])(layer)

    cfg['decay'] = cfg['lr'] / float(epochs)

    def step_decay(epoch):
        # drop the learning rate by 10x every 20 epochs
        initial_lrate = cfg['lr']
        drop = 0.1
        epochs_drop = 20.0
        return initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))

    hist_func = TimedAccHistory()
    callbacks = [hist_func]
    if cfg['step']:
        # step schedule replaces the continuous decay
        callbacks = [LearningRateScheduler(step_decay), hist_func]
        cfg['decay'] = 0.

    opt = keras.optimizers.SGD(lr=cfg['lr'], momentum=0.9, decay=cfg['decay'], nesterov=False)

    model = keras.models.Model(inputs=input1, outputs=out)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    if test:
        return model

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # BUGFIX: the validation set must always be rescaled to [0, 1]; the
    # ImageDataGenerator's rescale= only covers the training stream, so the
    # original code validated on 0-255 inputs in the augmentation branch.
    x_test /= 255.
    if not data_augmentation:
        x_train /= 255.

    if not data_augmentation:
        print('Not using data augmentation.')
        start = time.time()
        hist = model.fit(x_train, y_train,
                         batch_size=batch_size,
                         epochs=epochs,
                         validation_data=(x_test, y_test),
                         callbacks=callbacks,
                         verbose=verbose,
                         shuffle=True)
        stop = time.time()
    else:
        print('Using real-time data augmentation.')
        # preprocessing and realtime data augmentation, driven by cfg
        datagen = ImageDataGenerator(
            featurewise_center=cfg['featurewise_center'],  # set input mean to 0 over the dataset
            samplewise_center=cfg['samplewise_center'],  # set each sample mean to 0
            featurewise_std_normalization=cfg['featurewise_std_normalization'],  # divide inputs by std of the dataset
            samplewise_std_normalization=cfg['samplewise_std_normalization'],  # divide each input by its std
            zca_epsilon=cfg['zca_epsilon'],
            zca_whitening=cfg['zca_whitening'],  # apply ZCA whitening
            rotation_range=cfg['rotation_range'],  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=cfg['width_shift_range'],  # randomly shift images horizontally (fraction of total width)
            height_shift_range=cfg['height_shift_range'],  # randomly shift images vertically (fraction of total height)
            shear_range=cfg['shear_range'],
            zoom_range=cfg['zoom_range'],
            channel_shift_range=cfg['channel_shift_range'],
            fill_mode=cfg['fill_mode'],  # ('constant','nearest','reflect','wrap')
            cval=cfg['cval'],
            horizontal_flip=cfg['horizontal_flip'],  # randomly flip images
            vertical_flip=cfg['vertical_flip'],  # randomly flip images
            rescale=1 / 255.0)
        datagen.fit(x_train)

        # Fit the model on the batches generated by datagen.flow().
        start = time.time()
        hist = model.fit_generator(datagen.flow(x_train, y_train,
                                                batch_size=batch_size), verbose=verbose,
                                   callbacks=callbacks,
                                   epochs=epochs, steps_per_epoch=len(x_train) // batch_size,
                                   validation_data=(x_test, y_test))
        stop = time.time()

    timer = stop - start

    # append this run's validation-accuracy curve + per-epoch timings
    eval_training_hist = [time.time(), hist.history['val_acc'], hist_func.timed]
    with open(save_name + '_eval_train_hist.json', 'a') as outfile:
        json.dump(eval_training_hist, outfile)
        outfile.write('\n')

    if savemodel:
        model.save('best_model_mnist.h5')

    maxval = max(hist.history['val_acc'])
    # minimizing this loss maximizes validation accuracy
    loss = -1 * math.log(max(hist.history['val_acc']))

    if logfile is not None:
        log_file = logfile
        cfg_df['perf'] = maxval

        # save the configuration + achieved accuracy to the CSV log
        if os.path.isfile(log_file):
            cfg_df.to_csv(log_file, mode='a', header=False, index=False)
        else:
            cfg_df.to_csv(log_file, mode='w', header=True, index=False)
    return timer, loss
#CHRIS testcode
def test_skippy():
    """Manual smoke test: define the mip-ego search space, then build (and,
    unless ``test`` is flipped below, train) a hand-written RESnet-34-like
    configuration via ``CNN_conf``.

    NOTE: the sampled ``X`` solutions are only printed; the hard-coded ``vla``
    dict is what is actually passed to ``CNN_conf``.
    """
    from mipego.mipego import Solution #TODO remove this, only for testing
    from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
    from keras.utils import plot_model
    #define the search space.
    #objective = obj_func('./all-cnn_bi.py')
    activation_fun = ["softmax"]
    activation_fun_conv = ["elu","relu","tanh","sigmoid","selu"]
    filters = OrdinalSpace([10, 100], 'filters') * 14 #TODO [0,100] should be [0,600]
    kernel_size = OrdinalSpace([1, 8], 'k') * 14
    strides = OrdinalSpace([1, 5], 's') * 7
    stack_sizes = OrdinalSpace([0, 4], 'stack') * 7 #TODO [0,4] should be [0,7]
    activation = NominalSpace(activation_fun_conv, "activation") # activation function
    activation_dense = NominalSpace(activation_fun, "activ_dense") # activation function for dense layer
    step = NominalSpace([True, False], "step") # step
    global_pooling = NominalSpace([True, False], "global_pooling") # global_pooling
    #skippy parameters
    skstart = OrdinalSpace([0, 50], 'skstart') * 5
    skstep = OrdinalSpace([1, 50], 'skstep') * 5
    max_pooling = NominalSpace([True, False], "max_pooling")
    dense_size = OrdinalSpace([0,2000],'dense_size')*2
    #skippy parameters
    drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10 # drop_out rate
    lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr') # learning rate
    l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')# l2_regularizer
    search_space = stack_sizes * strides * filters * kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skstart * skstep * max_pooling * dense_size
    n_init_sample = 1
    samples = search_space.sampling(n_init_sample)
    print(samples)
    var_names = search_space.var_name.tolist()
    print(var_names)
    #a sample
    #samples = [[1, 1, 1, 1, 2, 3, 10, 10, 5, 10, 10, 10, 10, 3, 4, 2, 1, 3, 1, 3, 'relu', 'softmax', 0.7105013348601977, 0.24225495530708516, 0.5278997344637044, 0.7264822991098491, 0.0072338759099408985, 0.00010867041652507452, False, True]]
    #test parameters
    #original parameters
    #RESnet-34-like
    stack_0 = 1
    stack_1 = 6
    stack_2 = 4
    stack_3 = 4
    stack_4 = 6
    stack_5 = 6
    stack_6 = 6
    s_0=2#1#2
    s_1=2
    s_2=1#1
    s_3=2
    s_4=1
    s_5=2
    s_6=1
    filters_0=64
    filters_1=64
    filters_2=64
    filters_3=64
    filters_4=128
    filters_5=128
    filters_6=128
    filters_7=128
    filters_8=256
    filters_9=256
    filters_10=256
    filters_11=256
    filters_12=512
    filters_13=512
    k_0=7
    k_1=1
    k_2=3
    k_3=1
    k_4=3
    k_5=1
    k_6=3
    k_7=1
    k_8=3
    k_9=1
    k_10=3
    k_11=1
    k_12=3
    k_13=1
    activation='relu'
    activ_dense='softmax'
    dropout_0=0.001
    dropout_1=0.001
    dropout_2=0.001
    dropout_3=0.001
    dropout_4=0.001
    dropout_5=0.001
    dropout_6=0.001
    dropout_7=0.001
    dropout_8=0.001
    dropout_9=0.001
    lr=0.01
    l2=0.0001
    step=False#True
    global_pooling=True
    #skippy parameters
    # build an alternating ("om en om") bit pattern over the stack layout —
    # kept only for the commented-out inv_gray experiment below
    om_en_om = 1
    ranges = [stack_6,stack_5,stack_4,stack_3,stack_2,stack_1,stack_0]
    for w in range(len(ranges)):#TODO testcode: remove
        om_en_om = om_en_om << 1
        for z in range(ranges[w]//2):
            om_en_om = om_en_om << 2
            om_en_om += 1
        om_en_om = om_en_om << 1
    skstart_0 = 1#inv_gray(om_en_om)#3826103921638#2**30-1
    skstart_1 = 1#19283461627361826#2**30-1
    skstart_2 = 1#473829102637452916#2**30-1
    skstart_3 = 1#473829102637452916#2**30-1
    skstart_4 = 1#473829102637452916#2**30-1
    skstep_0 = 2
    skstep_1 = 1
    skstep_2 = 1
    skstep_3 = 1
    skstep_4 = 1
    max_pooling = True
    dense_size_0 = 1000
    dense_size_1 = 0
    #skippy parameters
    #assembling parameters
    # NOTE: element order must match var_names exactly
    samples = [[stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0, s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6, filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13, activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3, dropout_4, dropout_5, dropout_6, dropout_7, dropout_8, dropout_9, lr, l2, step, global_pooling, skstart_0, skstart_1, skstart_2, skstart_3, skstart_4, skstep_0, skstep_1, skstep_2, skstep_3, skstep_4, max_pooling, dense_size_0, dense_size_1]]
    #var_names
    #['stack_0', 'stack_1', 'stack_2', 's_0', 's_1', 's_2', 'filters_0', 'filters_1', 'filters_2', 'filters_3', 'filters_4', 'filters_5', 'filters_6', 'k_0', 'k_1', 'k_2', 'k_3', 'k_4', 'k_5', 'k_6', 'activation', 'activ_dense', 'dropout_0', 'dropout_1', 'dropout_2', 'dropout_3', 'lr', 'l2', 'step', 'global_pooling']
    X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
    # a previously-sampled configuration, hard-coded for reproduction
    vla = {'s_2': 7, 'lr': 0.005478541674651396, 'skstep_2': 4, 'dropout_8': 0.5440199827441856, 'k_12': 15, 'activ_dense': 'softmax', 'stack_4': 3, 'k_5': 2, 'dropout_4': 0.24617655948523018, 's_3': 6, 'k_11': 13, 'filters_10': 84, 'dropout_0': 0.0639815161048702, 'k_7': 13, 'filters_9': 178, 'k_1': 13, 'dropout_6': 0.1752239013431692, 'filters_7': 353, 'skstep_4': 6, 'skstart_2': 0, 'stack_0': 0, 'stack_5': 1, 's_5': 2, 'k_13': 6, 'filters_2': 110, 'filters_0': 248, 'skstart_1': 5, 'filters_6': 341, 'filters_8': 165, 'skstart_4': 2, 'l2': 0.0012874308061650037, 's_0': 9, 'global_pooling': False, 'stack_6': 1, 's_1': 2, 'skstep_0': 4, 'dropout_3': 0.495646008202597, 'skstart_0': 3, 'k_6': 2, 'filters_1': 61, 'dropout_2': 0.028121315386701783, 'stack_3': 2, 'filters_3': 299, 'stack_1': 3, 'max_pooling': True, 'filters_4': 259, 'filters_11': 207, 'k_3': 15, 'k_0': 15, 'dense_size_0': 1400, 'k_4': 10, 's_6': 5, 'dropout_9': 0.004273458743956573, 'skstep_3': 6, 'filters_5': 16, 's_4': 2, 'dropout_1': 0.42526328646019135, 'dense_size_1': 2990, 'k_10': 9, 'k_2': 4, 'skstep_1': 6, 'dropout_5': 0.3927105783290164, 'filters_12': 283, 'dropout_7': 0.01357058138235737, 'activation': 'selu', 'filters_13': 228, 'step': False, 'k_8': 2, 'k_9': 2, 'skstart_3': 1, 'stack_2': 3}
    print(X)
    print(X[0].to_dict())
    #cfg = [Solution(x, index=len(self.data) + i, var_name=self.var_names) for i, x in enumerate(X)]
    # test=True only builds and inspects the model; test=False trains it
    test = False
    if test:
        #model = CNN_conf(X[0].to_dict(),test=test)
        model = CNN_conf(vla,test=test)
        plot_model(model, to_file='model_skippy_test.png',show_shapes=True,show_layer_names=True)
        model.summary()
        print(model.count_params())
        print(str(model.count_params() * 4 * 2 / 1024/1024/1024) + ' Gb')
    else:
        #timer, loss = CNN_conf(X[0].to_dict(),test=test,epochs= 2000,verbose=1)
        timer, loss = CNN_conf(vla,test=test,epochs= 2000,verbose=1)
        print('timer, loss:')
        print(timer, loss)
if __name__ == '__main__':#CHRIS TODO will this wreck the entire method?
#system arguments (configuration)
if len(sys.argv) > 2 and sys.argv[1] == '--cfg':
cfg = eval(sys.argv[2])
if len(sys.argv) > 3:
gpu = sys.argv[3]
epochs = int(sys.argv[4])
save_name = str(sys.argv[5])
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpu)
print(CNN_conf(cfg,gpu_no=gpu,epochs=epochs,save_name=save_name))
K.clear_session()
else:
print('switching to test mode')
test_skippy()
| 51.031944 | 1,283 | 0.634298 | from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.set_random_seed(43)
import keras
from keras.datasets import cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,UpSampling2D,ZeroPadding2D,Concatenate
from keras.layers import Layer
import os
import sys
import pandas as pd
import keras.backend as K
import math
from keras.callbacks import LearningRateScheduler
from keras.regularizers import l2
import time
from pynvml import *
class TimedAccHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.accuracy_log = []
self.timed = []
self.start_time = time.time()
def on_epoch_end(self, batch, logs={}):
self.accuracy_log.append(logs.get('val_acc'))
self.timed.append(time.time() - self.start_time)
def inv_gray(num):
n = 0
while num != 0:
n = num ^ n
num = num >> 1
return n
class Skip_manager(object):
def __init__(self,skip_ints,skip_ints_count):
self.skip_ints= skip_ints
self.skip_ints_count = skip_ints_count
self.skip_connections = []
self.layer_num = 0
def identity(self,num):
return num
def gray(self,num):
return num ^ (num >> 1)
def startpoint(self,func,num):
return (func(num) >> self.layer_num) & 1
def set_dropout(self,dropout_val):
for i in range(len(self.skip_connections)):
self.skip_connections[i][3] = dropout_val
return
def pad_and_connect(self, layer, incoming_layer):
if K.int_shape(incoming_layer)[1] != K.int_shape(layer)[1] or K.int_shape(incoming_layer)[2] != K.int_shape(layer)[2]:
pad_tpl1 = (int(np.floor(np.abs(K.int_shape(incoming_layer)[1]-K.int_shape(layer)[1])/2)),int(np.ceil(np.abs(K.int_shape(incoming_layer)[1]-K.int_shape(layer)[1])/2)))
pad_tpl2 = (int(np.floor(np.abs(K.int_shape(incoming_layer)[2]-K.int_shape(layer)[2])/2)),int(np.ceil(np.abs(K.int_shape(incoming_layer)[2]-K.int_shape(layer)[2])/2)))
if K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
padded = ZeroPadding2D(padding=(pad_tpl1, pad_tpl2))(incoming_layer)
layer = Concatenate()([layer, padded])
elif K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] >= K.int_shape(layer)[2]:
padded1 = ZeroPadding2D(padding=(pad_tpl1, 0))(incoming_layer)
padded2 = ZeroPadding2D(padding=(0, pad_tpl2))(layer)
layer = Concatenate()([padded1, padded2])
elif K.int_shape(incoming_layer)[1] >= K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
padded1 = ZeroPadding2D(padding=(0, pad_tpl2))(incoming_layer)
padded2 = ZeroPadding2D(padding=(pad_tpl1, 0))(layer)
layer= Concatenate()([padded1, padded2])
else:
padded = ZeroPadding2D(padding=(pad_tpl1, pad_tpl2))(layer)
layer= Concatenate()([padded, incoming_layer])
else:
layer= Concatenate()([layer, incoming_layer])
return layer
def pool_pad_connect(self, layer, incoming_layer,dropout_val):
if K.int_shape(incoming_layer)[1] != K.int_shape(layer)[1] or K.int_shape(incoming_layer)[2] != K.int_shape(layer)[2]:
if K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
pass
elif K.int_shape(incoming_layer)[1] < K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] >= K.int_shape(layer)[2]:
scalar = int(np.ceil(K.int_shape(incoming_layer)[2] / K.int_shape(layer)[2]))
incoming_layer = MaxPooling2D(pool_size=(1, scalar), strides=(1, scalar), padding='same')(incoming_layer)
print('warning: code used that is not tested, see: all_cnn_bi_skippy.py --> pool_pad_connect()')
elif K.int_shape(incoming_layer)[1] >= K.int_shape(layer)[1] and K.int_shape(incoming_layer)[2] < K.int_shape(layer)[2]:
scalar = int(np.ceil(K.int_shape(incoming_layer)[1] / K.int_shape(layer)[1]))
incoming_layer = MaxPooling2D(pool_size=(scalar, 1), strides=(scalar, 1), padding='same')(incoming_layer)
print('warning: code used that is not tested, see: all_cnn_bi_skippy.py --> pool_pad_connect()')
else:
scalar_1 = int(np.ceil(K.int_shape(incoming_layer)[1] / K.int_shape(layer)[1]))
scalar_2 = int(np.ceil(K.int_shape(incoming_layer)[2] / K.int_shape(layer)[2]))
incoming_layer = MaxPooling2D(pool_size=(scalar_1, scalar_2), strides=(scalar_1, scalar_2), padding='same')(incoming_layer)
if dropout_val is not None:
incoming_layer = Dropout(dropout_val)(incoming_layer)
return self.pad_and_connect(layer, incoming_layer)
def start_skip(self,layer):
for j in range(len(self.skip_ints)):
if self.skip_ints_count[j] > 1 and self.startpoint(self.identity,self.skip_ints[j]):
self.skip_connections.append([layer,self.skip_ints_count[j],self.layer_num,None])
return layer
def end_skip(self,layer,filters,kernel,regulizer,act):
for j in range(len(self.skip_connections)):
self.skip_connections[j][1] -= 1
j = 0
prev_skip = -1
connected = False
while j < len(self.skip_connections):
if self.skip_connections[j][1] <= 0:
if prev_skip != self.skip_connections[j][2]:
ayer, self.skip_connections[j][0],self.skip_connections[j][3])
connected = True
prev_skip = self.skip_connections[j][2]
del self.skip_connections[j]
else:
j += 1
if connected and K.int_shape(layer)[3] > filters:
layer = Conv2D(filters, (1,1), padding='same', kernel_regularizer=l2(regulizer), bias_regularizer=l2(regulizer))(layer)
for j in range(len(self.skip_connections)):
self.skip_connections[j][1] += 1
return layer
def connect_skip(self,layer,filters,kernel,regulizer,act):
layer = self.end_skip(layer,filters,kernel,regulizer,act)
for j in range(len(self.skip_connections)):
self.skip_connections[j][1] -= 1
layer = self.start_skip(layer)
self.layer_num +=1
return layer
def CNN_conf(cfg,epochs=1,test=False,gpu_no=0,verbose=0,save_name='skippy_test_train_hist',data_augmentation=False):
batch_size = 100
num_classes = 100
num_predictions = 20
logfile = 'mnist-cnn.log'
savemodel = False
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
cfg_df = pd.DataFrame(cfg, index=[0])
y_train = keras.utils.to_categorical(y_train.flatten(), num_classes)
y_test = keras.utils.to_categorical(y_test.flatten(), num_classes)
skint_0 = 0
skint_1 = 0
skint_2 = 0
skint_3 = 0
skint_4 = 0
network_depth = cfg['stack_0'] + cfg['stack_1'] + cfg['stack_2'] + cfg['stack_3'] + cfg['stack_4'] + cfg['stack_5'] + cfg['stack_6']+7
if cfg['skstep_0'] > 1:
cnt = 0
skint_0 = 1
while cnt <= network_depth:
skint_0 = skint_0 << cfg['skstep_0']
skint_0 += 1
cnt += cfg['skstep_0']
skint_0 = skint_0 << cfg['skstart_0']
if cfg['skstep_1'] > 1:
cnt = 0
skint_1 = 1
while cnt <= network_depth:
skint_1 = skint_1 << cfg['skstep_1']
skint_1 += 1
cnt += cfg['skstep_1']
skint_1 = skint_1 << cfg['skstart_1']
if cfg['skstep_2'] > 1:
cnt = 0
skint_2 = 1
while cnt <= network_depth:
skint_2 = skint_2 << cfg['skstep_2']
skint_2 += 1
cnt += cfg['skstep_2']
skint_2 = skint_2 << cfg['skstart_2']
if cfg['skstep_3'] > 1:
cnt = 0
skint_3 = 1
while cnt <= network_depth:
skint_3 = skint_3 << cfg['skstep_3']
skint_3 += 1
cnt += cfg['skstep_3']
skint_3 = skint_3 << cfg['skstart_3']
if cfg['skstep_4'] > 1:
cnt = 0
skint_4 = 1
while cnt <= network_depth:
skint_4 = skint_4 << cfg['skstep_4']
skint_4 += 1
cnt += cfg['skstep_4']
skint_4 = skint_4 << cfg['skstart_4']
skip_manager = Skip_manager([skint_0,skint_1,skint_2,skint_3,skint_4],[cfg['skstep_0'],cfg['skstep_1'],cfg['skstep_2'],cfg['skstep_3'],cfg['skstep_4']])
input1 = keras.layers.Input(shape=(x_train.shape[1],x_train.shape[2],x_train.shape[3]))
layer=input1
filter_amount = x_train.shape[3]
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_0'],cfg['l2'],cfg['activation'])
layer = Dropout(cfg['dropout_0'],input_shape=x_train.shape[1:])(layer)
skip_manager.set_dropout(cfg['dropout_0'])
fg['stack_0']):
filter_amount = cfg['filters_0']
layer = Conv2D(cfg['filters_0'], (cfg['k_0'], cfg['k_0']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_0'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_0']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_1']
layer = Conv2D(cfg['filters_1'], (cfg['k_1'], cfg['k_1']), strides=(cfg['s_0'], cfg['s_0']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_0'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_1'], cfg['k_1']), strides=(cfg['s_0'], cfg['s_0']), padding='same')(layer)
layer = Dropout(cfg['dropout_1'])(layer)
skip_manager.set_dropout(cfg['dropout_1'])
for i in range(cfg['stack_1']):
filter_amount = cfg['filters_2']
layer = Conv2D(cfg['filters_2'], (cfg['k_2'], cfg['k_2']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_2'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_1']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_3']
layer = Conv2D(cfg['filters_3'], (cfg['k_3'], cfg['k_3']), strides=(cfg['s_1'], cfg['s_1']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_2'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_3'], cfg['k_3']), strides=(cfg['s_1'], cfg['s_1']), padding='same')(layer)
layer = Dropout(cfg['dropout_2'])(layer)
skip_manager.set_dropout(cfg['dropout_2'])
for i in range(cfg['stack_2']):
filter_amount = cfg['filters_4']
layer = Conv2D(cfg['filters_4'], (cfg['k_4'], cfg['k_4']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_4'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_2']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_5']
layer = Conv2D(cfg['filters_5'], (cfg['k_5'], cfg['k_5']), strides=(cfg['s_2'], cfg['s_2']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_4'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_5'], cfg['k_5']), strides=(cfg['s_2'], cfg['s_2']), padding='same')(layer)
layer = Dropout(cfg['dropout_3'])(layer)
skip_manager.set_dropout(cfg['dropout_3'])
for i in range(cfg['stack_3']):
filter_amount = cfg['filters_6']
layer = Conv2D(cfg['filters_6'], (cfg['k_6'], cfg['k_6']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_6'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_3']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_7']
layer = Conv2D(cfg['filters_7'], (cfg['k_7'], cfg['k_7']), strides=(cfg['s_3'], cfg['s_3']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_6'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_7'], cfg['k_7']), strides=(cfg['s_3'], cfg['s_3']), padding='same')(layer)
layer = Dropout(cfg['dropout_4'])(layer)
skip_manager.set_dropout(cfg['dropout_4'])
for i in range(cfg['stack_4']):
filter_amount = cfg['filters_8']
layer = Conv2D(cfg['filters_8'], (cfg['k_8'], cfg['k_8']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_8'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_4']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_9']
layer = Conv2D(cfg['filters_9'], (cfg['k_9'], cfg['k_9']), strides=(cfg['s_4'], cfg['s_4']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_8'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_9'], cfg['k_9']), strides=(cfg['s_4'], cfg['s_4']), padding='same')(layer)
layer = Dropout(cfg['dropout_5'])(layer)
skip_manager.set_dropout(cfg['dropout_5'])
for i in range(cfg['stack_5']):
filter_amount = cfg['filters_10']
layer = Conv2D(cfg['filters_10'], (cfg['k_10'], cfg['k_10']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_10'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_5']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_11']
layer = Conv2D(cfg['filters_11'], (cfg['k_11'], cfg['k_11']), strides=(cfg['s_5'], cfg['s_5']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_10'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_11'], cfg['k_11']), strides=(cfg['s_5'], cfg['s_5']), padding='same')(layer)
layer = Dropout(cfg['dropout_6'])(layer)
skip_manager.set_dropout(cfg['dropout_6'])
for i in range(cfg['stack_6']):
filter_amount = cfg['filters_12']
layer = Conv2D(cfg['filters_12'], (cfg['k_12'], cfg['k_12']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_12'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
if (cfg['stack_6']>0):
if not (cfg['max_pooling']):
filter_amount = cfg['filters_13']
layer = Conv2D(cfg['filters_13'], (cfg['k_13'], cfg['k_13']), strides=(cfg['s_6'], cfg['s_6']), padding='same', kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = skip_manager.connect_skip(layer,filter_amount,cfg['k_12'],cfg['l2'],cfg['activation'])
layer = Activation(cfg['activation'])(layer)
else:
layer = MaxPooling2D(pool_size=(cfg['k_13'], cfg['k_13']), strides=(cfg['s_6'], cfg['s_6']), padding='same')(layer)
layer = Dropout(cfg['dropout_7'])(layer)
skip_manager.set_dropout(cfg['dropout_7'])
['global_pooling']):
layer = GlobalAveragePooling2D()(layer)
layer = Dropout(cfg['dropout_7'])(layer)
else:
layer = Flatten()(layer)
if cfg['dense_size_0'] > 0:
layer = Dense(cfg['dense_size_0'], kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = Activation(cfg['activation'])(layer)
layer = Dropout(cfg['dropout_8'])(layer)
if cfg['dense_size_1'] > 0:
layer = Dense(cfg['dense_size_1'], kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
layer = Activation(cfg['activation'])(layer)
layer = Dropout(cfg['dropout_9'])(layer)
layer = Dense(num_classes, kernel_regularizer=l2(cfg['l2']), bias_regularizer=l2(cfg['l2']))(layer)
out = Activation(cfg['activ_dense'])(layer)
cfg['decay'] = cfg['lr'] / float(epochs)
def step_decay(epoch):
initial_lrate = cfg['lr']
drop = 0.1
epochs_drop = 20.0
lrate = initial_lrate * math.pow(drop,
math.floor((1+epoch)/epochs_drop))
return lrate
hist_func = TimedAccHistory()
callbacks = [hist_func]
if (cfg['step'] == True):
callbacks = [LearningRateScheduler(step_decay),hist_func]
cfg['decay'] = 0.
opt = keras.optimizers.SGD(lr=cfg['lr'], momentum=0.9, decay=cfg['decay'], nesterov=False)
model = keras.models.Model(inputs=input1, outputs=out)
model.compile(loss='categorical_crossentropy',optimizer=opt,metrics=['accuracy'])#TODO 'adam' moet zijn: opt
#model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
if test:
return model #TODO remove this, just for testing
#print("amount of parameters:")
#print(model.count_params())
#CHRIS test if gpu has enough memory
#nvmlInit()
#handle = nvmlDeviceGetHandleByIndex(int(gpu_no))
#meminfo = nvmlDeviceGetMemoryInfo(handle)
#max_size = meminfo.total #6689341440
#if meminfo.free/1024.**2 < 1.0:
# print('gpu is allready in use')
#nvmlShutdown()
#if model.count_params()*4*2 >= max_size:#CHRIS *4*2: 4 byte per parameter times 2 for backpropagation
#print('network too large for memory')
#return 1000000000.0*(model.count_params()*4*2/max_size), 5.0*(model.count_params()*4*2/max_size)
#max_size = 32828802 * 2 #CHRIS twice as large as RESnet-34-like implementation
#max_size = 129200130 #CHRIS twice as wide as RESnet-34-like implementation with batchsize=10, one network of this size was able to be ran on tritanium gpu
max_size = 130374394 #CHRIS twice as wide as RESnet-34-like implementation with batchsize=100, one network of this size was able to be ran on tritanium gpu
#if model.count_params() > max_size:
#print('network too large for implementation')
#return 1000000000.0*(model.count_params()/max_size), 5.0*(model.count_params()/max_size)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
if not data_augmentation:#CHRIS data augmentation handles normalization
x_train /= 255.
x_test /= 255.
if not data_augmentation:
print('Not using data augmentation.')
start = time.time()
hist = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
callbacks=callbacks,
verbose=verbose,
shuffle=True)
stop = time.time()
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=cfg['featurewise_center'], # set input mean to 0 over the dataset
samplewise_center=cfg['samplewise_center'], # set each sample mean to 0
featurewise_std_normalization=cfg['featurewise_std_normalization'], # divide inputs by std of the dataset
samplewise_std_normalization=cfg['samplewise_std_normalization'], # divide each input by its std
zca_epsilon=cfg['zca_epsilon'],
zca_whitening=cfg['zca_whitening'], # apply ZCA whitening
rotation_range=cfg['rotation_range'], # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=cfg['width_shift_range'], # randomly shift images horizontally (fraction of total width)
height_shift_range=cfg['height_shift_range'], # randomly shift images vertically (fraction of total height)
shear_range=cfg['shear_range'],
zoom_range=cfg['zoom_range'],
channel_shift_range=cfg['channel_shift_range'],
fill_mode=cfg['fill_mode'],#('constant','nearest',reflect','wrap')
cval=cfg['cval'],
horizontal_flip=cfg['horizontal_flip'],
vertical_flip=cfg['vertical_flip'],
rescale=1/255.0)
datagen.fit(x_train)
start = time.time()
hist = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size), verbose=verbose,
callbacks=callbacks,
epochs=epochs, steps_per_epoch = len(x_train)/batch_size,
validation_data=(x_test, y_test))
stop = time.time()
timer = stop-start
eval_training_hist = [time.time(),hist.history['val_acc'], hist_func.timed]
with open(save_name + '_eval_train_hist.json', 'a') as outfile:
json.dump(eval_training_hist,outfile)
outfile.write('\n')
if savemodel:
model.save('best_model_mnist.h5')
maxval = max(hist.history['val_acc'])
.history['val_acc']) )
if logfile is not None:
log_file = logfile
cfg_df['perf'] = maxval
if os.path.isfile(log_file):
cfg_df.to_csv(log_file, mode='a', header=False, index=False)
else:
cfg_df.to_csv(log_file, mode='w', header=True, index=False)
return timer,loss
def test_skippy():
from mipego.mipego import Solution
from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
from keras.utils import plot_model
activation_fun = ["softmax"]
activation_fun_conv = ["elu","relu","tanh","sigmoid","selu"]
filters = OrdinalSpace([10, 100], 'filters') * 14
kernel_size = OrdinalSpace([1, 8], 'k') * 14
strides = OrdinalSpace([1, 5], 's') * 7
stack_sizes = OrdinalSpace([0, 4], 'stack') * 7
activation = NominalSpace(activation_fun_conv, "activation")
activation_dense = NominalSpace(activation_fun, "activ_dense")
step = NominalSpace([True, False], "step")
global_pooling = NominalSpace([True, False], "global_pooling")
skstart = OrdinalSpace([0, 50], 'skstart') * 5
skstep = OrdinalSpace([1, 50], 'skstep') * 5
max_pooling = NominalSpace([True, False], "max_pooling")
dense_size = OrdinalSpace([0,2000],'dense_size')*2
drop_out = ContinuousSpace([1e-5, .9], 'dropout') * 10
lr_rate = ContinuousSpace([1e-4, 1.0e-0], 'lr')
l2_regularizer = ContinuousSpace([1e-5, 1e-2], 'l2')
search_space = stack_sizes * strides * filters * kernel_size * activation * activation_dense * drop_out * lr_rate * l2_regularizer * step * global_pooling * skstart * skstep * max_pooling * dense_size
n_init_sample = 1
samples = search_space.sampling(n_init_sample)
print(samples)
var_names = search_space.var_name.tolist()
print(var_names)
stack_0 = 1
stack_1 = 6
stack_2 = 4
stack_3 = 4
stack_4 = 6
stack_5 = 6
stack_6 = 6
s_0=2 s_1=2
s_2=1
s_3=2
s_4=1
s_5=2
s_6=1
filters_0=64
filters_1=64
filters_2=64
filters_3=64
filters_4=128
filters_5=128
filters_6=128
filters_7=128
filters_8=256
filters_9=256
filters_10=256
filters_11=256
filters_12=512
filters_13=512
k_0=7
k_1=1
k_2=3
k_3=1
k_4=3
k_5=1
k_6=3
k_7=1
k_8=3
k_9=1
k_10=3
k_11=1
k_12=3
k_13=1
activation='relu'
activ_dense='softmax'
dropout_0=0.001
dropout_1=0.001
dropout_2=0.001
dropout_3=0.001
dropout_4=0.001
dropout_5=0.001
dropout_6=0.001
dropout_7=0.001
dropout_8=0.001
dropout_9=0.001
lr=0.01
l2=0.0001
step=False
global_pooling=True
om_en_om = 1
ranges = [stack_6,stack_5,stack_4,stack_3,stack_2,stack_1,stack_0]
for w in range(len(ranges)):
om_en_om = om_en_om << 1
for z in range(ranges[w]//2):
om_en_om = om_en_om << 2
om_en_om += 1
om_en_om = om_en_om << 1
skstart_0 = 1rt_3 = 1tart_4 = 1tep_0 = 2
skstep_1 = 1
skstep_2 = 1
skstep_3 = 1
skstep_4 = 1
max_pooling = True
dense_size_0 = 1000
dense_size_1 = 0
samples = [[stack_0, stack_1, stack_2, stack_3, stack_4, stack_5, stack_6, s_0, s_1, s_2, s_3, s_4, s_5, s_6, filters_0, filters_1, filters_2, filters_3, filters_4, filters_5, filters_6, filters_7, filters_8, filters_9, filters_10, filters_11, filters_12, filters_13,k_0, k_1, k_2, k_3, k_4, k_5, k_6, k_7, k_8, k_9, k_10, k_11, k_12, k_13, activation, activ_dense, dropout_0, dropout_1, dropout_2, dropout_3, dropout_4, dropout_5, dropout_6, dropout_7, dropout_8, dropout_9, lr, l2, step, global_pooling, skstart_0, skstart_1, skstart_2, skstart_3, skstart_4, skstep_0, skstep_1, skstep_2, skstep_3, skstep_4, max_pooling, dense_size_0, dense_size_1]]
X = [Solution(s, index=k, var_name=var_names) for k, s in enumerate(samples)]
vla = {'s_2': 7, 'lr': 0.005478541674651396, 'skstep_2': 4, 'dropout_8': 0.5440199827441856, 'k_12': 15, 'activ_dense': 'softmax', 'stack_4': 3, 'k_5': 2, 'dropout_4': 0.24617655948523018, 's_3': 6, 'k_11': 13, 'filters_10': 84, 'dropout_0': 0.0639815161048702, 'k_7': 13, 'filters_9': 178, 'k_1': 13, 'dropout_6': 0.1752239013431692, 'filters_7': 353, 'skstep_4': 6, 'skstart_2': 0, 'stack_0': 0, 'stack_5': 1, 's_5': 2, 'k_13': 6, 'filters_2': 110, 'filters_0': 248, 'skstart_1': 5, 'filters_6': 341, 'filters_8': 165, 'skstart_4': 2, 'l2': 0.0012874308061650037, 's_0': 9, 'global_pooling': False, 'stack_6': 1, 's_1': 2, 'skstep_0': 4, 'dropout_3': 0.495646008202597, 'skstart_0': 3, 'k_6': 2, 'filters_1': 61, 'dropout_2': 0.028121315386701783, 'stack_3': 2, 'filters_3': 299, 'stack_1': 3, 'max_pooling': True, 'filters_4': 259, 'filters_11': 207, 'k_3': 15, 'k_0': 15, 'dense_size_0': 1400, 'k_4': 10, 's_6': 5, 'dropout_9': 0.004273458743956573, 'skstep_3': 6, 'filters_5': 16, 's_4': 2, 'dropout_1': 0.42526328646019135, 'dense_size_1': 2990, 'k_10': 9, 'k_2': 4, 'skstep_1': 6, 'dropout_5': 0.3927105783290164, 'filters_12': 283, 'dropout_7': 0.01357058138235737, 'activation': 'selu', 'filters_13': 228, 'step': False, 'k_8': 2, 'k_9': 2, 'skstart_3': 1, 'stack_2': 3}
print(X)
print(X[0].to_dict())
test = False
if test:
model = CNN_conf(vla,test=test)
plot_model(model, to_file='model_skippy_test.png',show_shapes=True,show_layer_names=True)
model.summary()
print(model.count_params())
print(str(model.count_params() * 4 * 2 / 1024/1024/1024) + ' Gb')
else:
timer, loss = CNN_conf(vla,test=test,epochs= 2000,verbose=1)
print('timer, loss:')
print(timer, loss)
if __name__ == '__main__':
if len(sys.argv) > 2 and sys.argv[1] == '--cfg':
cfg = eval(sys.argv[2])
if len(sys.argv) > 3:
gpu = sys.argv[3]
epochs = int(sys.argv[4])
save_name = str(sys.argv[5])
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=str(gpu)
print(CNN_conf(cfg,gpu_no=gpu,epochs=epochs,save_name=save_name))
K.clear_session()
else:
print('switching to test mode')
test_skippy()
| true | true |
f7f5293f14eb9028aa5711e50850cddd657fcb6f | 999 | py | Python | yt/frontends/owls_subfind/tests/test_outputs.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | 1 | 2021-09-15T08:17:43.000Z | 2021-09-15T08:17:43.000Z | yt/frontends/owls_subfind/tests/test_outputs.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | 8 | 2020-04-02T16:51:49.000Z | 2022-01-11T14:12:44.000Z | yt/frontends/owls_subfind/tests/test_outputs.py | stonnes/yt | aad3cfa3b4ebab7838352ab467275a27c26ff363 | [
"BSD-3-Clause-Clear"
] | 2 | 2020-08-12T15:46:11.000Z | 2021-02-09T13:09:17.000Z | import os.path
from yt.testing import assert_equal
from yt.utilities.answer_testing.framework import (
FieldValuesTest,
data_dir_load,
requires_ds,
)
# from yt.frontends.owls_subfind.api import OWLSSubfindDataset
# Particle fields exercised by the answer tests below; each one gets a
# FieldValuesTest per dataset.
_fields = (
    "particle_position_x",
    "particle_position_y",
    "particle_position_z",
    "particle_mass",
)

# g1 is a dataset with empty (particle-free) member files; g8 is a normal one.
g1 = "owls_fof_halos/groups_001/group_001.0.hdf5"
g8 = "owls_fof_halos/groups_008/group_008.0.hdf5"
@requires_ds(g8)
def test_fields_g8():
    """Yield per-field answer tests for the group_008 OWLS subfind catalog."""
    dataset = data_dir_load(g8)
    # A loaded dataset stringifies to its file basename.
    assert_equal(str(dataset), os.path.basename(g8))
    yield from (FieldValuesTest(g8, field, particle_type=True) for field in _fields)
@requires_ds(g1)
def test_fields_g1():
    """Yield per-field answer tests for the group_001 OWLS subfind catalog."""
    dataset = data_dir_load(g1)
    # A loaded dataset stringifies to its file basename.
    assert_equal(str(dataset), os.path.basename(g1))
    yield from (FieldValuesTest(g1, field, particle_type=True) for field in _fields)
# @requires_file(g1)
# def test_OWLSSubfindDataset():
# assert isinstance(data_dir_load(g1), OWLSSubfindDataset)
| 23.232558 | 62 | 0.731732 | import os.path
from yt.testing import assert_equal
from yt.utilities.answer_testing.framework import (
FieldValuesTest,
data_dir_load,
requires_ds,
)
# Particle fields validated by the answer tests below.
_fields = (
    "particle_position_x",
    "particle_position_y",
    "particle_position_z",
    "particle_mass",
)

# Halo catalog samples; g1 is a dataset containing empty member files.
g1 = "owls_fof_halos/groups_001/group_001.0.hdf5"
g8 = "owls_fof_halos/groups_008/group_008.0.hdf5"
@requires_ds(g8)
def test_fields_g8():
    """Run field-value answer tests against the group_008 catalog."""
    loaded = data_dir_load(g8)
    # str() of a dataset is the basename of the file it was loaded from.
    assert_equal(str(loaded), os.path.basename(g8))
    for field_name in _fields:
        yield FieldValuesTest(g8, field_name, particle_type=True)
@requires_ds(g1)
def test_fields_g1():
    """Run field-value answer tests against the group_001 catalog."""
    loaded = data_dir_load(g1)
    # str() of a dataset is the basename of the file it was loaded from.
    assert_equal(str(loaded), os.path.basename(g1))
    for field_name in _fields:
        yield FieldValuesTest(g1, field_name, particle_type=True)
| true | true |
f7f529561e429b4438c1decf5c9d4e49d7444b54 | 447 | py | Python | fn_shodan/fn_shodan/util/config.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_shodan/fn_shodan/util/config.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_shodan/fn_shodan/util/config.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | # (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# -*- coding: utf-8 -*-
"""Generate a default configuration-file section for fn_shodan"""
def config_section_data():
    """
    Return the default ``[fn_shodan]`` section that is appended to app.config
    when ``resilient-circuits config [-c|-u]`` is run.
    """
    # Proxy entries are shipped commented out inside the template; the user
    # uncomments them in app.config if a proxy is required.
    return u"""[fn_shodan]
shodan_apikey=<your-api-key>
#http_proxy=
#https_proxy=
"""
| 23.526316 | 68 | 0.689038 |
def config_section_data():
    """
    Produce the default configuration section for fn_shodan, appended to
    app.config when ``resilient-circuits config [-c|-u]`` is run.
    """
    # NOTE: the #http_proxy/#https_proxy lines below are part of the emitted
    # template (commented-out config keys), not Python comments.
    config_data = u"""[fn_shodan]
shodan_apikey=<your-api-key>
#http_proxy=
#https_proxy=
"""
    return config_data
| true | true |
f7f52b476212c27fc67926cd3a21de538041dd8d | 18,368 | py | Python | disnake/ui/view.py | MisileLab/disnake | c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa | [
"MIT"
] | null | null | null | disnake/ui/view.py | MisileLab/disnake | c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa | [
"MIT"
] | null | null | null | disnake/ui/view.py | MisileLab/disnake | c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
Optional,
Sequence,
TYPE_CHECKING,
Tuple,
)
from functools import partial
from itertools import groupby
import traceback
import asyncio
import sys
import time
import os
from .item import Item, ItemCallbackType
from ..enums import try_enum_to_int
from ..components import (
Component,
ActionRow as ActionRowComponent,
_component_factory,
Button as ButtonComponent,
SelectMenu as SelectComponent,
)
__all__ = ("View",)
if TYPE_CHECKING:
from ..interactions import MessageInteraction
from ..message import Message
from ..types.components import Component as ComponentPayload
from ..state import ConnectionState
def _walk_all_components(components: List[Component]) -> Iterator[Component]:
    """Iterate over components, flattening action rows into their children."""
    for component in components:
        if not isinstance(component, ActionRowComponent):
            yield component
            continue
        # Action rows are pure containers; surface each child directly.
        for child in component.children:
            yield child
def _component_to_item(component: Component) -> Item:
    """Wrap a raw message component in its matching ``disnake.ui`` item type."""
    # Imports are kept local to avoid circular imports with .button/.select.
    if isinstance(component, SelectComponent):
        from .select import Select

        return Select.from_component(component)
    if isinstance(component, ButtonComponent):
        from .button import Button

        return Button.from_component(component)
    # Anything unrecognized falls back to the generic wrapper.
    return Item.from_component(component)
class _ViewWeights:
    """Tracks how much horizontal width each of a view's five rows is using.

    Each row can hold components totalling at most 5 units of width; this
    helper assigns items to rows and records the assignment in
    ``item._rendered_row``.
    """

    __slots__ = ("weights",)

    def __init__(self, children: List[Item]):
        self.weights: List[int] = [0] * 5

        # Items without an explicit row sort last (sys.maxsize) so that
        # explicitly-placed items claim their rows first.
        sort_key = lambda item: sys.maxsize if item.row is None else item.row
        for child in sorted(children, key=sort_key):
            self.add_item(child)

    def find_open_space(self, item: Item) -> int:
        """Return the first row index with enough free width for ``item``."""
        for row, used in enumerate(self.weights):
            if used + item.width <= 5:
                return row

        raise ValueError("could not find open space for item")

    def add_item(self, item: Item) -> None:
        """Place ``item`` on its requested row, or the first row that fits."""
        row = item.row
        if row is None:
            row = self.find_open_space(item)
            self.weights[row] += item.width
        else:
            new_total = self.weights[row] + item.width
            if new_total > 5:
                raise ValueError(f"item would not fit at row {item.row} ({new_total} > 5 width)")
            self.weights[row] = new_total
        item._rendered_row = row

    def remove_item(self, item: Item) -> None:
        """Release the width ``item`` occupied, if it was placed."""
        row = item._rendered_row
        if row is None:
            return
        self.weights[row] -= item.width
        item._rendered_row = None

    def clear(self) -> None:
        """Reset all rows to empty."""
        self.weights = [0] * 5
class View:
    """Represents a UI view.

    This object must be inherited to create a UI within Discord.

    .. versionadded:: 2.0

    Parameters
    -----------
    timeout: Optional[:class:`float`]
        Timeout in seconds from last interaction with the UI before no longer accepting input.
        If ``None`` then there is no timeout.

    Attributes
    ------------
    timeout: Optional[:class:`float`]
        Timeout from last interaction with the UI before no longer accepting input.
        If ``None`` then there is no timeout.
    children: List[:class:`Item`]
        The list of children attached to this view.
    """

    __discord_ui_view__: ClassVar[bool] = True
    __view_children_items__: ClassVar[List[ItemCallbackType]] = []

    def __init_subclass__(cls) -> None:
        # Collect the component callbacks declared via the ui decorators,
        # walking the MRO in reverse so base-class items come first.
        children: List[ItemCallbackType] = []
        for base in reversed(cls.__mro__):
            for member in base.__dict__.values():
                if hasattr(member, "__discord_ui_model_type__"):
                    children.append(member)

        if len(children) > 25:
            raise TypeError("View cannot have more than 25 children")

        cls.__view_children_items__ = children

    def __init__(self, *, timeout: Optional[float] = 180.0):
        self.timeout = timeout
        self.children: List[Item] = []
        # Instantiate one item per decorated callback and bind the callback
        # to (self, item) so it receives both when dispatched.
        for func in self.__view_children_items__:
            item: Item = func.__discord_ui_model_type__(**func.__discord_ui_model_kwargs__)
            item.callback = partial(func, self, item)
            item._view = self
            setattr(self, func.__name__, item)
            self.children.append(item)

        self.__weights = _ViewWeights(self.children)
        # Requires a running event loop; views must be created inside one.
        loop = asyncio.get_running_loop()
        self.id: str = os.urandom(16).hex()
        self.__cancel_callback: Optional[Callable[[View], None]] = None
        self.__timeout_expiry: Optional[float] = None
        self.__timeout_task: Optional[asyncio.Task[None]] = None
        self.__stopped: asyncio.Future[bool] = loop.create_future()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} timeout={self.timeout} children={len(self.children)}>"

    async def __timeout_task_impl(self) -> None:
        # Background task that fires on_timeout once the deadline passes.
        while True:
            # Guard just in case someone changes the value of the timeout at runtime
            if self.timeout is None:
                return

            if self.__timeout_expiry is None:
                return self._dispatch_timeout()

            # Check if we've elapsed our currently set timeout
            now = time.monotonic()
            if now >= self.__timeout_expiry:
                return self._dispatch_timeout()

            # Wait N seconds to see if timeout data has been refreshed
            await asyncio.sleep(self.__timeout_expiry - now)

    def to_components(self) -> List[Dict[str, Any]]:
        """Serialize the view's children into raw action-row component payloads."""

        def key(item: Item) -> int:
            return item._rendered_row or 0

        # groupby only groups *consecutive* items, so sort by row first.
        ordered = sorted(self.children, key=key)
        components: List[Dict[str, Any]] = []
        for _, row in groupby(ordered, key=key):
            row_components = [item.to_component_dict() for item in row]
            if not row_components:
                continue

            components.append(
                {
                    "type": 1,  # ActionRow
                    "components": row_components,
                }
            )

        return components

    @classmethod
    def from_message(cls, message: Message, /, *, timeout: Optional[float] = 180.0) -> View:
        """Converts a message's components into a :class:`View`.

        The :attr:`.Message.components` of a message are read-only
        and separate types from those in the ``disnake.ui`` namespace.
        In order to modify and edit message components they must be
        converted into a :class:`View` first.

        Parameters
        -----------
        message: :class:`disnake.Message`
            The message with components to convert into a view.
        timeout: Optional[:class:`float`]
            The timeout of the converted view.

        Returns
        --------
        :class:`View`
            The converted view. This always returns a :class:`View` and not
            one of its subclasses.
        """
        view = View(timeout=timeout)
        for component in _walk_all_components(message.components):
            view.add_item(_component_to_item(component))
        return view

    @property
    def _expires_at(self) -> Optional[float]:
        # Monotonic deadline for the current timeout window, if any.
        if self.timeout:
            return time.monotonic() + self.timeout
        return None

    def add_item(self, item: Item) -> None:
        """Adds an item to the view.

        Parameters
        -----------
        item: :class:`Item`
            The item to add to the view.

        Raises
        --------
        TypeError
            An :class:`Item` was not passed.
        ValueError
            Maximum number of children has been exceeded (25)
            or the row the item is trying to be added to is full.
        """
        # ``>=`` keeps the total at the 25-child cap enforced elsewhere
        # (see __init_subclass__); the previous ``>`` let a 26th child in.
        if len(self.children) >= 25:
            raise ValueError("maximum number of children exceeded")

        if not isinstance(item, Item):
            raise TypeError(f"expected Item not {item.__class__!r}")

        self.__weights.add_item(item)

        item._view = self
        self.children.append(item)

    def remove_item(self, item: Item) -> None:
        """Removes an item from the view.

        Parameters
        -----------
        item: :class:`Item`
            The item to remove from the view.
        """
        try:
            self.children.remove(item)
        except ValueError:
            pass
        else:
            self.__weights.remove_item(item)

    def clear_items(self) -> None:
        """Removes all items from the view."""
        self.children.clear()
        self.__weights.clear()

    async def interaction_check(self, interaction: MessageInteraction) -> bool:
        """|coro|

        A callback that is called when an interaction happens within the view
        that checks whether the view should process item callbacks for the interaction.

        This is useful to override if, for example, you want to ensure that the
        interaction author is a given user.

        The default implementation of this returns ``True``.

        .. note::

            If an exception occurs within the body then the check
            is considered a failure and :meth:`on_error` is called.

        Parameters
        -----------
        interaction: :class:`~disnake.MessageInteraction`
            The interaction that occurred.

        Returns
        ---------
        :class:`bool`
            Whether the view children's callbacks should be called.
        """
        return True

    async def on_timeout(self) -> None:
        """|coro|

        A callback that is called when a view's timeout elapses without being explicitly stopped.
        """
        pass

    async def on_error(self, error: Exception, item: Item, interaction: MessageInteraction) -> None:
        """|coro|

        A callback that is called when an item's callback or :meth:`interaction_check`
        fails with an error.

        The default implementation prints the traceback to stderr.

        Parameters
        -----------
        error: :class:`Exception`
            The exception that was raised.
        item: :class:`Item`
            The item that failed the dispatch.
        interaction: :class:`~disnake.MessageInteraction`
            The interaction that led to the failure.
        """
        print(f"Ignoring exception in view {self} for item {item}:", file=sys.stderr)
        traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)

    async def _scheduled_task(self, item: Item, interaction: MessageInteraction):
        # Runs one item callback; refreshes the timeout window first so an
        # active view never times out mid-interaction.
        try:
            if self.timeout:
                self.__timeout_expiry = time.monotonic() + self.timeout

            allow = await self.interaction_check(interaction)
            if not allow:
                return

            await item.callback(interaction)
        except Exception as e:
            return await self.on_error(e, item, interaction)

    def _start_listening_from_store(self, store: ViewStore) -> None:
        # The bound method is already a callable of one argument;
        # wrapping it in an argument-less partial added nothing.
        self.__cancel_callback = store.remove_view
        if self.timeout:
            loop = asyncio.get_running_loop()
            if self.__timeout_task is not None:
                self.__timeout_task.cancel()

            self.__timeout_expiry = time.monotonic() + self.timeout
            self.__timeout_task = loop.create_task(self.__timeout_task_impl())

    def _dispatch_timeout(self):
        if self.__stopped.done():
            return

        self.__stopped.set_result(True)
        asyncio.create_task(self.on_timeout(), name=f"disnake-ui-view-timeout-{self.id}")

    def _dispatch_item(self, item: Item, interaction: MessageInteraction):
        if self.__stopped.done():
            return

        asyncio.create_task(
            self._scheduled_task(item, interaction), name=f"disnake-ui-view-dispatch-{self.id}"
        )

    def refresh(self, components: List[Component]):
        # This is pretty hacky at the moment
        # Re-sync our items with the components Discord echoed back, reusing
        # existing dispatchable items (keyed by type + custom_id) so their
        # callbacks survive the refresh.
        # fmt: off
        old_state: Dict[Tuple[int, str], Item] = {
            (item.type.value, item.custom_id): item  # type: ignore
            for item in self.children
            if item.is_dispatchable()
        }
        # fmt: on
        children: List[Item] = []
        for component in _walk_all_components(components):
            try:
                older = old_state[(component.type.value, component.custom_id)]  # type: ignore
            except (KeyError, AttributeError):
                children.append(_component_to_item(component))
            else:
                older.refresh_component(component)
                children.append(older)

        self.children = children

    def stop(self) -> None:
        """Stops listening to interaction events from this view.

        This operation cannot be undone.
        """
        if not self.__stopped.done():
            self.__stopped.set_result(False)

        self.__timeout_expiry = None
        if self.__timeout_task is not None:
            self.__timeout_task.cancel()
            self.__timeout_task = None

        if self.__cancel_callback:
            self.__cancel_callback(self)
            self.__cancel_callback = None

    def is_finished(self) -> bool:
        """Whether the view has finished interacting.

        :rtype: :class:`bool`
        """
        return self.__stopped.done()

    def is_dispatching(self) -> bool:
        """Whether the view has been added for dispatching purposes.

        :rtype: :class:`bool`
        """
        return self.__cancel_callback is not None

    def is_persistent(self) -> bool:
        """Whether the view is set up as persistent.

        A persistent view has all their components with a set ``custom_id`` and
        a :attr:`timeout` set to ``None``.

        :rtype: :class:`bool`
        """
        return self.timeout is None and all(item.is_persistent() for item in self.children)

    async def wait(self) -> bool:
        """Waits until the view has finished interacting.

        A view is considered finished when :meth:`stop` is called
        or it times out.

        Returns
        --------
        :class:`bool`
            If ``True``, then the view timed out. If ``False`` then
            the view finished normally.
        """
        return await self.__stopped
class ViewStore:
    """Tracks live views so component interactions can be routed back to them."""

    def __init__(self, state: ConnectionState):
        # (component_type, message_id, custom_id): (View, Item)
        self._views: Dict[Tuple[int, Optional[int], str], Tuple[View, Item]] = {}
        # message_id: View
        self._synced_message_views: Dict[int, View] = {}
        self._state: ConnectionState = state

    @property
    def persistent_views(self) -> Sequence[View]:
        """All stored views that qualify as persistent, deduplicated by view id."""
        # fmt: off
        views = {
            view.id: view
            for (_, (view, _)) in self._views.items()
            if view.is_persistent()
        }
        # fmt: on
        return list(views.values())

    def __verify_integrity(self):
        # Drop entries whose view has already finished.
        to_remove: List[Tuple[int, Optional[int], str]] = [
            key for key, (view, _) in self._views.items() if view.is_finished()
        ]
        for key in to_remove:
            del self._views[key]

    def add_view(self, view: View, message_id: Optional[int] = None):
        """Register ``view`` (and each of its dispatchable items) for dispatch.

        Parameters
        -----------
        view: The view to start listening for.
        message_id: The id of the message the view is attached to, if known.
        """
        self.__verify_integrity()

        view._start_listening_from_store(self)
        for item in view.children:
            if item.is_dispatchable():
                self._views[(item.type.value, message_id, item.custom_id)] = (view, item)  # type: ignore

        if message_id is not None:
            self._synced_message_views[message_id] = view

    def remove_view(self, view: View):
        """Stop tracking ``view`` and all of its dispatchable items.

        Entries are keyed by ``(component_type, message_id, custom_id)`` and
        the message id is not known here, so every entry storing this view is
        removed by scanning.  (The previous implementation popped a 2-tuple
        ``(type, custom_id)`` key, which could never match a 3-tuple entry,
        so stopped views were never actually removed.)
        """
        stale_keys = [key for key, (stored, _) in self._views.items() if stored.id == view.id]
        for key in stale_keys:
            del self._views[key]

        for key, value in self._synced_message_views.items():
            if value.id == view.id:
                del self._synced_message_views[key]
                break

    def dispatch(self, interaction: MessageInteraction):
        """Route a component interaction to the item that registered it."""
        self.__verify_integrity()
        message_id: Optional[int] = interaction.message and interaction.message.id
        component_type = try_enum_to_int(interaction.data.component_type)
        custom_id = interaction.data.custom_id
        key = (component_type, message_id, custom_id)
        # Fallback to None message_id searches in case a persistent view
        # was added without an associated message_id
        value = self._views.get(key) or self._views.get((component_type, None, custom_id))
        if value is None:
            return

        view, item = value
        item.refresh_state(interaction)
        view._dispatch_item(item, interaction)

    def is_message_tracked(self, message_id: int):
        return message_id in self._synced_message_views

    def remove_message_tracking(self, message_id: int) -> Optional[View]:
        return self._synced_message_views.pop(message_id, None)

    def update_from_message(self, message_id: int, components: List[ComponentPayload]):
        # pre-req: is_message_tracked == true
        view = self._synced_message_views[message_id]
        view.refresh([_component_factory(d) for d in components])
| 33.457195 | 105 | 0.621298 |
from __future__ import annotations
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
Optional,
Sequence,
TYPE_CHECKING,
Tuple,
)
from functools import partial
from itertools import groupby
import traceback
import asyncio
import sys
import time
import os
from .item import Item, ItemCallbackType
from ..enums import try_enum_to_int
from ..components import (
Component,
ActionRow as ActionRowComponent,
_component_factory,
Button as ButtonComponent,
SelectMenu as SelectComponent,
)
__all__ = ("View",)
if TYPE_CHECKING:
from ..interactions import MessageInteraction
from ..message import Message
from ..types.components import Component as ComponentPayload
from ..state import ConnectionState
def _walk_all_components(components: List[Component]) -> Iterator[Component]:
for item in components:
if isinstance(item, ActionRowComponent):
yield from item.children
else:
yield item
def _component_to_item(component: Component) -> Item:
if isinstance(component, ButtonComponent):
from .button import Button
return Button.from_component(component)
if isinstance(component, SelectComponent):
from .select import Select
return Select.from_component(component)
return Item.from_component(component)
class _ViewWeights:
__slots__ = ("weights",)
def __init__(self, children: List[Item]):
self.weights: List[int] = [0, 0, 0, 0, 0]
key = lambda i: sys.maxsize if i.row is None else i.row
children = sorted(children, key=key)
for row, group in groupby(children, key=key):
for item in group:
self.add_item(item)
def find_open_space(self, item: Item) -> int:
for index, weight in enumerate(self.weights):
if weight + item.width <= 5:
return index
raise ValueError("could not find open space for item")
def add_item(self, item: Item) -> None:
if item.row is not None:
total = self.weights[item.row] + item.width
if total > 5:
raise ValueError(f"item would not fit at row {item.row} ({total} > 5 width)")
self.weights[item.row] = total
item._rendered_row = item.row
else:
index = self.find_open_space(item)
self.weights[index] += item.width
item._rendered_row = index
def remove_item(self, item: Item) -> None:
if item._rendered_row is not None:
self.weights[item._rendered_row] -= item.width
item._rendered_row = None
def clear(self) -> None:
self.weights = [0, 0, 0, 0, 0]
class View:
__discord_ui_view__: ClassVar[bool] = True
__view_children_items__: ClassVar[List[ItemCallbackType]] = []
def __init_subclass__(cls) -> None:
children: List[ItemCallbackType] = []
for base in reversed(cls.__mro__):
for member in base.__dict__.values():
if hasattr(member, "__discord_ui_model_type__"):
children.append(member)
if len(children) > 25:
raise TypeError("View cannot have more than 25 children")
cls.__view_children_items__ = children
def __init__(self, *, timeout: Optional[float] = 180.0):
self.timeout = timeout
self.children: List[Item] = []
for func in self.__view_children_items__:
item: Item = func.__discord_ui_model_type__(**func.__discord_ui_model_kwargs__)
item.callback = partial(func, self, item)
item._view = self
setattr(self, func.__name__, item)
self.children.append(item)
self.__weights = _ViewWeights(self.children)
loop = asyncio.get_running_loop()
self.id: str = os.urandom(16).hex()
self.__cancel_callback: Optional[Callable[[View], None]] = None
self.__timeout_expiry: Optional[float] = None
self.__timeout_task: Optional[asyncio.Task[None]] = None
self.__stopped: asyncio.Future[bool] = loop.create_future()
def __repr__(self) -> str:
return f"<{self.__class__.__name__} timeout={self.timeout} children={len(self.children)}>"
async def __timeout_task_impl(self) -> None:
while True:
if self.timeout is None:
return
if self.__timeout_expiry is None:
return self._dispatch_timeout()
now = time.monotonic()
if now >= self.__timeout_expiry:
return self._dispatch_timeout()
# Wait N seconds to see if timeout data has been refreshed
await asyncio.sleep(self.__timeout_expiry - now)
def to_components(self) -> List[Dict[str, Any]]:
def key(item: Item) -> int:
return item._rendered_row or 0
children = sorted(self.children, key=key)
components: List[Dict[str, Any]] = []
for _, group in groupby(children, key=key):
children = [item.to_component_dict() for item in group]
if not children:
continue
components.append(
{
"type": 1,
"components": children,
}
)
return components
@classmethod
def from_message(cls, message: Message, /, *, timeout: Optional[float] = 180.0) -> View:
view = View(timeout=timeout)
for component in _walk_all_components(message.components):
view.add_item(_component_to_item(component))
return view
@property
def _expires_at(self) -> Optional[float]:
if self.timeout:
return time.monotonic() + self.timeout
return None
def add_item(self, item: Item) -> None:
if len(self.children) > 25:
raise ValueError("maximum number of children exceeded")
if not isinstance(item, Item):
raise TypeError(f"expected Item not {item.__class__!r}")
self.__weights.add_item(item)
item._view = self
self.children.append(item)
def remove_item(self, item: Item) -> None:
try:
self.children.remove(item)
except ValueError:
pass
else:
self.__weights.remove_item(item)
def clear_items(self) -> None:
self.children.clear()
self.__weights.clear()
async def interaction_check(self, interaction: MessageInteraction) -> bool:
return True
async def on_timeout(self) -> None:
pass
async def on_error(self, error: Exception, item: Item, interaction: MessageInteraction) -> None:
print(f"Ignoring exception in view {self} for item {item}:", file=sys.stderr)
traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)
async def _scheduled_task(self, item: Item, interaction: MessageInteraction):
try:
if self.timeout:
self.__timeout_expiry = time.monotonic() + self.timeout
allow = await self.interaction_check(interaction)
if not allow:
return
await item.callback(interaction)
except Exception as e:
return await self.on_error(e, item, interaction)
def _start_listening_from_store(self, store: ViewStore) -> None:
self.__cancel_callback = partial(store.remove_view)
if self.timeout:
loop = asyncio.get_running_loop()
if self.__timeout_task is not None:
self.__timeout_task.cancel()
self.__timeout_expiry = time.monotonic() + self.timeout
self.__timeout_task = loop.create_task(self.__timeout_task_impl())
def _dispatch_timeout(self):
if self.__stopped.done():
return
self.__stopped.set_result(True)
asyncio.create_task(self.on_timeout(), name=f"disnake-ui-view-timeout-{self.id}")
def _dispatch_item(self, item: Item, interaction: MessageInteraction):
if self.__stopped.done():
return
asyncio.create_task(
self._scheduled_task(item, interaction), name=f"disnake-ui-view-dispatch-{self.id}"
)
def refresh(self, components: List[Component]):
# This is pretty hacky at the moment
# fmt: off
old_state: Dict[Tuple[int, str], Item] = {
(item.type.value, item.custom_id): item # type: ignore
for item in self.children
if item.is_dispatchable()
}
# fmt: on
children: List[Item] = []
for component in _walk_all_components(components):
try:
older = old_state[(component.type.value, component.custom_id)] # type: ignore
except (KeyError, AttributeError):
children.append(_component_to_item(component))
else:
older.refresh_component(component)
children.append(older)
self.children = children
def stop(self) -> None:
if not self.__stopped.done():
self.__stopped.set_result(False)
self.__timeout_expiry = None
if self.__timeout_task is not None:
self.__timeout_task.cancel()
self.__timeout_task = None
if self.__cancel_callback:
self.__cancel_callback(self)
self.__cancel_callback = None
def is_finished(self) -> bool:
return self.__stopped.done()
def is_dispatching(self) -> bool:
return self.__cancel_callback is not None
def is_persistent(self) -> bool:
return self.timeout is None and all(item.is_persistent() for item in self.children)
async def wait(self) -> bool:
return await self.__stopped
class ViewStore:
def __init__(self, state: ConnectionState):
# (component_type, message_id, custom_id): (View, Item)
self._views: Dict[Tuple[int, Optional[int], str], Tuple[View, Item]] = {}
# message_id: View
self._synced_message_views: Dict[int, View] = {}
self._state: ConnectionState = state
@property
def persistent_views(self) -> Sequence[View]:
# fmt: off
views = {
view.id: view
for (_, (view, _)) in self._views.items()
if view.is_persistent()
}
# fmt: on
return list(views.values())
def __verify_integrity(self):
to_remove: List[Tuple[int, Optional[int], str]] = []
for (k, (view, _)) in self._views.items():
if view.is_finished():
to_remove.append(k)
for k in to_remove:
del self._views[k]
def add_view(self, view: View, message_id: Optional[int] = None):
self.__verify_integrity()
view._start_listening_from_store(self)
for item in view.children:
if item.is_dispatchable():
self._views[(item.type.value, message_id, item.custom_id)] = (view, item) # type: ignore
if message_id is not None:
self._synced_message_views[message_id] = view
def remove_view(self, view: View):
for item in view.children:
if item.is_dispatchable():
self._views.pop((item.type.value, item.custom_id), None) # type: ignore
for key, value in self._synced_message_views.items():
if value.id == view.id:
del self._synced_message_views[key]
break
def dispatch(self, interaction: MessageInteraction):
self.__verify_integrity()
message_id: Optional[int] = interaction.message and interaction.message.id
component_type = try_enum_to_int(interaction.data.component_type)
custom_id = interaction.data.custom_id
key = (component_type, message_id, custom_id)
# Fallback to None message_id searches in case a persistent view
# was added without an associated message_id
value = self._views.get(key) or self._views.get((component_type, None, custom_id))
if value is None:
return
view, item = value
item.refresh_state(interaction)
view._dispatch_item(item, interaction)
def is_message_tracked(self, message_id: int):
return message_id in self._synced_message_views
def remove_message_tracking(self, message_id: int) -> Optional[View]:
return self._synced_message_views.pop(message_id, None)
def update_from_message(self, message_id: int, components: List[ComponentPayload]):
# pre-req: is_message_tracked == true
view = self._synced_message_views[message_id]
view.refresh([_component_factory(d) for d in components])
| true | true |
f7f52b88c58cff8b4319ac9bb9d78e9e62914b3c | 1,144 | py | Python | Python/AOC17_day1.2.py | BhavyaLight/fuzzy-AOC17 | 70c734a5a3ded4b684507a362e864a276b5d2d8b | [
"MIT"
] | null | null | null | Python/AOC17_day1.2.py | BhavyaLight/fuzzy-AOC17 | 70c734a5a3ded4b684507a362e864a276b5d2d8b | [
"MIT"
] | null | null | null | Python/AOC17_day1.2.py | BhavyaLight/fuzzy-AOC17 | 70c734a5a3ded4b684507a362e864a276b5d2d8b | [
"MIT"
] | null | null | null | ###
# Now, instead of considering the next digit, it wants you to consider the digit halfway around the circular list.
# That is, if your list contains 10 items, only include a digit in your sum if the digit 10/2 = 5 steps forward\
# matches it.
# Fortunately, your list has an even number of elements.
#
# ############## Sample Test Cases ######################
# 1212 -> 6
# 12341234 ->20
# 123145 -> 2
# 0 -> 0
# 70 -> 0
# 4444 -> 16
# 12345674 -> 8
###
def halfway_sum(number: int) -> int:
    """Return the sum of digits equal to the digit halfway around the
    circular list of ``number``'s digits (Advent of Code 2017, day 1 part 2).

    Args:
        number: A non-negative integer with an even number of digits
            (0 is also accepted and yields 0).

    Returns:
        The sum of all digits whose halfway partner matches them.
    """
    # Count the digits. O(n) in the number of digits.
    digit_count = 0
    remaining = number
    while remaining > 0:
        digit_count += 1
        remaining //= 10

    # "Unroll" the circular list by appending the first half of the digits,
    # e.g. 1234 -> 123412, so each digit's halfway partner sits exactly
    # digit_count digits further along and can be consumed in lockstep.
    forward_steps = 10 ** (digit_count // 2)
    partner = int(str(number) + str(number // forward_steps))

    total = 0
    while number > 0:
        digit = number % 10
        if digit == partner % 10:
            total += digit
        number //= 10
        partner //= 10
    return total


if __name__ == "__main__":
    input_number = int(input("Enter a number:-"))
    print("Your final answer is:\n" + str(halfway_sum(input_number)))
f7f52d799ff6860aafff8dd920ab81080de560a1 | 2,669 | py | Python | scripts/clean.py | nate26/alaska | 17d140f100f34a8dc100188d7a357a7378b2842e | [
"WTFPL"
] | 1 | 2021-07-13T23:40:50.000Z | 2021-07-13T23:40:50.000Z | scripts/clean.py | nate26/alaska | 17d140f100f34a8dc100188d7a357a7378b2842e | [
"WTFPL"
] | null | null | null | scripts/clean.py | nate26/alaska | 17d140f100f34a8dc100188d7a357a7378b2842e | [
"WTFPL"
] | 1 | 2020-06-28T20:23:20.000Z | 2020-06-28T20:23:20.000Z | #!/usr/bin/env python3
import os
import sys
import shutil
import hashlib
import glob, os.path
ROM_NAME = "test.gba"

SRC = './src'
GRAPHICS = './graphics'
ASSEMBLY = './assembly'
STRINGS = './strings'
BUILD = './build'


def PutFileNameInRightFormat(filename):
    """Convert a forward-slash path like ``src/foo/bar.c`` into the matching
    project directory path using Windows-style backslash separators.

    The first path segment selects (case-insensitively) one of the project
    root constants; unknown roots map to an empty base, matching the
    original branch-less fallthrough.
    """
    parts = filename.split('/')
    roots = {
        "SRC": SRC,
        "ASSEMBLY": ASSEMBLY,
        "GRAPHICS": GRAPHICS,
        "STRINGS": STRINGS,
    }
    base = roots.get(parts[0].upper(), "")
    return base + "".join("\\" + part for part in parts[1:])
# Absolute directory containing this script (used below to hop back out of
# ./graphics when cleaning the build tree).
dir_path = os.path.dirname(os.path.realpath(__file__))

# Always delete the previously built ROM, if one exists.
try:
    os.remove(ROM_NAME)
except OSError:
    pass

if len(sys.argv) > 1:
    # Try removing specific file only.
    if len(sys.argv) > 2 and sys.argv[1].upper() == 'FILE':
        try:
            filename = PutFileNameInRightFormat(sys.argv[2])
            print(filename)
            # Object files are named after the MD5 of their source path.
            m = hashlib.md5()
            m.update(filename.encode())
            newfilename = os.path.join(BUILD, m.hexdigest() + '.o')
            try:
                os.remove(newfilename)
            except FileNotFoundError:
                # Graphics sources build to an IMG_-prefixed object instead.
                os.remove(BUILD + "\\IMG_" + newfilename.split('\\')[1])
            print('"Build for ' + sys.argv[2] + '" removed successfully!')
            sys.exit(1)
        # Must not be a bare ``except:`` here — that also caught the
        # SystemExit raised by sys.exit(1) above, printing the error
        # message even after a successful removal.
        except Exception:
            print('Error: Could not remove build for file "' + sys.argv[2] + '".')
            sys.exit(1)
    # Don't remove generated repoints if the user only wants to remove the build.
    elif sys.argv[1].upper() != 'BUILD' and sys.argv[1].upper() != 'GRAPHICS':
        try:
            os.remove('generatedrepoints')
        except OSError:
            pass
else:
    try:
        os.remove('generatedrepoints')
    except OSError:
        pass

try:
    os.remove('offsets.ini')
except OSError:
    pass

if (len(sys.argv) > 1) and sys.argv[1].upper() == 'ALL':
    # 'all': wipe the entire build tree plus generated graphics headers.
    try:
        shutil.rmtree('build/')
    except OSError:
        pass
    os.chdir("graphics")
    for root, dirs, files in os.walk(".", topdown = False):
        for file in files:
            if file.endswith('.h'):
                os.remove(os.path.join(root, file))
elif (len(sys.argv) > 1) and sys.argv[1].upper() == 'GRAPHICS':
    # 'graphics': remove generated graphics headers and their build objects.
    os.chdir("graphics")
    for root, dirs, files in os.walk(".", topdown = False):
        for file in files:
            if file.endswith('.h'):
                os.remove(os.path.join(root, file))
    os.chdir(dir_path.split('\\scripts')[0])
    os.chdir("build")
    for root, dirs, files in os.walk(".", topdown = False):
        for file in files:
            if file.startswith('IMG_'):  # remove only the compiled image objects
                os.remove(os.path.join(root, file))
else:
    # Default: clear the build tree but keep compiled image objects.
    os.chdir("build")
    for root, dirs, files in os.walk(".", topdown = False):
        for file in files:
            if not file.startswith('IMG_'):  # don't remove image files
                os.remove(os.path.join(root, file))
print("Directory cleaned!")
import os
import sys
import shutil
import hashlib
import glob, os.path
ROM_NAME = "test.gba"
SRC = './src'
GRAPHICS = './graphics'
ASSEMBLY = './assembly'
STRINGS = './strings'
BUILD = './build'
def PutFileNameInRightFormat(filename):
filename = filename.split('/')
newFileName = ""
if filename[0].upper() == "SRC":
newFileName = SRC
elif filename[0].upper() == "ASSEMBLY":
newFileName = ASSEMBLY
elif filename[0].upper() == "GRAPHICS":
newFileName = GRAPHICS
elif filename[0].upper() == "STRINGS":
newFileName = STRINGS
for i in range(1, len(filename)):
newFileName += "\\" + filename[i]
return newFileName
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
os.remove(ROM_NAME)
except:
pass
if len(sys.argv) > 1:
if len(sys.argv) > 2 and sys.argv[1].upper() == 'FILE':
try:
filename = PutFileNameInRightFormat(sys.argv[2])
print(filename)
m = hashlib.md5()
m.update(filename.encode())
newfilename = os.path.join(BUILD, m.hexdigest() + '.o')
try:
os.remove(newfilename)
except FileNotFoundError:
os.remove(BUILD + "\\IMG_" + newfilename.split('\\')[1])
print('"Build for ' + sys.argv[2] + '" removed successfully!')
sys.exit(1)
except:
print('Error: Could not remove build for file "' + sys.argv[2] + '".')
sys.exit(1)
elif sys.argv[1].upper() != 'BUILD' and sys.argv[1].upper() != 'GRAPHICS':
try:
os.remove('generatedrepoints')
except:
pass
else:
try:
os.remove('generatedrepoints')
except:
pass
try:
os.remove('offsets.ini')
except:
pass
if (len(sys.argv) > 1) and sys.argv[1].upper() == 'ALL':
try:
shutil.rmtree('build/')
except:
pass
os.chdir("graphics")
for root, dirs, files in os.walk(".", topdown = False):
for file in files:
if file.endswith('.h'):
os.remove(os.path.join(root, file))
elif (len(sys.argv) > 1) and sys.argv[1].upper() == 'GRAPHICS':
os.chdir("graphics")
for root, dirs, files in os.walk(".", topdown = False):
for file in files:
if file.endswith('.h'):
os.remove(os.path.join(root, file))
os.chdir(dir_path.split('\\scripts')[0])
os.chdir("build")
for root, dirs, files in os.walk(".", topdown = False):
for file in files:
if file.startswith('IMG_'): #Don't remove image file
os.remove(os.path.join(root, file))
else:
os.chdir("build")
for root, dirs, files in os.walk(".", topdown = False):
for file in files:
if not file.startswith('IMG_'):
os.remove(os.path.join(root, file))
print("Directory cleaned!") | true | true |
f7f52d8403d6acac984479bd417c204fb5be5108 | 13,450 | py | Python | seq2seq-chatbot/vocabulary.py | JEMurcia/Seq2Seq_CornellMovie | a1e2b12089734ae2309aaa4408cd0bd63b9131a0 | [
"MIT"
] | 104 | 2018-03-28T20:30:25.000Z | 2022-02-18T19:43:21.000Z | seq2seq-chatbot/vocabulary.py | JEMurcia/Seq2Seq_CornellMovie | a1e2b12089734ae2309aaa4408cd0bd63b9131a0 | [
"MIT"
] | 37 | 2018-04-16T15:39:17.000Z | 2021-05-29T11:28:26.000Z | seq2seq-chatbot/vocabulary.py | JEMurcia/Seq2Seq_CornellMovie | a1e2b12089734ae2309aaa4408cd0bd63b9131a0 | [
"MIT"
] | 63 | 2018-05-18T09:52:20.000Z | 2021-07-26T08:11:17.000Z | """
Vocabulary class
"""
import re
class Vocabulary(object):
"""Class representing a chatbot vocabulary.
The Vocabulary class is responsible for encoding words into integers and decoding integers into words.
The number of times each word occurs in the source corpus is also tracked for visualization purposes.
Special tokens that exist in every vocabulary instance:
- PAD ("<PAD>"): The token used for extra sequence timesteps in a batch
- SOS ("<SOS>"): Start Of Sequence token is used as the input of the first decoder timestep
- EOS ("<EOS>"): End Of Sequence token is used to signal that the decoder should stop generating a sequence.
It is also used to separate conversation history (context) questions prepended to the current input question.
- OUT ("<OUT>"): If a word does not exist in the vocabulary, it is substituted with this token.
"""
# Default filenames used when persisting vocabularies to disk.
SHARED_VOCAB_FILENAME = "shared_vocab.tsv"
INPUT_VOCAB_FILENAME = "input_vocab.tsv"
OUTPUT_VOCAB_FILENAME = "output_vocab.tsv"

# Special tokens present in every vocabulary (see class docstring):
PAD = "<PAD>"  # padding for unused sequence timesteps in a batch
SOS = "<SOS>"  # start-of-sequence marker fed to the first decoder timestep
EOS = "<EOS>"  # end-of-sequence marker / conversation-context separator
OUT = "<OUT>"  # substitute for words missing from the vocabulary
special_tokens = [PAD, SOS, EOS, OUT]
def __init__(self, external_embeddings = None):
"""Initializes the Vocabulary instance in an non-compiled state.
Compile must be called before the Vocab instance can be used to integer encode/decode words.
Args:
external_embeddings: An optional 2d numpy array (matrix) containing external embedding vectors
"""
self._word2count = {}
self._words2int = {}
self._ints2word = {}
self._compiled = False
self.external_embeddings = external_embeddings
def load_word(self, word, word_int, count = 1):
"""Load a word and its integer encoding into the vocabulary instance.
Args:
word: The word to load.
word_int: The integer encoding of the word to load.
count: (Optional) The number of times the word occurs in the source corpus.
"""
self._validate_compile(False)
self._word2count[word] = count
self._words2int[word] = word_int
self._ints2word[word_int] = word
def add_words(self, words):
"""Add a sequence of words to the vocabulary instance.
If a word occurs more than once, its count will be incremented accordingly.
Args:
words: The sequence of words to add.
"""
self._validate_compile(False)
for i in range(len(words)):
word = words[i]
if word in self._word2count:
self._word2count[word] += 1
else:
self._word2count[word] = 1
def compile(self, vocab_threshold = 1, loading = False):
"""Compile the internal lookup dictionaries that enable words to be integer encoded / decoded.
Args:
vocab_threshold: Minimum number of times any word must appear within word_sequences in order to be included in the vocabulary.
This is useful for filtering out rarely used words in order to reduce the size of the vocabulary
(which consequently reduces the size of the model's embedding matrices & reduces the dimensionality of the output softmax)
This value is ignored if loading is True.
loading: Indicates if the vocabulary is being loaded from disk, in which case the compilation is already done and this method
only needs to set the flag to indicate as such.
"""
self._validate_compile(False)
if not loading:
#Add the special tokens to the lookup dictionaries
for i, special_token in enumerate(Vocabulary.special_tokens):
self._words2int[special_token] = i
self._ints2word[i] = special_token
#Add the words in _word2count to the lookup dictionaries if their count meets the threshold.
#Any words that don't meet the threshold are removed.
word_int = len(self._words2int)
for word, count in sorted(self._word2count.items()):
if count >= vocab_threshold:
self._words2int[word] = word_int
self._ints2word[word_int] = word
word_int += 1
else:
del self._word2count[word]
#Add the special tokens to _word2count so they have count values for saving to disk
self.add_words(Vocabulary.special_tokens)
#The Vocabulary instance may now be used for integer encoding / decoding
self._compiled = True
def size(self):
"""The size (number of words) of the Vocabulary
"""
self._validate_compile(True)
return len(self._word2count)
def word_exists(self, word):
"""Check if the given word exists in the vocabulary.
Args:
word: The word to check.
"""
self._validate_compile(True)
return word in self._words2int
def words2ints(self, words):
"""Encode a sequence of space delimited words into a sequence of integers
Args:
words: The sequence of space delimited words to encode
"""
return [self.word2int(w) for w in words.split()]
def word2int(self, word):
"""Encode a word into an integer
Args:
word: The word to encode
"""
self._validate_compile(True)
return self._words2int[word] if word in self._words2int else self.out_int()
def ints2words(self, words_ints, is_punct_discrete_word = False, capitalize_i = True):
"""Decode a sequence of integers into a sequence of space delimited words
Args:
words_ints: The sequence of integers to decode
is_punct_discrete_word: True to output a space before punctuation
False to place punctuation immediately after the end of the preceeding word (normal usage).
"""
words = ""
for i in words_ints:
word = self.int2word(i, capitalize_i)
if is_punct_discrete_word or word not in ['.', '!', '?']:
words += " "
words += word
words = words.strip()
return words
def int2word(self, word_int, capitalize_i = True):
"""Decode an integer into a word
Args:
words_int: The integer to decode
"""
self._validate_compile(True)
word = self._ints2word[word_int]
if capitalize_i and word == 'i':
word = 'I'
return word
def pad_int(self):
"""Get the integer encoding of the PAD token
"""
return self.word2int(Vocabulary.PAD)
def sos_int(self):
"""Get the integer encoding of the SOS token
"""
return self.word2int(Vocabulary.SOS)
def eos_int(self):
"""Get the integer encoding of the EOS token
"""
return self.word2int(Vocabulary.EOS)
def out_int(self):
"""Get the integer encoding of the OUT token
"""
return self.word2int(Vocabulary.OUT)
def save(self, filepath):
"""Saves the vocabulary to disk.
Args:
filepath: The path of the file to save to
"""
total_words = self.size()
with open(filepath, "w", encoding="utf-8") as file:
file.write('\t'.join(["word", "count"]))
file.write('\n')
for i in range(total_words):
word = self._ints2word[i]
count = self._word2count[word]
file.write('\t'.join([word, str(count)]))
if i < total_words - 1:
file.write('\n')
def _validate_compile(self, expected_status):
"""Validate that the vocabulary is compiled or not based on the needs of the attempted operation
Args:
expected_status: The compilation status expected by the attempted operation
"""
if self._compiled and not expected_status:
raise ValueError("This vocabulary instance has already been compiled.")
if not self._compiled and expected_status:
raise ValueError("This vocabulary instance has not been compiled yet.")
@staticmethod
def load(filepath):
"""Loads the vocabulary from disk.
Args:
filepath: The path of the file to load from
"""
vocabulary = Vocabulary()
with open(filepath, encoding="utf-8") as file:
for index, line in enumerate(file):
if index > 0: #Skip header line
word, count = line.split('\t')
word_int = index - 1
vocabulary.load_word(word, word_int, int(count))
vocabulary.compile(loading = True)
return vocabulary
@staticmethod
def clean_text(text, max_words = None, normalize_words = True):
"""Clean text to prepare for training and inference.
Clean by removing unsupported special characters & extra whitespace,
and by normalizing common word permutations (i.e. can't, cannot, can not)
Args:
text: the text to clean
max_words: maximum number of words to output (assuming words are separated by spaces).
any words beyond this limit are truncated.
Defaults to None (unlimited number of words)
normalize_words: True to replace word contractions with their full forms (e.g. i'm -> i am)
and then strip out any remaining apostrophes.
"""
text = text.lower()
text = re.sub(r"'+", "'", text)
if normalize_words:
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"there's", "there is", text)
text = re.sub(r"what's", "what is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"who's", "who is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"let's", "let us", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"shan't", "shall not", text)
text = re.sub(r"can't", "can not", text)
text = re.sub(r"cannot", "can not", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"'", "", text)
else:
text = re.sub(r"(\W)'", r"\1", text)
text = re.sub(r"'(\W)", r"\1", text)
text = re.sub(r"[()\"#/@;:<>{}`+=~|$&*%\[\]_]", "", text)
text = re.sub(r"[.]+", " . ", text)
text = re.sub(r"[!]+", " ! ", text)
text = re.sub(r"[?]+", " ? ", text)
text = re.sub(r"[,-]+", " ", text)
text = re.sub(r"[\t]+", " ", text)
text = re.sub(r" +", " ", text)
text = text.strip()
#Truncate words beyond the limit, if provided. Remove partial sentences from the end if punctuation exists within the limit.
if max_words is not None:
text_parts = text.split()
if len(text_parts) > max_words:
truncated_text_parts = text_parts[:max_words]
while len(truncated_text_parts) > 0 and not re.match("[.!?]", truncated_text_parts[-1]):
truncated_text_parts.pop(-1)
if len(truncated_text_parts) == 0:
truncated_text_parts = text_parts[:max_words]
text = " ".join(truncated_text_parts)
return text
@staticmethod
def auto_punctuate(text):
"""Automatically apply punctuation to text that does not end with any punctuation marks.
Args:
text: the text to apply punctuation to.
"""
text = text.strip()
if not (text.endswith(".") or text.endswith("?") or text.endswith("!") or text.startswith("--")):
tmp = re.sub(r"'", "", text.lower())
if (tmp.startswith("who") or tmp.startswith("what") or tmp.startswith("when") or
tmp.startswith("where") or tmp.startswith("why") or tmp.startswith("how") or
tmp.endswith("who") or tmp.endswith("what") or tmp.endswith("when") or
tmp.endswith("where") or tmp.endswith("why") or tmp.endswith("how") or
tmp.startswith("are") or tmp.startswith("will") or tmp.startswith("wont") or tmp.startswith("can")):
text = "{}?".format(text)
else:
text = "{}.".format(text)
return text | 40.881459 | 140 | 0.560967 | import re
class Vocabulary(object):
SHARED_VOCAB_FILENAME = "shared_vocab.tsv"
INPUT_VOCAB_FILENAME = "input_vocab.tsv"
OUTPUT_VOCAB_FILENAME = "output_vocab.tsv"
PAD = "<PAD>"
SOS = "<SOS>"
EOS = "<EOS>"
OUT = "<OUT>"
special_tokens = [PAD, SOS, EOS, OUT]
def __init__(self, external_embeddings = None):
self._word2count = {}
self._words2int = {}
self._ints2word = {}
self._compiled = False
self.external_embeddings = external_embeddings
def load_word(self, word, word_int, count = 1):
self._validate_compile(False)
self._word2count[word] = count
self._words2int[word] = word_int
self._ints2word[word_int] = word
def add_words(self, words):
self._validate_compile(False)
for i in range(len(words)):
word = words[i]
if word in self._word2count:
self._word2count[word] += 1
else:
self._word2count[word] = 1
def compile(self, vocab_threshold = 1, loading = False):
self._validate_compile(False)
if not loading:
for i, special_token in enumerate(Vocabulary.special_tokens):
self._words2int[special_token] = i
self._ints2word[i] = special_token
word_int = len(self._words2int)
for word, count in sorted(self._word2count.items()):
if count >= vocab_threshold:
self._words2int[word] = word_int
self._ints2word[word_int] = word
word_int += 1
else:
del self._word2count[word]
#Add the special tokens to _word2count so they have count values for saving to disk
self.add_words(Vocabulary.special_tokens)
#The Vocabulary instance may now be used for integer encoding / decoding
self._compiled = True
def size(self):
self._validate_compile(True)
return len(self._word2count)
def word_exists(self, word):
self._validate_compile(True)
return word in self._words2int
def words2ints(self, words):
return [self.word2int(w) for w in words.split()]
def word2int(self, word):
self._validate_compile(True)
return self._words2int[word] if word in self._words2int else self.out_int()
def ints2words(self, words_ints, is_punct_discrete_word = False, capitalize_i = True):
words = ""
for i in words_ints:
word = self.int2word(i, capitalize_i)
if is_punct_discrete_word or word not in ['.', '!', '?']:
words += " "
words += word
words = words.strip()
return words
def int2word(self, word_int, capitalize_i = True):
self._validate_compile(True)
word = self._ints2word[word_int]
if capitalize_i and word == 'i':
word = 'I'
return word
def pad_int(self):
return self.word2int(Vocabulary.PAD)
def sos_int(self):
return self.word2int(Vocabulary.SOS)
def eos_int(self):
return self.word2int(Vocabulary.EOS)
def out_int(self):
return self.word2int(Vocabulary.OUT)
def save(self, filepath):
total_words = self.size()
with open(filepath, "w", encoding="utf-8") as file:
file.write('\t'.join(["word", "count"]))
file.write('\n')
for i in range(total_words):
word = self._ints2word[i]
count = self._word2count[word]
file.write('\t'.join([word, str(count)]))
if i < total_words - 1:
file.write('\n')
def _validate_compile(self, expected_status):
if self._compiled and not expected_status:
raise ValueError("This vocabulary instance has already been compiled.")
if not self._compiled and expected_status:
raise ValueError("This vocabulary instance has not been compiled yet.")
@staticmethod
def load(filepath):
vocabulary = Vocabulary()
with open(filepath, encoding="utf-8") as file:
for index, line in enumerate(file):
if index > 0: #Skip header line
word, count = line.split('\t')
word_int = index - 1
vocabulary.load_word(word, word_int, int(count))
vocabulary.compile(loading = True)
return vocabulary
@staticmethod
def clean_text(text, max_words = None, normalize_words = True):
text = text.lower()
text = re.sub(r"'+", "'", text)
if normalize_words:
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"there's", "there is", text)
text = re.sub(r"what's", "what is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"who's", "who is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"let's", "let us", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"shan't", "shall not", text)
text = re.sub(r"can't", "can not", text)
text = re.sub(r"cannot", "can not", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"'", "", text)
else:
text = re.sub(r"(\W)'", r"\1", text)
text = re.sub(r"'(\W)", r"\1", text)
text = re.sub(r"[()\"#/@;:<>{}`+=~|$&*%\[\]_]", "", text)
text = re.sub(r"[.]+", " . ", text)
text = re.sub(r"[!]+", " ! ", text)
text = re.sub(r"[?]+", " ? ", text)
text = re.sub(r"[,-]+", " ", text)
text = re.sub(r"[\t]+", " ", text)
text = re.sub(r" +", " ", text)
text = text.strip()
#Truncate words beyond the limit, if provided. Remove partial sentences from the end if punctuation exists within the limit.
if max_words is not None:
text_parts = text.split()
if len(text_parts) > max_words:
truncated_text_parts = text_parts[:max_words]
while len(truncated_text_parts) > 0 and not re.match("[.!?]", truncated_text_parts[-1]):
truncated_text_parts.pop(-1)
if len(truncated_text_parts) == 0:
truncated_text_parts = text_parts[:max_words]
text = " ".join(truncated_text_parts)
return text
@staticmethod
def auto_punctuate(text):
text = text.strip()
if not (text.endswith(".") or text.endswith("?") or text.endswith("!") or text.startswith("--")):
tmp = re.sub(r"'", "", text.lower())
if (tmp.startswith("who") or tmp.startswith("what") or tmp.startswith("when") or
tmp.startswith("where") or tmp.startswith("why") or tmp.startswith("how") or
tmp.endswith("who") or tmp.endswith("what") or tmp.endswith("when") or
tmp.endswith("where") or tmp.endswith("why") or tmp.endswith("how") or
tmp.startswith("are") or tmp.startswith("will") or tmp.startswith("wont") or tmp.startswith("can")):
text = "{}?".format(text)
else:
text = "{}.".format(text)
return text | true | true |
f7f52f14c333b0ad854d4c6557244d346036e874 | 1,432 | py | Python | tests/unit/scheduler/test_client.py | jamespfennell/realtimerail | 352dd7d185d3501d28276476e1390d3288735690 | [
"MIT"
] | 10 | 2018-10-25T13:07:42.000Z | 2022-02-08T20:49:07.000Z | tests/unit/scheduler/test_client.py | jamespfennell/realtimerail | 352dd7d185d3501d28276476e1390d3288735690 | [
"MIT"
] | 80 | 2019-04-06T23:01:44.000Z | 2022-02-05T23:35:54.000Z | tests/unit/scheduler/test_client.py | jamespfennell/realtimerail | 352dd7d185d3501d28276476e1390d3288735690 | [
"MIT"
] | 3 | 2021-05-07T16:43:39.000Z | 2021-07-15T18:06:07.000Z | import unittest.mock as mock
import pytest
import requests
from transiter.scheduler import client
@pytest.fixture
def scheduler_post_response(monkeypatch):
    """Patch requests.post to return a canned mock response.

    Yields the mock so tests can program raise_for_status / attributes.
    """
    response = mock.Mock()
    monkeypatch.setattr(requests, "post", lambda *args, **kwargs: response)
    return response
@pytest.fixture
def scheduler_get_response(monkeypatch):
    """Patch requests.get to return a canned mock response.

    Yields the mock so tests can program raise_for_status / attributes.
    """
    response = mock.Mock()
    monkeypatch.setattr(requests, "get", lambda *args, **kwargs: response)
    return response
def test_ping__pass(scheduler_get_response):
    """ping() returns the scheduler's response text parsed as an integer on success."""
    scheduler_get_response.raise_for_status = lambda: None
    scheduler_get_response.text = "3"
    assert client.ping() == 3
def test_ping__fail(scheduler_get_response):
    """ping() returns None when the HTTP request to the scheduler fails."""
    scheduler_get_response.raise_for_status.side_effect = requests.RequestException()
    assert client.ping() is None
def test_refresh_tasks__pass(scheduler_post_response):
    """refresh_tasks() returns True when the scheduler responds successfully."""
    scheduler_post_response.raise_for_status = lambda: None
    assert client.refresh_tasks() is True
def test_refresh_tasks__fail(scheduler_post_response):
    """refresh_tasks() returns False when the HTTP request fails."""
    scheduler_post_response.raise_for_status.side_effect = requests.RequestException()
    assert client.refresh_tasks() is False
def test_refresh_tasks__do_not_swallow_all_exceptions(scheduler_post_response):
    """refresh_tasks() must only swallow requests errors, not arbitrary exceptions."""
    scheduler_post_response.raise_for_status.side_effect = ValueError()
    with pytest.raises(ValueError):
        client.refresh_tasks()
| 23.47541 | 86 | 0.770251 | import unittest.mock as mock
import pytest
import requests
from transiter.scheduler import client
@pytest.fixture
def scheduler_post_response(monkeypatch):
response = mock.Mock()
def post(*args, **kwargs):
return response
monkeypatch.setattr(requests, "post", post)
return response
@pytest.fixture
def scheduler_get_response(monkeypatch):
response = mock.Mock()
def get(*args, **kwargs):
return response
monkeypatch.setattr(requests, "get", get)
return response
def test_ping__pass(scheduler_get_response):
scheduler_get_response.raise_for_status = lambda: None
scheduler_get_response.text = "3"
assert client.ping() == 3
def test_ping__fail(scheduler_get_response):
scheduler_get_response.raise_for_status.side_effect = requests.RequestException()
assert client.ping() is None
def test_refresh_tasks__pass(scheduler_post_response):
scheduler_post_response.raise_for_status = lambda: None
assert client.refresh_tasks() is True
def test_refresh_tasks__fail(scheduler_post_response):
scheduler_post_response.raise_for_status.side_effect = requests.RequestException()
assert client.refresh_tasks() is False
def test_refresh_tasks__do_not_swallow_all_exceptions(scheduler_post_response):
scheduler_post_response.raise_for_status.side_effect = ValueError()
with pytest.raises(ValueError):
client.refresh_tasks()
| true | true |
f7f52f6ed1cada91ff69e74e4d22264d5c654824 | 3,184 | py | Python | Ejercicios/Ejercicio_6/grafica/scene_graph.py | ElTapia/computacion-grafica | 8d6ec5e1bd2426093f253da9a197a7b74bb656a9 | [
"MIT"
] | null | null | null | Ejercicios/Ejercicio_6/grafica/scene_graph.py | ElTapia/computacion-grafica | 8d6ec5e1bd2426093f253da9a197a7b74bb656a9 | [
"MIT"
] | null | null | null | Ejercicios/Ejercicio_6/grafica/scene_graph.py | ElTapia/computacion-grafica | 8d6ec5e1bd2426093f253da9a197a7b74bb656a9 | [
"MIT"
] | null | null | null | # coding=utf-8
"""A simple scene graph class and functionality"""
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy as np
import grafica.transformations as tr
import grafica.gpu_shape as gs
__author__ = "Daniel Calderon"
__license__ = "MIT"
class SceneGraphNode:
    """A node of a simple scene graph.

    Interior nodes group children under a common local transform, while
    leaves hold a basic figure (GPUShape). To identify each node properly,
    it MUST have a unique name.
    """

    def __init__(self, name):
        # Unique identifier used by the find* helpers below.
        self.name = name
        # Local transform applied to this node and everything beneath it.
        self.transform = tr.identity()
        # Children are SceneGraphNode instances or GPUShape leaves.
        self.childs = []

    def clear(self):
        """Recursively free the GPU memory held by all descendants."""
        for c in self.childs:
            c.clear()
def findNode(node, name):
    """Depth-first search for the node called `name`.

    Args:
        node: root of the (sub)graph to search.
        name: unique name of the requested node.

    Returns:
        The matching SceneGraphNode, or None if `name` is not reachable
        from `node`.
    """
    # GPUShapes are unnamed leaves, so the search dead-ends here.
    if isinstance(node, gs.GPUShape):
        return None

    # This is the requested node
    if node.name == name:
        return node

    # All childs are checked for the requested name
    for child in node.childs:
        foundNode = findNode(child, name)
        # Idiom fix: compare against None with `is not`, not `!=`.
        if foundNode is not None:
            return foundNode

    # No child of this node had the requested name
    return None
def findTransform(node, name, parentTransform=tr.identity()):
    """Accumulate transforms along the path to the node called `name`.

    Returns the composed 4x4 transform (numpy array) of that node with
    respect to the root, or None if the name is not found.
    """
    # GPUShapes are unnamed leaves, so the search dead-ends here.
    if isinstance(node, gs.GPUShape):
        return None

    accumulated = np.matmul(parentTransform, node.transform)

    if node.name == name:
        # This is the requested node.
        return accumulated

    # Keep searching below, carrying the accumulated transform.
    for child in node.childs:
        result = findTransform(child, name, accumulated)
        if isinstance(result, (np.ndarray, np.generic)):
            return result

    # No descendant of this node had the requested name.
    return None
def findPosition(node, name, parentTransform=tr.identity()):
    """Return the homogeneous world-space position (4x1 column vector) of
    the node called `name`, or None if it is not found."""
    transform = findTransform(node, name, parentTransform)
    if not isinstance(transform, (np.ndarray, np.generic)):
        return None
    # The node's position is its accumulated transform applied to the local origin.
    origin = np.array([[0, 0, 0, 1]], dtype=np.float32).T
    return np.matmul(transform, origin)
def drawSceneGraphNode(node, pipeline, transformName, parentTransform=tr.identity(), mode = GL_TRIANGLES):
    """Recursively draw the scene graph rooted at `node` with `pipeline`.

    `transformName` is the uniform receiving each leaf's composed transform.
    """
    assert(isinstance(node, SceneGraphNode))

    # Compose the transformations accumulated along this path.
    composed = np.matmul(parentTransform, node.transform)

    children = node.childs
    if len(children) == 1 and isinstance(children[0], gs.GPUShape):
        # Leaf: upload the composed transform and issue the draw call.
        glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, transformName), 1, GL_TRUE, composed)
        pipeline.drawCall(children[0], mode)
    else:
        # Interior node: every child MUST be a SceneGraphNode, so recurse.
        for child in children:
            drawSceneGraphNode(child, pipeline, transformName, composed, mode)
| 29.757009 | 114 | 0.652324 |
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy as np
import grafica.transformations as tr
import grafica.gpu_shape as gs
__author__ = "Daniel Calderon"
__license__ = "MIT"
class SceneGraphNode:
def __init__(self, name):
self.name = name
self.transform = tr.identity()
self.childs = []
def clear(self):
for child in self.childs:
child.clear()
def findNode(node, name):
if isinstance(node, gs.GPUShape):
return None
if node.name == name:
return node
for child in node.childs:
foundNode = findNode(child, name)
if foundNode != None:
return foundNode
return None
def findTransform(node, name, parentTransform=tr.identity()):
if isinstance(node, gs.GPUShape):
return None
newTransform = np.matmul(parentTransform, node.transform)
if node.name == name:
return newTransform
for child in node.childs:
foundTransform = findTransform(child, name, newTransform)
if isinstance(foundTransform, (np.ndarray, np.generic) ):
return foundTransform
return None
def findPosition(node, name, parentTransform=tr.identity()):
foundTransform = findTransform(node, name, parentTransform)
if isinstance(foundTransform, (np.ndarray, np.generic) ):
zero = np.array([[0,0,0,1]], dtype=np.float32).T
foundPosition = np.matmul(foundTransform, zero)
return foundPosition
return None
def drawSceneGraphNode(node, pipeline, transformName, parentTransform=tr.identity(), mode = GL_TRIANGLES):
assert(isinstance(node, SceneGraphNode))
newTransform = np.matmul(parentTransform, node.transform)
if len(node.childs) == 1 and isinstance(node.childs[0], gs.GPUShape):
leaf = node.childs[0]
glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, transformName), 1, GL_TRUE, newTransform)
pipeline.drawCall(leaf, mode)
else:
for child in node.childs:
drawSceneGraphNode(child, pipeline, transformName, newTransform, mode)
| true | true |
f7f52f9e9e23a95d5592e5a2ba4309d6b41a76c5 | 8,102 | py | Python | test/functional/test_framework/blocktools.py | zebnacoin/zebna | f383ab60d5226589a8b58ba0911645e02ab3e4e9 | [
"MIT"
] | 1 | 2021-02-06T22:18:29.000Z | 2021-02-06T22:18:29.000Z | test/functional/test_framework/blocktools.py | zebnacoin/zebna | f383ab60d5226589a8b58ba0911645e02ab3e4e9 | [
"MIT"
] | 1 | 2021-02-07T00:57:29.000Z | 2021-02-07T10:22:29.000Z | test/functional/test_framework/blocktools.py | zebnacoin/zebna | f383ab60d5226589a8b58ba0911645e02ab3e4e9 | [
"MIT"
] | 1 | 2021-02-26T22:29:45.000Z | 2021-02-26T22:29:45.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex,
ToHex,
bytes_to_hex_str,
hash256,
hex_str_to_bytes,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
from .script import (
CScript,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
from io import BytesIO
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def create_block(hashprev, coinbase, ntime=None):
    """Create a block (with regtest difficulty).

    Args:
        hashprev: hash of the previous block.
        coinbase: coinbase transaction to place first in the block.
        ntime: optional block time; defaults to ten minutes from now.
    """
    block = CBlock()
    if ntime is None:
        import time
        ntime = int(time.time() + 600)
    block.nTime = ntime
    block.hashPrevBlock = hashprev
    # Difficulty retargeting is disabled in REGTEST chainparams.
    block.nBits = 0x207fffff
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block
def get_witness_script(witness_root, witness_nonce):
    """Build the coinbase OP_RETURN script that commits to the witness
    merkle root (BIP141 witness commitment)."""
    commitment_hash = hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce))
    commitment = uint256_from_str(commitment_hash)
    return CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(commitment)])
def add_witness_commitment(block, nonce=0):
    """Add a witness commitment to the block's coinbase transaction.

    According to BIP141, blocks with witness rules active must commit to the
    hash of all in-block transactions including witness."""
    # First calculate the merkle root of the block's
    # transactions, with witnesses.
    witness_nonce = nonce
    witness_root = block.calc_witness_merkle_root()
    # witness_nonce should go to coinbase witness.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]

    # witness commitment is the last OP_RETURN output in coinbase
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
    # Recompute the coinbase txid, the (non-witness) merkle root, and the block hash
    # since the coinbase transaction was just modified.
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
def serialize_script_num(value):
    """Serialize an integer as a CScriptNum: minimal little-endian
    magnitude with the sign carried in the high bit of the last byte."""
    if value == 0:
        return bytearray(0)
    negative = value < 0
    magnitude = abs(value)
    encoded = bytearray()
    while magnitude:
        encoded.append(magnitude & 0xff)
        magnitude >>= 8
    # If the magnitude already uses the high bit of the top byte, an extra
    # sign byte is appended; otherwise the sign is folded into the top byte.
    if encoded[-1] & 0x80:
        encoded.append(0x80 if negative else 0x00)
    elif negative:
        encoded[-1] |= 0x80
    return encoded
def create_coinbase(height, pubkey=None):
    """Create a coinbase transaction, assuming no miner fees.

    If pubkey is passed in, the coinbase output will be a P2PK output;
    otherwise an anyone-can-spend output."""
    coinbase = CTransaction()
    # BIP34: the block height is serialized into the coinbase scriptSig.
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
                              ser_string(serialize_script_num(height)), 0xffffffff))
    output = CTxOut()
    output.nValue = 70 * COIN
    # Regtest halves the subsidy every 150 blocks.
    output.nValue >>= int(height / 150)
    if pubkey is None:
        output.scriptPubKey = CScript([OP_TRUE])
    else:
        output.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
    coinbase.vout = [output]
    coinbase.calc_sha256()
    return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
    """Return a one-input, one-output transaction object spending the
    prevtx's n-th output for the given amount.

    scriptPubKey and scriptSig are optional; the default output is
    anyone-can-spend."""
    assert n < len(prevtx.vout)
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
    tx.vout.append(CTxOut(amount, script_pub_key))
    tx.calc_sha256()
    return tx
def create_transaction(node, txid, to_address, *, amount):
    """Return a signed CTransaction spending the first output of txid.

    The node must be able to sign for the output being spent, and must
    not be running multiple wallets."""
    raw = create_raw_transaction(node, txid, to_address, amount=amount)
    tx = CTransaction()
    tx.deserialize(BytesIO(hex_str_to_bytes(raw)))
    return tx
def create_raw_transaction(node, txid, to_address, *, amount):
    """Return a raw signed transaction (hex) spending the first output of txid.

    The node must be able to sign for the output being spent, and must
    not be running multiple wallets."""
    unsigned = node.createrawtransaction(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
    signed = node.signrawtransactionwithwallet(unsigned)
    assert_equal(signed["complete"], True)
    return signed['hex']
def get_legacy_sigopcount_block(block, accurate=True):
    """Count legacy sigops over every transaction in the block."""
    return sum(get_legacy_sigopcount_tx(tx, accurate) for tx in block.vtx)
def get_legacy_sigopcount_tx(tx, accurate=True):
    """Count legacy sigops in a transaction's outputs and inputs."""
    count = sum(out.scriptPubKey.GetSigOpCount(accurate) for out in tx.vout)
    # scriptSig might be of type bytes, so normalize to CScript before counting.
    count += sum(CScript(txin.scriptSig).GetSigOpCount(accurate) for txin in tx.vin)
    return count
def witness_script(use_p2wsh, pubkey):
    """Create a scriptPubKey for a pay-to-witness TxOut.

    This is either a P2WPKH output for the given pubkey, or a P2WSH output
    of a 1-of-1 multisig for the given pubkey. Returns the hex encoding of
    the scriptPubKey."""
    if use_p2wsh:
        # P2WSH: the version-0 program is the sha256 of the 1-of-1 multisig script.
        redeem = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
        program = sha256(redeem)
    else:
        # P2WPKH: the version-0 program is the hash160 of the pubkey.
        program = hash160(hex_str_to_bytes(pubkey))
    return bytes_to_hex_str(CScript([OP_0, program]))
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
    """Return a transaction (in hex) that spends the given utxo to a segwit output.

    Optionally wrap the segwit output using P2SH."""
    if use_p2wsh:
        witprog = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
        addr = script_to_p2sh_p2wsh(witprog) if encode_p2sh else script_to_p2wsh(witprog)
    else:
        addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
    if not encode_p2sh:
        # Sanity check: the derived address must match the expected witness scriptPubKey.
        assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
    return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
    """Create and broadcast a transaction spending a given utxo to a segwit output.

    The output corresponds to the given pubkey: use_p2wsh determines whether to
    use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.

    sign=True will have the given node sign the transaction.
    insert_redeem_script will be added to the scriptSig, if given.

    Returns the txid reported by sendrawtransaction."""
    tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
    if sign:
        signed = node.signrawtransactionwithwallet(tx_to_witness)
        # Bug fix: the original asserted `len(["errors"]) == 0`, which is always
        # False (a one-element list), so only the `"errors" not in signed` half
        # ever held. Check the actual errors list returned by the RPC.
        assert "errors" not in signed or len(signed["errors"]) == 0
        return node.sendrawtransaction(signed["hex"])
    if insert_redeem_script:
        tx = FromHex(CTransaction(), tx_to_witness)
        tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
        tx_to_witness = ToHex(tx)
    return node.sendrawtransaction(tx_to_witness)
| 36.827273 | 108 | 0.701185 |
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex,
ToHex,
bytes_to_hex_str,
hash256,
hex_str_to_bytes,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
from .script import (
CScript,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
from io import BytesIO
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def create_block(hashprev, coinbase, ntime=None):
    """Build a block on top of *hashprev* containing only *coinbase*.

    If *ntime* is None the block time is set ~10 minutes in the future.
    Returns the block with merkle root and sha256 already computed.
    """
    block = CBlock()
    if ntime is None:
        import time
        block.nTime = int(time.time() + 600)
    else:
        block.nTime = ntime
    block.hashPrevBlock = hashprev
    # Minimum-difficulty target (regtest).
    block.nBits = 0x207fffff
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block
def get_witness_script(witness_root, witness_nonce):
    """Return the OP_RETURN script carrying the witness commitment.

    The commitment is hash256(witness merkle root || nonce), prefixed by the
    BIP 141 header bytes.
    """
    witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
    output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
    return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
    """Add a segwit commitment to *block*'s coinbase and refresh its hashes."""
    # The witness merkle root commits to all transactions, with witnesses.
    witness_nonce = nonce
    witness_root = block.calc_witness_merkle_root()
    # witness_nonce should go to coinbase witness.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
    # witness commitment is the last OP_RETURN output in coinbase
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
def serialize_script_num(value):
    """Serialize an integer as a CScriptNum: minimal little-endian bytes
    with the sign carried in the high bit of the final byte."""
    result = bytearray()
    if value == 0:
        return result
    negative = value < 0
    magnitude = abs(value)
    while magnitude:
        result.append(magnitude & 0xff)
        magnitude >>= 8
    # If the top byte already uses its high bit, a dedicated sign byte is
    # appended; otherwise the sign is folded into the top byte itself.
    if result[-1] & 0x80:
        result.append(0x80 if negative else 0)
    elif negative:
        result[-1] |= 0x80
    return result
def create_coinbase(height, pubkey=None):
    """Create a coinbase transaction for a block at *height*.

    Pays the (halving-adjusted) subsidy to *pubkey* via P2PK if given,
    otherwise to an anyone-can-spend OP_TRUE output.
    """
    coinbase = CTransaction()
    # Coinbase input: null prevout; scriptSig encodes the block height (BIP 34).
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
                        ser_string(serialize_script_num(height)), 0xffffffff))
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = 70 * COIN
    halvings = int(height / 150)  # regtest
    coinbaseoutput.nValue >>= halvings
    if (pubkey is not None):
        coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
    coinbase.vout = [coinbaseoutput]
    coinbase.calc_sha256()
    return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
    """Return a 1-input/1-output transaction spending output *n* of *prevtx*.

    The returned transaction is unsigned beyond the provided *script_sig*.
    """
    tx = CTransaction()
    assert(n < len(prevtx.vout))
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
    tx.vout.append(CTxOut(amount, script_pub_key))
    tx.calc_sha256()
    return tx
def create_transaction(node, txid, to_address, *, amount):
    """Return a signed CTransaction spending vout 0 of *txid* to *to_address*."""
    raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
    tx = CTransaction()
    tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx)))
    return tx
def create_raw_transaction(node, txid, to_address, *, amount):
    """Have *node* build and wallet-sign a raw tx spending vout 0 of *txid*.

    Returns the signed transaction hex; asserts that signing completed.
    """
    rawtx = node.createrawtransaction(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
    signresult = node.signrawtransactionwithwallet(rawtx)
    assert_equal(signresult["complete"], True)
    return signresult['hex']
def get_legacy_sigopcount_block(block, accurate=True):
    """Return the total legacy sigop count of every transaction in *block*."""
    return sum(get_legacy_sigopcount_tx(tx, accurate) for tx in block.vtx)
def get_legacy_sigopcount_tx(tx, accurate=True):
    """Return the legacy sigop count of a single transaction."""
    output_sigops = sum(txout.scriptPubKey.GetSigOpCount(accurate) for txout in tx.vout)
    # scriptSig might be of type bytes, so convert to CScript for the moment
    input_sigops = sum(CScript(txin.scriptSig).GetSigOpCount(accurate) for txin in tx.vin)
    return output_sigops + input_sigops
def witness_script(use_p2wsh, pubkey):
    """Return the hex scriptPubKey of the native segwit output for *pubkey*.

    use_p2wsh=True yields a P2WSH wrapping a 1-of-1 multisig; otherwise a
    P2WPKH script is produced.
    """
    if not use_p2wsh:
        # P2WPKH instead
        pubkeyhash = hash160(hex_str_to_bytes(pubkey))
        pkscript = CScript([OP_0, pubkeyhash])
    else:
        # 1-of-1 multisig
        witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
        scripthash = sha256(witness_program)
        pkscript = CScript([OP_0, scripthash])
    return bytes_to_hex_str(pkscript)
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
    """Return an unsigned raw tx sending *utxo* to a segwit output for *pubkey*.

    use_p2wsh selects P2WSH (1-of-1 multisig) vs P2WPKH; encode_p2sh wraps the
    output in P2SH.
    """
    if use_p2wsh:
        program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
        addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
    else:
        addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
    if not encode_p2sh:
        # Sanity check: the node derives the same scriptPubKey we expect.
        assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
    return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
    """Create a transaction spending a given utxo to a segwit output.

    The output corresponds to the given pubkey: use_p2wsh determines whether to
    use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
    sign=True will have the given node sign the transaction.
    insert_redeem_script will be added to the scriptSig, if given.
    Returns the txid reported by sendrawtransaction.
    """
    tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
    if sign:
        signed = node.signrawtransactionwithwallet(tx_to_witness)
        # Bug fix: the original tested len(["errors"]) — a one-element literal
        # list, always length 1 — so the "errors list is empty" arm could never
        # hold. Check the actual errors list returned by the node.
        assert "errors" not in signed or len(signed["errors"]) == 0
        return node.sendrawtransaction(signed["hex"])
    if insert_redeem_script:
        # Splice the redeem script into the (unsigned) input's scriptSig.
        tx = FromHex(CTransaction(), tx_to_witness)
        tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
        tx_to_witness = ToHex(tx)
    return node.sendrawtransaction(tx_to_witness)
| true | true |
f7f52fca710827f4f903924b5af8149a2a3547e6 | 3,889 | py | Python | week05_explore/q_learning_agent.py | RomaKoks/Practical_RL | ddcb71f9e45d4e08fe04da6404cb0e312681b615 | [
"Unlicense"
] | null | null | null | week05_explore/q_learning_agent.py | RomaKoks/Practical_RL | ddcb71f9e45d4e08fe04da6404cb0e312681b615 | [
"Unlicense"
] | null | null | null | week05_explore/q_learning_agent.py | RomaKoks/Practical_RL | ddcb71f9e45d4e08fe04da6404cb0e312681b615 | [
"Unlicense"
] | null | null | null | from collections import defaultdict
import random
import math
import numpy as np
class QLearningAgent:
    """Tabular Q-learning agent with epsilon-greedy exploration.

    Based on https://inst.eecs.berkeley.edu/~cs188/sp19/projects.html

    Instance variables you have access to
      - self.epsilon (exploration prob)
      - self.alpha (learning rate)
      - self.discount (discount rate aka gamma)

    Functions you should use
      - self.get_legal_actions(state) {state, hashable -> list of actions, each is hashable}
        which returns legal actions for a state
      - self.get_qvalue(state, action) which returns Q(state, action)
      - self.set_qvalue(state, action, value) which sets Q(state, action) := value

    Note: please avoid using self._qvalues directly; use
    get_qvalue/set_qvalue instead.
    """

    def __init__(self, alpha, epsilon, discount, get_legal_actions):
        self.get_legal_actions = get_legal_actions
        # Q-table: state -> action -> value; unseen pairs default to 0.
        self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))
        self.alpha = alpha
        self.epsilon = epsilon
        self.discount = discount

    def get_qvalue(self, state, action):
        """Return Q(state, action)."""
        return self._qvalues[state][action]

    def set_qvalue(self, state, action, value):
        """Set Q(state, action) := value."""
        self._qvalues[state][action] = value

    def get_value(self, state):
        """Return V(s) = max_a Q(s, a) over legal actions (0.0 if none).

        Q-values can be negative, so the maximum is taken over the actual
        values rather than clamped at zero.
        """
        possible_actions = self.get_legal_actions(state)
        if not possible_actions:
            return 0.0
        return max(self.get_qvalue(state, a) for a in possible_actions)

    def update(self, state, action, reward, next_state, done):
        """TD update: Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))."""
        gamma = self.discount
        learning_rate = self.alpha
        # (1 - done) zeroes the bootstrapped future term on terminal steps.
        target = reward + gamma * (1 - done) * self.get_value(next_state)
        new_q = (1 - learning_rate) * self.get_qvalue(state, action) + learning_rate * target
        self.set_qvalue(state, action, new_q)

    def get_best_action(self, state):
        """Return the greedy action for *state*, or None if no legal actions.

        Ties are broken in favor of the first maximizing action returned by
        get_legal_actions (same tie-break as np.argmax in the original).
        """
        possible_actions = self.get_legal_actions(state)
        if not possible_actions:
            return None
        return max(possible_actions, key=lambda a: self.get_qvalue(state, a))

    def get_action(self, state):
        """Epsilon-greedy policy: with probability epsilon take a random legal
        action, otherwise the greedy one. Returns None for states with no
        legal actions."""
        possible_actions = self.get_legal_actions(state)
        if not possible_actions:
            return None
        # Removed a dead `action = None` assignment that was never read.
        if np.random.rand() < self.epsilon:
            return np.random.choice(possible_actions)
        return self.get_best_action(state)
import random
import math
import numpy as np
class QLearningAgent:
    """Tabular Q-learning agent with epsilon-greedy exploration."""

    def __init__(self, alpha, epsilon, discount, get_legal_actions):
        # get_legal_actions: state -> list of hashable actions legal there.
        self.get_legal_actions = get_legal_actions
        # Q-table: state -> action -> value; unseen pairs read as 0.
        self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))
        self.alpha = alpha
        self.epsilon = epsilon
        self.discount = discount

    def get_qvalue(self, state, action):
        """Return Q(state, action)."""
        return self._qvalues[state][action]

    def set_qvalue(self, state, action, value):
        """Set Q(state, action) := value."""
        self._qvalues[state][action] = value

    def get_value(self, state):
        """V(s) = max over legal actions of Q(s, a); 0.0 when none exist."""
        actions = self.get_legal_actions(state)
        if not actions:
            return 0.0
        return max(self.get_qvalue(state, act) for act in actions)

    def update(self, state, action, reward, next_state, done):
        """Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * (1-done) * V(s'))."""
        target = reward + self.discount * (1 - done) * self.get_value(next_state)
        blended = (1 - self.alpha) * self.get_qvalue(state, action) + self.alpha * target
        self.set_qvalue(state, action, blended)

    def get_best_action(self, state):
        """Greedy action (first maximizer), or None when no action is legal."""
        actions = self.get_legal_actions(state)
        if not actions:
            return None
        best_index = np.argmax([self.get_qvalue(state, act) for act in actions])
        return actions[best_index]

    def get_action(self, state):
        """Epsilon-greedy choice: explore with prob. epsilon, else act greedily."""
        actions = self.get_legal_actions(state)
        if not actions:
            return None
        if np.random.rand() < self.epsilon:
            return np.random.choice(actions)
        return self.get_best_action(state)
f7f530b36d4517038153015d1de8d0b3e2fa7e50 | 748 | py | Python | src/framework/consts.py | Alex-T13/stc_13-whithout-django-old | 2cbd3d84b8c602d103ee60abcd68cdec33f88482 | [
"MIT"
] | null | null | null | src/framework/consts.py | Alex-T13/stc_13-whithout-django-old | 2cbd3d84b8c602d103ee60abcd68cdec33f88482 | [
"MIT"
] | null | null | null | src/framework/consts.py | Alex-T13/stc_13-whithout-django-old | 2cbd3d84b8c602d103ee60abcd68cdec33f88482 | [
"MIT"
] | null | null | null | from datetime import timedelta
from pathlib import Path
SERVER_RUNNING_BANNER = """
+----------------------------------------+
| SERVER WORKS! |
+----------------------------------------+
Visit http://{host}:{port}
..........................................
"""
# Filesystem layout derived from this file's location:
# <repo>/src/framework/consts.py
_this_file_path = Path(__file__).resolve()
DIR_FRAMEWORK = _this_file_path.parent.resolve()
DIR_SRC = DIR_FRAMEWORK.parent.resolve()
DIR_REPO = DIR_SRC.parent.resolve()
DIR_STATIC = (DIR_REPO / "static").resolve()
DIR_STORAGE = (DIR_REPO / "db").resolve()
# HTTP methods whose requests carry a body.
METHODS_WITH_REQUEST_BODY = {
    "POST",
}
# JSON file persisting user records.
USERS_STORAGE = (DIR_STORAGE / "users.json").resolve()
# Cookie name that identifies a user session.
USER_COOKIE = "z37user"
# Lifetime of a user session.
USER_TTL = timedelta(minutes=5)
# strftime/strptime format for datetimes in storage.
DATE_TIME_FMT = "%Y-%m-%d %H:%M:%S"
| 20.216216 | 54 | 0.565508 | from datetime import timedelta
from pathlib import Path
SERVER_RUNNING_BANNER = """
+----------------------------------------+
| SERVER WORKS! |
+----------------------------------------+
Visit http://{host}:{port}
..........................................
"""
# Filesystem layout derived from this file's location:
# <repo>/src/framework/consts.py
_this_file_path = Path(__file__).resolve()
DIR_FRAMEWORK = _this_file_path.parent.resolve()
DIR_SRC = DIR_FRAMEWORK.parent.resolve()
DIR_REPO = DIR_SRC.parent.resolve()
DIR_STATIC = (DIR_REPO / "static").resolve()
DIR_STORAGE = (DIR_REPO / "db").resolve()
# HTTP methods whose requests carry a body.
METHODS_WITH_REQUEST_BODY = {
    "POST",
}
# JSON file persisting user records.
USERS_STORAGE = (DIR_STORAGE / "users.json").resolve()
# Cookie name that identifies a user session.
USER_COOKIE = "z37user"
# Lifetime of a user session.
USER_TTL = timedelta(minutes=5)
# strftime/strptime format for datetimes in storage.
DATE_TIME_FMT = "%Y-%m-%d %H:%M:%S"
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.