code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import math
import numpy as np
from scipy import optimize
from range_estimator.hierarchy import Hierarchy
class SmoothHierarchy(Hierarchy):
def __init__(self, users, args):
    """Build a smoothed hierarchy over the value domain.

    :param users: user data forwarded to the Hierarchy base class.
    :param args: run configuration; reads args.g (leaf granularity,
        0 = choose automatically) and args.range_epsilon (privacy budget).
    """
    Hierarchy.__init__(self, users, args)
    # Branching factor of the tree; update_fanout() may adjust it based
    # on base-class state -- TODO confirm against Hierarchy.update_fanout.
    self.fanout = 16
    self.update_fanout()
    self.g = self.args.g
    if self.args.g == 0:
        # g == 0 means "pick the leaf granularity by optimisation".
        self.g = self.opt_g()
    # Number of levels needed to cover n leaves in groups of g,
    # branching by `fanout` at each level.
    self.num_levels = int(math.log(self.n / self.g, self.fanout))
    # Privacy budget is split evenly across the hierarchy levels.
    self.epsilon = self.args.range_epsilon / self.num_levels
    self.granularities = [self.g * self.fanout ** h for h in range(self.num_levels)]
def opt_g(self):
    """Choose the leaf granularity that minimises estimated squared error.

    Minimises variance + squared bias as a function of theta = b^s (the
    paper's optimisation over s), then snaps the optimum to the nearest
    power of the fanout so the hierarchy divides evenly.
    """
    fanout = self.fanout
    eps = self.args.range_epsilon

    def cost(theta):
        # Variance term: noise accumulated over log_b(r / theta) levels.
        levels = math.log(self.args.r / theta) / math.log(fanout)
        variance = 2 * (fanout - 1) * levels ** 3 / (eps ** 2)
        # Squared-bias term: each value is assumed biased by theta / 3 and
        # an average query covers theta / 2 values, giving theta^2 / 36.
        # (A simple approximation -- the exact average is more involved.)
        bias_sq = theta ** 2 / 36
        return variance + bias_sq

    best = int(optimize.fmin(cost, 256, disp=False)[0])
    # Round the optimum to the nearest power of the fanout.
    exponent = round(math.log(best, fanout))
    return fanout ** exponent
def est_precise(self, ell):
    """Estimate counts via the hierarchy, then enforce parent/child
    consistency.

    NOTE(review): assumes Hierarchy.consist returns per-level counts
    with index 0 being the level of interest -- TODO confirm.
    """
    count = self.est_hierarchy(ell)
    count = self.consist(count)
    return count[0]
def guess(self, ell, hie_leaf, method=None):
    """Smooth the noisy per-group leaf estimates and spread them to leaves.

    :param ell: first-bucket value forwarded to set_leaf.
    :param hie_leaf: noisy per-group estimates from the hierarchy.
    :param method: smoother name; unknown names raise NotImplementedError.
    :return: per-leaf counts from set_leaf.
    """
    if method == 'naive_smoother':
        # No smoothing: use the raw estimates directly.
        smoothed = hie_leaf
    elif method == 'mean_smoother':
        # Cumulative mean of all estimates seen so far.
        smoothed = [np.mean(hie_leaf[:j + 1]) for j in range(len(hie_leaf))]
    elif method == 'median_smoother':
        # Cumulative median -- more robust to outliers than the mean.
        smoothed = [np.median(hie_leaf[:j + 1]) for j in range(len(hie_leaf))]
    elif method == 'moving_smoother':
        # Moving average over a trailing window of args.moving_w entries.
        window = self.args.moving_w
        smoothed = [np.mean(hie_leaf[max(0, j - window):j + 1]) for j in range(len(hie_leaf))]
    elif method == 'exp_smoother':
        # Exponential smoothing with factor args.exp_smooth_a.
        alpha = self.args.exp_smooth_a
        smoothed = np.zeros_like(hie_leaf)
        smoothed[0] = hie_leaf[0]
        for j in range(1, len(smoothed)):
            smoothed[j] = smoothed[j - 1] * (1 - alpha) + alpha * hie_leaf[j]
    else:
        raise NotImplementedError(method)
    return self.set_leaf(ell, smoothed, hie_leaf)
def set_leaf(self, ell, u_list, hie_leaf):
    """Spread smoothed per-group totals into a per-leaf count array of
    length self.n, then pin each group's last leaf so the group sums to
    its (noisy) hierarchy estimate.
    """
    leaf_counts = np.zeros(self.n)
    # First group: presumably ell is the first-bucket total and half of
    # it is assigned per slot -- TODO confirm intended semantics.
    leaf_counts[:self.g] = ell / 2
    # Middle groups get their smoothed total split evenly over g leaves.
    for i, u in enumerate(u_list[:-2]):
        leaf_counts[(i + 1) * self.g:(i + 2) * self.g] = u / self.g
    # NOTE(review): if len(u_list) < 3 the loop above never runs and `i`
    # is unbound here, raising NameError -- confirm callers always pass
    # at least 3 smoothed values.
    i += 1
    # Tail group: remaining leaves share the last used smoothed value.
    leaf_counts[(i + 1) * self.g:] = u_list[i] / (self.n - (i + 1) * self.g)
    # Adjust each group's final leaf so the group total matches hie_leaf.
    for i, est in enumerate(hie_leaf):
        leaf_counts[min((i + 1) * self.g - 1, self.n - 1)] = est - sum(leaf_counts[i * self.g:(i + 1) * self.g - 1])
return leaf_counts | range_estimator/smooth_hierarchy.py | import math
import numpy as np
from scipy import optimize
from range_estimator.hierarchy import Hierarchy
class SmoothHierarchy(Hierarchy):
def __init__(self, users, args):
Hierarchy.__init__(self, users, args)
self.fanout = 16
self.update_fanout()
self.g = self.args.g
if self.args.g == 0:
self.g = self.opt_g()
self.num_levels = int(math.log(self.n / self.g, self.fanout))
self.epsilon = self.args.range_epsilon / self.num_levels
self.granularities = [self.g * self.fanout ** h for h in range(self.num_levels)]
def opt_g(self):
def f(x):
# here x denotes b^s. in the equation in paper (optimizing s).
# the first part is variance
# the second part is bias squared
# for bias, we assume the bias for each value is theta / 3,
# and bias is theta / 3 multiplied by the average number of values in a query.
# assuming there are x/2 values in a query, we have average squared bias x^2/36
# the calculation for the squared average value in a query can be more complicated
# but we keep it simple as we can only approximate each value's bias to be theta / 3
return 2 * (self.fanout - 1) * (math.log(self.args.r / x) / math.log(self.fanout)) ** 3 / (self.args.range_epsilon ** 2) \
+ x ** 2 / 36
g = int(optimize.fmin(f, 256, disp=False)[0])
g_exp = math.log(g, self.fanout)
g_exp = round(g_exp)
return self.fanout ** g_exp
def est_precise(self, ell):
count = self.est_hierarchy(ell)
count = self.consist(count)
return count[0]
def guess(self, ell, hie_leaf, method=None):
if method == 'naive_smoother':
u_list = hie_leaf
return self.set_leaf(ell, u_list, hie_leaf)
elif method == 'mean_smoother':
u_list = [np.mean(hie_leaf[:i + 1]) for i in range(len(hie_leaf))]
return self.set_leaf(ell, u_list, hie_leaf)
elif method == 'median_smoother':
u_list = [np.median(hie_leaf[:i + 1]) for i in range(len(hie_leaf))]
return self.set_leaf(ell, u_list, hie_leaf)
elif method == 'moving_smoother':
u_list = [np.mean(hie_leaf[max(0, i - self.args.moving_w):i + 1]) for i in range(len(hie_leaf))]
return self.set_leaf(ell, u_list, hie_leaf)
elif method == 'exp_smoother':
u_list = np.zeros_like(hie_leaf)
u_list[0] = hie_leaf[0]
for i in range(1, len(u_list)):
u_list[i] = u_list[i - 1] * (1 - self.args.exp_smooth_a) + self.args.exp_smooth_a * hie_leaf[i]
return self.set_leaf(ell, u_list, hie_leaf)
else:
raise NotImplementedError(method)
def set_leaf(self, ell, u_list, hie_leaf):
leaf_counts = np.zeros(self.n)
leaf_counts[:self.g] = ell / 2
for i, u in enumerate(u_list[:-2]):
leaf_counts[(i + 1) * self.g:(i + 2) * self.g] = u / self.g
i += 1
leaf_counts[(i + 1) * self.g:] = u_list[i] / (self.n - (i + 1) * self.g)
for i, est in enumerate(hie_leaf):
leaf_counts[min((i + 1) * self.g - 1, self.n - 1)] = est - sum(leaf_counts[i * self.g:(i + 1) * self.g - 1])
return leaf_counts | 0.601359 | 0.466299 |
import numpy as np
import pytest
from just_bin_it.endpoints.serialisation import (
deserialise_ev42,
deserialise_hs00,
serialise_ev42,
serialise_hs00,
)
from just_bin_it.histograms.histogram1d import Histogram1d
from just_bin_it.histograms.histogram2d import Histogram2d
# Shared fixture constants for the serialisation round-trip tests.
NUM_BINS = 5
X_RANGE = (0, 5)
Y_RANGE = (0, 10)
# np.arange is the idiomatic (and faster) equivalent of
# np.array([x for x in range(...)]) -- same values, same integer dtype.
TOF_DATA = np.arange(NUM_BINS)
DET_DATA = np.arange(NUM_BINS)
PULSE_TIME = 12345
def _create_1d_histogrammer():
    """Create a 1-D histogram pre-filled with events at times 0..NUM_BINS-1."""
    histogrammer = Histogram1d("topic", NUM_BINS, X_RANGE)
    histogrammer.add_data(PULSE_TIME, TOF_DATA)
    return histogrammer
def _create_2d_histogrammer():
    """Create a 2-D histogram pre-filled with (tof, detector) event pairs."""
    histogrammer = Histogram2d("topic", NUM_BINS, X_RANGE, Y_RANGE)
    histogrammer.add_data(PULSE_TIME, TOF_DATA, DET_DATA)
    return histogrammer
class TestSerialisationHs00:
    """Round-trip tests for the hs00 histogram serialisation format."""

    @pytest.fixture(autouse=True)
    def prepare(self):
        self.hist_1d = _create_1d_histogrammer()
        self.hist_2d = _create_2d_histogrammer()

    def _check_1d_roundtrip(self, decoded):
        """Assertions shared by the 1-D round-trip tests."""
        assert decoded["source"] == "just-bin-it"
        assert decoded["current_shape"] == [self.hist_1d.num_bins]
        assert np.array_equal(
            decoded["dim_metadata"][0]["bin_boundaries"], self.hist_1d.x_edges.tolist()
        )
        assert decoded["dim_metadata"][0]["length"] == self.hist_1d.num_bins
        assert np.array_equal(decoded["data"], self.hist_1d.data)

    def test_serialises_hs00_message_correctly_for_1d(self):
        """
        Sanity check: checks the combination of libraries work as expected.
        """
        timestamp = 1234567890
        decoded = deserialise_hs00(serialise_hs00(self.hist_1d, timestamp))
        assert decoded["timestamp"] == timestamp
        self._check_1d_roundtrip(decoded)

    def test_if_timestamp_not_supplied_then_it_is_zero(self):
        """
        Sanity check: checks the combination of libraries work as expected.
        """
        decoded = deserialise_hs00(serialise_hs00(self.hist_1d))
        assert decoded["timestamp"] == 0
        self._check_1d_roundtrip(decoded)

    def test_serialises_hs00_message_correctly_for_2d(self):
        """
        Sanity check: checks the combination of libraries work as expected.
        """
        decoded = deserialise_hs00(serialise_hs00(self.hist_2d))
        assert decoded["source"] == "just-bin-it"
        assert decoded["current_shape"] == [self.hist_2d.num_bins, self.hist_2d.num_bins]
        # Both dimensions must carry their own edges and length.
        for dim, edges in ((0, self.hist_2d.x_edges), (1, self.hist_2d.y_edges)):
            assert np.array_equal(
                decoded["dim_metadata"][dim]["bin_boundaries"], edges.tolist()
            )
            assert decoded["dim_metadata"][dim]["length"] == self.hist_2d.num_bins
        assert np.array_equal(decoded["data"], self.hist_2d.data)

    def test_serialises_hs00_message_with_info_field_filled_out_correctly(self):
        """
        Sanity check: checks the combination of libraries work as expected.
        """
        note = "info_message"
        decoded = deserialise_hs00(serialise_hs00(self.hist_1d, info_message=note))
        assert decoded["info"] == note
class TestSerialisationEv42:
    """Round-trip test for the ev42 event-message serialisation format."""

    def test_serialises_ev42_message_correctly(self):
        """
        Sanity check: checks the combination of libraries work as expected.
        """
        source = "just-bin-it"
        message_id = 123456
        # Presumably an epoch time in nanoseconds -- TODO confirm units.
        pulse_time = 1234567890000000000
        tofs = [1, 2, 3, 4, 5]
        dets = [10, 20, 30, 40, 50]
        buf = serialise_ev42(source, message_id, pulse_time, tofs, dets)
        info = deserialise_ev42(buf)
        # Every field must survive the serialise/deserialise round trip.
        assert info.source_name == source
        assert info.message_id == message_id
        assert info.pulse_time == pulse_time
        assert len(info.time_of_flight) == len(tofs)
        assert len(info.detector_id) == len(dets)
        assert np.array_equal(info.time_of_flight, tofs)
assert np.array_equal(info.detector_id, dets) | tests/test_serialisation.py | import numpy as np
import pytest
from just_bin_it.endpoints.serialisation import (
deserialise_ev42,
deserialise_hs00,
serialise_ev42,
serialise_hs00,
)
from just_bin_it.histograms.histogram1d import Histogram1d
from just_bin_it.histograms.histogram2d import Histogram2d
NUM_BINS = 5
X_RANGE = (0, 5)
Y_RANGE = (0, 10)
TOF_DATA = np.array([x for x in range(NUM_BINS)])
DET_DATA = np.array([x for x in range(NUM_BINS)])
PULSE_TIME = 12345
def _create_1d_histogrammer():
histogrammer = Histogram1d("topic", NUM_BINS, X_RANGE)
histogrammer.add_data(PULSE_TIME, TOF_DATA)
return histogrammer
def _create_2d_histogrammer():
histogrammer = Histogram2d("topic", NUM_BINS, X_RANGE, Y_RANGE)
histogrammer.add_data(PULSE_TIME, TOF_DATA, DET_DATA)
return histogrammer
class TestSerialisationHs00:
@pytest.fixture(autouse=True)
def prepare(self):
self.hist_1d = _create_1d_histogrammer()
self.hist_2d = _create_2d_histogrammer()
def test_serialises_hs00_message_correctly_for_1d(self):
"""
Sanity check: checks the combination of libraries work as expected.
"""
timestamp = 1234567890
buf = serialise_hs00(self.hist_1d, timestamp)
hist = deserialise_hs00(buf)
assert hist["source"] == "just-bin-it"
assert hist["timestamp"] == timestamp
assert hist["current_shape"] == [self.hist_1d.num_bins]
assert np.array_equal(
hist["dim_metadata"][0]["bin_boundaries"], self.hist_1d.x_edges.tolist()
)
assert hist["dim_metadata"][0]["length"] == self.hist_1d.num_bins
assert np.array_equal(hist["data"], self.hist_1d.data)
def test_if_timestamp_not_supplied_then_it_is_zero(self):
"""
Sanity check: checks the combination of libraries work as expected.
"""
buf = serialise_hs00(self.hist_1d)
hist = deserialise_hs00(buf)
assert hist["source"] == "just-bin-it"
assert hist["timestamp"] == 0
assert hist["current_shape"] == [self.hist_1d.num_bins]
assert np.array_equal(
hist["dim_metadata"][0]["bin_boundaries"], self.hist_1d.x_edges.tolist()
)
assert hist["dim_metadata"][0]["length"] == self.hist_1d.num_bins
assert np.array_equal(hist["data"], self.hist_1d.data)
def test_serialises_hs00_message_correctly_for_2d(self):
"""
Sanity check: checks the combination of libraries work as expected.
"""
buf = serialise_hs00(self.hist_2d)
hist = deserialise_hs00(buf)
assert hist["source"] == "just-bin-it"
assert hist["current_shape"] == [self.hist_2d.num_bins, self.hist_2d.num_bins]
assert np.array_equal(
hist["dim_metadata"][0]["bin_boundaries"], self.hist_2d.x_edges.tolist()
)
assert np.array_equal(
hist["dim_metadata"][1]["bin_boundaries"], self.hist_2d.y_edges.tolist()
)
assert hist["dim_metadata"][0]["length"] == self.hist_2d.num_bins
assert hist["dim_metadata"][1]["length"] == self.hist_2d.num_bins
assert np.array_equal(hist["data"], self.hist_2d.data)
def test_serialises_hs00_message_with_info_field_filled_out_correctly(self):
"""
Sanity check: checks the combination of libraries work as expected.
"""
info_message = "info_message"
buf = serialise_hs00(self.hist_1d, info_message=info_message)
hist = deserialise_hs00(buf)
assert hist["info"] == info_message
class TestSerialisationEv42:
def test_serialises_ev42_message_correctly(self):
"""
Sanity check: checks the combination of libraries work as expected.
"""
source = "just-bin-it"
message_id = 123456
pulse_time = 1234567890000000000
tofs = [1, 2, 3, 4, 5]
dets = [10, 20, 30, 40, 50]
buf = serialise_ev42(source, message_id, pulse_time, tofs, dets)
info = deserialise_ev42(buf)
assert info.source_name == source
assert info.message_id == message_id
assert info.pulse_time == pulse_time
assert len(info.time_of_flight) == len(tofs)
assert len(info.detector_id) == len(dets)
assert np.array_equal(info.time_of_flight, tofs)
assert np.array_equal(info.detector_id, dets) | 0.662578 | 0.588712 |
import os
import sys
import pandas as pd
# Directory containing this module; used to locate bundled resources.
_thisdir = os.path.realpath(os.path.split(__file__)[0])
# Public configuration names exported by this module.
__all__=['template_pptx',
'font_path',
'chart_type_list',
'number_format_data',
'number_format_tick',
'font_default_size',
'summary_loc',
'chart_loc']
def _get_element_path(dir_name, suffix=None):
    """Return the path of a file under <_thisdir>/<dir_name> whose
    extension (without the dot) is listed in *suffix*.

    Returns None when the directory is missing, *suffix* is None, or no
    file matches. When several files match, the last one listed wins.
    """
    base = os.path.join(_thisdir, dir_name)
    if not os.path.exists(base):
        return None
    # Normalise suffix to a list (a single string means one extension).
    if isinstance(suffix, str):
        suffix = [suffix]
    elif suffix is not None:
        suffix = list(suffix)
    found = None
    for name in os.listdir(base):
        ext = os.path.splitext(name)[1][1:]
        if isinstance(suffix, list) and ext in suffix:
            found = os.path.join(base, name)
    return found
# Default pptx template bundled with the package (None if absent).
template_pptx=_get_element_path('template',suffix=['pptx'])
#template='template.pptx'
# Default font for Chinese text: prefer a font file bundled with the package.
font_path=_get_element_path('font',suffix=['ttf','ttc'])
if font_path is None:
    if sys.platform.startswith('win'):
        #font_path='C:\\windows\\fonts\\msyh.ttc'
        # Fall back to common Windows system fonts; the last existing one wins.
        fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','MSYH.TTC','msyh.ttc']
        for f in fontlist:
            if os.path.exists(os.path.join('C:\\windows\\fonts\\',f)):
                font_path=os.path.join('C:\\windows\\fonts\\',f)
# Supported chart types: [display label, chart-data class, rendering backend].
chart_type_list={\
"COLUMN_CLUSTERED":['柱状图','ChartData','pptx'],\
"BAR_CLUSTERED":['条形图','ChartData','pptx'],
'HIST':['分布图,KDE','XChartData','matplotlib']}
chart_type_list=pd.DataFrame(chart_type_list)
# Number format for data labels in PPT charts (integer percent).
number_format_data='0"%"'
# Number format for axis tick labels in PPT charts (integer percent).
number_format_tick='0"%"'
# Default font size, in the EMU-style units used by python-pptx.
'''
Pt(8):101600, Pt(10):127000, Pt(12):152400, Pt(14):177800
Pt(16):203200, Pt(18):228600, Pt(20):254000, Pt(22):279400
Pt(24):304800, Pt(26):330200
'''
font_default_size=127000# Pt(10)
# Position of the summary text box on a slide:
# the four values are left, top, width, height (slide-relative fractions).
summary_loc=[0.10,0.14,0.80,0.15]
# Position of the chart on a slide:
# the four values are left, top, width, height (slide-relative fractions).
chart_loc=[0.10,0.30,0.80,0.60] | reportgen/config.py | import os
import sys
import pandas as pd
_thisdir = os.path.realpath(os.path.split(__file__)[0])
__all__=['template_pptx',
'font_path',
'chart_type_list',
'number_format_data',
'number_format_tick',
'font_default_size',
'summary_loc',
'chart_loc']
def _get_element_path(dir_name,suffix=None):
if not(os.path.exists(os.path.join(_thisdir,dir_name))):
element_path=None
return element_path
element_path=None
filelist=os.listdir(os.path.join(_thisdir,dir_name))
if isinstance(suffix,str):
suffix=[suffix]
elif suffix is not None:
suffix=list(suffix)
for f in filelist:
if isinstance(suffix,list) and os.path.splitext(f)[1][1:] in suffix:
element_path=os.path.join(_thisdir,dir_name,f)
return element_path
# default pptx template
template_pptx=_get_element_path('template',suffix=['pptx'])
#template='template.pptx'
# default font of chinese
font_path=_get_element_path('font',suffix=['ttf','ttc'])
if font_path is None:
if sys.platform.startswith('win'):
#font_path='C:\\windows\\fonts\\msyh.ttc'
fontlist=['calibri.ttf','simfang.ttf','simkai.ttf','simhei.ttf','simsun.ttc','msyh.ttf','MSYH.TTC','msyh.ttc']
for f in fontlist:
if os.path.exists(os.path.join('C:\\windows\\fonts\\',f)):
font_path=os.path.join('C:\\windows\\fonts\\',f)
chart_type_list={\
"COLUMN_CLUSTERED":['柱状图','ChartData','pptx'],\
"BAR_CLUSTERED":['条形图','ChartData','pptx'],
'HIST':['分布图,KDE','XChartData','matplotlib']}
chart_type_list=pd.DataFrame(chart_type_list)
# PPT图表中的数字位数
number_format_data='0"%"'
# PPT图表中坐标轴的数字标签格式
number_format_tick='0"%"'
# 默认字体大小
'''
Pt(8):101600, Pt(10):127000, Pt(12):152400, Pt(14):177800
Pt(16):203200, Pt(18):228600, Pt(20):254000, Pt(22):279400
Pt(24):304800, Pt(26):330200
'''
font_default_size=127000# Pt(10)
# PPT中结论文本框所在的位置
# 四个值依次为left、top、width、height
summary_loc=[0.10,0.14,0.80,0.15]
# PPT中结论文本框所在的位置
# 四个值依次为left、top、width、height
chart_loc=[0.10,0.30,0.80,0.60] | 0.206014 | 0.072308 |
from typing import Any, Dict, Mapping
from eduid_common.api.app import EduIDBaseApp
from eduid_common.api.logging import merge_config
from eduid_common.api.testing import EduidAPITestCase
from eduid_common.config.base import EduIDBaseAppConfig
__author__ = 'lundberg'
from eduid_common.config.parsers import load_config
class LoggingTestApp(EduIDBaseApp):
    """Minimal EduIDBaseApp subclass used as the app under test."""
    pass
class LoggingTest(EduidAPITestCase):
    """Tests for eduid_common.api.logging.merge_config."""

    # The Flask app instance under test (set up by the parent test case).
    app: LoggingTestApp

    def load_app(self, test_config: Mapping[str, Any]) -> LoggingTestApp:
        """
        Called from the parent class, so we can provide the appropriate flask
        app for this test case.
        """
        config = load_config(typ=EduIDBaseAppConfig, app_name='test_app', ns='webapp', test_config=test_config)
        return LoggingTestApp(config)

    def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
        # No test-specific config changes are needed; return it unchanged.
        return config

    def tearDown(self):
        # Deliberately skips the parent teardown -- presumably to avoid
        # cleanup this test never needs; TODO confirm intent.
        pass

    def test_merge_config(self):
        """merge_config must merge the settings config into the base logging
        config, with settings taking precedence for overlapping keys."""
        base_config = {
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'default': {'()': 'eduid_common.api.logging.EduidFormatter', 'fmt': 'cfg://local_context.format'},
            },
            'filters': {
                'app_filter': {'()': 'eduid_common.api.logging.AppFilter', 'app_name': 'cfg://local_context.app_name',},
                'user_filter': {'()': 'eduid_common.api.logging.UserFilter',},
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'level': 'cfg://local_context.level',
                    'formatter': 'default',
                    'filters': ['app_filter', 'user_filter'],
                },
            },
            'root': {'handlers': ['console'], 'level': 'cfg://local_context.level',},
        }
        settings_config = {
            'formatters': {'test': {'format': '%(levelname)s: Module: %(name)s Msg: %(message)s'}},
            'handlers': {'console': {'formatter': 'test', 'filters': ['test_filter']}},
        }
        # Preconditions: the two configs differ exactly where we assert below.
        self.assertIsNone(base_config['formatters'].get('test', None))
        self.assertEqual(len(base_config['formatters']), 1)
        self.assertIsNotNone(settings_config['formatters'].get('test', None))
        self.assertEqual(base_config['handlers']['console']['formatter'], 'default')
        self.assertEqual(base_config['handlers']['console']['filters'], ['app_filter', 'user_filter'])
        self.assertEqual(settings_config['handlers']['console']['formatter'], 'test')
        self.assertEqual(settings_config['handlers']['console']['filters'], ['test_filter'])
        res = merge_config(base_config, settings_config)
        # The new formatter is added while the existing one is kept.
        self.assertIsNotNone(res['formatters'].get('test', None))
        self.assertEqual(len(res['formatters']), 2)
        self.assertEqual(res['formatters']['test']['format'], '%(levelname)s: Module: %(name)s Msg: %(message)s')
        # Overlapping handler keys are overridden by the settings config.
        self.assertEqual(res['handlers']['console']['formatter'], 'test')
        self.assertEqual(res['handlers']['console']['filters'], ['test_filter'])
self.assertEqual(res['handlers']['console']['filters'], ['test_filter']) | src/eduid_common/api/tests/test_logging.py |
from typing import Any, Dict, Mapping
from eduid_common.api.app import EduIDBaseApp
from eduid_common.api.logging import merge_config
from eduid_common.api.testing import EduidAPITestCase
from eduid_common.config.base import EduIDBaseAppConfig
__author__ = 'lundberg'
from eduid_common.config.parsers import load_config
class LoggingTestApp(EduIDBaseApp):
pass
class LoggingTest(EduidAPITestCase):
app: LoggingTestApp
def load_app(self, test_config: Mapping[str, Any]) -> LoggingTestApp:
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
config = load_config(typ=EduIDBaseAppConfig, app_name='test_app', ns='webapp', test_config=test_config)
return LoggingTestApp(config)
def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
return config
def tearDown(self):
pass
def test_merge_config(self):
base_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {'()': 'eduid_common.api.logging.EduidFormatter', 'fmt': 'cfg://local_context.format'},
},
'filters': {
'app_filter': {'()': 'eduid_common.api.logging.AppFilter', 'app_name': 'cfg://local_context.app_name',},
'user_filter': {'()': 'eduid_common.api.logging.UserFilter',},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'cfg://local_context.level',
'formatter': 'default',
'filters': ['app_filter', 'user_filter'],
},
},
'root': {'handlers': ['console'], 'level': 'cfg://local_context.level',},
}
settings_config = {
'formatters': {'test': {'format': '%(levelname)s: Module: %(name)s Msg: %(message)s'}},
'handlers': {'console': {'formatter': 'test', 'filters': ['test_filter']}},
}
self.assertIsNone(base_config['formatters'].get('test', None))
self.assertEqual(len(base_config['formatters']), 1)
self.assertIsNotNone(settings_config['formatters'].get('test', None))
self.assertEqual(base_config['handlers']['console']['formatter'], 'default')
self.assertEqual(base_config['handlers']['console']['filters'], ['app_filter', 'user_filter'])
self.assertEqual(settings_config['handlers']['console']['formatter'], 'test')
self.assertEqual(settings_config['handlers']['console']['filters'], ['test_filter'])
res = merge_config(base_config, settings_config)
self.assertIsNotNone(res['formatters'].get('test', None))
self.assertEqual(len(res['formatters']), 2)
self.assertEqual(res['formatters']['test']['format'], '%(levelname)s: Module: %(name)s Msg: %(message)s')
self.assertEqual(res['handlers']['console']['formatter'], 'test')
self.assertEqual(res['handlers']['console']['filters'], ['test_filter']) | 0.63273 | 0.164081 |
import datetime as dt
import io
import json
from asyncio.exceptions import TimeoutError
from typing import Optional, Tuple
from aiogram import Bot, Dispatcher, executor, md, types
from aioredis import Redis
from aiotracemoeapi import TraceMoe, exceptions
from aiotracemoeapi.types import AniList, AnimeResponse
# Placeholder -- replace with a real Telegram bot token before running.
API_TOKEN = "BOT TOKEN HERE"
bot = Bot(token=API_TOKEN, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot)
# trace.moe API client with a 10 second request timeout.
trace_bot = TraceMoe(timeout=10)
class SimpleStorage:
    """Tiny Redis-backed cache mapping Telegram file ids to search results."""

    def __init__(self):
        self._redis = None

    async def get_db(self) -> Redis:
        """Lazily create and return the shared Redis connection."""
        if self._redis is None:
            self._redis = await Redis(
                host="localhost",
                max_connections=10,
                decode_responses=True,
                db=6,
            )
        return self._redis

    async def check_in_cache(self, file_id: str) -> Optional[AnimeResponse]:
        """Return the cached AnimeResponse for *file_id*, or None on a miss."""
        db = await self.get_db()
        raw = await db.get(f"anime:{file_id}")
        if raw is None:
            return None
        return AnimeResponse(**json.loads(raw))

    async def add_in_cache(self, file_id, data: AnimeResponse):
        """Cache *data* under *file_id*, expiring after one week."""
        db = await self.get_db()
        await db.set(f"anime:{file_id}", data.json(by_alias=True), ex=dt.timedelta(weeks=1))

    async def close(self):
        """Close the Redis connection if one was ever opened."""
        if self._redis:
            await self._redis.close()
# Module-level cache singleton shared by all handlers.
storage = SimpleStorage()
@dp.message_handler(commands=["start", "help"])
async def send_welcome(message: types.Message):
    """Reply with a short usage hint for /start and /help."""
    await message.reply("You can Send / Forward anime screenshots to me.")
@dp.message_handler(
    chat_type=types.ChatType.PRIVATE,
    content_types=[
        types.ContentType.PHOTO,
        types.ContentType.ANIMATION,
        types.ContentType.VIDEO,
    ],
    run_task=True,
)
async def search_anime(message: types.Message, send_nsfw: bool = True):
    """Identify the anime shown in an incoming photo/GIF/video.

    Checks the Redis cache first; on a miss, downloads the media, queries
    trace.moe and caches the answer. Replies with the parsed result and,
    unless the title is adult content and *send_nsfw* is False, with the
    matching preview video.

    :param message: the incoming media message.
    :param send_nsfw: when False, adult-content previews are suppressed.
    """
    # BUGFIX: the original except handlers referenced `msg` even when the
    # exception was raised before `msg` was assigned, turning any early
    # failure into a NameError. Keep a None sentinel and fall back to a
    # plain reply when the status message was never sent.
    msg = None

    async def report(text: str) -> None:
        # Edit the "Search..." status message when it exists, else reply.
        if msg is not None:
            await msg.edit_text(text)
        else:
            await message.reply(text)

    try:
        if message.content_type in types.ContentTypes.VIDEO:
            download = message.video.download
            file_id = message.video.file_unique_id
        elif message.content_type in types.ContentTypes.ANIMATION:
            download = message.animation.download
            file_id = message.animation.file_unique_id
        elif message.content_type in types.ContentTypes.PHOTO:
            # Last entry of the photo list is the highest resolution.
            download = message.photo[-1].download
            file_id = message.photo[-1].file_unique_id
        else:
            await message.reply("This file type is not supported")
            return
        msg = await message.reply("Search...")
        anime = await storage.check_in_cache(file_id)
        if not anime:
            data = io.BytesIO()
            await download(destination_file=data)
            anime = await trace_bot.search(data)
            await storage.add_in_cache(file_id, anime)
    except exceptions.SearchQueueFull:
        await report("Search queue is full, try again later")
    except exceptions.SearchQuotaDepleted:
        await report("Monthly search limit reached")
    except exceptions.TraceMoeAPIError as error:
        await report(f"Unexpected error:\n{error.text}")
    except TimeoutError:
        await report("Server timed out. Try again later")
    except Exception as error:
        await report(f"Unknown error\n{error}")
    else:
        out, kb = parse_text(anime)
        await msg.edit_text(out, disable_web_page_preview=True, reply_markup=kb)
        # NOTE(review): assumes best_result.anilist is an AniList instance;
        # parse_text guards with isinstance but this access does not.
        # `(not adult) or (adult and send_nsfw)` simplifies to the below.
        if (not anime.best_result.anilist.is_adult) or send_nsfw:
            await message.chat.do(types.ChatActions.UPLOAD_VIDEO)
            await msg.reply_video(anime.best_result.video)
@dp.message_handler(
    commands="wait",
    chat_type=[types.ChatType.GROUP, types.ChatType.SUPERGROUP],
    is_reply=True,
    run_task=True,
)
async def search_anime_in_group(message: types.Message, reply: types.Message):
    """Handle the /wait command in groups: run the search on the replied-to
    message, with NSFW preview videos suppressed."""
    await search_anime(message=reply, send_nsfw=False)
def parse_text(anime_response: AnimeResponse) -> Tuple[str, types.InlineKeyboardMarkup]:
    """Build the reply text and inline keyboard for a trace.moe result.

    NOTE(review): the source's indentation was flattened; the nesting of
    the episode/scene/similarity sections relative to the AniList check
    follows the apparent intent -- confirm against the upstream example.
    """
    out = str()
    kb = types.InlineKeyboardMarkup()
    if isinstance(anime_response.best_result.anilist, AniList):
        if len(anime_response.best_result.anilist.title) > 0:
            out += "Title:\n"
            # Button linking to the MyAnimeList page for the matched title.
            kb.add(
                types.InlineKeyboardButton(
                    "My Anime List", url=anime_response.best_result.anilist.mal_url
                )
            )
            for k, v in anime_response.best_result.anilist.title.items():
                if v is None:
                    continue
                out += f" {k}: {v}\n"
        if len(anime_response.best_result.anilist.synonyms) > 0:
            out += "Synonyms:\n"
            for syn in anime_response.best_result.anilist.synonyms:
                out += f" {syn}\n"
        if anime_response.best_result.anilist.is_adult:
            out += "Hentai🔞\n"
    if anime_response.best_result.episode:
        episode = anime_response.best_result.episode
        if isinstance(anime_response.best_result.episode, list):
            # Several matching episodes are joined into a single line.
            episode = " | ".join(str(ep) for ep in anime_response.best_result.episode)
        out += f"Episode: {md.hbold(str(episode))}\n"
    # NOTE(review): a scene starting at second 0 is falsy and is skipped.
    if anime_response.best_result.anime_from:
        out += f"Starting time of the matching scene: {md.hbold(str(dt.timedelta(seconds=int(anime_response.best_result.anime_from))))}\n"
    out += f"Similarity: {md.hbold(anime_response.best_result.short_similarity())}\n"
    return out, kb
async def on_startup(dp: Dispatcher):
    """Print the bot identity and remaining trace.moe quota at startup."""
    bot_me = await dp.bot.me
    tm_me = await trace_bot.me()
    print(f"Bot @{bot_me.username} starting")
    print(f"You have {tm_me.quota_used}/{tm_me.quota} anime search queries left")
async def on_shutdown(dp: Dispatcher):
    """Release the Redis connection when the bot stops."""
    await storage.close()
if __name__ == "__main__":
executor.start_polling(dp, on_startup=on_startup, on_shutdown=on_shutdown, skip_updates=True) | examples/redis_telegrambot.py | import datetime as dt
import io
import json
from asyncio.exceptions import TimeoutError
from typing import Optional, Tuple
from aiogram import Bot, Dispatcher, executor, md, types
from aioredis import Redis
from aiotracemoeapi import TraceMoe, exceptions
from aiotracemoeapi.types import AniList, AnimeResponse
API_TOKEN = "BOT TOKEN HERE"
bot = Bot(token=API_TOKEN, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot)
trace_bot = TraceMoe(timeout=10)
class SimpleStorage:
def __init__(self):
self._redis = None
async def get_db(self) -> Redis:
if self._redis is None:
self._redis = await Redis(
host="localhost",
max_connections=10,
decode_responses=True,
db=6,
)
return self._redis
async def check_in_cache(self, file_id: str) -> Optional[AnimeResponse]:
redis = await self.get_db()
addr = f"anime:{file_id}"
_anime = await redis.get(addr)
if _anime is None:
return None
anime = json.loads(_anime)
return AnimeResponse(**anime)
async def add_in_cache(self, file_id, data: AnimeResponse):
redis = await self.get_db()
addr = f"anime:{file_id}"
await redis.set(addr, data.json(by_alias=True), ex=dt.timedelta(weeks=1))
async def close(self):
if self._redis:
await self._redis.close()
storage = SimpleStorage()
@dp.message_handler(commands=["start", "help"])
async def send_welcome(message: types.Message):
await message.reply("You can Send / Forward anime screenshots to me.")
@dp.message_handler(
chat_type=types.ChatType.PRIVATE,
content_types=[
types.ContentType.PHOTO,
types.ContentType.ANIMATION,
types.ContentType.VIDEO,
],
run_task=True,
)
async def search_anime(message: types.Message, send_nsfw: bool = True):
try:
download = None
if message.content_type in types.ContentTypes.VIDEO:
download = message.video.download
file_id = message.video.file_unique_id
elif message.content_type in types.ContentTypes.ANIMATION:
download = message.animation.download
file_id = message.animation.file_unique_id
elif message.content_type in types.ContentTypes.PHOTO:
download = message.photo[-1].download
file_id = message.photo[-1].file_unique_id
else:
await message.reply("This file type is not supported")
return
msg = await message.reply("Search...")
anime = await storage.check_in_cache(file_id)
if not anime:
data = io.BytesIO()
await download(destination_file=data)
anime = await trace_bot.search(data)
await storage.add_in_cache(file_id, anime)
except exceptions.SearchQueueFull:
await msg.edit_text("Search queue is full, try again later")
except exceptions.SearchQuotaDepleted:
await msg.edit_text("Monthly search limit reached")
except exceptions.TraceMoeAPIError as error:
await msg.edit_text(f"Unexpected error:\n{error.text}")
except TimeoutError:
await msg.edit_text("Server timed out. Try again later")
except Exception as error:
await msg.edit_text(f"Unknown error\n{error}")
else:
out, kb = parse_text(anime)
await msg.edit_text(out, disable_web_page_preview=True, reply_markup=kb)
if (not anime.best_result.anilist.is_adult) or (anime.best_result.anilist.is_adult and send_nsfw):
await message.chat.do(types.ChatActions.UPLOAD_VIDEO)
await msg.reply_video(anime.best_result.video)
@dp.message_handler(
commands="wait",
chat_type=[types.ChatType.GROUP, types.ChatType.SUPERGROUP],
is_reply=True,
run_task=True,
)
async def search_anime_in_group(message: types.Message, reply: types.Message):
await search_anime(message=reply, send_nsfw=False)
def parse_text(anime_response: AnimeResponse) -> Tuple[str, types.InlineKeyboardMarkup]:
out = str()
kb = types.InlineKeyboardMarkup()
if isinstance(anime_response.best_result.anilist, AniList):
if len(anime_response.best_result.anilist.title) > 0:
out += "Title:\n"
kb.add(
types.InlineKeyboardButton(
"My Anime List", url=anime_response.best_result.anilist.mal_url
)
)
for k, v in anime_response.best_result.anilist.title.items():
if v is None:
continue
out += f" {k}: {v}\n"
if len(anime_response.best_result.anilist.synonyms) > 0:
out += "Synonyms:\n"
for syn in anime_response.best_result.anilist.synonyms:
out += f" {syn}\n"
if anime_response.best_result.anilist.is_adult:
out += "Hentai🔞\n"
if anime_response.best_result.episode:
episode = anime_response.best_result.episode
if isinstance(anime_response.best_result.episode, list):
episode = " | ".join(str(ep) for ep in anime_response.best_result.episode)
out += f"Episode: {md.hbold(str(episode))}\n"
if anime_response.best_result.anime_from:
out += f"Starting time of the matching scene: {md.hbold(str(dt.timedelta(seconds=int(anime_response.best_result.anime_from))))}\n"
out += f"Similarity: {md.hbold(anime_response.best_result.short_similarity())}\n"
return out, kb
async def on_startup(dp: Dispatcher):
bot_me = await dp.bot.me
tm_me = await trace_bot.me()
print(f"Bot @{bot_me.username} starting")
print(f"You have {tm_me.quota_used}/{tm_me.quota} anime search queries left")
async def on_shutdown(dp: Dispatcher):
    """Release the module-level storage backend's resources on bot shutdown."""
    await storage.close()
if __name__ == "__main__":
    # Long-polling entry point; skip_updates drops updates received while offline.
    executor.start_polling(dp, on_startup=on_startup, on_shutdown=on_shutdown, skip_updates=True)
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
import math
import backtrader as bt
import time
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
import seaborn as sns
class TestStrategy(bt.Strategy):
    """Collect RSI series for 15 lookback periods and save box/distribution plots.

    This is not a trading strategy: it places no orders. On every bar it
    records the current value of each RSI indicator; in stop() it renders one
    boxplot and one distribution plot per period into a single saved figure.

    The original code unrolled all 15 indicators by hand; this version drives
    them from loops while keeping the rsi_period_1..15 params and the
    rsi_<i>/array_rsi_<i> attributes so external configuration and any code
    that reads those attributes keep working.
    """

    # Number of RSI variants tracked; must match the params below.
    N_RSI = 15

    params = (
        ("rsi_period_1", 10),
        ("rsi_period_2", 20),
        ("rsi_period_3", 40),
        ("rsi_period_4", 60),
        ("rsi_period_5", 80),
        ("rsi_period_6", 100),
        ("rsi_period_7", 120),
        ("rsi_period_8", 150),
        ("rsi_period_9", 180),
        ("rsi_period_10", 210),
        ("rsi_period_11", 250),
        ("rsi_period_12", 300),
        ("rsi_period_13", 350),
        ("rsi_period_14", 400),
        ("rsi_period_15", 500),
    )

    def __init__(self):
        # References to the OHLC lines of the primary data feed.
        self.dataclose = self.datas[0].close
        self.dataopen = self.datas[0].open
        self.datalow = self.datas[0].low
        self.datahigh = self.datas[0].high
        # One TA-Lib RSI indicator and one value buffer per configured period.
        self.rsis = []
        self.rsi_arrays = []
        for i in range(1, self.N_RSI + 1):
            period = getattr(self.params, "rsi_period_%d" % i)
            rsi = bt.talib.RSI(self.datas[0], timeperiod=period)
            self.rsis.append(rsi)
            self.rsi_arrays.append([])
            # Backward-compatible aliases matching the old unrolled attributes.
            setattr(self, "rsi_%d" % i, rsi)
            setattr(self, "array_rsi_%d" % i, self.rsi_arrays[-1])

    def next(self):
        # Record the current value of every RSI variant for this bar.
        for rsi, values in zip(self.rsis, self.rsi_arrays):
            values.append(rsi[0])

    def stop(self):
        # Figure layout: N_RSI rows x 2 columns -- boxplots left, histograms right.
        figsize = (30, 25)
        plt.subplots(figsize=figsize)
        for i, values in enumerate(self.rsi_arrays):
            period = getattr(self.params, "rsi_period_%d" % (i + 1))
            title = "rsi_%d(period=%d)" % (i + 1, period)
            plt.subplot(self.N_RSI, 2, 2 * i + 1)
            plt.boxplot(values, sym='r*', vert=False, patch_artist=True,
                        meanline=False, showmeans=True)
            plt.title(title)
            plt.subplot(self.N_RSI, 2, 2 * i + 2)
            # NOTE(review): sns.distplot is deprecated in seaborn >= 0.11
            # (replaced by displot/histplot); kept for behaviour parity.
            sns.distplot(values)
            plt.title(title)
        plt.savefig('./rsi分析图.png')
if __name__ == '__main__':
    # Resolve paths relative to this script's location.
    modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    # datapath = os.path.join(modpath, 'F:/git_repo/backtrader-ccxt/datas/BTC-USD-1D-coinbase-converted-date.data')
    # NOTE(review): the second argument looks like an absolute Windows path,
    # which makes the join return it unchanged -- confirm the intended location.
    datapath = os.path.join(modpath, 'F:/git_repo/backtrader-ccxt/datas/BTC-USD-1H-coinbase-converted-datetime.data')
    cerebro = bt.Cerebro()
    cerebro.addstrategy(TestStrategy)
    # NOTE(review): the file name suggests 1-hour bars but timeframe is Days -- confirm.
    data = bt.feeds.BacktraderCSVData(
        dataname=datapath,
        timeframe=bt.TimeFrame.Days,
        # timeframe=bt.TimeFrame.Minutes,
        # compression=1,
        # fromdate=datetime.datetime(2015, 7, 20),
        # todate=datetime.datetime(2015, 10, 21, 21, 25, 0),
        reverse=False)
    cerebro.adddata(data)
    # Broker setup: starting cash and a 0.1% commission per trade.
    init_value = 5000
    cerebro.broker.setcash(init_value)
    mycommission = 0.001
    cerebro.broker.setcommission(commission=mycommission)
    strats = cerebro.run(tradehistory=True)
# cerebro.plot() | mywork/indicator/indicator_rsi.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
import math
import backtrader as bt
import time
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
import seaborn as sns
class TestStrategy(bt.Strategy):
    """Collect RSI series for 15 lookback periods and save box/distribution plots.

    This is not a trading strategy: it places no orders. On every bar it
    records the current value of each RSI indicator; in stop() it renders one
    boxplot and one distribution plot per period into a single saved figure.

    The original code unrolled all 15 indicators by hand; this version drives
    them from loops while keeping the rsi_period_1..15 params and the
    rsi_<i>/array_rsi_<i> attributes so external configuration and any code
    that reads those attributes keep working.
    """

    # Number of RSI variants tracked; must match the params below.
    N_RSI = 15

    params = (
        ("rsi_period_1", 10),
        ("rsi_period_2", 20),
        ("rsi_period_3", 40),
        ("rsi_period_4", 60),
        ("rsi_period_5", 80),
        ("rsi_period_6", 100),
        ("rsi_period_7", 120),
        ("rsi_period_8", 150),
        ("rsi_period_9", 180),
        ("rsi_period_10", 210),
        ("rsi_period_11", 250),
        ("rsi_period_12", 300),
        ("rsi_period_13", 350),
        ("rsi_period_14", 400),
        ("rsi_period_15", 500),
    )

    def __init__(self):
        # References to the OHLC lines of the primary data feed.
        self.dataclose = self.datas[0].close
        self.dataopen = self.datas[0].open
        self.datalow = self.datas[0].low
        self.datahigh = self.datas[0].high
        # One TA-Lib RSI indicator and one value buffer per configured period.
        self.rsis = []
        self.rsi_arrays = []
        for i in range(1, self.N_RSI + 1):
            period = getattr(self.params, "rsi_period_%d" % i)
            rsi = bt.talib.RSI(self.datas[0], timeperiod=period)
            self.rsis.append(rsi)
            self.rsi_arrays.append([])
            # Backward-compatible aliases matching the old unrolled attributes.
            setattr(self, "rsi_%d" % i, rsi)
            setattr(self, "array_rsi_%d" % i, self.rsi_arrays[-1])

    def next(self):
        # Record the current value of every RSI variant for this bar.
        for rsi, values in zip(self.rsis, self.rsi_arrays):
            values.append(rsi[0])

    def stop(self):
        # Figure layout: N_RSI rows x 2 columns -- boxplots left, histograms right.
        figsize = (30, 25)
        plt.subplots(figsize=figsize)
        for i, values in enumerate(self.rsi_arrays):
            period = getattr(self.params, "rsi_period_%d" % (i + 1))
            title = "rsi_%d(period=%d)" % (i + 1, period)
            plt.subplot(self.N_RSI, 2, 2 * i + 1)
            plt.boxplot(values, sym='r*', vert=False, patch_artist=True,
                        meanline=False, showmeans=True)
            plt.title(title)
            plt.subplot(self.N_RSI, 2, 2 * i + 2)
            # NOTE(review): sns.distplot is deprecated in seaborn >= 0.11
            # (replaced by displot/histplot); kept for behaviour parity.
            sns.distplot(values)
            plt.title(title)
        plt.savefig('./rsi分析图.png')
if __name__ == '__main__':
    # Resolve paths relative to this script's location.
    modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    # datapath = os.path.join(modpath, 'F:/git_repo/backtrader-ccxt/datas/BTC-USD-1D-coinbase-converted-date.data')
    # NOTE(review): the second argument looks like an absolute Windows path,
    # which makes the join return it unchanged -- confirm the intended location.
    datapath = os.path.join(modpath, 'F:/git_repo/backtrader-ccxt/datas/BTC-USD-1H-coinbase-converted-datetime.data')
    cerebro = bt.Cerebro()
    cerebro.addstrategy(TestStrategy)
    # NOTE(review): the file name suggests 1-hour bars but timeframe is Days -- confirm.
    data = bt.feeds.BacktraderCSVData(
        dataname=datapath,
        timeframe=bt.TimeFrame.Days,
        # timeframe=bt.TimeFrame.Minutes,
        # compression=1,
        # fromdate=datetime.datetime(2015, 7, 20),
        # todate=datetime.datetime(2015, 10, 21, 21, 25, 0),
        reverse=False)
    cerebro.adddata(data)
    # Broker setup: starting cash and a 0.1% commission per trade.
    init_value = 5000
    cerebro.broker.setcash(init_value)
    mycommission = 0.001
    cerebro.broker.setcommission(commission=mycommission)
    strats = cerebro.run(tradehistory=True)
    # cerebro.plot()
from typing import Set
from keycloak_scanner.jwt_attack import change_to_none
from keycloak_scanner.keycloak_api import KeyCloakApi
from keycloak_scanner.logging.vuln_flag import VulnFlag
from keycloak_scanner.scan_base.scanner import Scanner
from keycloak_scanner.scan_base.types import NoneSign, Client, WellKnown, SecurityConsole, Realm
from keycloak_scanner.scan_base.wrap import WrapperTypes
class NoneSignScanner(Scanner[NoneSign]):
    """Scanner testing whether a Keycloak realm accepts JWTs signed with alg=none.

    Obtains a token pair via the password grant, strips the signature from the
    refresh token (alg=none) and checks whether the server still accepts it.
    """

    def __init__(self, username: str = None, password: str = None, **kwargs):
        # TODO : use credentials from events
        self.username = username
        self.password = password
        super().__init__(result_type=WrapperTypes.NONE_SIGN,
                         needs=[WrapperTypes.REALM_TYPE, WrapperTypes.CLIENT_TYPE, WrapperTypes.WELL_KNOWN_TYPE, WrapperTypes.SECURITY_CONSOLE],
                         **kwargs)

    def perform(self, realm: Realm, client: Client, well_known: WellKnown,
                security_console: SecurityConsole, **kwargs) -> (Set[NoneSign], VulnFlag):
        """Run the none-signature test for one (realm, client) pair.

        Returns the set of findings (possibly empty) and a vulnerability flag.
        """
        # TODO : make secret type + use credentials
        vf = VulnFlag()
        api = KeyCloakApi(well_known=well_known.json, session_provider=super().session, verbose=super().is_verbose())
        if well_known.realm == realm and security_console.secret:
            if self.username is not None:
                # The original source was redacted here ("<PASSWORD>" placeholders,
                # a syntax error). Reconstructed as: use the configured password,
                # falling back to the username when none was given -- a common
                # weak-credential probe. TODO confirm against project history.
                password = self.password
                if self.password is None:
                    password = self.username
                if self.test_none(api, client, security_console.secret, self.username, password):
                    return {NoneSign(realm)}, vf
            else:
                super().info('No none scan, provide credentials to test jwt none signature')
        else:
            super().verbose(f'No secret for realm {realm.name}')
        return set(), vf

    def test_none(self, api, client, client_secret, username, password):
        """Return True if a refresh token with its signature stripped is accepted."""
        try:
            access_token, refresh_token = api.get_token(client.name, client_secret, username, password)
            super().info(
                'Got token via password method. access_token:{}, refresh_token:{}'.format(access_token, refresh_token))
            none_refresh_token = change_to_none(refresh_token)
            try:
                access_token, refresh_token = api.refresh(client, none_refresh_token)
                super().find('NoneSign',
                             f'Refresh work with none. access_token:{access_token}, refresh_token:{refresh_token}')
                return True
            except Exception as e:
                # Server rejected the unsigned token: not vulnerable.
                super().verbose('None refresh token fail : {}'.format(e))
        except Exception as e:
            # Token acquisition failures propagate to the caller unchanged.
            raise e
        return False
return False | keycloak_scanner/scanners/none_sign_scanner.py | from typing import Set
from keycloak_scanner.jwt_attack import change_to_none
from keycloak_scanner.keycloak_api import KeyCloakApi
from keycloak_scanner.logging.vuln_flag import VulnFlag
from keycloak_scanner.scan_base.scanner import Scanner
from keycloak_scanner.scan_base.types import NoneSign, Client, WellKnown, SecurityConsole, Realm
from keycloak_scanner.scan_base.wrap import WrapperTypes
class NoneSignScanner(Scanner[NoneSign]):
    """Scanner testing whether a Keycloak realm accepts JWTs signed with alg=none.

    Obtains a token pair via the password grant, strips the signature from the
    refresh token (alg=none) and checks whether the server still accepts it.
    """

    def __init__(self, username: str = None, password: str = None, **kwargs):
        # TODO : use credentials from events
        self.username = username
        self.password = password
        super().__init__(result_type=WrapperTypes.NONE_SIGN,
                         needs=[WrapperTypes.REALM_TYPE, WrapperTypes.CLIENT_TYPE, WrapperTypes.WELL_KNOWN_TYPE, WrapperTypes.SECURITY_CONSOLE],
                         **kwargs)

    def perform(self, realm: Realm, client: Client, well_known: WellKnown,
                security_console: SecurityConsole, **kwargs) -> (Set[NoneSign], VulnFlag):
        """Run the none-signature test for one (realm, client) pair.

        Returns the set of findings (possibly empty) and a vulnerability flag.
        """
        # TODO : make secret type + use credentials
        vf = VulnFlag()
        api = KeyCloakApi(well_known=well_known.json, session_provider=super().session, verbose=super().is_verbose())
        if well_known.realm == realm and security_console.secret:
            if self.username is not None:
                # The original source was redacted here ("<PASSWORD>" placeholders,
                # a syntax error). Reconstructed as: use the configured password,
                # falling back to the username when none was given -- a common
                # weak-credential probe. TODO confirm against project history.
                password = self.password
                if self.password is None:
                    password = self.username
                if self.test_none(api, client, security_console.secret, self.username, password):
                    return {NoneSign(realm)}, vf
            else:
                super().info('No none scan, provide credentials to test jwt none signature')
        else:
            super().verbose(f'No secret for realm {realm.name}')
        return set(), vf

    def test_none(self, api, client, client_secret, username, password):
        """Return True if a refresh token with its signature stripped is accepted."""
        try:
            access_token, refresh_token = api.get_token(client.name, client_secret, username, password)
            super().info(
                'Got token via password method. access_token:{}, refresh_token:{}'.format(access_token, refresh_token))
            none_refresh_token = change_to_none(refresh_token)
            try:
                access_token, refresh_token = api.refresh(client, none_refresh_token)
                super().find('NoneSign',
                             f'Refresh work with none. access_token:{access_token}, refresh_token:{refresh_token}')
                return True
            except Exception as e:
                # Server rejected the unsigned token: not vulnerable.
                super().verbose('None refresh token fail : {}'.format(e))
        except Exception as e:
            # Token acquisition failures propagate to the caller unchanged.
            raise e
        return False
# In[ ]:
import tensorflow as tf
import numpy as np
import SSAE
reload(SSAE)
import matplotlib.pyplot as plt
import glob
import os
import sys
from scipy import signal
from IPython import display
get_ipython().magic(u'matplotlib inline')
# In[ ]:
sys.path.append('../FileOps/')
import FileIO
import PatchSample
# In[ ]:
def MakePath(ae, iStack, basePath='../train/SSAE/'):
    """Build the checkpoint directory path for stack level iStack of autoencoder ae.

    Layout: <basePath>/<H>x<W>-xy/sw-<sparsity>-wd-<weight_decay>-f-<n1>-...-<n_iStack+1>
    where the trailing feature counts cover stacks 0..iStack.
    """
    size_dir = '%dx%d-xy' % (ae.imgshape[0], ae.imgshape[1])
    leaf = 'sw-%g-wd-%g-f' % (ae.sparsity[iStack], ae.weight_decay)
    suffix = ''.join('-%d' % n for n in ae.nFeatures[:iStack + 1])
    return os.path.join(basePath, size_dir, leaf + suffix)
# In[ ]:
noisyPath = '/home/data0/dufan/CT_images/quater_dose_image/'
normalPath = '/home/data0/dufan/CT_images/full_dose_image/'
# Patient series used for training; expand each ID to its full directory path.
normalSet = ['L067', 'L096', 'L109', 'L192', 'L506']
normalSet = [os.path.join(normalPath, patient) for patient in normalSet]
# In[ ]:
# The patch shape must exist before building the sample path; it was previously
# referenced here before being defined (a NameError when the exported notebook
# runs top-to-bottom -- an artifact of out-of-order cell execution).
imgshape = [16, 16, 1]
samplePath = '../train/sample/%dx%d-xy/'%(imgshape[0], imgshape[1])
nFiles = 10
PatchSample.GenerateTrainingPatchesFromDicomSeq(samplePath, nFiles, 10, 10000, imgshape, normalSet)
# In[ ]:
# Load every pre-generated training patch file into memory up front.
patches = [PatchSample.RetrieveTrainingPatches(samplePath, iFile)
           for iFile in range(nFiles)]
# In[ ]:
# Stacked sparse autoencoder hyper-parameters.
imgshape = [16,16,1]          # training patch shape: 16x16, single channel
nFeatures = [1024,1024,1024]  # hidden-layer width for each stack
sparsity = [5,5,5]            # per-stack sparsity coefficient ('sw' tag in MakePath) -- TODO confirm semantics in SSAE
weight_decay = 0.1            # weight penalty ('wd' tag in MakePath)
nEpoches = 30                 # training epochs per stack
batchsize = 100               # patches per optimizer step
ae = SSAE.StackedSparseAutoEncoder(imgshape, nFeatures, sparsity, weight_decay)
# In[ ]:
lastPath = ''
# Greedy layer-wise training: train one stack at a time, initializing the
# already-trained layers from the previous stack's checkpoint.
for iStack in range(len(nFeatures)):  # fixed: 'nFeatureMaps' was an undefined name
    tf.reset_default_graph()
    ae.BuildStackedAutoEncoder(iStack)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # Only the newly added upmost layer's variables are optimized.
        trainer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(ae.loss_current, var_list=ae.vars_upmost)
    saver = tf.train.Saver(max_to_keep=1000)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list='3')))
    tf.global_variables_initializer().run(session=sess)
    if lastPath != "":
        # Restore every previously trained layer, excluding the new upmost one.
        var_list = [v for v in ae.vars_encoder + ae.vars_decoder if v not in ae.vars_upmost]
        loader = tf.train.Saver(var_list=var_list)
        loader.restore(sess, os.path.join(lastPath, '%d'%(nEpoches-1)))
    # training
    np.random.seed(0)
    lastPath = MakePath(ae, iStack, basePath='../train/SSAE/1024x3/')
    if not os.path.exists(lastPath):
        os.makedirs(lastPath)
    for epoch in range(nEpoches):
        # Visit the patch files in a fresh random order each epoch.
        # list(...) is required: np.random.shuffle rejects a range on Python 3.
        indFile = list(range(len(patches)))
        np.random.shuffle(indFile)
        iIter = 0
        for iFile in indFile:
            normal_imgs = patches[iFile]
            for i in range(0, normal_imgs.shape[0], batchsize):
                normal_batch = normal_imgs[i:i+batchsize,...]
                _, loss_train, loss_s, loss_w, loss_img = sess.run([trainer, ae.loss_upmost, ae.loss_sparse, ae.loss_weight, ae.loss_img],
                                              feed_dict={ae.input_data: normal_batch})
                iIter += 1
                if iIter % 100 == 0:
                    # Periodic progress report plus a visual reconstruction check.
                    sys.__stdout__.write('Stack: %d, Epoch: %d, Iteration: %d, loss = (%f, %f, %f, %f)\n' %(iStack, epoch, iIter, loss_train, loss_s, loss_w, loss_img))
                    [decode] = sess.run([ae.decode_datas[-1]], feed_dict = {ae.input_data: normal_batch})
                    display.clear_output()
                    plt.figure(figsize=[15,6])
                    # Renamed loop index (was 'i', shadowing the batch index above).
                    for j in range(5):
                        plt.subplot(2, 5, j+1); plt.imshow(normal_batch[j,...,0], 'Greys_r', vmin=-160/500.0, vmax=240/500.0)
                        plt.subplot(2, 5, j+6); plt.imshow(decode[j,...,0], 'Greys_r', vmin=-160/500.0, vmax=240/500.0)
                    plt.show()
        saver.save(sess, os.path.join(lastPath, '%d'%epoch))
# In[ ]: | Autoencoders/Train-stack.py |
# In[ ]:
import tensorflow as tf
import numpy as np
import SSAE
reload(SSAE)
import matplotlib.pyplot as plt
import glob
import os
import sys
from scipy import signal
from IPython import display
get_ipython().magic(u'matplotlib inline')
# In[ ]:
sys.path.append('../FileOps/')
import FileIO
import PatchSample
# In[ ]:
def MakePath(ae, iStack, basePath='../train/SSAE/'):
    """Build the checkpoint directory path for stack level iStack of autoencoder ae.

    Layout: <basePath>/<H>x<W>-xy/sw-<sparsity>-wd-<weight_decay>-f-<n1>-...-<n_iStack+1>
    where the trailing feature counts cover stacks 0..iStack.
    """
    size_dir = '%dx%d-xy' % (ae.imgshape[0], ae.imgshape[1])
    leaf = 'sw-%g-wd-%g-f' % (ae.sparsity[iStack], ae.weight_decay)
    suffix = ''.join('-%d' % n for n in ae.nFeatures[:iStack + 1])
    return os.path.join(basePath, size_dir, leaf + suffix)
# In[ ]:
noisyPath = '/home/data0/dufan/CT_images/quater_dose_image/'
normalPath = '/home/data0/dufan/CT_images/full_dose_image/'
# Patient series used for training; expand each ID to its full directory path.
normalSet = ['L067', 'L096', 'L109', 'L192', 'L506']
normalSet = [os.path.join(normalPath, patient) for patient in normalSet]
# In[ ]:
# The patch shape must exist before building the sample path; it was previously
# referenced here before being defined (a NameError when the exported notebook
# runs top-to-bottom -- an artifact of out-of-order cell execution).
imgshape = [16, 16, 1]
samplePath = '../train/sample/%dx%d-xy/'%(imgshape[0], imgshape[1])
nFiles = 10
PatchSample.GenerateTrainingPatchesFromDicomSeq(samplePath, nFiles, 10, 10000, imgshape, normalSet)
# In[ ]:
# Load every pre-generated training patch file into memory up front.
patches = [PatchSample.RetrieveTrainingPatches(samplePath, iFile)
           for iFile in range(nFiles)]
# In[ ]:
# Stacked sparse autoencoder hyper-parameters.
imgshape = [16,16,1]          # training patch shape: 16x16, single channel
nFeatures = [1024,1024,1024]  # hidden-layer width for each stack
sparsity = [5,5,5]            # per-stack sparsity coefficient ('sw' tag in MakePath) -- TODO confirm semantics in SSAE
weight_decay = 0.1            # weight penalty ('wd' tag in MakePath)
nEpoches = 30                 # training epochs per stack
batchsize = 100               # patches per optimizer step
ae = SSAE.StackedSparseAutoEncoder(imgshape, nFeatures, sparsity, weight_decay)
# In[ ]:
lastPath = ''
# Greedy layer-wise training: train one stack at a time, initializing the
# already-trained layers from the previous stack's checkpoint.
for iStack in range(len(nFeatures)):  # fixed: 'nFeatureMaps' was an undefined name
    tf.reset_default_graph()
    ae.BuildStackedAutoEncoder(iStack)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # Only the newly added upmost layer's variables are optimized.
        trainer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(ae.loss_current, var_list=ae.vars_upmost)
    saver = tf.train.Saver(max_to_keep=1000)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list='3')))
    tf.global_variables_initializer().run(session=sess)
    if lastPath != "":
        # Restore every previously trained layer, excluding the new upmost one.
        var_list = [v for v in ae.vars_encoder + ae.vars_decoder if v not in ae.vars_upmost]
        loader = tf.train.Saver(var_list=var_list)
        loader.restore(sess, os.path.join(lastPath, '%d'%(nEpoches-1)))
    # training
    np.random.seed(0)
    lastPath = MakePath(ae, iStack, basePath='../train/SSAE/1024x3/')
    if not os.path.exists(lastPath):
        os.makedirs(lastPath)
    for epoch in range(nEpoches):
        # Visit the patch files in a fresh random order each epoch.
        # list(...) is required: np.random.shuffle rejects a range on Python 3.
        indFile = list(range(len(patches)))
        np.random.shuffle(indFile)
        iIter = 0
        for iFile in indFile:
            normal_imgs = patches[iFile]
            for i in range(0, normal_imgs.shape[0], batchsize):
                normal_batch = normal_imgs[i:i+batchsize,...]
                _, loss_train, loss_s, loss_w, loss_img = sess.run([trainer, ae.loss_upmost, ae.loss_sparse, ae.loss_weight, ae.loss_img],
                                              feed_dict={ae.input_data: normal_batch})
                iIter += 1
                if iIter % 100 == 0:
                    # Periodic progress report plus a visual reconstruction check.
                    sys.__stdout__.write('Stack: %d, Epoch: %d, Iteration: %d, loss = (%f, %f, %f, %f)\n' %(iStack, epoch, iIter, loss_train, loss_s, loss_w, loss_img))
                    [decode] = sess.run([ae.decode_datas[-1]], feed_dict = {ae.input_data: normal_batch})
                    display.clear_output()
                    plt.figure(figsize=[15,6])
                    # Renamed loop index (was 'i', shadowing the batch index above).
                    for j in range(5):
                        plt.subplot(2, 5, j+1); plt.imshow(normal_batch[j,...,0], 'Greys_r', vmin=-160/500.0, vmax=240/500.0)
                        plt.subplot(2, 5, j+6); plt.imshow(decode[j,...,0], 'Greys_r', vmin=-160/500.0, vmax=240/500.0)
                    plt.show()
        saver.save(sess, os.path.join(lastPath, '%d'%epoch))
# In[ ]: | 0.355439 | 0.229018 |
def matrix_product(p):
    """Return m and s.

    m[i][j] is the minimum number of scalar multiplications needed to compute
    the product of matrices A(i), A(i + 1), ..., A(j).

    s[i][j] is the index of the matrix after which the product is split in an
    optimal parenthesization of the matrix product.

    p[0... n] is a list such that matrix A(i) has dimensions p[i - 1] x p[i].
    """
    size = len(p)  # one more than the number of matrices
    # -1 marks entries not yet computed by the memoized recursion in
    # matrix_product_helper.
    m = [[-1 for _ in range(size)] for _ in range(size)]
    s = [[-1 for _ in range(size)] for _ in range(size)]
    matrix_product_helper(p, 1, size - 1, m, s)
    return m, s
def matrix_product_helper(p, start, end, m, s):
    """Return the minimum number of scalar multiplications needed to compute
    the product of matrices A(start), A(start + 1), ..., A(end).

    Minimum costs are memoized into m; the optimal split points are recorded
    in s. p[0... n] is a list such that matrix A(i) has dimensions
    p[i - 1] x p[i].
    """
    # Already computed for this sub-chain?
    if m[start][end] >= 0:
        return m[start][end]
    if start == end:
        # A single matrix needs no multiplications.
        best = 0
    else:
        best = float('inf')
        # Try every split point: (A(start)..A(k)) x (A(k+1)..A(end)).
        for split in range(start, end):
            left = matrix_product_helper(p, start, split, m, s)
            right = matrix_product_helper(p, split + 1, end, m, s)
            cost = left + right + p[start - 1] * p[split] * p[end]
            if cost < best:
                best = cost
                s[start][end] = split
    m[start][end] = best
    return best
def print_parenthesization(s, start, end):
    """Print the optimal parenthesization of the matrix product A(start) x
    A(start + 1) x ... x A(end).

    s[i][j] is the index of the matrix after which the product is split in an
    optimal parenthesization of the matrix product.
    """
    if start == end:
        # Base case: a single matrix prints with no parentheses.
        print('A[{}]'.format(start), end='')
    else:
        split = s[start][end]
        print('(', end='')
        print_parenthesization(s, start, split)
        print_parenthesization(s, split + 1, end)
        print(')', end='')
n = int(input('Enter number of matrices: '))
# Build the dimension list p: matrix i has dimensions p[i - 1] x p[i], so we
# collect the row count of every matrix followed by the column count of the
# last one. NOTE(review): compatibility between consecutive matrices (columns
# of i == rows of i + 1) is assumed, not checked.
p = []
for i in range(n):
    temp = int(input('Enter number of rows in matrix {}: '.format(i + 1)))
    p.append(temp)
temp = int(input('Enter number of columns in matrix {}: '.format(n)))
p.append(temp)
m, s = matrix_product(p)
print('The number of scalar multiplications needed:', m[1][n])
print('Optimal parenthesization: ', end='')
print_parenthesization(s, 1, n) | interview/matrix_chain_multiplication.py | def matrix_product(p):
"""Return m and s.
m[i][j] is the minimum number of scalar multiplications needed to compute the
product of matrices A(i), A(i + 1), ..., A(j).
s[i][j] is the index of the matrix after which the product is split in an
optimal parenthesization of the matrix product.
p[0... n] is a list such that matrix A(i) has dimensions p[i - 1] x p[i].
"""
length = len(p) # len(p) = number of matrices + 1
# m[i][j] is the minimum number of multiplications needed to compute the
# product of matrices A(i), A(i+1), ..., A(j)
# s[i][j] is the matrix after which the product is split in the minimum
# number of multiplications needed
m = [[-1]*length for _ in range(length)]
s = [[-1]*length for _ in range(length)]
matrix_product_helper(p, 1, length - 1, m, s)
return m, s
def matrix_product_helper(p, start, end, m, s):
    """Return the minimum number of scalar multiplications needed to
    compute A(start) x A(start + 1) x ... x A(end).

    Costs are memoized in m[start][end]; the split index of an optimal
    parenthesization is recorded in s[start][end].  p[0...n] is a list
    such that matrix A(i) has dimensions p[i - 1] x p[i].
    """
    if m[start][end] >= 0:  # already memoized
        return m[start][end]
    best = 0 if start == end else float('inf')
    for split in range(start, end):
        cost = (matrix_product_helper(p, start, split, m, s)
                + matrix_product_helper(p, split + 1, end, m, s)
                + p[start - 1] * p[split] * p[end])
        if cost < best:
            best = cost
            s[start][end] = split
    m[start][end] = best
    return best
def print_parenthesization(s, start, end):
    """Print an optimal parenthesization of A(start) x ... x A(end).

    s[i][j] is the index of the matrix after which the product is split
    in an optimal parenthesization of the matrix product.
    """
    if start == end:
        print('A[{}]'.format(start), end='')
        return
    split = s[start][end]
    print('(', end='')
    print_parenthesization(s, start, split)
    print_parenthesization(s, split + 1, end)
    print(')', end='')
# Interactive driver: collect the dimension list p, then solve and report.
n = int(input('Enter number of matrices: '))
p = [int(input('Enter number of rows in matrix {}: '.format(i + 1)))
     for i in range(n)]
p.append(int(input('Enter number of columns in matrix {}: '.format(n))))
m, s = matrix_product(p)
print('The number of scalar multiplications needed:', m[1][n])
print('Optimal parenthesization: ', end='')
print_parenthesization(s, 1, n) | 0.677794 | 0.760517 |
import argparse
import collections
import csv
import functools
import logging
import operator
import sys
def process(fh, op, delimiter, join_string=' + '):
    '''
    Group rows from csv.DictReader *fh* and write aggregates to stdout.

    op is a list of 'colname=operation' strings, where operation is one
    of join, sum, sumint, count, min or max.  Columns not named in *op*
    form the grouping key.  Output uses *delimiter*; 'join' values are
    concatenated with *join_string*.
    '''
    logging.info('reading from stdin...')
    # was: an unused csv.writer built and immediately overwritten -- removed
    out = csv.DictWriter(sys.stdout, delimiter=delimiter, fieldnames=fh.fieldnames)
    out.writeheader()
    # parse the colname=operation specs once, not once per input row
    ops = dict(o.split('=', 1) for o in op)
    key_fields = [field for field in fh.fieldnames if field not in ops]
    out_rows = {}
    for row in fh:
        key = tuple(row[field] for field in key_fields)
        agg = out_rows.setdefault(key, {})
        for field in fh.fieldnames:
            if field not in ops:
                agg[field] = row[field]
                continue
            operation = ops[field]
            if operation == 'sum':
                agg[field] = agg.get(field, 0.0) + float(row[field])
            elif operation == 'sumint':
                agg[field] = agg.get(field, 0) + int(row[field])
            elif operation == 'count':
                agg[field] = agg.get(field, 0) + 1
            elif operation == 'join':
                agg[field] = row[field] if field not in agg \
                    else join_string.join([agg[field], row[field]])
            elif operation == 'min':
                # NOTE(review): compares as strings ('9' > '10') -- kept as-is
                agg[field] = row[field] if field not in agg \
                    else min(agg[field], row[field])
            elif operation == 'max':
                agg[field] = row[field] if field not in agg \
                    else max(agg[field], row[field])
            else:
                # logging.warn is deprecated; use warning
                logging.warning('unrecognised operation %s', operation)
    for key in out_rows:
        out.writerow(out_rows[key])
    logging.info('done')
def main():
    '''
    Parse command line arguments and run the aggregation over stdin.
    '''
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
    parser = argparse.ArgumentParser(description='Filter CSV based on values')
    parser.add_argument('--op', required=True, nargs='+', help='colname=[join|sum|sumint|count|min|max] ...')
    parser.add_argument('--join_string', required=False, default=' + ', help='join delimiter')
    parser.add_argument('--delimiter', default=',', help='csv delimiter')
    args = parser.parse_args()
    reader = csv.DictReader(sys.stdin, delimiter=args.delimiter)
    process(reader, args.op, args.delimiter, args.join_string)
if __name__ == '__main__':
main() | csvtools/csvgroup.py | import argparse
import collections
import csv
import functools
import logging
import operator
import sys
def process(fh, op, delimiter, join_string=' + '):
    '''
    Group rows from csv.DictReader *fh* and write aggregates to stdout.

    op is a list of 'colname=operation' strings, where operation is one
    of join, sum, sumint, count, min or max.  Columns not named in *op*
    form the grouping key.  Output uses *delimiter*; 'join' values are
    concatenated with *join_string*.
    '''
    logging.info('reading from stdin...')
    # was: an unused csv.writer built and immediately overwritten -- removed
    out = csv.DictWriter(sys.stdout, delimiter=delimiter, fieldnames=fh.fieldnames)
    out.writeheader()
    # parse the colname=operation specs once, not once per input row
    ops = dict(o.split('=', 1) for o in op)
    key_fields = [field for field in fh.fieldnames if field not in ops]
    out_rows = {}
    for row in fh:
        key = tuple(row[field] for field in key_fields)
        agg = out_rows.setdefault(key, {})
        for field in fh.fieldnames:
            if field not in ops:
                agg[field] = row[field]
                continue
            operation = ops[field]
            if operation == 'sum':
                agg[field] = agg.get(field, 0.0) + float(row[field])
            elif operation == 'sumint':
                agg[field] = agg.get(field, 0) + int(row[field])
            elif operation == 'count':
                agg[field] = agg.get(field, 0) + 1
            elif operation == 'join':
                agg[field] = row[field] if field not in agg \
                    else join_string.join([agg[field], row[field]])
            elif operation == 'min':
                # NOTE(review): compares as strings ('9' > '10') -- kept as-is
                agg[field] = row[field] if field not in agg \
                    else min(agg[field], row[field])
            elif operation == 'max':
                agg[field] = row[field] if field not in agg \
                    else max(agg[field], row[field])
            else:
                # logging.warn is deprecated; use warning
                logging.warning('unrecognised operation %s', operation)
    for key in out_rows:
        out.writerow(out_rows[key])
    logging.info('done')
def main():
    '''
    Parse command line arguments and run the aggregation over stdin.
    '''
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
    parser = argparse.ArgumentParser(description='Filter CSV based on values')
    parser.add_argument('--op', required=True, nargs='+', help='colname=[join|sum|sumint|count|min|max] ...')
    parser.add_argument('--join_string', required=False, default=' + ', help='join delimiter')
    parser.add_argument('--delimiter', default=',', help='csv delimiter')
    args = parser.parse_args()
    reader = csv.DictReader(sys.stdin, delimiter=args.delimiter)
    process(reader, args.op, args.delimiter, args.join_string)
if __name__ == '__main__':
main() | 0.127327 | 0.125039 |
import abc
from datetime import datetime
from typing import Optional
from django.utils.crypto import get_random_string
from django.utils.text import slugify
import attr
from werkzeug.urls import url_fix
from url_mangler.apps.url_mapper.models import UrlMapping
@attr.s(auto_attribs=True)
class SlugMapping:
    """Plain value object mirroring the UrlMapping model."""

    slug: str
    destination_url: str
    created_at: datetime
class SlugMappingRepo(abc.ABC):
    """Abstract repository for SlugMappings.

    Lets the database layer be swapped out trivially for testing.
    """

    @classmethod
    @abc.abstractmethod
    def get(cls, slug: str) -> Optional[SlugMapping]:
        """Return the SlugMapping stored under *slug*, or None."""
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def save(cls, destination_mapping: str) -> SlugMapping:
        """Persist *destination_mapping* and return its SlugMapping."""
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def generate_slug(cls, destination_mapping: str) -> str:
        """Generate a slug for *destination_mapping*."""
        raise NotImplementedError
class DjangoSlugMappingRepo(SlugMappingRepo):
    """Django-backed repository mapping SlugMappings onto UrlMapping rows."""

    @classmethod
    def get(cls, slug: str) -> Optional[SlugMapping]:
        """Return the SlugMapping for *slug*, or None when unknown."""
        try:
            mapping: UrlMapping = UrlMapping.objects.get(slug=slug)
        except UrlMapping.DoesNotExist:
            return None
        return SlugMapping(
            slug=slug,
            destination_url=mapping.destination_url,
            created_at=mapping.created_at,
        )

    @classmethod
    def save(cls, destination_mapping: str) -> SlugMapping:
        """Persist *destination_mapping* as a UrlMapping and return its SlugMapping."""
        record = UrlMapping(
            slug=cls.generate_slug(destination_mapping=destination_mapping),
            destination_url=url_fix(destination_mapping),
        )
        record.save()
        return SlugMapping(
            slug=record.slug,
            destination_url=record.destination_url,
            created_at=record.created_at,
        )

    @classmethod
    def generate_slug(cls, destination_mapping: str) -> str:
        """Return a random 12-character slug not already in use."""
        while True:
            slug = slugify(get_random_string(length=12).lower())
            if not UrlMapping.objects.filter(slug=slug).exists():
                return slug
class SlugMappingBaseUseCase:
    """Base use case holding the repository used by SlugMapping operations."""

    def __init__(self, slug_mapping_repo: Optional[SlugMappingRepo] = None):
        # The default repository is created per-instance: a default
        # argument of DjangoSlugMappingRepo() would be evaluated once at
        # import time and shared by every use-case instance.
        if slug_mapping_repo is None:
            slug_mapping_repo = DjangoSlugMappingRepo()
        self._slug_mapping_repo = slug_mapping_repo
class RetrieveSlugMappingUseCase(SlugMappingBaseUseCase):
    """Look up the SlugMapping registered under an arbitrary slug."""

    def retrieve(self, slug: str) -> Optional[SlugMapping]:
        """Return the SlugMapping for *slug*, or None when absent."""
        return self._slug_mapping_repo.get(slug=slug)
class GenerateAndSaveSlugMappingUseCase(SlugMappingBaseUseCase):
"""Given an arbitrary destination mapping, generate, save, and return a SlugMapping"""
def save(self, destination_mapping: str) -> SlugMapping:
return self._slug_mapping_repo.save(destination_mapping=destination_mapping) | url_mangler/apps/url_mapper/uses.py | import abc
from datetime import datetime
from typing import Optional
from django.utils.crypto import get_random_string
from django.utils.text import slugify
import attr
from werkzeug.urls import url_fix
from url_mangler.apps.url_mapper.models import UrlMapping
@attr.s(auto_attribs=True)
class SlugMapping:
    """Plain value object mirroring the UrlMapping model."""

    slug: str
    destination_url: str
    created_at: datetime
class SlugMappingRepo(abc.ABC):
    """Abstract repository for SlugMappings.

    Lets the database layer be swapped out trivially for testing.
    """

    @classmethod
    @abc.abstractmethod
    def get(cls, slug: str) -> Optional[SlugMapping]:
        """Return the SlugMapping stored under *slug*, or None."""
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def save(cls, destination_mapping: str) -> SlugMapping:
        """Persist *destination_mapping* and return its SlugMapping."""
        raise NotImplementedError

    @classmethod
    @abc.abstractmethod
    def generate_slug(cls, destination_mapping: str) -> str:
        """Generate a slug for *destination_mapping*."""
        raise NotImplementedError
class DjangoSlugMappingRepo(SlugMappingRepo):
    """Django-backed repository mapping SlugMappings onto UrlMapping rows."""

    @classmethod
    def get(cls, slug: str) -> Optional[SlugMapping]:
        """Return the SlugMapping for *slug*, or None when unknown."""
        try:
            mapping: UrlMapping = UrlMapping.objects.get(slug=slug)
        except UrlMapping.DoesNotExist:
            return None
        return SlugMapping(
            slug=slug,
            destination_url=mapping.destination_url,
            created_at=mapping.created_at,
        )

    @classmethod
    def save(cls, destination_mapping: str) -> SlugMapping:
        """Persist *destination_mapping* as a UrlMapping and return its SlugMapping."""
        record = UrlMapping(
            slug=cls.generate_slug(destination_mapping=destination_mapping),
            destination_url=url_fix(destination_mapping),
        )
        record.save()
        return SlugMapping(
            slug=record.slug,
            destination_url=record.destination_url,
            created_at=record.created_at,
        )

    @classmethod
    def generate_slug(cls, destination_mapping: str) -> str:
        """Return a random 12-character slug not already in use."""
        while True:
            slug = slugify(get_random_string(length=12).lower())
            if not UrlMapping.objects.filter(slug=slug).exists():
                return slug
class SlugMappingBaseUseCase:
    """Base use case holding the repository used by SlugMapping operations."""

    def __init__(self, slug_mapping_repo: Optional[SlugMappingRepo] = None):
        # The default repository is created per-instance: a default
        # argument of DjangoSlugMappingRepo() would be evaluated once at
        # import time and shared by every use-case instance.
        if slug_mapping_repo is None:
            slug_mapping_repo = DjangoSlugMappingRepo()
        self._slug_mapping_repo = slug_mapping_repo
class RetrieveSlugMappingUseCase(SlugMappingBaseUseCase):
    """Look up the SlugMapping registered under an arbitrary slug."""

    def retrieve(self, slug: str) -> Optional[SlugMapping]:
        """Return the SlugMapping for *slug*, or None when absent."""
        return self._slug_mapping_repo.get(slug=slug)
class GenerateAndSaveSlugMappingUseCase(SlugMappingBaseUseCase):
"""Given an arbitrary destination mapping, generate, save, and return a SlugMapping"""
def save(self, destination_mapping: str) -> SlugMapping:
return self._slug_mapping_repo.save(destination_mapping=destination_mapping) | 0.912486 | 0.189128 |
import sqlite3
def create_database(mops_directory):
    """Create the MOPS database (mops.db) in *mops_directory* if needed.

    Connects to mops.db (sqlite creates the file when absent) and probes
    the railroad table; when the probe fails the whole schema is created.
    The database name is fixed as mops.db.
    """
    # Full schema, one DDL statement per table (was 25 copy-pasted
    # sql/execute pairs).
    ddl = (
        '''create table locotype (id integer primary key autoincrement,
           locotype text, name text, power_type text, haulage integer,
           fuel_capacity integer, fuel_rate integer, maint_interval integer,
           works_time integer, weight integer, length integer,
           oper_mode text)''',
        '''create table locomotive (id integer primary key autoincrement,
           loco text, locotype text, fuel integer, weight integer,
           time_to_maint integer, time_in_maint integer, is_powered text,
           railroad text, home_station text, station text,
           place_id integer, train text)''',
        '''create table railroad (id integer primary key autoincrement,
           railroad text, name text)''',
        '''create table loading (id integer primary key autoincrement,
           loading text, desc text, can_load text, can_unload text)''',
        '''create table carclass (id integer primary key autoincrement,
           carclass text, name text)''',
        '''create table cartype (id integer primary key autoincrement,
           cartype text, name text, length integer, oper_mode text,
           capacity integer, unladen_weight integer, loading text,
           unloading text, carclass text)''',
        '''create table commodity (id integer primary key autoincrement,
           commodity text, name text, loading text, loading_rate integer,
           unloading_rate integer, clean_cars text)''',
        '''create table area (id integer primary key autoincrement,
           area text, name text, railroad text)''',
        '''create table stationtype (id integer primary key autoincrement,
           stationtype text, desc text)''',
        '''create table car (id integer primary key autoincrement,
           car text, cartype text, time_to_maint integer,
           time_in_maint integer, carclass text, railroad text,
           commodity text, home_station text, station text,
           place_id integer, train text, block text,
           weight_loaded integer, is_attached_set text, clean_dirty text,
           carorder integer)''',
        '''create table station (id integer primary key autoincrement,
           station text, short_name text, long_name text, area text,
           stationtype text, alias text)''',
        '''create table place (id integer primary key autoincrement,
           name text, station text, code text, track_length integer,
           industry text, place_type text, loading text, unloading text,
           car_id text)''',
        '''create table route (id integer primary key autoincrement,
           route text, name text, status text, default_direction text)''',
        '''create table section (id integer primary key autoincrement,
           route text, section integer, depart_station text,
           arrive_station text)''',
        '''create table schedule (id integer primary key autoincrement,
           schedule text, route text, name text, class text, status text,
           run_days text, orig_station text, dest_station text,
           direction text)''',
        '''create table timings (id integer primary key autoincrement,
           section text, schedule text, depart_station text,
           arrive_station text, planned_depart text,
           planned_arrive text)''',
        '''create table instructions(id integer primary key autoincrement,
           route text, schedule text, station text, instruction text)''',
        '''create table train (id integer primary key autoincrement,
           train text, type text, station text, status text,
           schedule text)''',
        '''create table running (id integer primary key autoincrement,
           train text, timings integer, depart_station text,
           arrive_station text, est_depart text, est_arrive text,
           act_depart text, act_arrive text)''',
        '''create table warehouse (id integer primary key autoincrement,
           industry text, commodity text, destination text,
           production integer, threshold_goods integer,
           threshold_cars integer, threshold_class integer,
           max_storage integer, in_storage integer, ordered integer,
           routing text)''',
        '''create table routing (id integer primary key autoincrement,
           routing text, desc text)''',
        '''create table waybill (id integer primary key autoincrement,
           warehouse integer, type text, requires text, clean_cars text,
           loading text, unloading text, commodity text, origin text,
           destination text, status text, timestamp text)''',
        '''create table flash (id integer primary key autoincrement,
           flash integer, message text, user text, timer integer)''',
        '''create table user (id integer primary key autoincrement,
           user text, name text, passcode text, user_type text,
           is_signed_on text, has_access_disabled text,
           get_new_password text)''',
        '''create table calendar (id integer primary key autoincrement,
           day text, month text, year text, holiday text, current text,
           dow text)''',
        '''create table parameter (id integer primary key autoincrement,
           name text, value text)''',
    )
    cxn = sqlite3.connect(mops_directory + 'mops.db')
    c = cxn.cursor()
    try:
        # probe: if this succeeds the schema already exists
        c.execute('select id from railroad')
    except sqlite3.Error:
        # narrowed from a bare except: only database errors trigger creation
        for statement in ddl:
            c.execute(statement)
        cxn.commit()
        print('MOPS DATABASE CREATED')
    finally:
        c.close()
        # original leaked the connection on the already-built path
        cxn.close()
    return
def create_calendar(mops_directory):
    """Populate the calendar table with one five-year block of dates.

    Prompts for a start year (1950/1955/.../2010) and inserts one row per
    day for the five years starting then; refuses to run if that period
    is already present.  Overall coverage is intended to span 1 January
    1950 to 31 December 2049, created in five-year chunks to keep the
    database small.

    Fixes over the original: raw_input (Python 2 only) replaced with
    input; c.close/cxn.close were referenced without calling them, so
    every connection leaked; one connection and one commit are used
    instead of one per day; dates and weekdays are computed with
    datetime instead of hand-rolled tables (whose fallback weekday was
    wrong for unlisted years).
    """
    import datetime

    s_start_year = input('Enter start year 1950/1955/1960/../2010 ==> ')
    start_year = int(s_start_year)
    cxn = sqlite3.connect(mops_directory + 'mops.db')
    c = cxn.cursor()
    c.execute('select id from calendar where year = ?', (str(start_year),))
    already_present = c.fetchall()
    if already_present:
        print('Calendar already created on database for this time period')
        c.close()
        cxn.close()
        return
    months = ('JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
              'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC')
    dows = ('MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN')
    day = datetime.date(start_year, 1, 1)
    one_day = datetime.timedelta(days=1)
    while day.year < start_year + 5:
        # columns: day, month, year, holiday, current, dow
        row = ('{:02d}'.format(day.day), months[day.month - 1],
               str(day.year), '', '', dows[day.weekday()])
        c.execute('insert into calendar values (null, ?, ?, ?, ?, ?, ?)', row)
        next_day = day + one_day
        if next_day.year != day.year:
            print('CALENDAR GENERATED FOR ' + str(day.year))
        day = next_day
    cxn.commit()
    c.close()
    cxn.close()
    return
return | trunk/MOPS_Database.py | import sqlite3
def create_database(mops_directory):
    """Create the MOPS database (mops.db) in *mops_directory* if needed.

    Connects to mops.db (sqlite creates the file when absent) and probes
    the railroad table; when the probe fails the whole schema is created.
    The database name is fixed as mops.db.
    """
    # Full schema, one DDL statement per table (was 25 copy-pasted
    # sql/execute pairs).
    ddl = (
        '''create table locotype (id integer primary key autoincrement,
           locotype text, name text, power_type text, haulage integer,
           fuel_capacity integer, fuel_rate integer, maint_interval integer,
           works_time integer, weight integer, length integer,
           oper_mode text)''',
        '''create table locomotive (id integer primary key autoincrement,
           loco text, locotype text, fuel integer, weight integer,
           time_to_maint integer, time_in_maint integer, is_powered text,
           railroad text, home_station text, station text,
           place_id integer, train text)''',
        '''create table railroad (id integer primary key autoincrement,
           railroad text, name text)''',
        '''create table loading (id integer primary key autoincrement,
           loading text, desc text, can_load text, can_unload text)''',
        '''create table carclass (id integer primary key autoincrement,
           carclass text, name text)''',
        '''create table cartype (id integer primary key autoincrement,
           cartype text, name text, length integer, oper_mode text,
           capacity integer, unladen_weight integer, loading text,
           unloading text, carclass text)''',
        '''create table commodity (id integer primary key autoincrement,
           commodity text, name text, loading text, loading_rate integer,
           unloading_rate integer, clean_cars text)''',
        '''create table area (id integer primary key autoincrement,
           area text, name text, railroad text)''',
        '''create table stationtype (id integer primary key autoincrement,
           stationtype text, desc text)''',
        '''create table car (id integer primary key autoincrement,
           car text, cartype text, time_to_maint integer,
           time_in_maint integer, carclass text, railroad text,
           commodity text, home_station text, station text,
           place_id integer, train text, block text,
           weight_loaded integer, is_attached_set text, clean_dirty text,
           carorder integer)''',
        '''create table station (id integer primary key autoincrement,
           station text, short_name text, long_name text, area text,
           stationtype text, alias text)''',
        '''create table place (id integer primary key autoincrement,
           name text, station text, code text, track_length integer,
           industry text, place_type text, loading text, unloading text,
           car_id text)''',
        '''create table route (id integer primary key autoincrement,
           route text, name text, status text, default_direction text)''',
        '''create table section (id integer primary key autoincrement,
           route text, section integer, depart_station text,
           arrive_station text)''',
        '''create table schedule (id integer primary key autoincrement,
           schedule text, route text, name text, class text, status text,
           run_days text, orig_station text, dest_station text,
           direction text)''',
        '''create table timings (id integer primary key autoincrement,
           section text, schedule text, depart_station text,
           arrive_station text, planned_depart text,
           planned_arrive text)''',
        '''create table instructions(id integer primary key autoincrement,
           route text, schedule text, station text, instruction text)''',
        '''create table train (id integer primary key autoincrement,
           train text, type text, station text, status text,
           schedule text)''',
        '''create table running (id integer primary key autoincrement,
           train text, timings integer, depart_station text,
           arrive_station text, est_depart text, est_arrive text,
           act_depart text, act_arrive text)''',
        '''create table warehouse (id integer primary key autoincrement,
           industry text, commodity text, destination text,
           production integer, threshold_goods integer,
           threshold_cars integer, threshold_class integer,
           max_storage integer, in_storage integer, ordered integer,
           routing text)''',
        '''create table routing (id integer primary key autoincrement,
           routing text, desc text)''',
        '''create table waybill (id integer primary key autoincrement,
           warehouse integer, type text, requires text, clean_cars text,
           loading text, unloading text, commodity text, origin text,
           destination text, status text, timestamp text)''',
        '''create table flash (id integer primary key autoincrement,
           flash integer, message text, user text, timer integer)''',
        '''create table user (id integer primary key autoincrement,
           user text, name text, passcode text, user_type text,
           is_signed_on text, has_access_disabled text,
           get_new_password text)''',
        '''create table calendar (id integer primary key autoincrement,
           day text, month text, year text, holiday text, current text,
           dow text)''',
        '''create table parameter (id integer primary key autoincrement,
           name text, value text)''',
    )
    cxn = sqlite3.connect(mops_directory + 'mops.db')
    c = cxn.cursor()
    try:
        # probe: if this succeeds the schema already exists
        c.execute('select id from railroad')
    except sqlite3.Error:
        # narrowed from a bare except: only database errors trigger creation
        for statement in ddl:
            c.execute(statement)
        cxn.commit()
        print('MOPS DATABASE CREATED')
    finally:
        c.close()
        # original leaked the connection on the already-built path
        cxn.close()
    return
def create_calendar(mops_directory):
    """Populate the calendar table with one five-year block of dates.

    Prompts for a start year (1950/1955/.../2010) and inserts one row per
    day for the five years starting then; refuses to run if that period
    is already present.  Overall coverage is intended to span 1 January
    1950 to 31 December 2049, created in five-year chunks to keep the
    database small.

    Fixes over the original: raw_input (Python 2 only) replaced with
    input; c.close/cxn.close were referenced without calling them, so
    every connection leaked; one connection and one commit are used
    instead of one per day; dates and weekdays are computed with
    datetime instead of hand-rolled tables (whose fallback weekday was
    wrong for unlisted years).
    """
    import datetime

    s_start_year = input('Enter start year 1950/1955/1960/../2010 ==> ')
    start_year = int(s_start_year)
    cxn = sqlite3.connect(mops_directory + 'mops.db')
    c = cxn.cursor()
    c.execute('select id from calendar where year = ?', (str(start_year),))
    already_present = c.fetchall()
    if already_present:
        print('Calendar already created on database for this time period')
        c.close()
        cxn.close()
        return
    months = ('JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
              'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC')
    dows = ('MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN')
    day = datetime.date(start_year, 1, 1)
    one_day = datetime.timedelta(days=1)
    while day.year < start_year + 5:
        # columns: day, month, year, holiday, current, dow
        row = ('{:02d}'.format(day.day), months[day.month - 1],
               str(day.year), '', '', dows[day.weekday()])
        c.execute('insert into calendar values (null, ?, ?, ?, ?, ?, ?)', row)
        next_day = day + one_day
        if next_day.year != day.year:
            print('CALENDAR GENERATED FOR ' + str(day.year))
        day = next_day
    cxn.commit()
    c.close()
    cxn.close()
    return
return | 0.274546 | 0.091585 |
from django.db import models
from django.db.models import OuterRef, Subquery, QuerySet
from django_filters import rest_framework as filters
from .models import Rate
class RateFilter(filters.FilterSet):
    """FilterSet exposing the query parameters available on Rate objects."""

    # -- user / key scoping --------------------------------------------
    user = filters.BooleanFilter(
        label="filter rate associated to connected user",
        method='user_filter')
    key = filters.CharFilter(
        label="filter rates with key",
        method='key_filter')
    key_or_null = filters.CharFilter(
        label="filter rates with key or without key",
        method='key_or_null_filter')
    key_isnull = filters.CharFilter(
        label="filter rates without key",
        method='key_isnull_filter')
    # -- date range ----------------------------------------------------
    value_date = filters.DateFilter(
        label="filter rates at a specific date",
        field_name='value_date',
        lookup_expr='exact')
    from_obj = filters.DateFilter(
        label="filter rates after a specific date (included)",
        field_name='value_date',
        lookup_expr='gte')
    to_obj = filters.DateFilter(
        label="filter rates before a specific date (included)",
        field_name='value_date',
        lookup_expr='lte')
    # -- value range ---------------------------------------------------
    value = filters.NumberFilter(
        label="filter rates with a specific value",
        field_name='value',
        lookup_expr='exact')
    lower_bound = filters.NumberFilter(
        label="filter rates with a value higher than the given value",
        field_name='value',
        lookup_expr='gte')
    higher_bound = filters.NumberFilter(
        label="filter rates with a value lower than the given value",
        field_name='value',
        lookup_expr='lte')
    # -- currencies ----------------------------------------------------
    currency = filters.CharFilter(
        label="filter by target currency",
        field_name='currency',
        lookup_expr='iexact')
    base_currency = filters.CharFilter(
        label="filter by base currency",
        field_name='base_currency',
        lookup_expr='iexact')
    currency_latest_values = filters.CharFilter(
        label="Only output latest rates for currency",
        method='currency_latest_values_filter')
    base_currency_latest_values = filters.CharFilter(
        # was a copy-paste of the currency filter's label
        label="Only output latest rates for base currency",
        method='base_currency_latest_values_filter')
    ordering = filters.OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('key', 'key'),
            ('value', 'value'),
            ('value_date', 'value_date'),
            ('base_currency', 'base_currency'),
            ('currency', 'currency'),
        ),
    )

    class Meta:
        """FilterSet configuration: expose every Rate field except pk."""
        model = Rate
        exclude = ['pk', ]
def user_filter(self, queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on user
"""
if self.request and self.request.user and \
self.request.user.is_authenticated:
return queryset.filter(**{
'user': self.request.user,
})
return queryset.filter(user__isnull=True)
def key_filter(self, queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on key, only filters if request.user is set and authenticated
"""
if self.request and self.request.user and \
self.request.user.is_authenticated:
return queryset.filter(**{
'user': self.request.user,
'key': value
})
return queryset.filter(user__isnull=True)
def key_or_null_filter(self, queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on key if user is authenticated or on records without user
"""
if self.request and self.request.user and \
self.request.user.is_authenticated:
return queryset.filter(
(models.Q(user=self.request.user) & models.Q(key=value)) |
models.Q(key__isnull=True)
)
return queryset.filter(user__isnull=True)
@staticmethod
def key_isnull_filter(queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on records without key
"""
return queryset.filter(key__isnull=True)
@staticmethod
def currency_latest_values_filter(queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Returns a queryset of latest values fos a currency
"""
queryset = queryset.filter(currency=value)
latest = queryset.filter(
currency=OuterRef('currency')
).order_by('-value_date')
return queryset.annotate(
currency_latest=Subquery(latest.values('value_date')[:1])
).filter(value_date=models.F('currency_latest'))
@staticmethod
def base_currency_latest_values_filter(queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Returns a queryset of latest valeus for a base currency
"""
queryset = queryset.filter(base_currency=value)
latest = queryset.filter(
base_currency=OuterRef('base_currency')
).order_by('-value_date')
return queryset.annotate(
base_currency_latest=Subquery(latest.values('value_date')[:1])
).filter(value_date=models.F('base_currency_latest')) | src/geocurrency/rates/filters.py | from django.db import models
from django.db.models import OuterRef, Subquery, QuerySet
from django_filters import rest_framework as filters
from .models import Rate
class RateFilter(filters.FilterSet):
"""
Rate object filter
"""
user = filters.BooleanFilter(
label="filter rate associated to connected user",
method='user_filter')
key = filters.CharFilter(
label="filter rates with key",
method='key_filter')
key_or_null = filters.CharFilter(
label="filter rates with key or without key",
method='key_or_null_filter')
key_isnull = filters.CharFilter(
label="filter rates without key", method='key_isnull_filter')
value_date = filters.DateFilter(
label="filter rates at a specific date",
field_name='value_date',
lookup_expr='exact')
from_obj = filters.DateFilter(
label="filter rates after a specific date (included)",
field_name='value_date',
lookup_expr='gte')
to_obj = filters.DateFilter(
label="filter rates before a specific date (included)",
field_name='value_date',
lookup_expr='lte')
value = filters.NumberFilter(
label="filter rates with a specific value",
field_name='value',
lookup_expr='exact')
lower_bound = filters.NumberFilter(
label="filter rates with a value higher than the given value",
field_name='value',
lookup_expr='gte')
higher_bound = filters.NumberFilter(
label="filter rates with a value lower than the given value",
field_name='value',
lookup_expr='lte')
currency = filters.CharFilter(
label="filter by target currency",
field_name='currency',
lookup_expr='iexact')
base_currency = filters.CharFilter(
label="filter by base currency",
field_name='base_currency',
lookup_expr='iexact')
currency_latest_values = filters.CharFilter(
label="Only output latest rates for currency",
method='currency_latest_values_filter')
base_currency_latest_values = filters.CharFilter(
label="Only output latest rates for currency",
method='base_currency_latest_values_filter')
ordering = filters.OrderingFilter(
# tuple-mapping retains order
fields=(
('key', 'key'),
('value', 'value'),
('value_date', 'value_date'),
('base_currency', 'base_currency'),
('currency', 'currency'),
),
)
class Meta:
"""
Meta
"""
model = Rate
exclude = ['pk', ]
def user_filter(self, queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on user
"""
if self.request and self.request.user and \
self.request.user.is_authenticated:
return queryset.filter(**{
'user': self.request.user,
})
return queryset.filter(user__isnull=True)
def key_filter(self, queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on key, only filters if request.user is set and authenticated
"""
if self.request and self.request.user and \
self.request.user.is_authenticated:
return queryset.filter(**{
'user': self.request.user,
'key': value
})
return queryset.filter(user__isnull=True)
def key_or_null_filter(self, queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on key if user is authenticated or on records without user
"""
if self.request and self.request.user and \
self.request.user.is_authenticated:
return queryset.filter(
(models.Q(user=self.request.user) & models.Q(key=value)) |
models.Q(key__isnull=True)
)
return queryset.filter(user__isnull=True)
@staticmethod
def key_isnull_filter(queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Filter on records without key
"""
return queryset.filter(key__isnull=True)
@staticmethod
def currency_latest_values_filter(queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Returns a queryset of latest values fos a currency
"""
queryset = queryset.filter(currency=value)
latest = queryset.filter(
currency=OuterRef('currency')
).order_by('-value_date')
return queryset.annotate(
currency_latest=Subquery(latest.values('value_date')[:1])
).filter(value_date=models.F('currency_latest'))
@staticmethod
def base_currency_latest_values_filter(queryset: QuerySet,
name: str, value: str) -> QuerySet:
"""
Returns a queryset of latest valeus for a base currency
"""
queryset = queryset.filter(base_currency=value)
latest = queryset.filter(
base_currency=OuterRef('base_currency')
).order_by('-value_date')
return queryset.annotate(
base_currency_latest=Subquery(latest.values('value_date')[:1])
).filter(value_date=models.F('base_currency_latest')) | 0.832032 | 0.390069 |
import httplib
import urllib
import requests
from urlparse import urlparse
from opensearch import log
from opensearch.exception import HTTPError, ArgumentError
class HttpClient(object):
def request(self, url, method, params):
raise NotImplementedError
@classmethod
def get_httpclient(cls):
return DefaultHttpClient()
class DefaultHttpClient(HttpClient):
def request(self, url, method, params):
if method not in ('GET', 'POST'):
raise ArgumentError("method must be 'POST' or 'GET'")
parse_result = urlparse(url)
host = parse_result.hostname
port = parse_result.port
path = parse_result.path
if parse_result.scheme == 'http':
conn = httplib.HTTPConnection(host, port=port)
else:
conn = httplib.HTTPSConnection(host, port=port)
try:
req_url = path
body = None
headers = {}
if method == 'GET':
req_url = req_url + '?' + urllib.urlencode(params)
else:
body = urllib.urlencode(params)
headers = {"Content-type": "application/x-www-form-urlencoded"}
log.debug("[httplib] request url: %s, method: %s params: %s" % (url, method, params))
conn.request(method, req_url, body, headers)
response = conn.getresponse()
http_status = response.status
http_body = response.read()
log.debug("[httplib] response status: %s body: %s" % (http_status, http_body))
except httplib.HTTPException, e:
raise HTTPError("httplib request exception: %s" % e.message)
finally:
conn.close()
if http_status == httplib.OK:
return http_body
else:
raise HTTPError("server http response error code: %s body: %s" % (http_status, http_body))
class RequestsHttpClient(HttpClient):
def request(self, url, method, params):
if method not in ('GET', 'POST'):
raise ArgumentError("method must be 'POST' or 'GET'")
try:
log.debug("[requests] request url:%s method: %s params:%s" % (url, method, params))
if method == 'GET':
r = requests.get(url, params=params)
else:
r = requests.post(url, data=params)
log.debug("[requests] response data:" + r.text)
except requests.HTTPError, e:
raise HTTPError("requests get exception: %s" % e.message)
if r.status_code == 200:
return r.text
else:
raise HTTPError("server http response code: %s" % r.status_code) | opensearch/httpclient.py | import httplib
import urllib
import requests
from urlparse import urlparse
from opensearch import log
from opensearch.exception import HTTPError, ArgumentError
class HttpClient(object):
def request(self, url, method, params):
raise NotImplementedError
@classmethod
def get_httpclient(cls):
return DefaultHttpClient()
class DefaultHttpClient(HttpClient):
def request(self, url, method, params):
if method not in ('GET', 'POST'):
raise ArgumentError("method must be 'POST' or 'GET'")
parse_result = urlparse(url)
host = parse_result.hostname
port = parse_result.port
path = parse_result.path
if parse_result.scheme == 'http':
conn = httplib.HTTPConnection(host, port=port)
else:
conn = httplib.HTTPSConnection(host, port=port)
try:
req_url = path
body = None
headers = {}
if method == 'GET':
req_url = req_url + '?' + urllib.urlencode(params)
else:
body = urllib.urlencode(params)
headers = {"Content-type": "application/x-www-form-urlencoded"}
log.debug("[httplib] request url: %s, method: %s params: %s" % (url, method, params))
conn.request(method, req_url, body, headers)
response = conn.getresponse()
http_status = response.status
http_body = response.read()
log.debug("[httplib] response status: %s body: %s" % (http_status, http_body))
except httplib.HTTPException, e:
raise HTTPError("httplib request exception: %s" % e.message)
finally:
conn.close()
if http_status == httplib.OK:
return http_body
else:
raise HTTPError("server http response error code: %s body: %s" % (http_status, http_body))
class RequestsHttpClient(HttpClient):
def request(self, url, method, params):
if method not in ('GET', 'POST'):
raise ArgumentError("method must be 'POST' or 'GET'")
try:
log.debug("[requests] request url:%s method: %s params:%s" % (url, method, params))
if method == 'GET':
r = requests.get(url, params=params)
else:
r = requests.post(url, data=params)
log.debug("[requests] response data:" + r.text)
except requests.HTTPError, e:
raise HTTPError("requests get exception: %s" % e.message)
if r.status_code == 200:
return r.text
else:
raise HTTPError("server http response code: %s" % r.status_code) | 0.26588 | 0.049797 |
from ..identity import PartitionIdentity
from osgeo import gdal, gdal_array, osr
from osgeo.gdalconst import GDT_Float32, GDT_Byte, GDT_Int16
import numpy as np
class OutOfBounds(Exception): pass
class Kernel(object):
def __init__(self, size, matrix=None):
self.size = size
if size%2 == 0:
raise ValueError("Size must be odd")
# For 1-based array indexing, we'd have to +1, but this is zero-based
if size > 1:
self.center = int(size/2)
else:
self.center = 1
self.offset = (self.size - 1) / 2
if matrix is None:
self.matrix = np.ones((self.size, self.size))
else:
self.matrix = matrix
self._dindices = None
def limit(self):
'''Make the matrix spot sort of round, by masking values in the corners'''
# Get the max value for the edge of the enclosed circle.
# This assumes that there is a radial gradient.
row_max = self.matrix[self.center][0]
for (y_m,x_m), value in np.ndenumerate(self.matrix):
if self.matrix[y_m][x_m] > row_max:
self.matrix[y_m][x_m] = 0
def round(self):
'''Make the matrix spot sort of round, using a radius'''
import math
# Get the max value for the edge of the enclosed circle.
# This assumes that there is a radial gradient.
for (x_m,y_m), value in np.ndenumerate(self.matrix):
if math.sqrt( (x_m-self.center)**2 + (y_m-self.center)**2 ) > float(self.size) / 2.:
self.matrix[y_m][x_m] = 0
def norm(self):
#self.matrix /= sum(self.matrix)
self.matrix /= self.matrix.max()
def invert(self):
''''Invert the values, so the cells closer to the center
have the higher values. '''
#range = self.matrix.max() - self.matrix.min()
self.matrix = self.matrix.max() - self.matrix
self.inverted = ~self.inverted
def quantize(self, bins=255):
from util import jenks_breaks
hist, edges = np.histogram(self.matrix.compressed(),bins=bins)
print "Hist", hist
print "Edges",edges
breaks = jenks_breaks(self.matrix.compressed().tolist(), bins)
print "Breaks",breaks
l = list(set(self.matrix.compressed().tolist()))
l.sort()
print "Uniques", l
print self.matrix.compressed()
digits = np.digitize(self.matrix.ravel(), breaks)
print self.matrix.size
print digits.size
print self.matrix.shape[0]
s = np.ma.array(np.reshape(digits, self.matrix.shape), mask=self.matrix.mask)
print s
def bounds(self, a, point):
y_max, x_max = a.shape
m = None
use_m=False
if point.x < self.offset:
if point.x < 0:
return (False, None, None, None, None)
x_start = max(point.x - self.offset,0)
x_end = point.x + self.offset +1
m = self.matrix[:,(self.offset-point.x):self.matrix.shape[1]]
use_m=True
elif point.x+self.offset+1 > x_max :
if point.x > x_max:
return (False, None, None, None, None)
x_start = point.x - self.offset
x_end = min(point.x + self.offset+1, x_max)
m = self.matrix[:,0:self.matrix.shape[1]+ (x_max-point.x-self.offset)-1]
use_m=True
else:
x_start = point.x - self.offset
x_end = point.x + self.offset+1
sm = (m if use_m else self.matrix)
if point.y < self.offset:
if point.y < 0:
return (False, None, None, None, None)
y_start = max(point.y - self.offset,0)
y_end = point.y + self.offset+1
m = sm[(self.offset-point.y):sm.shape[0],:]
use_m=True
elif point.y+self.offset+1 > y_max:
if point.y > y_max:
return (False, None, None, None, None)
y_start = point.y - self.offset
y_end = point.y + self.offset+1
m = sm[0:sm.shape[0]+ (y_max-point.y-self.offset)-1,:]
use_m=True
else:
y_start = point.y - self.offset
y_end = point.y + self.offset+1
if m is None:
m = self.matrix
return ( m, y_start, y_end, x_start, x_end)
@property
def dindices(self):
'''Return the indices of the matrix, sorted by distance from the center'''
import math
if self._dindices is None:
indices = []
c = self.center
for i, v in np.ndenumerate(self.matrix):
indices.append(i)
self._dindices = sorted(indices, key=lambda i: math.sqrt( (i[0]-c)**2 + (i[1]-c)**2 ))
return self._dindices
def diterate(self, a, point):
'''Iterate over the distances from the point in the array a'''
for i in self.dindices:
x = point[0]+i[1]-self.center
x = x if x >=0 else 0
y = point[1]+i[0]-self.center
y = y if y >=0 else 0
yield i, (y,x )
def apply(self,a, point, source = None, f=None, v=None):
"""Apply the values in the kernel onto an array, centered at a point.
:param a: The array to apply to
:type a: numpy.array
:param source: The source for reading data. Must have same dimensions as a
:type a: numpy.array
:param f: A two argument function that decides which value to apply to the array
:type f: callable
:param point: The point, in the array coordinate system, where the center of the
kernel will be applied
:type point: Point
:param v: External value to be passed into the function
:type v: any
"""
if v:
from functools import partial
f = partial(f,v)
(m, y_start, y_end, x_start, x_end) = self.bounds(a, point)
if not source:
source = a
#print a.shape, point, x_start, x_end, y_start, y_end, (m if use_m else self.matrix).shape
if m is not False: #
a[y_start:y_end, x_start:x_end] = f( source[y_start:y_end, x_start:x_end], m)
else:
raise OutOfBounds("Point {} is out of bounds for this array ( {} )".format(str(point), str(a.shape)))
def iterate(self, a, indices = None):
'''Iterate over kernel sized arrays of the input array. If indices are specified, use them to iterate
over some of the cells, rather than all of them. '''
from ..geo import Point
if indices is None:
it = np.nditer(a,flags=['multi_index'] )
while not it.finished:
(m, y_start, y_end, x_start, x_end) = self.bounds(a, Point(it.multi_index[1], it.multi_index[0]))
yield it.multi_index[0], it.multi_index[1], a[y_start:y_end, x_start:x_end], m
it.iternext()
else:
for y,x in zip(indices[0],indices[1]):
(m, y_start, y_end, x_start, x_end) = self.bounds(a, Point(x, y))
yield y,x, a[y_start:y_end, x_start:x_end], m
def apply_add(self,a,point,y=None):
from ..geo import Point
if y is not None:
point = Point(point, y)
return self.apply(a,point, f=lambda x,y: np.add(x,y))
def apply_min(self,a,point):
f = lambda a,b: np.where(a<b, a, b)
return self.apply(a,point, f=f)
def apply_max(self,a,point):
return self.apply(a,point, f=np.max)
class ConstantKernel(Kernel):
"""A Kernel for a constant value"""
def __init__(self, size=1, value = None ):
super(ConstantKernel, self).__init__(size)
self.value = value
if value:
self.matrix = np.ones((size, size))*value
else:
self.matrix = np.ones((size, size))
self.matrix /= sum(self.matrix) # Normalize the sum of all cells in the matrix to 1
self.offset = (self.matrix.shape[0] - 1) / 2
class GaussianKernel(Kernel):
def __init__(self, size=9, fwhm=3 ):
super(GaussianKernel, self).__init__(size)
m = self.makeGaussian(size, fwhm)
self.offset = (m.shape[0] - 1) / 2
self.matrix = m
@staticmethod
def makeGaussian(size, fwhm = 3):
""" Make a square gaussian kernel.
size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
"""
x = np.arange(0, size, 1, np.float32)
y = x[:,np.newaxis]
x0 = y0 = size // 2
ar = np.array(np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2))
m = np.ma.masked_less(ar, ar[0,x0+1]).filled(0) #mask less than the value at the edge to make it round.
m /= sum(m) # Normalize the sum of all cells in the matrix to 1
return m
class DistanceKernel(Kernel):
''' Each cell is the distance, in cell widths, from the center '''
def __init__(self, size):
import math
super(DistanceKernel, self).__init__(size)
self.inverted = False
#self.matrix = ma.masked_array(zeros((size,size)), mask=True, dtype=float)
self.matrix = np.zeros((size,size), dtype=float)
row_max = size - self.center - 1 # Max value on a horix or vert edge
for (y_m,x_m), value in np.ndenumerate(self.matrix):
r = np.sqrt( (y_m-self.center)**2 + (x_m-self.center)**2)
self.matrix[y_m,x_m] = r
class MostCommonKernel(ConstantKernel):
"""Applies the most common value in the kernel area"""
def __init__(self, size=1):
super(MostCommonKernel, self).__init__(size, 1)
def apply(self,a, point, source = None, f=None, v=None):
"""Apply the values in the kernel onto an array, centered at a point.
:param a: The array to apply to
:type a: numpy.array
:param source: The source for reading data. Must have same dimensions as a
:type a: numpy.array
:param f: A two argument function that decides which value to apply to the array
:type f: callable
:param point: The point, in the array coordinate system, where the center of the
kernel will be applied
:type point: Point
:param v: External value to be passed into the function
:type v: any
"""
if v:
from functools import partial
f = partial(f,v)
(m, y_start, y_end, x_start, x_end) = self.bounds(a, point)
if source is None:
source = a
d1 = np.ravel(source[y_start:y_end, x_start:x_end])
bc = np.bincount(d1, minlength=10)
am = np.argmax(bc)
if am != a[point[0], point[1]]:
print am
a[y_start:y_end, x_start:x_end] = 1
class ArrayKernel(Kernel):
'''Convert an arbitary ( hopefully small ) numpy array
into a kernel'''
def __init__(self, a , const = None):
y,x = a.shape
size = max(x,y)
if size % 2 == 0:
size += 1
pad_y = size - y
pad_x = size - x
b = np.pad(a,((0,pad_y),(0,pad_x)), 'constant', constant_values=((0,0),(0,0))) # @UndefinedVariable
if const:
b *= const
super(ArrayKernel, self).__init__(size, b)
# original shape.
self.oshape = a.shape | ambry/geo/kernel.py | from ..identity import PartitionIdentity
from osgeo import gdal, gdal_array, osr
from osgeo.gdalconst import GDT_Float32, GDT_Byte, GDT_Int16
import numpy as np
class OutOfBounds(Exception): pass
class Kernel(object):
def __init__(self, size, matrix=None):
self.size = size
if size%2 == 0:
raise ValueError("Size must be odd")
# For 1-based array indexing, we'd have to +1, but this is zero-based
if size > 1:
self.center = int(size/2)
else:
self.center = 1
self.offset = (self.size - 1) / 2
if matrix is None:
self.matrix = np.ones((self.size, self.size))
else:
self.matrix = matrix
self._dindices = None
def limit(self):
'''Make the matrix spot sort of round, by masking values in the corners'''
# Get the max value for the edge of the enclosed circle.
# This assumes that there is a radial gradient.
row_max = self.matrix[self.center][0]
for (y_m,x_m), value in np.ndenumerate(self.matrix):
if self.matrix[y_m][x_m] > row_max:
self.matrix[y_m][x_m] = 0
def round(self):
'''Make the matrix spot sort of round, using a radius'''
import math
# Get the max value for the edge of the enclosed circle.
# This assumes that there is a radial gradient.
for (x_m,y_m), value in np.ndenumerate(self.matrix):
if math.sqrt( (x_m-self.center)**2 + (y_m-self.center)**2 ) > float(self.size) / 2.:
self.matrix[y_m][x_m] = 0
def norm(self):
#self.matrix /= sum(self.matrix)
self.matrix /= self.matrix.max()
def invert(self):
''''Invert the values, so the cells closer to the center
have the higher values. '''
#range = self.matrix.max() - self.matrix.min()
self.matrix = self.matrix.max() - self.matrix
self.inverted = ~self.inverted
def quantize(self, bins=255):
from util import jenks_breaks
hist, edges = np.histogram(self.matrix.compressed(),bins=bins)
print "Hist", hist
print "Edges",edges
breaks = jenks_breaks(self.matrix.compressed().tolist(), bins)
print "Breaks",breaks
l = list(set(self.matrix.compressed().tolist()))
l.sort()
print "Uniques", l
print self.matrix.compressed()
digits = np.digitize(self.matrix.ravel(), breaks)
print self.matrix.size
print digits.size
print self.matrix.shape[0]
s = np.ma.array(np.reshape(digits, self.matrix.shape), mask=self.matrix.mask)
print s
def bounds(self, a, point):
y_max, x_max = a.shape
m = None
use_m=False
if point.x < self.offset:
if point.x < 0:
return (False, None, None, None, None)
x_start = max(point.x - self.offset,0)
x_end = point.x + self.offset +1
m = self.matrix[:,(self.offset-point.x):self.matrix.shape[1]]
use_m=True
elif point.x+self.offset+1 > x_max :
if point.x > x_max:
return (False, None, None, None, None)
x_start = point.x - self.offset
x_end = min(point.x + self.offset+1, x_max)
m = self.matrix[:,0:self.matrix.shape[1]+ (x_max-point.x-self.offset)-1]
use_m=True
else:
x_start = point.x - self.offset
x_end = point.x + self.offset+1
sm = (m if use_m else self.matrix)
if point.y < self.offset:
if point.y < 0:
return (False, None, None, None, None)
y_start = max(point.y - self.offset,0)
y_end = point.y + self.offset+1
m = sm[(self.offset-point.y):sm.shape[0],:]
use_m=True
elif point.y+self.offset+1 > y_max:
if point.y > y_max:
return (False, None, None, None, None)
y_start = point.y - self.offset
y_end = point.y + self.offset+1
m = sm[0:sm.shape[0]+ (y_max-point.y-self.offset)-1,:]
use_m=True
else:
y_start = point.y - self.offset
y_end = point.y + self.offset+1
if m is None:
m = self.matrix
return ( m, y_start, y_end, x_start, x_end)
@property
def dindices(self):
'''Return the indices of the matrix, sorted by distance from the center'''
import math
if self._dindices is None:
indices = []
c = self.center
for i, v in np.ndenumerate(self.matrix):
indices.append(i)
self._dindices = sorted(indices, key=lambda i: math.sqrt( (i[0]-c)**2 + (i[1]-c)**2 ))
return self._dindices
def diterate(self, a, point):
'''Iterate over the distances from the point in the array a'''
for i in self.dindices:
x = point[0]+i[1]-self.center
x = x if x >=0 else 0
y = point[1]+i[0]-self.center
y = y if y >=0 else 0
yield i, (y,x )
def apply(self,a, point, source = None, f=None, v=None):
"""Apply the values in the kernel onto an array, centered at a point.
:param a: The array to apply to
:type a: numpy.array
:param source: The source for reading data. Must have same dimensions as a
:type a: numpy.array
:param f: A two argument function that decides which value to apply to the array
:type f: callable
:param point: The point, in the array coordinate system, where the center of the
kernel will be applied
:type point: Point
:param v: External value to be passed into the function
:type v: any
"""
if v:
from functools import partial
f = partial(f,v)
(m, y_start, y_end, x_start, x_end) = self.bounds(a, point)
if not source:
source = a
#print a.shape, point, x_start, x_end, y_start, y_end, (m if use_m else self.matrix).shape
if m is not False: #
a[y_start:y_end, x_start:x_end] = f( source[y_start:y_end, x_start:x_end], m)
else:
raise OutOfBounds("Point {} is out of bounds for this array ( {} )".format(str(point), str(a.shape)))
def iterate(self, a, indices = None):
'''Iterate over kernel sized arrays of the input array. If indices are specified, use them to iterate
over some of the cells, rather than all of them. '''
from ..geo import Point
if indices is None:
it = np.nditer(a,flags=['multi_index'] )
while not it.finished:
(m, y_start, y_end, x_start, x_end) = self.bounds(a, Point(it.multi_index[1], it.multi_index[0]))
yield it.multi_index[0], it.multi_index[1], a[y_start:y_end, x_start:x_end], m
it.iternext()
else:
for y,x in zip(indices[0],indices[1]):
(m, y_start, y_end, x_start, x_end) = self.bounds(a, Point(x, y))
yield y,x, a[y_start:y_end, x_start:x_end], m
def apply_add(self,a,point,y=None):
from ..geo import Point
if y is not None:
point = Point(point, y)
return self.apply(a,point, f=lambda x,y: np.add(x,y))
def apply_min(self,a,point):
f = lambda a,b: np.where(a<b, a, b)
return self.apply(a,point, f=f)
def apply_max(self,a,point):
return self.apply(a,point, f=np.max)
class ConstantKernel(Kernel):
"""A Kernel for a constant value"""
def __init__(self, size=1, value = None ):
super(ConstantKernel, self).__init__(size)
self.value = value
if value:
self.matrix = np.ones((size, size))*value
else:
self.matrix = np.ones((size, size))
self.matrix /= sum(self.matrix) # Normalize the sum of all cells in the matrix to 1
self.offset = (self.matrix.shape[0] - 1) / 2
class GaussianKernel(Kernel):
def __init__(self, size=9, fwhm=3 ):
super(GaussianKernel, self).__init__(size)
m = self.makeGaussian(size, fwhm)
self.offset = (m.shape[0] - 1) / 2
self.matrix = m
@staticmethod
def makeGaussian(size, fwhm = 3):
""" Make a square gaussian kernel.
size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
"""
x = np.arange(0, size, 1, np.float32)
y = x[:,np.newaxis]
x0 = y0 = size // 2
ar = np.array(np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2))
m = np.ma.masked_less(ar, ar[0,x0+1]).filled(0) #mask less than the value at the edge to make it round.
m /= sum(m) # Normalize the sum of all cells in the matrix to 1
return m
class DistanceKernel(Kernel):
''' Each cell is the distance, in cell widths, from the center '''
def __init__(self, size):
import math
super(DistanceKernel, self).__init__(size)
self.inverted = False
#self.matrix = ma.masked_array(zeros((size,size)), mask=True, dtype=float)
self.matrix = np.zeros((size,size), dtype=float)
row_max = size - self.center - 1 # Max value on a horix or vert edge
for (y_m,x_m), value in np.ndenumerate(self.matrix):
r = np.sqrt( (y_m-self.center)**2 + (x_m-self.center)**2)
self.matrix[y_m,x_m] = r
class MostCommonKernel(ConstantKernel):
"""Applies the most common value in the kernel area"""
def __init__(self, size=1):
super(MostCommonKernel, self).__init__(size, 1)
def apply(self,a, point, source = None, f=None, v=None):
"""Apply the values in the kernel onto an array, centered at a point.
:param a: The array to apply to
:type a: numpy.array
:param source: The source for reading data. Must have same dimensions as a
:type a: numpy.array
:param f: A two argument function that decides which value to apply to the array
:type f: callable
:param point: The point, in the array coordinate system, where the center of the
kernel will be applied
:type point: Point
:param v: External value to be passed into the function
:type v: any
"""
if v:
from functools import partial
f = partial(f,v)
(m, y_start, y_end, x_start, x_end) = self.bounds(a, point)
if source is None:
source = a
d1 = np.ravel(source[y_start:y_end, x_start:x_end])
bc = np.bincount(d1, minlength=10)
am = np.argmax(bc)
if am != a[point[0], point[1]]:
print am
a[y_start:y_end, x_start:x_end] = 1
class ArrayKernel(Kernel):
'''Convert an arbitary ( hopefully small ) numpy array
into a kernel'''
def __init__(self, a , const = None):
y,x = a.shape
size = max(x,y)
if size % 2 == 0:
size += 1
pad_y = size - y
pad_x = size - x
b = np.pad(a,((0,pad_y),(0,pad_x)), 'constant', constant_values=((0,0),(0,0))) # @UndefinedVariable
if const:
b *= const
super(ArrayKernel, self).__init__(size, b)
# original shape.
self.oshape = a.shape | 0.660939 | 0.38549 |
from datetime import datetime
from flask_socketio import SocketIO, emit
from flask import Flask, render_template, url_for, copy_current_request_context
from random import random
from time import sleep
from threading import Thread, Event
from monitor import ElementConnected
__author__ = 'slynn'
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True
#turn the flask app into a socketio app
socketio = SocketIO(app)
#random number Generator Thread
thread = Thread()
thread_stop_event = Event()
connected = 0
elements_connected = []
class RandomThread(Thread):
def __init__(self):
self.delay = 2
super(RandomThread, self).__init__()
def randomNumberGenerator(self):
global connected
"""
Generate a random number every 1 second and emit to a socketio instance (broadcast)
Ideally to be run in a separate thread?
"""
#infinite loop of magical random numbers
clients = ["Everton", "Amanda", "Ivone"]
while connected > 0:
for element in elements_connected:
for client in clients:
sleep(self.delay)
# socketio.emit('monitor', {'paciente': client}, namespace='/monitor')
element.emit_message(socketio, {'paciente': client})
return
def run(self):
self.randomNumberGenerator()
def check_activity():
global elements_connected
print("Checking")
new_list_activity = list()
if len(elements_connected) == 0:
print("Ninguem conectado. Skkiping")
return
for element in elements_connected:
if (datetime.now() - element.date).seconds < 15:
new_list_activity.append(element)
print(f"Element {element.get_id} is active")
else:
print(f"Element {element.get_id} ISN'T ACTIVE ANYMORE.")
elements_connected = new_list_activity
return
@socketio.on('cadastro', namespace='/monitor')
def handle_message(message):
print(message)
print("Cadastrado")
@app.route('/')
def index():
#only by sending this page first will the client be connected to the socketio instance
return render_template('index.html')
@socketio.on('connect', namespace='/monitor')
def test_connect():
# need visibility of the global thread object
global thread, connected
print('Client connected. Connected number: ', connected)
# Start the random number generator thread only if the thread has not been started before.
print("Starting Thread")
connected += 1
print("Connected number:: ", connected)
if connected == 1:
thread = RandomThread()
thread.start()
@socketio.on('health', "/monitor")
def health(_id):
print("Estoy aqui: ", _id)
if not (any([True if element.get_id == _id else False for element in elements_connected])):
elements_connected.append(ElementConnected(_id))
else:
for element in elements_connected:
if element.get_id == _id:
new_date = datetime.now()
element.date = new_date
print("New date setted -> ", new_date)
check_activity()
print(elements_connected)
@socketio.on('disconnect', "/monitor")
def test_disconnect():
global connected
if connected > 0:
connected -= 1
print('Client disconnected. Connected: ', connected)
if __name__ == '__main__':
socketio.run(app) | application.py | from datetime import datetime
from flask_socketio import SocketIO, emit
from flask import Flask, render_template, url_for, copy_current_request_context
from random import random
from time import sleep
from threading import Thread, Event
from monitor import ElementConnected
__author__ = 'slynn'
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True
# Turn the Flask app into a Socket.IO app.
socketio = SocketIO(app)
# Placeholder background broadcaster; replaced by a live RandomThread on first connect.
thread = Thread()
# NOTE(review): never set or waited on anywhere in this module -- apparently unused.
thread_stop_event = Event()
# Number of currently connected Socket.IO clients on the /monitor namespace.
connected = 0
# ElementConnected instances that recently reported via the 'health' event.
elements_connected = []
class RandomThread(Thread):
    """Background worker that periodically pushes patient messages to every
    connected monitor element.

    The loop runs for as long as at least one Socket.IO client is connected
    (tracked by the module-level ``connected`` counter) and ends on its own
    once the last client disconnects.
    """

    def __init__(self):
        # Seconds to sleep between two consecutive emissions.
        self.delay = 2
        super().__init__()

    def randomNumberGenerator(self):
        """Emit a message for every (element, client) pair until no Socket.IO
        client remains connected.

        NOTE(review): despite the name, no random numbers are generated --
        the method broadcasts a fixed rotation of patient names.  (In the
        original code this docstring was placed *after* the ``global``
        statement, making it a dead string expression rather than a
        docstring.)
        """
        global connected
        # Hard-coded demo payloads; presumably placeholders for real patient
        # data -- TODO confirm.
        clients = ["Everton", "Amanda", "Ivone"]
        while connected > 0:
            for element in elements_connected:
                for client in clients:
                    sleep(self.delay)
                    element.emit_message(socketio, {'paciente': client})

    def run(self):
        # Thread entry point: delegate to the emission loop.
        self.randomNumberGenerator()
def check_activity():
    """Prune elements that have not sent a 'health' ping in the last 15 seconds.

    Rebuilds the global ``elements_connected`` list, keeping only elements whose
    ``date`` timestamp is recent. No-op (with a log line) when the list is empty.
    """
    global elements_connected
    print("Checking")
    if len(elements_connected) == 0:
        print("Ninguem conectado. Skipping")  # fixed typo: "Skkiping" -> "Skipping"
        return
    still_active = []
    for element in elements_connected:
        # NOTE(review): element.get_id is used without parentheses here and in the
        # handlers -- confirm it is a property on ElementConnected, not a method.
        if (datetime.now() - element.date).seconds < 15:
            still_active.append(element)
            print(f"Element {element.get_id} is active")
        else:
            print(f"Element {element.get_id} ISN'T ACTIVE ANYMORE.")
    elements_connected = still_active
    return
@socketio.on('cadastro', namespace='/monitor')
def handle_message(message):
    # Log the registration ('cadastro') payload received on the monitor namespace.
    print(message)
    print("Cadastrado")
@app.route('/')
def index():
    """Serve the main page; loading it connects the client to the Socket.IO instance."""
    #only by sending this page first will the client be connected to the socketio instance
    return render_template('index.html')
@socketio.on('connect', namespace='/monitor')
def test_connect():
    """Handle a new Socket.IO client: bump the counter; start the emitter thread if first."""
    # need visibility of the global thread object
    global thread, connected
    print('Client connected. Connected number: ', connected)
    # Start the random number generator thread only if the thread has not been started before.
    print("Starting Thread")
    connected += 1
    print("Connected number:: ", connected)
    if connected == 1:
        # NOTE(review): a new RandomThread is created every time the count returns
        # to 1, not only the first time ever -- confirm this restart is intended.
        thread = RandomThread()
        thread.start()
@socketio.on('health', "/monitor")
def health(_id):
    """Handle a 'health' ping: register the element if new, otherwise refresh its timestamp.

    After updating, prunes inactive elements via check_activity().
    """
    print("Estoy aqui: ", _id)
    # NOTE(review): get_id is accessed without parentheses -- confirm it is a property.
    if not any(element.get_id == _id for element in elements_connected):
        elements_connected.append(ElementConnected(_id))
    else:
        for element in elements_connected:
            if element.get_id == _id:
                new_date = datetime.now()
                element.date = new_date
                print("New date set -> ", new_date)  # fixed typo: "setted" -> "set"
    check_activity()
    print(elements_connected)
@socketio.on('disconnect', "/monitor")
def test_disconnect():
    """Handle a client disconnect: decrement the connection counter, not below zero."""
    global connected
    connected = connected - 1 if connected > 0 else connected
    print('Client disconnected. Connected: ', connected)
if __name__ == '__main__':
socketio.run(app) | 0.522689 | 0.09782 |
import bisect
from copy import deepcopy
from Bio.Seq import Seq
from mutalyzer_crossmapper import Coding, Genomic, NonCoding
from mutalyzer_mutator.util import reverse_complement
from ..description_model import (
variant_to_description,
variants_to_description,
yield_sub_model,
)
from ..reference import (
extract_feature_model,
get_internal_selector_model,
slice_to_selector,
yield_locations,
)
from ..util import (
construct_sequence,
get_end,
get_inserted_sequence,
get_start,
set_by_path,
set_end,
set_start,
)
from .to_hgvs_coordinates import genomic_to_point, reverse_strand_shift
def to_rna_reference_model(reference_model, selector_id, transcribe=True):
    """
    Get the RNA reference model of the provided selector.
    1. Extract the tree corresponding to the selector from the model (including
    the parents).
    2. Slice the sequence.
    3. Update the model features locations using the crossmapper.
    TODO: Make sure everything is on the plus strand?
    :arg dict reference_model: Reference model.
    :arg str selector_id: Selector ID.
    :arg bool transcribe: Transcribe the sequence to RNA.
    :returns: RNA reference model.
    :rtype: dict
    """
    rna_model = {
        "annotations": deepcopy(
            extract_feature_model(reference_model["annotations"], selector_id)[0]
        ),
        "sequence": {
            # Transcribed (lowercase RNA) or the raw sliced selector sequence.
            "seq": str(
                Seq(slice_to_selector(reference_model, selector_id)).transcribe()
            ).lower()
            if transcribe
            else slice_to_selector(reference_model, selector_id)
        },
    }
    s_m = get_internal_selector_model(rna_model["annotations"], selector_id, True)
    # Crossmapper from coordinate positions to noncoding (exon-based) positions.
    x = NonCoding(s_m["exon"]).coordinate_to_noncoding
    # Overall feature boundaries in the exon-only coordinate system.
    new_start = x(s_m["exon"][0][0])[0] - 1
    new_end = x(s_m["exon"][-1][-1])[0]
    for location, f_type in yield_locations(rna_model["annotations"]):
        if f_type == "CDS":
            set_start(location, x(get_start(location))[0] - 1)
            set_end(location, x(get_end(location))[0] - 1)
        elif f_type == "exon":
            set_start(location, x(get_start(location))[0] - 1)
            # The second crossmapper value is the offset past the exon edge.
            set_end(location, x(get_end(location))[0] + x(get_end(location))[1] - 1)
        else:
            # Any other feature is stretched over the whole transcript.
            set_start(location, new_start)
            set_end(location, new_end)
    return rna_model
def get_position_type(position, exons, len_ss=2, len_as=5):
    """
    Get the position location within the exons/introns. Even numbers for
    introns and odd numbers for exons are returned. Empty introns are
    considered as well in the returned index. The second returned value
    represents a splice site (1, -1) or around a splice site (-2, 2) location,
    otherwise 0 (within an intron outside the splice (around) sites or
    within an exon).
    :arg int position: Zero-based position.
    :arg list exons: Zero-based half open exon positions list of tuples.
    :arg int len_ss: Splice site length.
    :arg int len_as: Around splice site length.
    :returns: Position type.
    :rtype: tuple
    """
    x = NonCoding(exons).coordinate_to_noncoding
    exons = _get_flatten_exons(exons)
    position_x = x(position)
    # position_x[1] is the offset relative to the nearest exon: 0 means exonic;
    # otherwise its sign encodes which side of the exon the position lies on.
    if position_x[1] == 0:
        return bisect.bisect_right(exons, position), 0
    elif 0 < abs(position_x[1]) <= len_ss:
        # Within the splice site window adjacent to an exon.
        if position_x[1] > 0:
            return bisect.bisect_right(exons, position), 1
        else:
            return bisect.bisect_left(exons, position), -1
    elif len_ss < abs(position_x[1]) <= len_ss + len_as:
        # In the "around splice site" window just past the splice site.
        if position_x[1] > 0:
            return bisect.bisect_right(exons, position), 2
        else:
            return bisect.bisect_left(exons, position), -2
    else:
        # Deep intronic.
        return bisect.bisect_left(exons, position), 0
def _get_location_type(location, exons):
    """
    Returns the location spanning with respect to the exons/introns. Currently
    the supported types are: same exon (start and end in the same exon),
    exon - exon (start and end in different exons), same intron,
    and intron - intron.

    Implicitly returns ``None`` for any other configuration (e.g., a span
    crossing an exon/intron boundary, or an intronic position at a splice site).
    :arg dict location: Location model.
    :arg list exons: Flatten exon positions.
    :returns: Location type within the exons/introns.
    :rtype: str
    """
    start_i = get_position_type(get_start(location), exons)
    # The end is probed inclusively: last position inside the location.
    end_i = get_position_type(get_end(location) - 1, exons)
    if get_start(location) == get_end(location):
        # this is an insertion
        if start_i[0] % 2 == 1:
            return "same exon"
        else:
            if start_i[1] == 0:
                return "same intron"
    elif start_i[0] % 2 == 1 and end_i[0] % 2 == 1:
        # Odd bisect indices denote exons.
        if start_i[0] == end_i[0]:
            return "same exon"
        else:
            return "exon exon"
    elif start_i[0] % 2 == 0 and end_i[0] % 2 == 0:
        # Even indices denote introns; both ends must be away from splice sites.
        if start_i[0] == end_i[0] and start_i[1] == 0:
            return "same intron"
        if start_i[0] != end_i[0] and start_i[1] == 0 and end_i[1] == 0:
            return "intron intron"
def _get_flatten_exons(exons):
"""
Transform the exon list of tuples into a list of integers.
:params list exons: Exons as a list of tuples.
:return: Flattened exons list.
:rtype: list
"""
return [e for exon in exons for e in exon]
def _get_exon_start_position(position, exons):
"""
Given an intronic position (start), get its appropriate exon position.
:arg int position: Zero-based position.
:arg list exons: Flattened exons list.
:returns: Exon position.
:rtype: int
"""
return exons[bisect.bisect_right(exons, position)]
def _get_exon_end_position(position, exons):
"""
Given an intronic position (end), get its appropriate exon position.
:arg int position: Zero-based position.
:arg list exons: Flattened exons list.
:returns: Exon position.
:rtype: int
"""
return exons[bisect.bisect_left(exons, position) - 1]
def _set_start_to_exon(location, exons):
    """
    Update the location start position with its appropriate exon position.
    :arg dict location: Zero-based location model.
    :arg list exons: Flattened exons list.
    """
    exon_position = _get_exon_start_position(get_start(location), exons)
    set_start(location, exon_position)
def _set_end_to_exon(location, exons):
    """
    Update the location end position with its appropriate exon position.
    :arg dict location: Zero-based location model.
    :arg list exons: Flattened exons list.
    """
    exon_position = _get_exon_end_position(get_end(location), exons)
    set_end(location, exon_position)
def _trim_to_exons(variants, exons, sequences):
    """
    Update variants locations to the corresponding exons.
    Notes:
    - same intron locations are discarded;
    - splice sites checked should have been performed already.

    :arg list variants: Variants with coordinate locations.
    :arg list exons: Exon positions as a list of tuples.
    :arg dict sequences: Sequences indexed by id (used for inserted checks).
    :returns: New list of trimmed variants.
    """
    new_variants = []
    # Hoisted: the flattened list was recomputed for every intronic variant.
    flat_exons = _get_flatten_exons(exons)
    for v in variants:
        new_v = deepcopy(v)
        if v.get("location"):
            location_type = _get_location_type(v["location"], exons)
            if location_type == "intron intron" and not (
                v.get("inserted") and construct_sequence(v["inserted"], sequences)
            ):
                # Pure intron-to-intron deletion: clamp both ends to exon borders.
                _set_start_to_exon(new_v["location"], flat_exons)
                _set_end_to_exon(new_v["location"], flat_exons)
                new_variants.append(new_v)
            elif location_type in ("exon exon", "same exon"):
                new_variants.append(new_v)
            # Other location types (e.g., "same intron" or None) are discarded.
        # NOTE(review): variants without a "location" are silently dropped -- confirm.
    return new_variants
def to_rna_variants(variants, sequences, selector_model):
    """
    Convert coordinate delins variants to RNA.
    :arg list variants: Variants with coordinate locations.
    :arg list sequences: List with sequences dictionary.
    :arg dict selector_model: Selector model.
    :returns: Converted RNA variants.
    :rtype: dict
    """
    # Clamp/drop intronic spans first; only exonic material survives.
    trimmed_variants = _trim_to_exons(variants, selector_model["exon"], sequences)
    x = NonCoding(selector_model["exon"]).coordinate_to_noncoding
    for variant in trimmed_variants:
        if variant.get("location"):
            # Map coordinate positions into the exon-only (noncoding) system.
            set_start(variant["location"], x(get_start(variant))[0] - 1)
            set_end(
                variant["location"], x(get_end(variant))[0] + x(get_end(variant))[1] - 1
            )
            if variant.get("inserted"):
                # Materialize the inserted sequences as one plain description entry.
                variant["inserted"] = [
                    {
                        "source": "description",
                        "sequence": get_inserted_sequence(variant, sequences),
                    }
                ]
    return to_rna_sequences(trimmed_variants)
def to_rna_sequences(model):
    """
    Convert all the sequences present in the model to RNA.
    :args dict model: Description model.
    """
    for sequence, path in yield_sub_model(model, ["sequence"]):
        rna_sequence = str(Seq(sequence).transcribe().lower())
        set_by_path(model, path, rna_sequence)
    return model
def _point_to_cds_coordinate(point, selector_model, crossmap):
    """Convert a coordinate point model to a CDS-based point model.

    :arg dict point: Point model with a coordinate ``position``.
    :arg dict selector_model: Selector model (checked for strand orientation).
    :arg crossmap: Coding crossmapper instance.
    :returns: Point model in CDS coordinates.
    """
    genomic_to_coordinate = Genomic().genomic_to_coordinate
    if selector_model.get("inverted"):
        # Reverse strand: undo the shift applied earlier in the pipeline.
        if point.get("shift"):
            point["position"] -= point["shift"]
    coding = crossmap.coordinate_to_coding(point["position"], degenerate=True)
    if coding[2] == -1:
        # NOTE(review): coding[2] == -1 appears to flag positions before the CDS
        # (crossmapper convention) -- such points are clamped to 0; confirm.
        return genomic_to_point(0)
    else:
        return genomic_to_point(genomic_to_coordinate(coding[0]))
def _get_inserted_sequence(insertion, sequences):
    """Extract the inserted sequence slice from its source sequence.

    :arg dict insertion: Inserted model with ``source`` (str id or dict with an
        ``id`` key) and a ``location``.
    :arg dict sequences: Sequences indexed by their ids.
    :returns: The sequence slice between the insertion's start and end.
    :raises ValueError: If ``source`` is neither a string nor a dictionary.
    """
    if isinstance(insertion["source"], str):
        source = insertion["source"]
    elif isinstance(insertion["source"], dict):
        source = insertion["source"]["id"]
    else:
        # Previously this fell through to an UnboundLocalError; fail explicitly.
        raise ValueError(f"Unsupported insertion source: {insertion['source']!r}")
    return sequences[source][
        get_start(insertion["location"]) : get_end(insertion["location"])
    ]
def merge_inserted_to_string(inserted, sequences):
    """Concatenate all insertions into a single description-sourced entry.

    :arg list inserted: Inserted models (each with a literal ``sequence`` or a
        ``source``/``location`` to slice from).
    :arg dict sequences: Sequences indexed by id.
    :returns: Single inserted model with the merged sequence.
    """
    inserted_value = ""
    for insertion in inserted:
        if insertion.get("sequence"):
            inserted_value += insertion.get("sequence")
        else:
            inserted_value += _get_inserted_sequence(insertion, sequences)
        if insertion.get("inverted"):
            # NOTE(review): this reverse-complements the ENTIRE accumulated string,
            # not only the current insertion -- confirm this is intended when more
            # than one insertion is present.
            inserted_value = reverse_complement(inserted_value)
    return {"source": "description", "sequence": inserted_value}
def variant_to_cds_coordinate(variant, sequences, selector_model, crossmap):
    """Return a copy of *variant* with its location converted to CDS coordinates.

    Inserted sequences, when present, are merged into a single description entry.
    """
    new_variant = deepcopy(variant)
    location = new_variant["location"]
    if location["type"] == "range":
        for side in ("start", "end"):
            location[side] = _point_to_cds_coordinate(
                location[side], selector_model, crossmap
            )
    else:
        location = _point_to_cds_coordinate(location, selector_model, crossmap)
    if new_variant.get("inserted"):
        merged = merge_inserted_to_string(new_variant["inserted"], sequences)
        new_variant["inserted"] = [merged]
    new_variant["location"] = location
    return new_variant
def reverse_start_end(variants):
    """Swap start/end of every range location and shift both positions one left.

    Used when flipping variants to the reverse strand; mutates *variants* in place.
    """
    for variant in variants:
        if variant.get("location") and variant["location"]["type"] == "range":
            loc = variant["location"]
            loc["end"], loc["start"] = loc["start"], loc["end"]
            loc["start"]["position"] -= 1
            loc["end"]["position"] -= 1
def _get_cds_into_exons(exons, cds):
l_index = bisect.bisect_right(exons, cds[0])
r_index = bisect.bisect_left(exons, cds[1])
return [cds[0]] + exons[l_index:r_index] + [cds[1]]
def _location_in_same_intron(location, exons):
    """Check whether both ends of *location* fall inside the same intron.

    Even bisect indices correspond to intronic gaps in the flattened exon list.
    :arg dict location: Location model.
    :arg list exons: Flattened exon positions.
    :returns: True if start and end lie in the same intron.
    """
    start_i = bisect.bisect_right(exons, get_start(location))
    end_i = bisect.bisect_left(exons, get_end(location))
    # Simplified from an if/else returning True/False.
    return start_i == end_i and start_i % 2 == 0
def _splice_site_removal(location, exons):
    """Check whether *location* spans exactly one exon/intron boundary.

    Previously returned ``None`` implicitly in the negative case; now returns an
    explicit boolean (callers only test truthiness, so behavior is preserved).
    :arg dict location: Location model.
    :arg list exons: Flattened exon positions.
    :returns: True if exactly one boundary is crossed.
    """
    start_i = bisect.bisect_right(exons, get_start(location))
    end_i = bisect.bisect_left(exons, get_end(location))
    return end_i - start_i == 1
def _get_exons_and_cds(selector_model):
exons = [e for l in selector_model["exon"] for e in l]
cds = [selector_model["cds"][0][0], selector_model["cds"][0][1]]
if selector_model.get("inverted"):
cds[0] = exons[0]
else:
cds[1] = exons[-1]
return exons, cds
def _get_exons_and_cds_2(s_m):
exons = [e for l in s_m["exon"] for e in l]
cds = [s_m["cds"][0][0], s_m["cds"][0][1]]
return exons, cds
def to_exon_positions(variants, exons, cds):
    """Clamp deletion-insertion variant boundaries to exon (CDS-bounded) positions.

    Variants entirely within one intron, or entirely at/before the CDS start,
    are dropped.
    :arg list variants: Variants with coordinate locations.
    :arg list exons: Flattened exon positions.
    :arg cds: CDS start/end pair.
    :returns: New list of adjusted variants.
    """
    exons = _get_cds_into_exons(exons, cds)
    new_variants = []
    for variant in variants:
        if (
            variant.get("type") == "deletion_insertion"
            and variant.get("location")
            and not _location_in_same_intron(variant["location"], exons)
            and not (get_start(variant) <= exons[0] and get_end(variant) <= exons[0])
        ):
            n_v = deepcopy(variant)
            # An even bisect index means the boundary is intronic: snap it to
            # the next exon border.
            exon_s = bisect.bisect(exons, get_start(n_v))
            if exon_s % 2 == 0 and exon_s < len(exons):
                n_v["location"]["start"]["position"] = exons[exon_s]
            exon_e = bisect.bisect(exons, get_end(n_v))
            if exon_e % 2 == 0 and exon_e < len(exons):
                n_v["location"]["end"]["position"] = exons[exon_e]
            new_variants.append(n_v)
    return new_variants
def _get_splice_site_hits(variants, exons, cds):
    """Return indices of deletion-insertion variants that remove a splice site."""
    cds_exons = _get_cds_into_exons(exons, cds)
    return [
        index
        for index, variant in enumerate(variants)
        if variant.get("type") == "deletion_insertion"
        and variant.get("location")
        and _splice_site_removal(variant["location"], cds_exons)
    ]
def reverse_variants(variants, sequences):
    """Return a deep copy of *variants* shifted and flipped for the reverse strand."""
    flipped = deepcopy(variants)
    reverse_strand_shift(flipped, sequences["reference"])
    reverse_start_end(flipped)
    return flipped
def to_rna_protein_coordinates(variants, sequences, selector_model):
    """
    Converts the locations to cds equivalent.
    :param variants: Variants with locations in the coordinate system.
    :param sequences: Sequences with their ids as keys.
    :param selector_model: Selector model according to which
    the conversion is performed.
    :returns: CDS variants and the indices of variants that remove a splice site.
    """
    exons, cds = _get_exons_and_cds(selector_model)
    crossmap = Coding(selector_model["exon"], cds, selector_model["inverted"])
    if selector_model.get("inverted"):
        # Reverse-strand selectors: shift and flip the variants first.
        variants = reverse_variants(variants, sequences)
    # Indices of variants that delete a splice site, reported to the caller.
    splice_site_hits = _get_splice_site_hits(variants, exons, cds)
    coordinate_variants = to_exon_positions(variants, exons, cds)
    cds_variants = []
    for variant in coordinate_variants:
        cds_variants.append(
            variant_to_cds_coordinate(variant, sequences, selector_model, crossmap)
        )
    return cds_variants, splice_site_hits
from copy import deepcopy
from Bio.Seq import Seq
from mutalyzer_crossmapper import Coding, Genomic, NonCoding
from mutalyzer_mutator.util import reverse_complement
from ..description_model import (
variant_to_description,
variants_to_description,
yield_sub_model,
)
from ..reference import (
extract_feature_model,
get_internal_selector_model,
slice_to_selector,
yield_locations,
)
from ..util import (
construct_sequence,
get_end,
get_inserted_sequence,
get_start,
set_by_path,
set_end,
set_start,
)
from .to_hgvs_coordinates import genomic_to_point, reverse_strand_shift
def to_rna_reference_model(reference_model, selector_id, transcribe=True):
"""
Get the RNA reference model of the provided selector.
1. Extract the tree corresponding to the selector from the model (including
the parents).
2. Slice the sequence.
3. Update the model features locations using the crossmapper.
TODO: Make sure everything is on the plus strand?
:arg dict reference_model: Reference model.
:arg str selector_id: Selector ID.
:arg bool transcribe: Transcribe the sequence to RNA.
:returns: RNA reference model.
:rtype: dict
"""
rna_model = {
"annotations": deepcopy(
extract_feature_model(reference_model["annotations"], selector_id)[0]
),
"sequence": {
"seq": str(
Seq(slice_to_selector(reference_model, selector_id)).transcribe()
).lower()
if transcribe
else slice_to_selector(reference_model, selector_id)
},
}
s_m = get_internal_selector_model(rna_model["annotations"], selector_id, True)
x = NonCoding(s_m["exon"]).coordinate_to_noncoding
new_start = x(s_m["exon"][0][0])[0] - 1
new_end = x(s_m["exon"][-1][-1])[0]
for location, f_type in yield_locations(rna_model["annotations"]):
if f_type == "CDS":
set_start(location, x(get_start(location))[0] - 1)
set_end(location, x(get_end(location))[0] - 1)
elif f_type == "exon":
set_start(location, x(get_start(location))[0] - 1)
set_end(location, x(get_end(location))[0] + x(get_end(location))[1] - 1)
else:
set_start(location, new_start)
set_end(location, new_end)
return rna_model
def get_position_type(position, exons, len_ss=2, len_as=5):
"""
Get the position location within the exons/introns. Even numbers for
introns and odd numbers for exons are returned. Empty introns are
considered as well in the returned index. The second returned value
represents a splice site (1, -1) or around a splice site (-2, 2) location,
otherwise 0 (within an intron outside the splice (around) sites or
within an exon).
:arg int position: Zero-based position.
:arg list exons: Zero-based half open exon positions list of tuples.
:arg int len_ss: Splice site length.
:arg int len_as: Around splice site length.
:returns: Position type.
:rtype: tuple
"""
x = NonCoding(exons).coordinate_to_noncoding
exons = _get_flatten_exons(exons)
position_x = x(position)
if position_x[1] == 0:
return bisect.bisect_right(exons, position), 0
elif 0 < abs(position_x[1]) <= len_ss:
if position_x[1] > 0:
return bisect.bisect_right(exons, position), 1
else:
return bisect.bisect_left(exons, position), -1
elif len_ss < abs(position_x[1]) <= len_ss + len_as:
if position_x[1] > 0:
return bisect.bisect_right(exons, position), 2
else:
return bisect.bisect_left(exons, position), -2
else:
return bisect.bisect_left(exons, position), 0
def _get_location_type(location, exons):
"""
Returns the location spanning with respect to the exons/introns. Currently
the supported types are: same exon (start and end in the same exon),
exon - exon (start and end in different exons), same intron,
and intron - intron.
:arg dict location: Location model.
:arg list exons: Flatten exon positions.
:returns: Location type within the exons/introns.
:rtype: str
"""
start_i = get_position_type(get_start(location), exons)
end_i = get_position_type(get_end(location) - 1, exons)
if get_start(location) == get_end(location):
# this is an insertion
if start_i[0] % 2 == 1:
return "same exon"
else:
if start_i[1] == 0:
return "same intron"
elif start_i[0] % 2 == 1 and end_i[0] % 2 == 1:
if start_i[0] == end_i[0]:
return "same exon"
else:
return "exon exon"
elif start_i[0] % 2 == 0 and end_i[0] % 2 == 0:
if start_i[0] == end_i[0] and start_i[1] == 0:
return "same intron"
if start_i[0] != end_i[0] and start_i[1] == 0 and end_i[1] == 0:
return "intron intron"
def _get_flatten_exons(exons):
"""
Transform the exon list of tuples into a list of integers.
:params list exons: Exons as a list of tuples.
:return: Flattened exons list.
:rtype: list
"""
return [e for exon in exons for e in exon]
def _get_exon_start_position(position, exons):
"""
Given an intronic position (start), get its appropriate exon position.
:arg int position: Zero-based position.
:arg list exons: Flattened exons list.
:returns: Exon position.
:rtype: int
"""
return exons[bisect.bisect_right(exons, position)]
def _get_exon_end_position(position, exons):
"""
Given an intronic position (end), get its appropriate exon position.
:arg int position: Zero-based position.
:arg list exons: Flattened exons list.
:returns: Exon position.
:rtype: int
"""
return exons[bisect.bisect_left(exons, position) - 1]
def _set_start_to_exon(location, exons):
"""
Update the location start position with its appropriate exon position.
:arg dict location: Zero-based location model.
:arg list exons: Flattened exons list.
"""
set_start(location, _get_exon_start_position(get_start(location), exons))
def _set_end_to_exon(location, exons):
"""
Update the location end position with its appropriate exon position.
:arg dict location: Zero-based location model.
:arg list exons: Flattened exons list.
"""
set_end(location, _get_exon_end_position(get_end(location), exons))
def _trim_to_exons(variants, exons, sequences):
"""
Update variants locations to the corresponding exons.
Notes:
- same intron locations are discarded;
- splice sites checked should have been performed already.
"""
new_variants = []
for v in variants:
new_v = deepcopy(v)
if v.get("location"):
location_type = _get_location_type(v["location"], exons)
if location_type == "intron intron" and not (
v.get("inserted") and construct_sequence(v["inserted"], sequences)
):
_set_start_to_exon(new_v["location"], _get_flatten_exons(exons))
_set_end_to_exon(new_v["location"], _get_flatten_exons(exons))
new_variants.append(new_v)
elif location_type == "exon exon":
new_variants.append(new_v)
elif location_type == "same exon":
new_variants.append(new_v)
return new_variants
def to_rna_variants(variants, sequences, selector_model):
"""
Convert coordinate delins variants to RNA.
:arg list variants: Variants with coordinate locations.
:arg list sequences: List with sequences dictionary.
:arg dict selector_model: Selector model.
:returns: Converted RNA variants.
:rtype: dict
"""
trimmed_variants = _trim_to_exons(variants, selector_model["exon"], sequences)
x = NonCoding(selector_model["exon"]).coordinate_to_noncoding
for variant in trimmed_variants:
if variant.get("location"):
set_start(variant["location"], x(get_start(variant))[0] - 1)
set_end(
variant["location"], x(get_end(variant))[0] + x(get_end(variant))[1] - 1
)
if variant.get("inserted"):
variant["inserted"] = [
{
"source": "description",
"sequence": get_inserted_sequence(variant, sequences),
}
]
return to_rna_sequences(trimmed_variants)
def to_rna_sequences(model):
"""
Convert all the sequences present in the model to RNA.
:args dict model: Description model.
"""
for seq, path in yield_sub_model(model, ["sequence"]):
set_by_path(model, path, str(Seq(seq).transcribe().lower()))
return model
def _point_to_cds_coordinate(point, selector_model, crossmap):
genomic_to_coordinate = Genomic().genomic_to_coordinate
if selector_model.get("inverted"):
if point.get("shift"):
point["position"] -= point["shift"]
coding = crossmap.coordinate_to_coding(point["position"], degenerate=True)
if coding[2] == -1:
return genomic_to_point(0)
else:
return genomic_to_point(genomic_to_coordinate(coding[0]))
def _get_inserted_sequence(insertion, sequences):
if isinstance(insertion["source"], str):
source = insertion["source"]
elif isinstance(insertion["source"], dict):
source = insertion["source"]["id"]
return sequences[source][
get_start(insertion["location"]) : get_end(insertion["location"])
]
def merge_inserted_to_string(inserted, sequences):
inserted_value = ""
for insertion in inserted:
if insertion.get("sequence"):
inserted_value += insertion.get("sequence")
else:
inserted_value += _get_inserted_sequence(insertion, sequences)
if insertion.get("inverted"):
inserted_value = reverse_complement(inserted_value)
return {"source": "description", "sequence": inserted_value}
def variant_to_cds_coordinate(variant, sequences, selector_model, crossmap):
new_variant = deepcopy(variant)
location = new_variant["location"]
if location["type"] == "range":
location["start"] = _point_to_cds_coordinate(
location["start"], selector_model, crossmap
)
location["end"] = _point_to_cds_coordinate(
location["end"], selector_model, crossmap
)
else:
location = _point_to_cds_coordinate(location, selector_model, crossmap)
if new_variant.get("inserted"):
new_variant["inserted"] = [
merge_inserted_to_string(new_variant["inserted"], sequences)
]
new_variant["location"] = location
return new_variant
def reverse_start_end(variants):
for variant in variants:
if variant.get("location") and variant["location"]["type"] == "range":
location = variant["location"]
location["start"], location["end"] = location["end"], location["start"]
location["start"]["position"] -= 1
location["end"]["position"] -= 1
def _get_cds_into_exons(exons, cds):
l_index = bisect.bisect_right(exons, cds[0])
r_index = bisect.bisect_left(exons, cds[1])
return [cds[0]] + exons[l_index:r_index] + [cds[1]]
def _location_in_same_intron(location, exons):
start_i = bisect.bisect_right(exons, get_start(location))
end_i = bisect.bisect_left(exons, get_end(location))
if start_i == end_i and start_i % 2 == 0:
return True
else:
return False
def _splice_site_removal(location, exons):
start_i = bisect.bisect_right(exons, get_start(location))
end_i = bisect.bisect_left(exons, get_end(location))
if end_i - start_i == 1:
return True
def _get_exons_and_cds(selector_model):
exons = [e for l in selector_model["exon"] for e in l]
cds = [selector_model["cds"][0][0], selector_model["cds"][0][1]]
if selector_model.get("inverted"):
cds[0] = exons[0]
else:
cds[1] = exons[-1]
return exons, cds
def _get_exons_and_cds_2(s_m):
exons = [e for l in s_m["exon"] for e in l]
cds = [s_m["cds"][0][0], s_m["cds"][0][1]]
return exons, cds
def to_exon_positions(variants, exons, cds):
exons = _get_cds_into_exons(exons, cds)
new_variants = []
for variant in variants:
if (
variant.get("type") == "deletion_insertion"
and variant.get("location")
and not _location_in_same_intron(variant["location"], exons)
and not (get_start(variant) <= exons[0] and get_end(variant) <= exons[0])
):
n_v = deepcopy(variant)
exon_s = bisect.bisect(exons, get_start(n_v))
if exon_s % 2 == 0 and exon_s < len(exons):
n_v["location"]["start"]["position"] = exons[exon_s]
exon_e = bisect.bisect(exons, get_end(n_v))
if exon_e % 2 == 0 and exon_e < len(exons):
n_v["location"]["end"]["position"] = exons[exon_e]
new_variants.append(n_v)
return new_variants
def _get_splice_site_hits(variants, exons, cds):
hits = []
for i, variant in enumerate(variants):
if (
variant.get("type") == "deletion_insertion"
and variant.get("location")
and _splice_site_removal(
variant["location"], _get_cds_into_exons(exons, cds)
)
):
hits.append(i)
return hits
def reverse_variants(variants, sequences):
reversed_variants = deepcopy(variants)
reverse_strand_shift(reversed_variants, sequences["reference"])
reverse_start_end(reversed_variants)
return reversed_variants
def to_rna_protein_coordinates(variants, sequences, selector_model):
"""
Converts the locations to cds equivalent.
:param variants: Variants with locations in the coordinate system.
:param sequences: Sequences with their ids as keys.
:param selector_model: Selector model according to which
the conversion is performed.
"""
exons, cds = _get_exons_and_cds(selector_model)
crossmap = Coding(selector_model["exon"], cds, selector_model["inverted"])
if selector_model.get("inverted"):
variants = reverse_variants(variants, sequences)
splice_site_hits = _get_splice_site_hits(variants, exons, cds)
coordinate_variants = to_exon_positions(variants, exons, cds)
cds_variants = []
for variant in coordinate_variants:
cds_variants.append(
variant_to_cds_coordinate(variant, sequences, selector_model, crossmap)
)
return cds_variants, splice_site_hits | 0.626696 | 0.540196 |
def parseInput(myinput):
    """Parse a line like 'Black: 2H 3D 5S 9C KD White: ...' into {player: cards}.

    :param myinput: Whitespace-separated player names (suffixed with ':'),
        each followed by five card codes.
    :returns: Dict mapping player name (colon stripped) to a list of card codes.
    """
    hands = {}
    hand_length = 6  # name plus five cards
    splitinput = [x for x in myinput.split(' ') if x.strip() != '']
    while len(splitinput) > 0:
        playerhand = splitinput[0:hand_length]
        player = playerhand.pop(0).replace(":", "")
        hands[player] = playerhand
        splitinput = splitinput[hand_length:]
    # Removed an unreachable debug print(splitinput) that followed this return.
    return hands
def getValue(card):
    """Return the numeric rank of a card code; only the first character is used."""
    rank_by_symbol = {
        "A": 14, "K": 13, "Q": 12, "J": 11,
        "1": 10,   # stand-in: first character of "10"
        "10": 10,  # kept for flexibility in the implementation
        "9": 9, "8": 8, "7": 7, "6": 6,
        "5": 5, "4": 4, "3": 3, "2": 2,
        "0": 0,    # filler default value
    }
    return rank_by_symbol[card[0]]
def cardsort(cards):
    """Return the cards sorted by ascending rank."""
    return sorted(cards, key=getValue)
def isSeq(vals):
    """Return True if *vals* are consecutive integers (order-insensitive).

    Fixed a side effect: the previous version sorted the caller's list in place.
    :param vals: List of integer card values.
    :returns: True for a run of consecutive values (also for 0 or 1 element).
    """
    ordered = sorted(vals)
    return all(b == a + 1 for a, b in zip(ordered, ordered[1:]))
def compareHands(hands):
    """Group players by hand rank and return the groups from best to worst.

    :param hands: Dict mapping player name to a result dict with a "rank" key.
    :returns: List of player lists, highest rank first; ties share one list.
    """
    by_rank = {}
    for player in hands:
        rank = hands[player]["rank"]
        print(rank)
        by_rank.setdefault(rank, []).append(player)
    return [by_rank[rank] for rank in sorted(by_rank, reverse=True)]
def getHands(name, hand):
    """Evaluate a 5-card hand, print the best category, and return its rank info.

    :param name: Player name (used only for printing).
    :param hand: Either a space-separated card string or a list of card codes.
    :returns: Dict with "rank" (sortable int), "title" (category name) and
        "hand" (the cards making up that category).
    """
    # Working set: per-suit lists, per-value lists, all values seen, plus
    # category entries ("Pair", "Flush", ...) added as they are detected.
    handset = {
        "vals": [],
        "S": [],
        "H": [],
        "D": [],
        "C": []
    }
    handranks = ["Straight Flush", "Four of a Kind", "Full House", "Flush", "Straight", "Three of a Kind", "Two Pairs", "Pair", "High Card"]
    if type(hand) == str: cards = hand.split(' ')
    else: cards = hand
    for card in cards:
        value = getValue(card)
        if value not in handset:
            handset[value] = [card]
        else:
            handset[value].append(card)
        #Add card to value sets
        #Add to suit and values
        handset[card[-1]].append(card)
        handset["vals"].append(value)
        if value == 14:  # ace also counts low for A-2-3-4-5 straights
            handset["vals"].append(1)
        #Check for flush and straights
        if len(handset[card[-1]]) == 5:
            handset["Flush"] = cardsort(cards)
        if len(handset["vals"]) == 5 and isSeq(handset["vals"]):
            if "Flush" in handset:
                handset["Straight Flush"] = cardsort(cards)
                break
            else:
                handset["Straight"] = cardsort(cards)
                break
        #High card
        if "High Card" not in handset:
            handset["High Card"] = [card]
        elif value > getValue(handset["High Card"][-1]):
            handset["High Card"].append(card)
        else:
            handset["High Card"].insert(0, card)
        #Pairs and card counts
        if len(handset[value]) == 2:
            if "Three of a Kind" in handset:
                handset["Full House"] = cardsort(list(set(handset["Three of a Kind"] + handset[value])))
            elif "Pair" in handset:
                handset["Two Pairs"] = cardsort(list(set(handset["Pair"] + handset[value])))
            else:
                handset["Pair"] = handset[value]
        elif len(handset[value]) == 3:
            if "Two Pairs" in handset:
                handset["Full House"] = cardsort(list(set([card] + handset["Two Pairs"])))
            else:
                handset["Three of a Kind"] = handset[value]
        # BUG FIX: was `handset[value] == 4` (list compared to int, always False),
        # so Four of a Kind was never detected.
        elif len(handset[value]) == 4:
            handset["Four of a Kind"] = handset[value]
            break  # four of a kind excludes flush/straight; skip remaining cards
    for h in handranks:
        if h in handset:
            print("%s's best hand is a %s: %s%s" % (name, h,
                str(handset[h]) if h != "High Card" else handset[h][-1],
                ", high card " + handset[h][-1] if h != "High Card" else ''))
            # Rank encodes the category in the highest digits, followed by card
            # values in decreasing significance for tie-breaking.
            rank = ((len(handranks) - handranks.index(h)) * (10 ** 11))
            for i in range(1, 2 * len(handset[h]), 2):
                rank += getValue(handset[h][(i - 1) // 2]) * (10 ** i)  # high-card tie-breakers
            return {
                "rank": rank,
                "title": h,
                "hand": handset[h]
            }
# Driver script: parse each input line, evaluate every player's hand, rank them.
allinputs = "Black: 2H 3D 5S 9C KD White: 2C 3H 4S 8C AH"
# Extra test lines preserved below as an unused string literal.
'''Black: 2H 4S 4C 2D 4H White: 2S 8S AS QS 3S
Black: 2H 3D 5S 9C KD White: 2C 3H 4S 8C KH
Black: 2H 3D 5S 9C KD White: 2D 3H 5C 9S KH'''
inputs = allinputs.split('\n')
#myinput = "Black: 2H 3D 5S 9C KD White: 2C 3H 4S 8C AH"
for i in inputs:
    hands = parseInput(i)
    results = {}
    for player in hands:
        # Echo the dealt hand, then evaluate it.
        print("%s: %s" % (player, ' '.join(hands[player])))
        results[player] = getHands(player, hands[player])
    rankings = compareHands(results)
    if len(rankings[0])==1:
        print("The winner is %s!" % rankings[0][0])
    else:
        print("There is a tie between %s!" % ' and '.join(rankings[0]))
    print('\n')
hands = {}
hand_length = 6 #Name plus cards
splitinput = [x for x in myinput.split(' ') if x.strip()!='']
while len(splitinput) > 0:
playerhand = splitinput[0:hand_length]
player = playerhand.pop(0).replace(":", "")
hands[player] = playerhand
splitinput = splitinput[hand_length:]
return hands
print(splitinput)
def getValue(card):
valuemap = {
"A":14,
"K":13,
"Q":12,
"J":11,
"1":10, #Stand in for 10
"10":10, #More flex in implementation
"9":9,
"8":8,
"7":7,
"6":6,
"5":5,
"4":4,
"3":3,
"2":2,
"0":0 #Filler as a default value
}
return valuemap[card[0]]
def cardsort(cards):
return sorted(cards, key=lambda x: getValue(x))
def isSeq(vals):
vals.sort()
for v in range(len(vals)):
if v == 0:
prev = vals[v]
else:
if vals[v]!=prev+1:
return False
return True
def compareHands(hands):
rankings = {}
for player in hands:
rank = hands[player]["rank"]
print(rank)
if rank not in rankings:
rankings[rank] = [player]
else:
rankings[rank].append(player)
results = []
for key in sorted(rankings.keys(), reverse=True):
results.append(rankings[key])
return results
def getHands(name, hand):
handset = {
"vals":[],
"S": [],
"H": [],
"D": [],
"C": []
}
handranks = ["Straight Flush", "Four of a Kind", "Full House", "Flush", "Straight", "Three of a Kind", "Two Pairs", "Pair", "High Card"]
if type(hand) == str: cards = hand.split(' ')
else: cards = hand
for card in cards:
value = getValue(card)
if value not in handset:
handset[value] = [card]
else:
handset[value].append(card)
#Add card to value sets
#Add to suit and values
handset[card[-1]].append(card)
handset["vals"].append(value)
if value==14: #ace
handset["vals"].append(1)
#Check for flush and straights
if len(handset[card[-1]])==5:
handset["Flush"]=cardsort(cards)
if len(handset["vals"]) == 5 and isSeq(handset["vals"]):
if "Flush" in handset:
handset["Straight Flush"] = cardsort(cards)
break
else:
handset["Straight"] = cardsort(cards)
break
#High card
if "High Card" not in handset:
handset["High Card"]=[card]
elif value > getValue(handset["High Card"][-1]):
handset["High Card"].append(card)
else:
handset["High Card"].insert(0, card)
#Pairs and card counts
if len(handset[value])==2:
if "Three of a Kind" in handset:
handset["Full House"] = cardsort(list(set(handset["Three of a Kind"]+handset[value])))
elif "Pair" in handset:
handset["Two Pairs"] = cardsort(list(set(handset["Pair"]+handset[value])))
else:
handset["Pair"] = handset[value]
elif len(handset[value])==3:
if "Two Pairs" in handset:
handset["Full House"] = cardsort(list(set([card]+handset["Two Pairs"])))
else:
handset["Three of a Kind"] = handset[value]
elif handset[value]==4:
handset["Four of a Kind"] = handset[value]
break; #If you have 4 of a kind, you cannot get flush or straight, so no need to check last card if it exists
for h in handranks:
if h in handset:
print("%s's best hand is a %s: %s%s" % (name, h,
str(handset[h]) if h!="High Card" else handset[h][-1],
", high card "+handset[h][-1] if h!="High Card" else ''))
rank = ((len(handranks)-handranks.index(h))*(10**11))
for i in range(1, 2*len(handset[h]), 2):
rank+=getValue(handset[h][(i-1)//2])*(10**i) #Latter portion is to get high cards
return {
"rank":rank,
"title":h,
"hand":handset[h]
}
allinputs = "Black: 2H 3D 5S 9C KD White: 2C 3H 4S 8C AH"
'''Black: 2H 4S 4C 2D 4H White: 2S 8S AS QS 3S
Black: 2H 3D 5S 9C KD White: 2C 3H 4S 8C KH
Black: 2H 3D 5S 9C KD White: 2D 3H 5C 9S KH'''
inputs = allinputs.split('\n')
#myinput = "Black: 2H 3D 5S 9C KD White: 2C 3H 4S 8C AH"
for i in inputs:
hands = parseInput(i)
results = {}
for player in hands:
print("%s: %s" % (player, ' '.join(hands[player])))
results[player] = getHands(player, hands[player])
rankings = compareHands(results)
if len(rankings[0])==1:
print("The winner is %s!" % rankings[0][0])
else:
print("There is a tie between %s!" % ' and '.join(rankings[0]))
print('\n') | 0.128854 | 0.395835 |
import cioppy
ciop = cioppy.Cioppy()
import urllib.parse as urlparse
import datetime
import pandas as pd
import gdal
from shapely.geometry import box
def log_input(reference):
"""
Just logs the input reference, using the ciop.log function
"""
ciop.log('INFO', 'processing input: ' + reference)
def pass_next_node(input):
"""
Pass the input reference to the next node as is, without storing it on HDFS
"""
ciop.publish(input, mode='silent')
def name_date_from_enclosure(row):
series = dict()
series['name']=(row['enclosure'].split('/')[-1]).split('.')[0]
print(series['name'])
series['day']=series['name'][-26:-18]
series['jday'] = '{}{}'.format(datetime.datetime.strptime(series['day'], '%Y%m%d').timetuple().tm_year,
"%03d"%datetime.datetime.strptime(series['day'], '%Y%m%d').timetuple().tm_yday)
return pd.Series(series)
def tojulian(x):
"""
Parses datetime object to julian date string.
Args:
datetime object
Returns:
x: julian date as string YYYYJJJ
"""
return '{}{}'.format(datetime.datetime.strptime(x, '%Y-%m-%d').timetuple().tm_year,
"%03d"%datetime.datetime.strptime(x, '%Y-%m-%d').timetuple().tm_yday)
def fromjulian(x):
"""
Parses julian date string to datetime object.
Args:
x: julian date as string YYYYJJJ
Returns:
datetime object parsed from julian date
"""
return datetime.datetime.strptime(x, '%Y%j').date()
def get_vsi_url(enclosure, user, api_key):
parsed_url = urlparse.urlparse(enclosure)
url = '/vsicurl/%s://%s:%s@%s/api%s' % (list(parsed_url)[0],
user,
api_key,
list(parsed_url)[1],
list(parsed_url)[2])
return url
def get_raster_wkt(raster):
src = gdal.Open(raster)
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
from osgeo import ogr
from osgeo import osr
# Setup the source projection - you can also import from epsg, proj4...
source = osr.SpatialReference()
source.ImportFromWkt(src.GetProjection())
# The target projection
target = osr.SpatialReference()
target.ImportFromEPSG(4326)
# Create the transform - this can be used repeatedly
transform = osr.CoordinateTransformation(source, target)
# return box(transform.TransformPoint(ulx, lry)[0],
# transform.TransformPoint(ulx, lry)[1],
# transform.TransformPoint(lrx, uly)[0],
# transform.TransformPoint(lrx, uly)[1]).wkt
return box(transform.TransformPoint(ulx, lry)[1],
transform.TransformPoint(ulx, lry)[0],
transform.TransformPoint(lrx, uly)[1],
transform.TransformPoint(lrx, uly)[0]).wkt | src/main/app-resources/util/util.py |
import cioppy
ciop = cioppy.Cioppy()
import urllib.parse as urlparse
import datetime
import pandas as pd
import gdal
from shapely.geometry import box
def log_input(reference):
"""
Just logs the input reference, using the ciop.log function
"""
ciop.log('INFO', 'processing input: ' + reference)
def pass_next_node(input):
"""
Pass the input reference to the next node as is, without storing it on HDFS
"""
ciop.publish(input, mode='silent')
def name_date_from_enclosure(row):
series = dict()
series['name']=(row['enclosure'].split('/')[-1]).split('.')[0]
print(series['name'])
series['day']=series['name'][-26:-18]
series['jday'] = '{}{}'.format(datetime.datetime.strptime(series['day'], '%Y%m%d').timetuple().tm_year,
"%03d"%datetime.datetime.strptime(series['day'], '%Y%m%d').timetuple().tm_yday)
return pd.Series(series)
def tojulian(x):
"""
Parses datetime object to julian date string.
Args:
datetime object
Returns:
x: julian date as string YYYYJJJ
"""
return '{}{}'.format(datetime.datetime.strptime(x, '%Y-%m-%d').timetuple().tm_year,
"%03d"%datetime.datetime.strptime(x, '%Y-%m-%d').timetuple().tm_yday)
def fromjulian(x):
"""
Parses julian date string to datetime object.
Args:
x: julian date as string YYYYJJJ
Returns:
datetime object parsed from julian date
"""
return datetime.datetime.strptime(x, '%Y%j').date()
def get_vsi_url(enclosure, user, api_key):
parsed_url = urlparse.urlparse(enclosure)
url = '/vsicurl/%s://%s:%s@%s/api%s' % (list(parsed_url)[0],
user,
api_key,
list(parsed_url)[1],
list(parsed_url)[2])
return url
def get_raster_wkt(raster):
src = gdal.Open(raster)
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
from osgeo import ogr
from osgeo import osr
# Setup the source projection - you can also import from epsg, proj4...
source = osr.SpatialReference()
source.ImportFromWkt(src.GetProjection())
# The target projection
target = osr.SpatialReference()
target.ImportFromEPSG(4326)
# Create the transform - this can be used repeatedly
transform = osr.CoordinateTransformation(source, target)
# return box(transform.TransformPoint(ulx, lry)[0],
# transform.TransformPoint(ulx, lry)[1],
# transform.TransformPoint(lrx, uly)[0],
# transform.TransformPoint(lrx, uly)[1]).wkt
return box(transform.TransformPoint(ulx, lry)[1],
transform.TransformPoint(ulx, lry)[0],
transform.TransformPoint(lrx, uly)[1],
transform.TransformPoint(lrx, uly)[0]).wkt | 0.608594 | 0.400398 |
from __future__ import absolute_import
import six
from datetime import timedelta
from django.db.models import Q
from django.utils import timezone
from rest_framework.response import Response
from functools32 import partial
from sentry import options, quotas, tagstore
from sentry.api.base import DocSection, EnvironmentMixin
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers.models.event import SnubaEvent
from sentry.api.serializers import serialize
from sentry.api.paginator import DateTimePaginator, GenericOffsetPaginator
from sentry.models import Environment, Event, Group
from sentry.search.utils import parse_query
from sentry.search.utils import InvalidQuery
from sentry.utils.apidocs import scenario, attach_scenarios
from sentry.utils.validators import is_event_id
from sentry.utils.snuba import raw_query
class NoResults(Exception):
pass
@scenario('ListAvailableSamples')
def list_available_samples_scenario(runner):
group = Group.objects.filter(project=runner.default_project).first()
runner.request(method='GET', path='/issues/%s/events/' % group.id)
class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
doc_section = DocSection.EVENTS
@attach_scenarios([list_available_samples_scenario])
def get(self, request, group):
"""
List an Issue's Events
``````````````````````
This endpoint lists an issue's events.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
try:
environment = self._get_environment(request, group)
query, tags = self._get_search_query_and_tags(request, group, environment)
except InvalidQuery as exc:
return Response({'detail': six.text_type(exc)}, status=400)
except NoResults:
return Response([])
use_snuba = options.get('snuba.events-queries.enabled')
backend = self._get_events_snuba if use_snuba else self._get_events_legacy
return backend(request, group, environment, query, tags)
def _get_events_snuba(self, request, group, environment, query, tags):
conditions = []
if query:
msg_substr = ['positionCaseInsensitive', ['message', "'%s'" % (query,)]]
message_condition = [msg_substr, '!=', 0]
if is_event_id(query):
or_condition = [message_condition, ['event_id', '=', query]]
conditions.append(or_condition)
else:
conditions.append(message_condition)
if tags:
conditions.extend([[u'tags[{}]'.format(k), '=', v] for (k, v) in tags.items()])
now = timezone.now()
data_fn = partial(
# extract 'data' from raw_query result
lambda *args, **kwargs: raw_query(*args, **kwargs)['data'],
start=now - timedelta(days=90),
end=now,
conditions=conditions,
filter_keys={
'project_id': [group.project_id],
'issue': [group.id]
},
selected_columns=SnubaEvent.selected_columns + ['tags.key', 'tags.value'],
orderby='-timestamp',
referrer='api.group-events',
)
return self.paginate(
request=request,
on_results=lambda results: serialize(
[SnubaEvent(row) for row in results], request.user),
paginator=GenericOffsetPaginator(data_fn=data_fn)
)
def _get_events_legacy(self, request, group, environment, query, tags):
events = Event.objects.filter(group_id=group.id)
if query:
q = Q(message__icontains=query)
if is_event_id(query):
q |= Q(event_id__exact=query)
events = events.filter(q)
if tags:
event_filter = tagstore.get_group_event_filter(
group.project_id,
group.id,
environment.id if environment is not None else None,
tags,
)
if not event_filter:
return Response([])
events = events.filter(**event_filter)
# filter out events which are beyond the retention period
retention = quotas.get_event_retention(organization=group.project.organization)
if retention:
events = events.filter(
datetime__gte=timezone.now() - timedelta(days=retention)
)
return self.paginate(
request=request,
queryset=events,
order_by='-datetime',
on_results=lambda x: serialize(x, request.user),
paginator_cls=DateTimePaginator,
)
def _get_environment(self, request, group):
try:
return self._get_environment_from_request(
request,
group.project.organization_id,
)
except Environment.DoesNotExist:
raise NoResults
def _get_search_query_and_tags(self, request, group, environment=None):
raw_query = request.GET.get('query')
if raw_query:
query_kwargs = parse_query([group.project], raw_query, request.user)
query = query_kwargs.pop('query', None)
tags = query_kwargs.pop('tags', {})
else:
query = None
tags = {}
if environment is not None:
if 'environment' in tags and tags['environment'] != environment.name:
# An event can only be associated with a single
# environment, so if the environment associated with
# the request is different than the environment
# provided as a tag lookup, the query cannot contain
# any valid results.
raise NoResults
else:
tags['environment'] = environment.name
return query, tags | src/sentry/api/endpoints/group_events.py | from __future__ import absolute_import
import six
from datetime import timedelta
from django.db.models import Q
from django.utils import timezone
from rest_framework.response import Response
from functools32 import partial
from sentry import options, quotas, tagstore
from sentry.api.base import DocSection, EnvironmentMixin
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers.models.event import SnubaEvent
from sentry.api.serializers import serialize
from sentry.api.paginator import DateTimePaginator, GenericOffsetPaginator
from sentry.models import Environment, Event, Group
from sentry.search.utils import parse_query
from sentry.search.utils import InvalidQuery
from sentry.utils.apidocs import scenario, attach_scenarios
from sentry.utils.validators import is_event_id
from sentry.utils.snuba import raw_query
class NoResults(Exception):
pass
@scenario('ListAvailableSamples')
def list_available_samples_scenario(runner):
group = Group.objects.filter(project=runner.default_project).first()
runner.request(method='GET', path='/issues/%s/events/' % group.id)
class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
doc_section = DocSection.EVENTS
@attach_scenarios([list_available_samples_scenario])
def get(self, request, group):
"""
List an Issue's Events
``````````````````````
This endpoint lists an issue's events.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
try:
environment = self._get_environment(request, group)
query, tags = self._get_search_query_and_tags(request, group, environment)
except InvalidQuery as exc:
return Response({'detail': six.text_type(exc)}, status=400)
except NoResults:
return Response([])
use_snuba = options.get('snuba.events-queries.enabled')
backend = self._get_events_snuba if use_snuba else self._get_events_legacy
return backend(request, group, environment, query, tags)
def _get_events_snuba(self, request, group, environment, query, tags):
conditions = []
if query:
msg_substr = ['positionCaseInsensitive', ['message', "'%s'" % (query,)]]
message_condition = [msg_substr, '!=', 0]
if is_event_id(query):
or_condition = [message_condition, ['event_id', '=', query]]
conditions.append(or_condition)
else:
conditions.append(message_condition)
if tags:
conditions.extend([[u'tags[{}]'.format(k), '=', v] for (k, v) in tags.items()])
now = timezone.now()
data_fn = partial(
# extract 'data' from raw_query result
lambda *args, **kwargs: raw_query(*args, **kwargs)['data'],
start=now - timedelta(days=90),
end=now,
conditions=conditions,
filter_keys={
'project_id': [group.project_id],
'issue': [group.id]
},
selected_columns=SnubaEvent.selected_columns + ['tags.key', 'tags.value'],
orderby='-timestamp',
referrer='api.group-events',
)
return self.paginate(
request=request,
on_results=lambda results: serialize(
[SnubaEvent(row) for row in results], request.user),
paginator=GenericOffsetPaginator(data_fn=data_fn)
)
def _get_events_legacy(self, request, group, environment, query, tags):
events = Event.objects.filter(group_id=group.id)
if query:
q = Q(message__icontains=query)
if is_event_id(query):
q |= Q(event_id__exact=query)
events = events.filter(q)
if tags:
event_filter = tagstore.get_group_event_filter(
group.project_id,
group.id,
environment.id if environment is not None else None,
tags,
)
if not event_filter:
return Response([])
events = events.filter(**event_filter)
# filter out events which are beyond the retention period
retention = quotas.get_event_retention(organization=group.project.organization)
if retention:
events = events.filter(
datetime__gte=timezone.now() - timedelta(days=retention)
)
return self.paginate(
request=request,
queryset=events,
order_by='-datetime',
on_results=lambda x: serialize(x, request.user),
paginator_cls=DateTimePaginator,
)
def _get_environment(self, request, group):
try:
return self._get_environment_from_request(
request,
group.project.organization_id,
)
except Environment.DoesNotExist:
raise NoResults
def _get_search_query_and_tags(self, request, group, environment=None):
raw_query = request.GET.get('query')
if raw_query:
query_kwargs = parse_query([group.project], raw_query, request.user)
query = query_kwargs.pop('query', None)
tags = query_kwargs.pop('tags', {})
else:
query = None
tags = {}
if environment is not None:
if 'environment' in tags and tags['environment'] != environment.name:
# An event can only be associated with a single
# environment, so if the environment associated with
# the request is different than the environment
# provided as a tag lookup, the query cannot contain
# any valid results.
raise NoResults
else:
tags['environment'] = environment.name
return query, tags | 0.529263 | 0.087058 |
import pandas as pd
import numpy as np
from random import choice, randint, uniform, random
import util
from util import Stack, Item, Solution
import time
class GA:
def __init__(self, _instance, _popSize, _mutationRate, _maxIterations,
_widthPlates, _heightPlates,_crossoverAlgorithm,
_mutationAlgorithm,_selectionAlgorithm,_localSearchRate, _eliteRate):
"""
Parameters and general variables
"""
self.widthPlates = _widthPlates
self.heightPlates = _heightPlates
self.crossoverAlgorithm = _crossoverAlgorithm
self.mutationAlgorithm = _mutationAlgorithm
self.selectionAlgorithm = _selectionAlgorithm
self.output = []
self.population = []
self.elitePopulation= []
self.newPopulation = []
self.eliteRate = _eliteRate
self.eliteSize = _popSize * _eliteRate
self.newPopulSize = _popSize - self.eliteSize
self.matingPool = []
self.best = None
self.popSize = _popSize
self.stacks = {}
self.mutationRate = _mutationRate
self.localSearchRate= _localSearchRate
self.maxIterations = _maxIterations
self.iteration = 0
self.iterationOfBest= 0
self.instance = _instance
self.listBestFit = []
self.listAvgFit = []
self.timeIteration = []
self.table = {}
self.readInstance()
self.initPopulation()
self.genSize = len(self.templateSolution)
def readInstance(self):
batch = self.instance + "_batch.csv"
batch = pd.read_csv(batch, sep = ";")
stack = Stack(batch.STACK[0])
self.stacks[stack.idStack] = stack
self.templateSolution = []
for ix,it in batch.iterrows():
item = Item(it.ITEM_ID,it.LENGTH_ITEM, it.WIDTH_ITEM)
if stack.idStack != it.STACK:
stack = Stack(it.STACK)
self.stacks[stack.idStack] = stack
stack.add(item)
self.templateSolution.append(it.STACK)
def getCost(self, solution):
[stack.reset() for stack in self.stacks.values()]
if random() < self.localSearchRate:
solution.localSearch(self.stacks)
else:
solution.computeCost(self.stacks)
return solution
def initPopulation(self):
"""
Creating random individuals in the population
"""
for i in range(0, self.popSize):
solution = Solution(self.templateSolution[0:], self.widthPlates, self.heightPlates)
solution = self.getCost(solution)
self.population.append(solution)
self.best = self.population[0].copy()
for sol_i in self.population:
#elite
if len(self.elitePopulation) < self.eliteSize:
self.elitePopulation.append(sol_i)
else:
fit = [i.cost for i in self.elitePopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > sol_i.cost:
self.elitePopulation[fit.index(maxFit)] = sol_i
#best fit
if self.best.cost > sol_i.cost:
self.best = sol_i.copy()
print ("Best initial sol: ",self.best.cost)
self.bestInitialSol = self.best.cost
self.addOutput(0,self.best.cost, self.population)
def updateBest(self, candidate):
if candidate.cost < self.best.cost:
self.best = candidate.copy()
print ("iteration: ",self.iteration, "best: ",self.best.cost)
self.iterationOfBest = self.iteration
def addOutput(self,time,bestCost, population):
row = [i.cost for i in population]
row.append(time)
row.append(bestCost)
self.output.append(row)
def randomSelection(self):
"""
Random (uniform) selection of two individuals
"""
solA = self.matingPool[ randint(0, self.popSize-1) ]
solB = self.matingPool[ randint(0, self.popSize-1) ]
return [solA, solB]
def stochasticUniversalSampling(self):
"""
stochastic universal sampling Selection Implementation
"""
solA = self.matingPool[ choice(self.indexs) ]
solB = self.matingPool[ choice(self.indexs) ]
return [solA, solB]
def TournamentSelection(self):
"""
Tournament selection of two individuals
"""
sol1 = self.matingPool[ randint(0, self.popSize-1) ]
sol2 = self.matingPool[ randint(0, self.popSize-1) ]
solA = sol1 if sol1.cost < sol2.cost else sol2
sol3 = self.matingPool[ randint(0, self.popSize-1) ]
sol4 = self.matingPool[ randint(0, self.popSize-1) ]
solB = sol3 if sol3.cost < sol4.cost else sol4
if solA.cost == solB.cost:
if solA.cost != sol1.cost:
solA = sol1
elif solA.cost != sol2.cost:
solA = sol2
elif solA.cost != sol3.cost:
solA = sol3
elif solA.cost != sol4.cost:
solA = sol4
return [solA, solB]
def uniformCrossover(self, solA, solB):
"""
Uniform Crossover Implementation
"""
child1 = solA.copy()
child2 = solB.copy()
tmpIndA = solA.genes[0:]
tmpIndB = solB.genes[0:]
tmpIndex= []
for i in range(0, self.genSize):
if choice([True,False]):
tmpIndA.remove(child2.genes[i])
tmpIndB.remove(child1.genes[i])
tmpRotetion = child2.genes[self.genSize+i]
child2.genes[self.genSize+i] = child1.genes[self.genSize+i]
child1.genes[self.genSize+i] = tmpRotetion
tmpCut = child2.genes[(self.genSize*2)+i]
child2.genes[(self.genSize*2)+i] = child1.genes[(self.genSize*2)+i]
child1.genes[(self.genSize*2)+i] = tmpCut
else:
tmpIndex.append(i)
i=0
for g in tmpIndex:
child2.genes[g] = tmpIndA[i]
child1.genes[g] = tmpIndB[i]
i +=1
return (child1, child2)
def updateChild(self,indFix,indComp,index,i ):
"""
This fuction updates the child with second parent genes based on PMX crossover.
"""
if indComp.genes[i] not in indFix.genes[index[0]:index[1]+1]:
childGene = indComp.genes[i]
else:
gene = indComp.genes[indFix.genes.index(indComp.genes[i])]
while gene in indFix.genes[index[0]:index[1]+1]:
gene = indComp.genes[indFix.genes.index(gene)]
childGene = gene
return childGene
def pmxCrossover(self, indA, indB):
"""
PMX Crossover Implementation
"""
child1 = indA.copy()
child2 = indB.copy()
index = [randint(0, self.genSize-1) for _ in range(2)]
index.sort()
for i in range(0, self.genSize):
if i >= index[0] and i <= index[1]:
tmpRotetion = child2.genes[self.genSize+i]
child2.genes[self.genSize+i] = child1.genes[self.genSize+i]
child1.genes[self.genSize+i] = tmpRotetion
tmpCut = child2.genes[(self.genSize*2)+i]
child2.genes[(self.genSize*2)+i] = child1.genes[(self.genSize*2)+i]
child1.genes[(self.genSize*2)+i] = tmpCut
else:
child1.genes[i] = self.updateChild(indA,indB,index,i)
child2.genes[i] = self.updateChild(indB,indA,index,i)
return (child1, child2)
def reciprocalExchangeMutation(self, ind):
"""
Reciprocal Exchange Mutation implementation
"""
if random() < self.mutationRate:
indexA = randint(0, (self.genSize*3)-1)
if indexA < self.genSize:
indexB = randint(0, self.genSize-1)
tmp = ind.genes[indexA]
ind.genes[indexA] = ind.genes[indexB]
ind.genes[indexB] = tmp
else:
ind.genes[indexA] = not(ind.genes[indexA])
ind = self.getCost(ind)
self.updateBest(ind)
return ind
def inversionMutation(self, ind):
"""
Inversion Mutation implementation
"""
if random() < self.mutationRate:
indexA = randint(0, (self.genSize*3)-1)
if indexA < self.genSize:
index = [randint(0, self.genSize-1) for _ in range(2)]
index.sort()
ind.genes[index[0]:index[1]+1] = reversed(ind.genes[index[0]:index[1]+1])
else:
ind.genes[indexA] = not(ind.genes[indexA])
ind = self.getCost(ind)
self.updateBest(ind)
return ind
def inversionMutationNew(self, ind):
"""
A variation of Inversion Mutation implementation, where we change the
position of a gene with the next gene. I believe that this function works
bether when the initial population are create by Nearest neighbor insertion.
"""
if random() < self.mutationRate:
indexA = randint(1, (self.genSize*3)-1)
if indexA < self.genSize:
ind.genes[indexA-1:indexA+1] = reversed(ind.genes[indexA-1:indexA+1])
else:
ind.genes[indexA] = not(ind.genes[indexA])
ind = self.getCost(ind)
self.updateBest(ind)
return ind
def eliteSurvival(self, ind):
"""
Ensuring that only the best individuals will be added to the population.
"""
fit = [i.cost for i in self.elitePopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > ind.cost:
self.elitePopulation[fit.index(maxFit)] = ind.copy()
if len(self.newPopulation) < self.popSize:
self.newPopulation.append(ind)
else:
fit = [i.cost for i in self.newPopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > ind.cost:
self.newPopulation[fit.index(maxFit)] = ind
def updateMatingPool(self):
"""
Updating the mating pool before creating a new generation
"""
self.matingPool = [s.copy() for s in self.population]
fit = [i.cost for i in self.matingPool]
self.listBestFit.append(min(fit))
self.listAvgFit.append(sum(fit)/len(fit))
"""
Updating the indexs for stochastic Universal Sampling before creating a new generation
"""
if self.selectionAlgorithm == 'S':
fitnessMinim = [1/i.cost for i in self.matingPool]
sumFitnessMinim = sum(fitnessMinim)
fracFitnessMinim = [i/sumFitnessMinim for i in fitnessMinim]
cumSumFracFitnessMinim = [sum(fracFitnessMinim[:i]) for i in range(1, len(fracFitnessMinim)+1)]
N = int(len(self.matingPool))
startPoint = uniform(0, (1/N))
marks = [startPoint + ((1/N) * i) for i in range(0,N)]
self.indexs = []
i = 0
for point in marks:
while(cumSumFracFitnessMinim[i]<point):
i +=1
self.indexs.append(i)
def newGeneration(self):
"""
Creating a new generation
1. Selection
2. Crossover
3. Mutation
"""
self.newPopulation = self.elitePopulation[:]
for i in range(0, int(self.newPopulSize/2)+1):
"""
Depending of your experiment you need to use the most suitable algorithms for:
1. Select two candidates
2. Apply Crossover
3. Apply Mutation
"""
if self.selectionAlgorithm == 'S':
indA, indB = self.stochasticUniversalSampling()
elif self.selectionAlgorithm == 'T':
indA, indB = self.TournamentSelection()
else:
indA, indB = self.randomSelection()
if self.crossoverAlgorithm == 'P':
child1,child2 = self.pmxCrossover(indA, indB)
else:
child1,child2 = self.uniformCrossover(indA, indB)
if self.mutationAlgorithm == 'I':
child1 = self.inversionMutation(child1)
child2 = self.inversionMutation(child2)
elif self.mutationAlgorithm == 'INEW':
child1 = self.inversionMutationNew(child1)
child2 = self.inversionMutationNew(child2)
else:
child1 = self.reciprocalExchangeMutation(child1)
child2 = self.reciprocalExchangeMutation(child2)
self.eliteSurvival(child1)
self.eliteSurvival(child2)
self.population = self.newPopulation
def filterDuplicate(self):
sol = np.array([str(i.getSequence(self.stacks,i.localSearchGenes)) for i in self.population])
indexDupl = [idx for idx, val in enumerate(sol) if val in sol[:idx]]
for i in indexDupl:
solution = Solution(self.templateSolution[0:], self.widthPlates, self.heightPlates)
index = [randint(self.genSize, self.genSize*3) for _ in range(2)]
index.sort()
solution.genes[index[0]:index[1]] = self.best.genes[index[0]:index[1]]
solution = self.getCost(solution)
self.population[i] = solution
self.elitePopulation = []
for sol_i in self.population:
#elite
if len(self.elitePopulation) < self.eliteSize:
self.elitePopulation.append(sol_i)
else:
fit = [i.cost for i in self.elitePopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > sol_i.cost:
self.elitePopulation[fit.index(maxFit)] = sol_i
#best fit
if self.best.cost > sol_i.cost:
self.best = sol_i.copy()
def GAStep(self):
"""
One step in the GA main algorithm
1. Updating mating pool with current population
2. Creating a new Generation
"""
start = time.time()
self.updateMatingPool()
self.newGeneration()
if self.iteration % 50 == 0:
self.filterDuplicate()
end = time.time()
self.timeIteration.append(end-start)
self.addOutput(end-start,self.best.cost, self.population)
def search(self):
"""
General search template.
Iterates for a given number of steps
"""
self.iteration = 0
while self.iteration < self.maxIterations and self.best.cost > 0:
self.GAStep()
self.iteration += 1
if self.iteration% 500 == 0:
print("Iteration ", self.iteration)
print ("Total iterations: ",self.iteration)
print ("Best Solution: ", self.best.cost) | GA_C.py | import pandas as pd
import numpy as np
from random import choice, randint, uniform, random
import util
from util import Stack, Item, Solution
import time
class GA:
def __init__(self, _instance, _popSize, _mutationRate, _maxIterations,
_widthPlates, _heightPlates,_crossoverAlgorithm,
_mutationAlgorithm,_selectionAlgorithm,_localSearchRate, _eliteRate):
"""
Parameters and general variables
"""
self.widthPlates = _widthPlates
self.heightPlates = _heightPlates
self.crossoverAlgorithm = _crossoverAlgorithm
self.mutationAlgorithm = _mutationAlgorithm
self.selectionAlgorithm = _selectionAlgorithm
self.output = []
self.population = []
self.elitePopulation= []
self.newPopulation = []
self.eliteRate = _eliteRate
self.eliteSize = _popSize * _eliteRate
self.newPopulSize = _popSize - self.eliteSize
self.matingPool = []
self.best = None
self.popSize = _popSize
self.stacks = {}
self.mutationRate = _mutationRate
self.localSearchRate= _localSearchRate
self.maxIterations = _maxIterations
self.iteration = 0
self.iterationOfBest= 0
self.instance = _instance
self.listBestFit = []
self.listAvgFit = []
self.timeIteration = []
self.table = {}
self.readInstance()
self.initPopulation()
self.genSize = len(self.templateSolution)
def readInstance(self):
batch = self.instance + "_batch.csv"
batch = pd.read_csv(batch, sep = ";")
stack = Stack(batch.STACK[0])
self.stacks[stack.idStack] = stack
self.templateSolution = []
for ix,it in batch.iterrows():
item = Item(it.ITEM_ID,it.LENGTH_ITEM, it.WIDTH_ITEM)
if stack.idStack != it.STACK:
stack = Stack(it.STACK)
self.stacks[stack.idStack] = stack
stack.add(item)
self.templateSolution.append(it.STACK)
def getCost(self, solution):
[stack.reset() for stack in self.stacks.values()]
if random() < self.localSearchRate:
solution.localSearch(self.stacks)
else:
solution.computeCost(self.stacks)
return solution
def initPopulation(self):
"""
Creating random individuals in the population
"""
for i in range(0, self.popSize):
solution = Solution(self.templateSolution[0:], self.widthPlates, self.heightPlates)
solution = self.getCost(solution)
self.population.append(solution)
self.best = self.population[0].copy()
for sol_i in self.population:
#elite
if len(self.elitePopulation) < self.eliteSize:
self.elitePopulation.append(sol_i)
else:
fit = [i.cost for i in self.elitePopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > sol_i.cost:
self.elitePopulation[fit.index(maxFit)] = sol_i
#best fit
if self.best.cost > sol_i.cost:
self.best = sol_i.copy()
print ("Best initial sol: ",self.best.cost)
self.bestInitialSol = self.best.cost
self.addOutput(0,self.best.cost, self.population)
def updateBest(self, candidate):
if candidate.cost < self.best.cost:
self.best = candidate.copy()
print ("iteration: ",self.iteration, "best: ",self.best.cost)
self.iterationOfBest = self.iteration
def addOutput(self,time,bestCost, population):
row = [i.cost for i in population]
row.append(time)
row.append(bestCost)
self.output.append(row)
def randomSelection(self):
"""
Random (uniform) selection of two individuals
"""
solA = self.matingPool[ randint(0, self.popSize-1) ]
solB = self.matingPool[ randint(0, self.popSize-1) ]
return [solA, solB]
def stochasticUniversalSampling(self):
"""
stochastic universal sampling Selection Implementation
"""
solA = self.matingPool[ choice(self.indexs) ]
solB = self.matingPool[ choice(self.indexs) ]
return [solA, solB]
def TournamentSelection(self):
"""
Tournament selection of two individuals
"""
sol1 = self.matingPool[ randint(0, self.popSize-1) ]
sol2 = self.matingPool[ randint(0, self.popSize-1) ]
solA = sol1 if sol1.cost < sol2.cost else sol2
sol3 = self.matingPool[ randint(0, self.popSize-1) ]
sol4 = self.matingPool[ randint(0, self.popSize-1) ]
solB = sol3 if sol3.cost < sol4.cost else sol4
if solA.cost == solB.cost:
if solA.cost != sol1.cost:
solA = sol1
elif solA.cost != sol2.cost:
solA = sol2
elif solA.cost != sol3.cost:
solA = sol3
elif solA.cost != sol4.cost:
solA = sol4
return [solA, solB]
def uniformCrossover(self, solA, solB):
"""
Uniform Crossover Implementation
"""
child1 = solA.copy()
child2 = solB.copy()
tmpIndA = solA.genes[0:]
tmpIndB = solB.genes[0:]
tmpIndex= []
for i in range(0, self.genSize):
if choice([True,False]):
tmpIndA.remove(child2.genes[i])
tmpIndB.remove(child1.genes[i])
tmpRotetion = child2.genes[self.genSize+i]
child2.genes[self.genSize+i] = child1.genes[self.genSize+i]
child1.genes[self.genSize+i] = tmpRotetion
tmpCut = child2.genes[(self.genSize*2)+i]
child2.genes[(self.genSize*2)+i] = child1.genes[(self.genSize*2)+i]
child1.genes[(self.genSize*2)+i] = tmpCut
else:
tmpIndex.append(i)
i=0
for g in tmpIndex:
child2.genes[g] = tmpIndA[i]
child1.genes[g] = tmpIndB[i]
i +=1
return (child1, child2)
def updateChild(self,indFix,indComp,index,i ):
"""
This fuction updates the child with second parent genes based on PMX crossover.
"""
if indComp.genes[i] not in indFix.genes[index[0]:index[1]+1]:
childGene = indComp.genes[i]
else:
gene = indComp.genes[indFix.genes.index(indComp.genes[i])]
while gene in indFix.genes[index[0]:index[1]+1]:
gene = indComp.genes[indFix.genes.index(gene)]
childGene = gene
return childGene
def pmxCrossover(self, indA, indB):
"""
PMX Crossover Implementation
"""
child1 = indA.copy()
child2 = indB.copy()
index = [randint(0, self.genSize-1) for _ in range(2)]
index.sort()
for i in range(0, self.genSize):
if i >= index[0] and i <= index[1]:
tmpRotetion = child2.genes[self.genSize+i]
child2.genes[self.genSize+i] = child1.genes[self.genSize+i]
child1.genes[self.genSize+i] = tmpRotetion
tmpCut = child2.genes[(self.genSize*2)+i]
child2.genes[(self.genSize*2)+i] = child1.genes[(self.genSize*2)+i]
child1.genes[(self.genSize*2)+i] = tmpCut
else:
child1.genes[i] = self.updateChild(indA,indB,index,i)
child2.genes[i] = self.updateChild(indB,indA,index,i)
return (child1, child2)
def reciprocalExchangeMutation(self, ind):
"""
Reciprocal Exchange Mutation implementation
"""
if random() < self.mutationRate:
indexA = randint(0, (self.genSize*3)-1)
if indexA < self.genSize:
indexB = randint(0, self.genSize-1)
tmp = ind.genes[indexA]
ind.genes[indexA] = ind.genes[indexB]
ind.genes[indexB] = tmp
else:
ind.genes[indexA] = not(ind.genes[indexA])
ind = self.getCost(ind)
self.updateBest(ind)
return ind
def inversionMutation(self, ind):
"""
Inversion Mutation implementation
"""
if random() < self.mutationRate:
indexA = randint(0, (self.genSize*3)-1)
if indexA < self.genSize:
index = [randint(0, self.genSize-1) for _ in range(2)]
index.sort()
ind.genes[index[0]:index[1]+1] = reversed(ind.genes[index[0]:index[1]+1])
else:
ind.genes[indexA] = not(ind.genes[indexA])
ind = self.getCost(ind)
self.updateBest(ind)
return ind
def inversionMutationNew(self, ind):
"""
A variation of Inversion Mutation implementation, where we change the
position of a gene with the next gene. I believe that this function works
bether when the initial population are create by Nearest neighbor insertion.
"""
if random() < self.mutationRate:
indexA = randint(1, (self.genSize*3)-1)
if indexA < self.genSize:
ind.genes[indexA-1:indexA+1] = reversed(ind.genes[indexA-1:indexA+1])
else:
ind.genes[indexA] = not(ind.genes[indexA])
ind = self.getCost(ind)
self.updateBest(ind)
return ind
def eliteSurvival(self, ind):
"""
Ensuring that only the best individuals will be added to the population.
"""
fit = [i.cost for i in self.elitePopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > ind.cost:
self.elitePopulation[fit.index(maxFit)] = ind.copy()
if len(self.newPopulation) < self.popSize:
self.newPopulation.append(ind)
else:
fit = [i.cost for i in self.newPopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > ind.cost:
self.newPopulation[fit.index(maxFit)] = ind
def updateMatingPool(self):
"""
Updating the mating pool before creating a new generation
"""
self.matingPool = [s.copy() for s in self.population]
fit = [i.cost for i in self.matingPool]
self.listBestFit.append(min(fit))
self.listAvgFit.append(sum(fit)/len(fit))
"""
Updating the indexs for stochastic Universal Sampling before creating a new generation
"""
if self.selectionAlgorithm == 'S':
fitnessMinim = [1/i.cost for i in self.matingPool]
sumFitnessMinim = sum(fitnessMinim)
fracFitnessMinim = [i/sumFitnessMinim for i in fitnessMinim]
cumSumFracFitnessMinim = [sum(fracFitnessMinim[:i]) for i in range(1, len(fracFitnessMinim)+1)]
N = int(len(self.matingPool))
startPoint = uniform(0, (1/N))
marks = [startPoint + ((1/N) * i) for i in range(0,N)]
self.indexs = []
i = 0
for point in marks:
while(cumSumFracFitnessMinim[i]<point):
i +=1
self.indexs.append(i)
def newGeneration(self):
"""
Creating a new generation
1. Selection
2. Crossover
3. Mutation
"""
self.newPopulation = self.elitePopulation[:]
for i in range(0, int(self.newPopulSize/2)+1):
"""
Depending of your experiment you need to use the most suitable algorithms for:
1. Select two candidates
2. Apply Crossover
3. Apply Mutation
"""
if self.selectionAlgorithm == 'S':
indA, indB = self.stochasticUniversalSampling()
elif self.selectionAlgorithm == 'T':
indA, indB = self.TournamentSelection()
else:
indA, indB = self.randomSelection()
if self.crossoverAlgorithm == 'P':
child1,child2 = self.pmxCrossover(indA, indB)
else:
child1,child2 = self.uniformCrossover(indA, indB)
if self.mutationAlgorithm == 'I':
child1 = self.inversionMutation(child1)
child2 = self.inversionMutation(child2)
elif self.mutationAlgorithm == 'INEW':
child1 = self.inversionMutationNew(child1)
child2 = self.inversionMutationNew(child2)
else:
child1 = self.reciprocalExchangeMutation(child1)
child2 = self.reciprocalExchangeMutation(child2)
self.eliteSurvival(child1)
self.eliteSurvival(child2)
self.population = self.newPopulation
def filterDuplicate(self):
sol = np.array([str(i.getSequence(self.stacks,i.localSearchGenes)) for i in self.population])
indexDupl = [idx for idx, val in enumerate(sol) if val in sol[:idx]]
for i in indexDupl:
solution = Solution(self.templateSolution[0:], self.widthPlates, self.heightPlates)
index = [randint(self.genSize, self.genSize*3) for _ in range(2)]
index.sort()
solution.genes[index[0]:index[1]] = self.best.genes[index[0]:index[1]]
solution = self.getCost(solution)
self.population[i] = solution
self.elitePopulation = []
for sol_i in self.population:
#elite
if len(self.elitePopulation) < self.eliteSize:
self.elitePopulation.append(sol_i)
else:
fit = [i.cost for i in self.elitePopulation]
fit.append(0)
maxFit = max(fit)
if maxFit > sol_i.cost:
self.elitePopulation[fit.index(maxFit)] = sol_i
#best fit
if self.best.cost > sol_i.cost:
self.best = sol_i.copy()
def GAStep(self):
"""
One step in the GA main algorithm
1. Updating mating pool with current population
2. Creating a new Generation
"""
start = time.time()
self.updateMatingPool()
self.newGeneration()
if self.iteration % 50 == 0:
self.filterDuplicate()
end = time.time()
self.timeIteration.append(end-start)
self.addOutput(end-start,self.best.cost, self.population)
def search(self):
"""
General search template.
Iterates for a given number of steps
"""
self.iteration = 0
while self.iteration < self.maxIterations and self.best.cost > 0:
self.GAStep()
self.iteration += 1
if self.iteration% 500 == 0:
print("Iteration ", self.iteration)
print ("Total iterations: ",self.iteration)
print ("Best Solution: ", self.best.cost) | 0.441071 | 0.160299 |
u"""
.. module:: test_create_organization
"""
from apps.volontulo.tests.views.test_organizations import TestOrganizations
from apps.volontulo.models import Organization
class TestCreateOrganization(TestOrganizations):
u"""Class responsible for testing editing organization specific views."""
def test__create_organization_get_form_anonymous(self):
u"""Test getting form for creating organization as anonymous."""
# Disable for anonymous user
response = self.client.get('/organizations/create')
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response,
'http://testserver/login?next=/organizations/create',
302,
200,
)
def test__create_organization_get_form_authorized(self):
u"""Test getting form for creating organization as authorized."""
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
response = self.client.get('/organizations/create')
self.assertTemplateUsed(
response,
'organizations/organization_form.html'
)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(response, u'Tworzenie organizacji')
def test__create_organization_post_form_anonymous(self):
u"""Test posting form for creating organization as anonymous."""
# Disable for anonymous user
response = self.client.post('/organizations/create')
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response,
'http://testserver/login?next=/organizations/create',
302,
200,
)
def test__create_empty_organization_post_form(self):
u"""Test posting form for creating empty (not filled) organization."""
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
form_params = {
'name': u'',
'address': u'',
'description': u'',
}
response = self.client.post('/organizations/create', form_params)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
u"Należy wypełnić wszystkie pola formularza."
)
def test__create_organization_post_form_fill_fields(self):
u"""Test posting form and check fields population."""
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
form_params = {
'name': u'Halperin Organix',
'address': u'East Street 123',
}
response = self.client.post('/organizations/create', form_params)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
u'Halperin Organix'
)
self.assertContains(
response,
u'East Street 123'
)
form_params = {
'description': u'User unfriendly organization',
}
response = self.client.post('/organizations/create', form_params)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
u'User unfriendly organization'
)
def test__create_valid_organization_form_post(self):
u"""Test posting valid form for creating organization."""
org_name = u'Halperin Organix'
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
form_params = {
'name': org_name,
'address': u'East Street 123',
'description': u'User unfriendly organization',
}
response = self.client.post(
'/organizations/create',
form_params,
follow=True
)
self.assertContains(
response,
u"Organizacja została dodana."
)
record = Organization.objects.get(name=org_name)
self.assertRedirects(
response,
'http://testserver/organizations/halperin-organix/{}'.format(
record.id),
302, 200)
self.assertEqual(record.name, org_name)
self.assertEqual(record.address, u'East Street 123')
self.assertEqual(record.description, u'User unfriendly organization')
def test__create_organization_one_column_template(self):
"""Test validate one column template on create page."""
# Disable for anonymous user
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
response = self.client.get('/organizations/create')
self.assertTemplateUsed(
response,
'common/col1.html'
) | apps/volontulo/tests/views/test_create_orgranization.py |
u"""
.. module:: test_create_organization
"""
from apps.volontulo.tests.views.test_organizations import TestOrganizations
from apps.volontulo.models import Organization
class TestCreateOrganization(TestOrganizations):
u"""Class responsible for testing editing organization specific views."""
def test__create_organization_get_form_anonymous(self):
u"""Test getting form for creating organization as anonymous."""
# Disable for anonymous user
response = self.client.get('/organizations/create')
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response,
'http://testserver/login?next=/organizations/create',
302,
200,
)
def test__create_organization_get_form_authorized(self):
u"""Test getting form for creating organization as authorized."""
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
response = self.client.get('/organizations/create')
self.assertTemplateUsed(
response,
'organizations/organization_form.html'
)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(response, u'Tworzenie organizacji')
def test__create_organization_post_form_anonymous(self):
u"""Test posting form for creating organization as anonymous."""
# Disable for anonymous user
response = self.client.post('/organizations/create')
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response,
'http://testserver/login?next=/organizations/create',
302,
200,
)
def test__create_empty_organization_post_form(self):
u"""Test posting form for creating empty (not filled) organization."""
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
form_params = {
'name': u'',
'address': u'',
'description': u'',
}
response = self.client.post('/organizations/create', form_params)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
u"Należy wypełnić wszystkie pola formularza."
)
def test__create_organization_post_form_fill_fields(self):
u"""Test posting form and check fields population."""
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
form_params = {
'name': u'Halperin Organix',
'address': u'East Street 123',
}
response = self.client.post('/organizations/create', form_params)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
u'Halperin Organix'
)
self.assertContains(
response,
u'East Street 123'
)
form_params = {
'description': u'User unfriendly organization',
}
response = self.client.post('/organizations/create', form_params)
self.assertIn('organization', response.context)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
u'User unfriendly organization'
)
def test__create_valid_organization_form_post(self):
u"""Test posting valid form for creating organization."""
org_name = u'Halperin Organix'
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
form_params = {
'name': org_name,
'address': u'East Street 123',
'description': u'User unfriendly organization',
}
response = self.client.post(
'/organizations/create',
form_params,
follow=True
)
self.assertContains(
response,
u"Organizacja została dodana."
)
record = Organization.objects.get(name=org_name)
self.assertRedirects(
response,
'http://testserver/organizations/halperin-organix/{}'.format(
record.id),
302, 200)
self.assertEqual(record.name, org_name)
self.assertEqual(record.address, u'East Street 123')
self.assertEqual(record.description, u'User unfriendly organization')
def test__create_organization_one_column_template(self):
"""Test validate one column template on create page."""
# Disable for anonymous user
self.client.post('/login', {
'email': u'<EMAIL>',
'password': '<PASSWORD>',
})
response = self.client.get('/organizations/create')
self.assertTemplateUsed(
response,
'common/col1.html'
) | 0.719482 | 0.306618 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import warnings
import pymysql
import random
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.events import SlotSet
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
def search_sql(item,choice):
#打开数据库连接
db = pymysql.connect("localhost","root","123456","songDB")
#使用cursor方法获取操作游标
cursor = db.cursor()
#SQL查询语句
if choice == 1:
sql = "select * from song where name = \'{}\' and singer = \'{}\'".format(item[0],item[1])
elif choice == 2:
sql = "select * from song where name = \'{}\'".format(item[0])
elif choice == 3:
sql = "select * from song where singer = \'{}\' and style = \'{}\'".format(item[1],item[2])
elif choice == 4:
sql = "select * from song where singer = \'{}\'".format(item[1])
elif choice == 5:
sql = "select * from song where style = \'{}\'".format(item[2])
#执行SQL查询语句
cursor.execute(sql)
#获取所有记录列表
results = cursor.fetchall()
if results == ():
return None
songlist = []
for row in results:
songlist.append({'name':row[0],'singer':row[1],'style':row[2]})
db.close()
songlen = len(songlist)
i = random.randint(0,songlen - 1)
return songlist[i]
def judge(item,choice):
db = pymysql.connect("localhost","root","123456","songDB")
cursor = db.cursor()
if choice == 1:
sql = "select * from song where name = \'{}\' ".format(item)
elif choice == 2:
sql = "select * from song where singer = \'{}\' ".format(item)
elif choice == 3:
sql = "select * from song where style = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results == ():
return True
else:
return False
def judge_exist(item):
db = pymysql.connect("localhost","root","123456","songDB")
cursor = db.cursor()
sql = "select * from song where name = \'{}\' or singer = \'{}\' or style = \'{}\' ".format(item,item,item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return True
else:
return False
def find_true_slot(item):
db = pymysql.connect("localhost","root","123456","songDB")
cursor = db.cursor()
sql = "select * from song where name = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return 1
sql = "select * from song where singer = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return 2
sql = "select * from song where style = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return 3
return 0
class ActionSearchConsume(Action):
def name(self):
return 'action_search_consume'
def run(self, dispatcher, tracker, domain):
item = []
item.append(tracker.get_slot("name"))# 歌名
item.append(tracker.get_slot("singer"))# 歌手
item.append(tracker.get_slot("style"))# 风格
if item[0]:
if item[1]:
choice = 1
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,正在为您播放{}的歌曲{},风格:{}".format(item[1], item[0],song['style']))
else:
choice = 2
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,为您播放歌曲{}。歌曲名:{},歌手:{}".format(item[0],song['name'],song['singer']))
elif item[1]:
if item[2]:
# 已知歌手和风格,随机选一个歌名
choice = 3
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,为您随机播放{}的一首{}风格的歌曲{}。".format(song['singer'],song['style'],song['name']))
else:
#已知歌手,随机选
choice = 4
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,正在为您随机播放{}的一首{}的歌曲。".format(song['singer'],song['name']))
elif item[2]:
# 已知风格,随机选
choice = 5
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的{}的歌曲".format(item[2]))
return dispatcher.utter_message("好哒,正在为您播放一首{}风格的歌曲{}。".format(song['style'],song['name']))
else:
return dispatcher.utter_template("utter_default",tracker)
class ActionSearchListen(Action):
def name(self):
return 'action_search_listen'
def run(self,dispatcher,tracker,domain):
item1 = tracker.get_slot("name")
item2 = tracker.get_slot("singer")
item3 = tracker.get_slot("style")
if item1 and judge_exist(item1) == False:
return dispatcher.utter_message("很遗憾,没能为您找到名为{}的歌曲。".format(item1))
if item2 and judge_exist(item2) == False:
return dispatcher.utter_message("很遗憾,没能为您找到{}的歌曲。".format(item2))
if item1:
choice = 1
if judge(item1,choice) == True:
tracker._set_slot('name',None)
num = find_true_slot(item1)
if num == 2:
tracker._set_slot('singer',item1)
if num == 3:
tracker._set_slot('style',item1)
if item2:
choice = 2
if judge(item2,choice) == True:
tracker._set_slot('singer',None)
num = find_true_slot(item2)
if num == 1:
tracker._set_slot('name',item2)
if num == 3:
tracker._set_slot('style',item2)
if item3:
choice = 3
if judge(item3,choice) == True:
tracker._set_slot('style',None)
num = find_true_slot(item3)
if num == 2:
tracker._set_slot('singer',item3)
if num == 1:
tracker._set_slot('name',item3)
item1 = tracker.get_slot("name")
item2 = tracker.get_slot("singer")
item3 = tracker.get_slot("style")
if item1 and item2:
return dispatcher.utter_message("好哒,请稍等")
else:
if item1 == None:
return dispatcher.utter_template("utter_ask_name",tracker)
if item2 == None:
return dispatcher.utter_template("utter_ask_singer",tracker)
if item3 == None:
return dispatcher.utter_template("utter_ask_style",tracker)
def train_dialogue(domain_file="music_domain.yml",
model_path="models/dialogue",
training_data_file="data/music_story.md"):
from rasa_core.policies.fallback import FallbackPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.agent import Agent
fallback = FallbackPolicy(fallback_action_name="utter_default",
core_threshold=0.3,
nlu_threshold=0.3)
agent = Agent(domain_file,
policies=[MemoizationPolicy(), KerasPolicy(),fallback])
training_data = agent.load_data(training_data_file)
agent.train(
training_data,
epochs=200,
batch_size=16,
augmentation_factor=50,
validation_split=0.2
)
agent.persist(model_path)
return agent
def run_ivrbot_online(input_channel=ConsoleInputChannel(),
interpreter=RasaNLUInterpreter("models/ivr_nlu/demo"),
domain_file="music_domain.yml",
training_data_file="data/music_story.md"):
agent = Agent(domain_file,
policies=[MemoizationPolicy(), KerasPolicy()],
interpreter=interpreter)
training_data = agent.load_data(training_data_file)
agent.train_online(training_data,
input_channel=input_channel,
batch_size=16,
epochs=200,
max_training_samples=300)
return agent
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu import config
from rasa_nlu.model import Trainer
training_data = load_data("data/music_nlu_data.json")
trainer = Trainer(config.load("ivr_chatbot.yml"))
trainer.train(training_data)
model_directory = trainer.persist("models/", project_name="ivr_nlu", fixed_model_name="demo")
return model_directory
def run(serve_forever=True):
agent = Agent.load("models/dialogue",
interpreter=RasaNLUInterpreter("models/ivr_nlu/demo"))
if serve_forever:
agent.handle_channel(ConsoleInputChannel())
return agent
if __name__ == "__main__":
logging.basicConfig(level="INFO")
parser = argparse.ArgumentParser(
description="starts the bot")
parser.add_argument(
"task",
choices=["train-nlu", "train-dialogue", "run", "online-train"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-dialogue":
train_dialogue()
elif task == "run":
run()
elif task == "online-train":
run_ivrbot_online()
else:
warnings.warn("Need to pass either 'train-nlu', 'train-dialogue' or "
"'run' to use the script.")
exit(1) | bot.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import warnings
import pymysql
import random
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.events import SlotSet
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
def search_sql(item,choice):
#打开数据库连接
db = pymysql.connect("localhost","root","123456","songDB")
#使用cursor方法获取操作游标
cursor = db.cursor()
#SQL查询语句
if choice == 1:
sql = "select * from song where name = \'{}\' and singer = \'{}\'".format(item[0],item[1])
elif choice == 2:
sql = "select * from song where name = \'{}\'".format(item[0])
elif choice == 3:
sql = "select * from song where singer = \'{}\' and style = \'{}\'".format(item[1],item[2])
elif choice == 4:
sql = "select * from song where singer = \'{}\'".format(item[1])
elif choice == 5:
sql = "select * from song where style = \'{}\'".format(item[2])
#执行SQL查询语句
cursor.execute(sql)
#获取所有记录列表
results = cursor.fetchall()
if results == ():
return None
songlist = []
for row in results:
songlist.append({'name':row[0],'singer':row[1],'style':row[2]})
db.close()
songlen = len(songlist)
i = random.randint(0,songlen - 1)
return songlist[i]
def judge(item,choice):
db = pymysql.connect("localhost","root","123456","songDB")
cursor = db.cursor()
if choice == 1:
sql = "select * from song where name = \'{}\' ".format(item)
elif choice == 2:
sql = "select * from song where singer = \'{}\' ".format(item)
elif choice == 3:
sql = "select * from song where style = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results == ():
return True
else:
return False
def judge_exist(item):
db = pymysql.connect("localhost","root","123456","songDB")
cursor = db.cursor()
sql = "select * from song where name = \'{}\' or singer = \'{}\' or style = \'{}\' ".format(item,item,item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return True
else:
return False
def find_true_slot(item):
db = pymysql.connect("localhost","root","123456","songDB")
cursor = db.cursor()
sql = "select * from song where name = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return 1
sql = "select * from song where singer = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return 2
sql = "select * from song where style = \'{}\' ".format(item)
cursor.execute(sql)
results = cursor.fetchall()
if results != ():
return 3
return 0
class ActionSearchConsume(Action):
def name(self):
return 'action_search_consume'
def run(self, dispatcher, tracker, domain):
item = []
item.append(tracker.get_slot("name"))# 歌名
item.append(tracker.get_slot("singer"))# 歌手
item.append(tracker.get_slot("style"))# 风格
if item[0]:
if item[1]:
choice = 1
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,正在为您播放{}的歌曲{},风格:{}".format(item[1], item[0],song['style']))
else:
choice = 2
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,为您播放歌曲{}。歌曲名:{},歌手:{}".format(item[0],song['name'],song['singer']))
elif item[1]:
if item[2]:
# 已知歌手和风格,随机选一个歌名
choice = 3
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,为您随机播放{}的一首{}风格的歌曲{}。".format(song['singer'],song['style'],song['name']))
else:
#已知歌手,随机选
choice = 4
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的歌曲")
return dispatcher.utter_message("好哒,正在为您随机播放{}的一首{}的歌曲。".format(song['singer'],song['name']))
elif item[2]:
# 已知风格,随机选
choice = 5
song = search_sql(item,choice)
if song == None:
return dispatcher.utter_message("对不起,我们没能找到您想要的{}的歌曲".format(item[2]))
return dispatcher.utter_message("好哒,正在为您播放一首{}风格的歌曲{}。".format(song['style'],song['name']))
else:
return dispatcher.utter_template("utter_default",tracker)
class ActionSearchListen(Action):
def name(self):
return 'action_search_listen'
def run(self,dispatcher,tracker,domain):
item1 = tracker.get_slot("name")
item2 = tracker.get_slot("singer")
item3 = tracker.get_slot("style")
if item1 and judge_exist(item1) == False:
return dispatcher.utter_message("很遗憾,没能为您找到名为{}的歌曲。".format(item1))
if item2 and judge_exist(item2) == False:
return dispatcher.utter_message("很遗憾,没能为您找到{}的歌曲。".format(item2))
if item1:
choice = 1
if judge(item1,choice) == True:
tracker._set_slot('name',None)
num = find_true_slot(item1)
if num == 2:
tracker._set_slot('singer',item1)
if num == 3:
tracker._set_slot('style',item1)
if item2:
choice = 2
if judge(item2,choice) == True:
tracker._set_slot('singer',None)
num = find_true_slot(item2)
if num == 1:
tracker._set_slot('name',item2)
if num == 3:
tracker._set_slot('style',item2)
if item3:
choice = 3
if judge(item3,choice) == True:
tracker._set_slot('style',None)
num = find_true_slot(item3)
if num == 2:
tracker._set_slot('singer',item3)
if num == 1:
tracker._set_slot('name',item3)
item1 = tracker.get_slot("name")
item2 = tracker.get_slot("singer")
item3 = tracker.get_slot("style")
if item1 and item2:
return dispatcher.utter_message("好哒,请稍等")
else:
if item1 == None:
return dispatcher.utter_template("utter_ask_name",tracker)
if item2 == None:
return dispatcher.utter_template("utter_ask_singer",tracker)
if item3 == None:
return dispatcher.utter_template("utter_ask_style",tracker)
def train_dialogue(domain_file="music_domain.yml",
model_path="models/dialogue",
training_data_file="data/music_story.md"):
from rasa_core.policies.fallback import FallbackPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.agent import Agent
fallback = FallbackPolicy(fallback_action_name="utter_default",
core_threshold=0.3,
nlu_threshold=0.3)
agent = Agent(domain_file,
policies=[MemoizationPolicy(), KerasPolicy(),fallback])
training_data = agent.load_data(training_data_file)
agent.train(
training_data,
epochs=200,
batch_size=16,
augmentation_factor=50,
validation_split=0.2
)
agent.persist(model_path)
return agent
def run_ivrbot_online(input_channel=ConsoleInputChannel(),
interpreter=RasaNLUInterpreter("models/ivr_nlu/demo"),
domain_file="music_domain.yml",
training_data_file="data/music_story.md"):
agent = Agent(domain_file,
policies=[MemoizationPolicy(), KerasPolicy()],
interpreter=interpreter)
training_data = agent.load_data(training_data_file)
agent.train_online(training_data,
input_channel=input_channel,
batch_size=16,
epochs=200,
max_training_samples=300)
return agent
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu import config
from rasa_nlu.model import Trainer
training_data = load_data("data/music_nlu_data.json")
trainer = Trainer(config.load("ivr_chatbot.yml"))
trainer.train(training_data)
model_directory = trainer.persist("models/", project_name="ivr_nlu", fixed_model_name="demo")
return model_directory
def run(serve_forever=True):
agent = Agent.load("models/dialogue",
interpreter=RasaNLUInterpreter("models/ivr_nlu/demo"))
if serve_forever:
agent.handle_channel(ConsoleInputChannel())
return agent
if __name__ == "__main__":
logging.basicConfig(level="INFO")
parser = argparse.ArgumentParser(
description="starts the bot")
parser.add_argument(
"task",
choices=["train-nlu", "train-dialogue", "run", "online-train"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-dialogue":
train_dialogue()
elif task == "run":
run()
elif task == "online-train":
run_ivrbot_online()
else:
warnings.warn("Need to pass either 'train-nlu', 'train-dialogue' or "
"'run' to use the script.")
exit(1) | 0.253491 | 0.09343 |
import numpy as np
import warnings
class EmptyTriclusterException(Exception):
pass
class DeltaTrimax():
"""
The delta-TRIMAX clustering algorithm.
Attributes
----------
D : ndarray
The data to be clustered
delta : float
The delta parameter of the algorithm. Must be > 0.0
l : float
The lambda parameter of the algorithm. Must be >= 1.0
chrom_cutoff : int
The deletion threshold for the chromosome axis
gene_cutoff : int
The deletion threshold for the gene axis
sample_cutoff : int
The deletion threshold for the sample axis
tol : float
The algorithm's tolerance
mask_mode : {'random', 'nan'}
The masking method for the clustered values. If 'random', the values
are replaced by random floats. If 'nan', they are replaced by nan
values.
n_chroms : int
The number of chromosome pairs
n_genes : int
The number of genes
n_samples : int
The number of samples
result_chroms : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #chromosomes and contains
True if the respective chromosome is contained in the tricluster,
False otherwise.
result_genes : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #genes and contains
True if the respective gene is contained in the tricluster,
False otherwise.
result_samples : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #samples and contains
True if the respective sample is contained in the tricluster,
False otherwise.
MSR : float
The Mean Squared Residue of each cell.
MSR_chrom : float
The Mean Squared Residue of each chromosome.
MSR_gene : float
The Mean Squared Residue of each gene.
MSR_sample : float
The Mean Squared Residue of each sample.
Methods
-------
fit(self, delta=2.5, l=1.005, chrom_cutoff=50, gene_cutoff=50,
sample_cutoff=50, tol=1e-5, mask_mode='nan', verbose=False)
Run the delta-TRIMAX algorithm for the given parameters.
get_triclusters()
Return the triclusters found by the algorithm.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>, ‘Coexpression and coregulation analysis of
time-series gene expression data in estrogen-induced breast cancer
cell’, Algorithms Mol. Biol., τ. 8, τχ. 1, σ 9, 2013.
"""
def __init__(self, D):
"""
Parameters
----------
D : ndarray
The data to be clustered
"""
self.D = D.copy()
def _check_parameters(self):
"""
Checks the parameters given by the user. If the values are not valid,
a ValueError is raised.
"""
if (self.delta < 0):
raise ValueError("'delta' must be > 0.0, but its value"
" is {}".format(self.delta))
if (self.l < 1):
raise ValueError("'lambda' must be >= 1.0, but its"
" value is {}".format(self.l))
if (self.gene_cutoff < 1):
raise ValueError("'gene deletion cutoff' must be > 1.0, but its"
" value is {}".format(self.gene_cutoff))
if (self.sample_cutoff < 1):
raise ValueError("'sample deletion cutoff' must be > 1.0, but its"
" value is {}".format(self.sample_cutoff))
if (self.chrom_cutoff < 1):
raise ValueError("'chromosomes deletion cutoff' must be > 1.0, but"
" its value is {}".format(self.chrom_cutoff))
if (self.mask_mode not in ['nan', 'random']):
raise ValueError("'mask mode' must be either 'nan' or 'random',"
" but its value is {}".format(self.mask_mode))
def _compute_MSR(self, chroms, genes, samples):
"""
Computes the Mean Squared Residue (MSR) for the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Note
----
Updates the n_chorms, n_genes, n_samples, MSR, MSR_chrom, MSR_gene and
MSR_sample attributes.
"""
chrom_idx = np.expand_dims(np.expand_dims(np.nonzero(chroms)[0], axis=1), axis=1)
gene_idx = np.expand_dims(np.expand_dims(np.nonzero(genes)[0], axis=0), axis=2)
sample_idx = np.expand_dims(np.expand_dims(np.nonzero(samples)[0], axis=0), axis=0)
if (not chrom_idx.size) or (not gene_idx.size) or (not sample_idx.size):
raise EmptyTriclusterException()
subarr = self.D[chrom_idx, gene_idx, sample_idx]
self.n_chroms = subarr.shape[0]
self.n_genes = subarr.shape[1]
self.n_samples = subarr.shape[2]
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
# Computation of m_iJK
m_iJK = np.nanmean(np.nanmean(subarr, axis=2), axis=1)
m_iJK = np.expand_dims(np.expand_dims(m_iJK, axis=1), axis=1)
# Computation of m_IjK
m_IjK = np.nanmean(np.nanmean(subarr, axis=2), axis=0)
m_IjK = np.expand_dims(np.expand_dims(m_IjK, axis=0), axis=2)
# Computation of m_IJk
m_IJk = np.nansum(np.nansum(subarr, axis=0, keepdims=1), axis=1, keepdims=1)
m_IJk = m_IJk / ((subarr.shape[0] * subarr.shape[1]) - np.count_nonzero(np.isnan(subarr[:,:,0])))
# Computation of m_IJK
m_IJK = np.nanmean(subarr)
# Computation of MSR
residue = subarr - m_iJK - m_IjK - m_IJk + (2*m_IJK)
SR = np.square(residue)
self.MSR = np.nanmean(SR)
self.MSR_chrom = np.nanmean(np.nanmean(SR, axis=2), axis=1)
self.MSR_gene = np.nanmean(np.nanmean(SR, axis=2), axis=0)
self.MSR_sample = np.nanmean(np.nanmean(SR, axis=0), axis=0)
# Check tolerance
self.MSR_chrom[self.MSR_chrom < self.tol] = 0
self.MSR_gene[self.MSR_gene < self.tol] = 0
self.MSR_sample[self.MSR_sample < self.tol] = 0
self.MSR = 0 if (self.MSR < self.tol or np.isnan(self.MSR)) else self.MSR
def _single_node_deletion(self, chroms, genes, samples):
"""
The single node deletion routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster examined,
0 otherwise.
"""
self._compute_MSR(chroms, genes, samples)
while (self.MSR > self.delta):
chrom_idx = np.nanargmax(self.MSR_chrom)
gene_idx = np.nanargmax(self.MSR_gene)
sample_idx = np.nanargmax(self.MSR_sample)
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
if (self.MSR_chrom[chrom_idx] > self.MSR_gene[gene_idx]):
if (self.MSR_chrom[chrom_idx] > self.MSR_sample[sample_idx]):
# Delete chrom
nonz_idx = chroms.nonzero()[0]
chroms.put(nonz_idx[chrom_idx], 0)
else:
# Delete sample
nonz_idx = samples.nonzero()[0]
samples.put(nonz_idx[sample_idx], 0)
else:
if (self.MSR_gene[gene_idx] > self.MSR_sample[sample_idx]):
# Delete gene
nonz_idx = genes.nonzero()[0]
genes.put(nonz_idx[gene_idx], 0)
else:
# Delete sample
nonz_idx = samples.nonzero()[0]
samples.put(nonz_idx[sample_idx], 0)
self._compute_MSR(chroms, genes, samples)
return chroms, genes, samples
def _multiple_node_deletion(self, chroms, genes, samples):
"""
The multiple node deletion routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster examined,
0 otherwise.
"""
self._compute_MSR(chroms, genes, samples)
while (self.MSR > self.delta):
deleted = 0
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
if (self.n_chroms > self.chrom_cutoff):
chroms_to_del = self.MSR_chrom > (self.l * self.MSR)
nonz_idx = chroms.nonzero()[0]
if (chroms_to_del.any()):
deleted = 1
chroms.put(nonz_idx[chroms_to_del], 0)
if (self.n_genes > self.gene_cutoff):
genes_to_del = self.MSR_gene > (self.l * self.MSR)
nonz_idx = genes.nonzero()[0]
if (genes_to_del.any()):
deleted = 1
genes.put(nonz_idx[genes_to_del], 0)
if (self.n_samples > self.sample_cutoff):
samples_to_del = self.MSR_sample > (self.l * self.MSR)
nonz_idx = samples.nonzero()[0]
if (samples_to_del.any()):
deleted = 1
samples.put(nonz_idx[samples_to_del], 0)
if (not deleted):
break
self._compute_MSR(chroms, genes, samples)
return chroms, genes, samples
def _node_addition(self, chroms, genes, samples):
"""
The single node addition routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster examined,
0 otherwise.
"""
while True:
self._compute_MSR(chroms, genes, samples)
n_chroms = np.count_nonzero(chroms)
n_genes = np.count_nonzero(genes)
n_samples = np.count_nonzero(samples)
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
elems_to_add = self.MSR_chrom <= self.MSR
nonz_idx = chroms.nonzero()[0]
chroms.put(nonz_idx[elems_to_add], 1)
elems_to_add = self.MSR_gene <= self.MSR
nonz_idx = genes.nonzero()[0]
genes.put(nonz_idx[elems_to_add], 1)
elems_to_add = self.MSR_sample <= self.MSR
nonz_idx = samples.nonzero()[0]
samples.put(nonz_idx[elems_to_add], 1)
if (n_chroms == np.count_nonzero(chroms)) and \
(n_genes == np.count_nonzero(genes)) and \
(n_samples == np.count_nonzero(samples)):
break
return chroms, genes, samples
def _mask(self, chroms, genes, samples, minval, maxval):
"""
Masks the values of the array that have been used in triclusters
with either random float numbers, or nan.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
minval : float
Lower boundary of the output interval for the random generator.
maxval : float
Upper boundary of the output interval for the random generator.
"""
c = np.expand_dims(np.expand_dims(chroms.nonzero()[0], axis=1), axis=1)
g = np.expand_dims(np.expand_dims(genes.nonzero()[0], axis=0), axis=2)
s = np.expand_dims(np.expand_dims(samples.nonzero()[0], axis=0), axis=0)
if (self.mask_mode == 'random'):
shape = np.count_nonzero(chroms), np.count_nonzero(genes), np.count_nonzero(samples)
mask_vals = np.random.uniform(minval, maxval, shape)
self.D[c, g, s] = mask_vals
else:
self.D[c, g, s] = np.nan
def fit(self, delta=2.5, l=1.005, chrom_cutoff=50, gene_cutoff=50,
sample_cutoff=50, tol=1e-5, mask_mode='nan', verbose=False):
"""
Runs the delta-TRIMAX algorithm with the given parameters.
Parameters
----------
delta : float, default 2.5
The delta parameter of the algorithm. Must be > 0.0
l : float, default 1.005
The lambda parameter of the algorithm. Must be >= 1.0
chrom_cutoff : int, default 50
The deletion threshold for the chromosome axis
gene_cutoff : int, default 50
The deletion threshold for the gene axis
sample_cutoff : int, default 50
The deletion threshold for the sample axis
tol : float, default 1e-5
The algorithm's tolerance
mask_mode : {'random', 'nan'}, default 'nan'
The masking method for the clustered values. If 'random', the values
are replaced by random floats. If 'nan', they are replaced by nan
values.
verbose : bool, default False
Verbose mode for debugging.
"""
self.delta = delta
self.l = l
self.chrom_cutoff = chrom_cutoff
self.gene_cutoff = gene_cutoff
self.sample_cutoff = sample_cutoff
self.tol = tol
self.mask_mode = mask_mode
self._check_parameters()
n_chroms, n_genes, n_samples = self.D.shape
minval, maxval = np.nanmin(self.D), np.nanmax(self.D)
result_chroms = []
result_genes = []
result_samples = []
i = 1
while True:
if (verbose):
print(i)
chroms = np.ones(n_chroms, dtype=np.bool)
genes = np.ones(n_genes, dtype=np.bool)
samples = np.ones(n_samples, dtype=np.bool)
# Multiple node deletion
chroms, genes, samples = self._multiple_node_deletion(chroms,
genes,
samples)
# Single node deletion
chroms, genes, samples = self._single_node_deletion(chroms,
genes,
samples)
# Node addition
chroms, genes, samples = self._node_addition(chroms,
genes,
samples)
# Check for trivial tricluster
if (chroms.sum() == 1) or (genes.sum() == 1) or (samples.sum() == 1):
break # trivial bicluster
# Check if the aren't any unused values in D
if ((mask_mode == 'nan') and (np.isnan(self.D).all())):
break
# Mask values
self._mask(chroms, genes, samples, minval, maxval)
result_chroms.append(chroms)
result_genes.append(genes)
result_samples.append(samples)
if (verbose):
print("--- MSR = " + str(self.MSR))
i += 1
self.result_chroms = result_chroms
self.result_genes = result_genes
self.result_samples = result_samples
def get_triclusters(self):
"""
Returns the triclusters found by the algorithm.
"""
return self.result_chroms, self.result_genes, self.result_samples | mycluster/DeltaTrimax.py |
import numpy as np
import warnings
class EmptyTriclusterException(Exception):
pass
class DeltaTrimax():
"""
The delta-TRIMAX clustering algorithm.
Attributes
----------
D : ndarray
The data to be clustered
delta : float
The delta parameter of the algorithm. Must be > 0.0
l : float
The lambda parameter of the algorithm. Must be >= 1.0
chrom_cutoff : int
The deletion threshold for the chromosome axis
gene_cutoff : int
The deletion threshold for the gene axis
sample_cutoff : int
The deletion threshold for the sample axis
tol : float
The algorithm's tolerance
mask_mode : {'random', 'nan'}
The masking method for the clustered values. If 'random', the values
are replaced by random floats. If 'nan', they are replaced by nan
values.
n_chroms : int
The number of chromosome pairs
n_genes : int
The number of genes
n_samples : int
The number of samples
result_chroms : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #chromosomes and contains
True if the respective chromosome is contained in the tricluster,
False otherwise.
result_genes : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #genes and contains
True if the respective gene is contained in the tricluster,
False otherwise.
result_samples : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #samples and contains
True if the respective sample is contained in the tricluster,
False otherwise.
MSR : float
The Mean Squared Residue of each cell.
MSR_chrom : float
The Mean Squared Residue of each chromosome.
MSR_gene : float
The Mean Squared Residue of each gene.
MSR_sample : float
The Mean Squared Residue of each sample.
Methods
-------
fit(self, delta=2.5, l=1.005, chrom_cutoff=50, gene_cutoff=50,
sample_cutoff=50, tol=1e-5, mask_mode='nan', verbose=False)
Run the delta-TRIMAX algorithm for the given parameters.
get_triclusters()
Return the triclusters found by the algorithm.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>, ‘Coexpression and coregulation analysis of
time-series gene expression data in estrogen-induced breast cancer
cell’, Algorithms Mol. Biol., τ. 8, τχ. 1, σ 9, 2013.
"""
def __init__(self, D):
"""
Parameters
----------
D : ndarray
The data to be clustered
"""
self.D = D.copy()
def _check_parameters(self):
"""
Checks the parameters given by the user. If the values are not valid,
a ValueError is raised.
"""
if (self.delta < 0):
raise ValueError("'delta' must be > 0.0, but its value"
" is {}".format(self.delta))
if (self.l < 1):
raise ValueError("'lambda' must be >= 1.0, but its"
" value is {}".format(self.l))
if (self.gene_cutoff < 1):
raise ValueError("'gene deletion cutoff' must be > 1.0, but its"
" value is {}".format(self.gene_cutoff))
if (self.sample_cutoff < 1):
raise ValueError("'sample deletion cutoff' must be > 1.0, but its"
" value is {}".format(self.sample_cutoff))
if (self.chrom_cutoff < 1):
raise ValueError("'chromosomes deletion cutoff' must be > 1.0, but"
" its value is {}".format(self.chrom_cutoff))
if (self.mask_mode not in ['nan', 'random']):
raise ValueError("'mask mode' must be either 'nan' or 'random',"
" but its value is {}".format(self.mask_mode))
def _compute_MSR(self, chroms, genes, samples):
"""
Computes the Mean Squared Residue (MSR) for the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Note
----
Updates the n_chorms, n_genes, n_samples, MSR, MSR_chrom, MSR_gene and
MSR_sample attributes.
"""
chrom_idx = np.expand_dims(np.expand_dims(np.nonzero(chroms)[0], axis=1), axis=1)
gene_idx = np.expand_dims(np.expand_dims(np.nonzero(genes)[0], axis=0), axis=2)
sample_idx = np.expand_dims(np.expand_dims(np.nonzero(samples)[0], axis=0), axis=0)
if (not chrom_idx.size) or (not gene_idx.size) or (not sample_idx.size):
raise EmptyTriclusterException()
subarr = self.D[chrom_idx, gene_idx, sample_idx]
self.n_chroms = subarr.shape[0]
self.n_genes = subarr.shape[1]
self.n_samples = subarr.shape[2]
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
# Computation of m_iJK
m_iJK = np.nanmean(np.nanmean(subarr, axis=2), axis=1)
m_iJK = np.expand_dims(np.expand_dims(m_iJK, axis=1), axis=1)
# Computation of m_IjK
m_IjK = np.nanmean(np.nanmean(subarr, axis=2), axis=0)
m_IjK = np.expand_dims(np.expand_dims(m_IjK, axis=0), axis=2)
# Computation of m_IJk
m_IJk = np.nansum(np.nansum(subarr, axis=0, keepdims=1), axis=1, keepdims=1)
m_IJk = m_IJk / ((subarr.shape[0] * subarr.shape[1]) - np.count_nonzero(np.isnan(subarr[:,:,0])))
# Computation of m_IJK
m_IJK = np.nanmean(subarr)
# Computation of MSR
residue = subarr - m_iJK - m_IjK - m_IJk + (2*m_IJK)
SR = np.square(residue)
self.MSR = np.nanmean(SR)
self.MSR_chrom = np.nanmean(np.nanmean(SR, axis=2), axis=1)
self.MSR_gene = np.nanmean(np.nanmean(SR, axis=2), axis=0)
self.MSR_sample = np.nanmean(np.nanmean(SR, axis=0), axis=0)
# Check tolerance
self.MSR_chrom[self.MSR_chrom < self.tol] = 0
self.MSR_gene[self.MSR_gene < self.tol] = 0
self.MSR_sample[self.MSR_sample < self.tol] = 0
self.MSR = 0 if (self.MSR < self.tol or np.isnan(self.MSR)) else self.MSR
def _single_node_deletion(self, chroms, genes, samples):
"""
The single node deletion routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster examined,
0 otherwise.
"""
self._compute_MSR(chroms, genes, samples)
while (self.MSR > self.delta):
chrom_idx = np.nanargmax(self.MSR_chrom)
gene_idx = np.nanargmax(self.MSR_gene)
sample_idx = np.nanargmax(self.MSR_sample)
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
if (self.MSR_chrom[chrom_idx] > self.MSR_gene[gene_idx]):
if (self.MSR_chrom[chrom_idx] > self.MSR_sample[sample_idx]):
# Delete chrom
nonz_idx = chroms.nonzero()[0]
chroms.put(nonz_idx[chrom_idx], 0)
else:
# Delete sample
nonz_idx = samples.nonzero()[0]
samples.put(nonz_idx[sample_idx], 0)
else:
if (self.MSR_gene[gene_idx] > self.MSR_sample[sample_idx]):
# Delete gene
nonz_idx = genes.nonzero()[0]
genes.put(nonz_idx[gene_idx], 0)
else:
# Delete sample
nonz_idx = samples.nonzero()[0]
samples.put(nonz_idx[sample_idx], 0)
self._compute_MSR(chroms, genes, samples)
return chroms, genes, samples
def _multiple_node_deletion(self, chroms, genes, samples):
"""
The multiple node deletion routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster examined,
0 otherwise.
"""
self._compute_MSR(chroms, genes, samples)
while (self.MSR > self.delta):
deleted = 0
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
if (self.n_chroms > self.chrom_cutoff):
chroms_to_del = self.MSR_chrom > (self.l * self.MSR)
nonz_idx = chroms.nonzero()[0]
if (chroms_to_del.any()):
deleted = 1
chroms.put(nonz_idx[chroms_to_del], 0)
if (self.n_genes > self.gene_cutoff):
genes_to_del = self.MSR_gene > (self.l * self.MSR)
nonz_idx = genes.nonzero()[0]
if (genes_to_del.any()):
deleted = 1
genes.put(nonz_idx[genes_to_del], 0)
if (self.n_samples > self.sample_cutoff):
samples_to_del = self.MSR_sample > (self.l * self.MSR)
nonz_idx = samples.nonzero()[0]
if (samples_to_del.any()):
deleted = 1
samples.put(nonz_idx[samples_to_del], 0)
if (not deleted):
break
self._compute_MSR(chroms, genes, samples)
return chroms, genes, samples
def _node_addition(self, chroms, genes, samples):
"""
The single node addition routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster examined,
0 otherwise.
"""
while True:
self._compute_MSR(chroms, genes, samples)
n_chroms = np.count_nonzero(chroms)
n_genes = np.count_nonzero(genes)
n_samples = np.count_nonzero(samples)
with warnings.catch_warnings(): # We expect mean of NaNs here
warnings.simplefilter("ignore", category=RuntimeWarning)
elems_to_add = self.MSR_chrom <= self.MSR
nonz_idx = chroms.nonzero()[0]
chroms.put(nonz_idx[elems_to_add], 1)
elems_to_add = self.MSR_gene <= self.MSR
nonz_idx = genes.nonzero()[0]
genes.put(nonz_idx[elems_to_add], 1)
elems_to_add = self.MSR_sample <= self.MSR
nonz_idx = samples.nonzero()[0]
samples.put(nonz_idx[elems_to_add], 1)
if (n_chroms == np.count_nonzero(chroms)) and \
(n_genes == np.count_nonzero(genes)) and \
(n_samples == np.count_nonzero(samples)):
break
return chroms, genes, samples
def _mask(self, chroms, genes, samples, minval, maxval):
"""
Masks the values of the array that have been used in triclusters
with either random float numbers, or nan.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
minval : float
Lower boundary of the output interval for the random generator.
maxval : float
Upper boundary of the output interval for the random generator.
"""
c = np.expand_dims(np.expand_dims(chroms.nonzero()[0], axis=1), axis=1)
g = np.expand_dims(np.expand_dims(genes.nonzero()[0], axis=0), axis=2)
s = np.expand_dims(np.expand_dims(samples.nonzero()[0], axis=0), axis=0)
if (self.mask_mode == 'random'):
shape = np.count_nonzero(chroms), np.count_nonzero(genes), np.count_nonzero(samples)
mask_vals = np.random.uniform(minval, maxval, shape)
self.D[c, g, s] = mask_vals
else:
self.D[c, g, s] = np.nan
def fit(self, delta=2.5, l=1.005, chrom_cutoff=50, gene_cutoff=50,
sample_cutoff=50, tol=1e-5, mask_mode='nan', verbose=False):
"""
Runs the delta-TRIMAX algorithm with the given parameters.
Parameters
----------
delta : float, default 2.5
The delta parameter of the algorithm. Must be > 0.0
l : float, default 1.005
The lambda parameter of the algorithm. Must be >= 1.0
chrom_cutoff : int, default 50
The deletion threshold for the chromosome axis
gene_cutoff : int, default 50
The deletion threshold for the gene axis
sample_cutoff : int, default 50
The deletion threshold for the sample axis
tol : float, default 1e-5
The algorithm's tolerance
mask_mode : {'random', 'nan'}, default 'nan'
The masking method for the clustered values. If 'random', the values
are replaced by random floats. If 'nan', they are replaced by nan
values.
verbose : bool, default False
Verbose mode for debugging.
"""
self.delta = delta
self.l = l
self.chrom_cutoff = chrom_cutoff
self.gene_cutoff = gene_cutoff
self.sample_cutoff = sample_cutoff
self.tol = tol
self.mask_mode = mask_mode
self._check_parameters()
n_chroms, n_genes, n_samples = self.D.shape
minval, maxval = np.nanmin(self.D), np.nanmax(self.D)
result_chroms = []
result_genes = []
result_samples = []
i = 1
while True:
if (verbose):
print(i)
chroms = np.ones(n_chroms, dtype=np.bool)
genes = np.ones(n_genes, dtype=np.bool)
samples = np.ones(n_samples, dtype=np.bool)
# Multiple node deletion
chroms, genes, samples = self._multiple_node_deletion(chroms,
genes,
samples)
# Single node deletion
chroms, genes, samples = self._single_node_deletion(chroms,
genes,
samples)
# Node addition
chroms, genes, samples = self._node_addition(chroms,
genes,
samples)
# Check for trivial tricluster
if (chroms.sum() == 1) or (genes.sum() == 1) or (samples.sum() == 1):
break # trivial bicluster
# Check if the aren't any unused values in D
if ((mask_mode == 'nan') and (np.isnan(self.D).all())):
break
# Mask values
self._mask(chroms, genes, samples, minval, maxval)
result_chroms.append(chroms)
result_genes.append(genes)
result_samples.append(samples)
if (verbose):
print("--- MSR = " + str(self.MSR))
i += 1
self.result_chroms = result_chroms
self.result_genes = result_genes
self.result_samples = result_samples
def get_triclusters(self):
"""
Returns the triclusters found by the algorithm.
"""
return self.result_chroms, self.result_genes, self.result_samples | 0.880181 | 0.739352 |
# Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from ocean_keeper.account import Account
from ocean_keeper.utils import get_account
Balance = namedtuple('Balance', ('eth', 'ocn'))
class OceanAccounts:
"""Ocean accounts class."""
def __init__(self, keeper, config, ocean_tokens):
self._keeper = keeper
self._config = config
self._ocean_tokens = ocean_tokens
self._accounts = []
addresses = [account_address for account_address in self._keeper.accounts]
for address in addresses:
for account in [get_account(0), get_account(1)]:
if account and account.address.lower() == address.lower():
self._accounts.append(account)
break
@property
def accounts_addresses(self):
"""
Return a list with the account addresses.
:return: list
"""
return [a.address for a in self._accounts]
def list(self):
"""
Return list of Account instances available in the current ethereum node
:return: list of Account instances
"""
return self._accounts[:]
def balance(self, account):
"""
Return the balance, a tuple with the eth and ocn balance.
:param account: Account instance to return the balance of
:return: Balance tuple of (eth, ocn)
"""
return Balance(self._keeper.get_ether_balance(account.address),
self._keeper.token.get_token_balance(account.address))
def request_tokens(self, account, amount):
"""
Request an amount of ocean tokens for an account.
:param account: Account instance making the tokens request
:param amount: int amount of tokens requested
:raises OceanInvalidTransaction: if transaction fails
:return: bool
"""
return self._ocean_tokens.request(account, amount) | squid_py/ocean/ocean_accounts.py | # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from ocean_keeper.account import Account
from ocean_keeper.utils import get_account
Balance = namedtuple('Balance', ('eth', 'ocn'))
class OceanAccounts:
"""Ocean accounts class."""
def __init__(self, keeper, config, ocean_tokens):
self._keeper = keeper
self._config = config
self._ocean_tokens = ocean_tokens
self._accounts = []
addresses = [account_address for account_address in self._keeper.accounts]
for address in addresses:
for account in [get_account(0), get_account(1)]:
if account and account.address.lower() == address.lower():
self._accounts.append(account)
break
@property
def accounts_addresses(self):
"""
Return a list with the account addresses.
:return: list
"""
return [a.address for a in self._accounts]
def list(self):
"""
Return list of Account instances available in the current ethereum node
:return: list of Account instances
"""
return self._accounts[:]
def balance(self, account):
"""
Return the balance, a tuple with the eth and ocn balance.
:param account: Account instance to return the balance of
:return: Balance tuple of (eth, ocn)
"""
return Balance(self._keeper.get_ether_balance(account.address),
self._keeper.token.get_token_balance(account.address))
def request_tokens(self, account, amount):
"""
Request an amount of ocean tokens for an account.
:param account: Account instance making the tokens request
:param amount: int amount of tokens requested
:raises OceanInvalidTransaction: if transaction fails
:return: bool
"""
return self._ocean_tokens.request(account, amount) | 0.910137 | 0.232691 |
from PyQt5.QtWidgets import QWidget, QGridLayout, QListView, QPushButton, \
QDialog
from PyQt5.QtCore import QSize, Qt
from app.resources.resources import RESOURCES
from app.editor.data_editor import SingleResourceEditor, MultiResourceEditor
from app.editor.icon_editor import icon_model
class IconTab(QWidget):
    """Widget showing a grid of icon sheets plus a button to add new sheets.

    Parameters
    ----------
    data : resource collection the tab displays
    title : str window title for the tab
    model : model class (e.g. an icon_model class) instantiated over ``data``
    parent : optional parent widget
    """

    def __init__(self, data, title, model, parent=None):
        super().__init__(parent)
        self.window = parent
        self._data = data
        self.title = title

        self.setWindowTitle(self.title)
        self.setStyleSheet("font: 10pt;")

        self.layout = QGridLayout(self)
        self.setLayout(self.layout)

        # Icon grid view: static 80x80 cells showing 64x64 icons.
        self.view = IconListView()
        self.view.setMinimumSize(360, 360)
        self.view.setUniformItemSizes(True)
        self.view.setViewMode(QListView.IconMode)
        self.view.setResizeMode(QListView.Adjust)
        self.view.setMovement(QListView.Static)
        self.view.setIconSize(QSize(64, 64))
        self.view.setGridSize(QSize(80, 80))

        self.model = model(self._data, self)
        self.view.setModel(self.model)
        self.layout.addWidget(self.view, 0, 0, 1, 2)

        self.button = QPushButton("Add New Icon Sheet...")
        self.button.clicked.connect(self.model.append)
        self.layout.addWidget(self.button, 1, 0, 1, 1)

        self.display = None

    def update_list(self):
        """Tell the view the model's layout changed so it redraws."""
        self.model.layoutChanged.emit()

    def reset(self):
        """No state to reset; present for interface compatibility."""
        pass

    @property
    def current(self):
        """Return the currently selected icon, or None if nothing is selected.

        If the icon belongs to a parent sheet, its nid is rewritten to the
        parent's nid before being returned.
        """
        selected = self.view.selectionModel().selectedIndexes()
        if not selected:
            return None
        icon = self.model.sub_data[selected[0].row()]
        if icon.parent_nid:
            icon.nid = icon.parent_nid
        return icon
class IconListView(QListView):
    """List view that deletes the selected icon sheets on the Delete key."""

    def delete(self, index):
        """Remove the row behind *index* from the backing model."""
        self.model().delete(index.row())

    def keyPressEvent(self, event):
        super().keyPressEvent(event)
        if event.key() == Qt.Key_Delete:
            indices = self.selectionModel().selectedIndexes()
            # BUG FIX: delete from the highest row down.  Removing a row shifts
            # every later row up, so iterating the selection in ascending order
            # deleted the wrong (or a nonexistent) entry when several rows were
            # selected.
            for index in sorted(indices, key=lambda idx: idx.row(), reverse=True):
                self.delete(index)
class Icon16Database(IconTab):
    """Tab listing the 16x16 icon sheets."""

    @classmethod
    def create(cls, parent=None):
        """Build the tab wired to the 16x16 icon resources.

        :return: new tab instance
        """
        # The unused `deletion_criteria` local was removed.
        data = RESOURCES.icons16
        return cls(data, "16x16 Icon", icon_model.Icon16Model, parent)
class Icon32Database(Icon16Database):
    """Tab listing the 32x32 icon sheets."""

    @classmethod
    def create(cls, parent=None):
        """Build the tab wired to the 32x32 icon resources.

        :return: new tab instance
        """
        # The unused `deletion_criteria` local was removed.
        data = RESOURCES.icons32
        return cls(data, "32x32 Icon", icon_model.Icon32Model, parent)
class Icon80Database(Icon16Database):
    """Tab listing the 80x72 icon sheets."""

    @classmethod
    def create(cls, parent=None):
        """Build the tab wired to the 80x72 icon resources.

        :return: new tab instance
        """
        # The unused `deletion_criteria` local was removed.
        data = RESOURCES.icons80
        return cls(data, "80x72 Icon", icon_model.Icon80Model, parent)
class MapIconDatabase(IconTab):
    """Tab listing the map icons."""

    @classmethod
    def create(cls, parent=None):
        """Build the tab wired to the map-icon resources.

        :return: new tab instance
        """
        # The unused `deletion_criteria` local was removed.
        data = RESOURCES.map_icons
        return cls(data, 'Map Icons', icon_model.MapIconModel, parent)

    @property
    def current(self):
        """Currently selected map icon, or None.

        Unlike IconTab.current, map icons have no parent sheet, so the nid is
        returned untouched.
        """
        indices = self.view.selectionModel().selectedIndexes()
        if indices:
            index = indices[0]
            icon = self.model.sub_data[index.row()]
            return icon
        return None
def get_map_icon_editor():
    """Open a modal editor over the map-icon resources.

    :return: tuple of (selected icon or None, bool accepted)
    """
    editor = SingleResourceEditor(MapIconDatabase, ['map_icons'])
    if editor.exec_() == QDialog.Accepted:
        return editor.tab.current, True
    return None, False
def get(width):
    """Open a modal icon editor for sheets of the given pixel width.

    :param width: 16, 32 or 80; anything else is rejected
    :return: tuple of (selected icon or None, bool accepted)
    """
    resource_types = {16: 'icons16', 32: 'icons32', 80: 'icons80'}
    resource_type = resource_types.get(width)
    if resource_type is None:
        return None, False
    databases = {16: Icon16Database, 32: Icon32Database, 80: Icon80Database}
    window = SingleResourceEditor(databases[width], [resource_type])
    if window.exec_() == QDialog.Accepted:
        return window.tab.current, True
    return None, False
def get_full_editor():
    """Create a combined editor covering every icon resource type."""
    tabs = (Icon16Database, Icon32Database, Icon80Database, MapIconDatabase)
    resource_keys = ('icons16', 'icons32', 'icons80', 'map_icons')
    return MultiResourceEditor(tabs, resource_keys)
# Testing
# Run "python -m app.editor.icon_editor.icon_tab" from main directory
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    # Load the default project's resources so the editors have data to show.
    RESOURCES.load('default.ltproj')
    # DB.load('default.ltproj')
    window = MultiResourceEditor((Icon16Database, Icon32Database, Icon80Database),
                                 ('icons16', 'icons32', 'icons80'))
    window.show()
app.exec_() | app/editor/icon_editor/icon_tab.py | from PyQt5.QtWidgets import QWidget, QGridLayout, QListView, QPushButton, \
QDialog
from PyQt5.QtCore import QSize, Qt
from app.resources.resources import RESOURCES
from app.editor.data_editor import SingleResourceEditor, MultiResourceEditor
from app.editor.icon_editor import icon_model
class IconTab(QWidget):
def __init__(self, data, title, model, parent=None):
super().__init__(parent)
self.window = parent
self._data = data
self.title = title
self.setWindowTitle(self.title)
self.setStyleSheet("font: 10pt;")
self.layout = QGridLayout(self)
self.setLayout(self.layout)
self.view = IconListView()
self.view.setMinimumSize(360, 360)
self.view.setUniformItemSizes(True)
self.view.setIconSize(QSize(64, 64))
self.model = model(self._data, self)
self.view.setModel(self.model)
self.view.setViewMode(QListView.IconMode)
self.view.setResizeMode(QListView.Adjust)
self.view.setMovement(QListView.Static)
self.view.setGridSize(QSize(80, 80))
self.layout.addWidget(self.view, 0, 0, 1, 2)
self.button = QPushButton("Add New Icon Sheet...")
self.button.clicked.connect(self.model.append)
self.layout.addWidget(self.button, 1, 0, 1, 1)
self.display = None
def update_list(self):
# self.model.dataChanged.emit(self.model.index(0), self.model.index(self.model.rowCount()))
self.model.layoutChanged.emit()
def reset(self):
pass
@property
def current(self):
indices = self.view.selectionModel().selectedIndexes()
if indices:
index = indices[0]
icon = self.model.sub_data[index.row()]
if icon.parent_nid:
icon.nid = icon.parent_nid
return icon
return None
class IconListView(QListView):
def delete(self, index):
self.model().delete(index.row())
def keyPressEvent(self, event):
super().keyPressEvent(event)
if event.key() == Qt.Key_Delete:
indices = self.selectionModel().selectedIndexes()
for index in indices:
self.delete(index)
class Icon16Database(IconTab):
@classmethod
def create(cls, parent=None):
data = RESOURCES.icons16
title = "16x16 Icon"
collection_model = icon_model.Icon16Model
deletion_criteria = None
dialog = cls(data, title, collection_model, parent)
return dialog
class Icon32Database(Icon16Database):
@classmethod
def create(cls, parent=None):
data = RESOURCES.icons32
title = "32x32 Icon"
collection_model = icon_model.Icon32Model
deletion_criteria = None
dialog = cls(data, title, collection_model, parent)
return dialog
class Icon80Database(Icon16Database):
@classmethod
def create(cls, parent=None):
data = RESOURCES.icons80
title = "80x72 Icon"
collection_model = icon_model.Icon80Model
deletion_criteria = None
dialog = cls(data, title, collection_model, parent)
return dialog
class MapIconDatabase(IconTab):
@classmethod
def create(cls, parent=None):
data = RESOURCES.map_icons
title = 'Map Icons'
collection_model = icon_model.MapIconModel
deletion_criteria = None
dialog = cls(data, title, collection_model, parent)
return dialog
@property
def current(self):
indices = self.view.selectionModel().selectedIndexes()
if indices:
index = indices[0]
icon = self.model.sub_data[index.row()]
return icon
return None
def get_map_icon_editor():
database = MapIconDatabase
window = SingleResourceEditor(database, ['map_icons'])
result = window.exec_()
if result == QDialog.Accepted:
selected_icon = window.tab.current
return selected_icon, True
else:
return None, False
def get(width):
if width == 16:
resource_type = 'icons16'
database = Icon16Database
elif width == 32:
resource_type = 'icons32'
database = Icon32Database
elif width == 80:
resource_type = 'icons80'
database = Icon80Database
else:
return None, False
window = SingleResourceEditor(database, [resource_type])
result = window.exec_()
if result == QDialog.Accepted:
selected_icon = window.tab.current
return selected_icon, True
else:
return None, False
def get_full_editor():
return MultiResourceEditor((Icon16Database, Icon32Database, Icon80Database, MapIconDatabase),
('icons16', 'icons32', 'icons80', 'map_icons'))
# Testing
# Run "python -m app.editor.icon_editor.icon_tab" from main directory
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
RESOURCES.load('default.ltproj')
# DB.load('default.ltproj')
window = MultiResourceEditor((Icon16Database, Icon32Database, Icon80Database),
('icons16', 'icons32', 'icons80'))
window.show()
app.exec_() | 0.529507 | 0.088151 |
import sys
import os
sys.path.append(os.getcwd())
import json
import time
from threading import Thread, Event
from scripts.prettyCode.prettyPrint import PrettyPrint
from scripts.windows.windows import BaseWindowsControl, FindTheFile
from scripts.windows.journalist import BasicLogs
PRETTYPRINT = PrettyPrint()
class GameControl():
    def __init__(self, queue, *args, **kwargs) -> None:
        """Wire up logging, load the case config, and prepare monitor state.

        :param queue: queue shared with the dispatcher (the D-G-P channel)
        :keyword logName: required; log name passed to BasicLogs.handler
        """
        logName = kwargs.get('logName', None)
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # ValueError would survive optimized runs.
        assert logName, 'Can not find logname.'
        self.logObj = BasicLogs.handler(logName=logName, mark='dispatch')
        self.logObj.logHandler().info('Initialize GameControl(gameControl) class instance.')
        # Relative path: presumably the process CWD sits one level below the
        # project root -- TODO confirm.
        with open(r'..\config\case.json', 'r', encoding='utf-8') as f:
            self.controlConfig = json.load(f)
        # Seconds the client is allowed to live in semi-automatic debug mode.
        self.sumiAutoCaseTime = self.controlConfig.get('Debug').get('ClientSurvivalTime')
        self.processName = 'JX3ClientX64.exe'
        # Event gating autoMonitorControl(): set -> run, cleared -> pause.
        self.autoMonitorControlFlag = Event()
        # Marker file name -> queue token, or a callable invoked with the
        # process name (completed.done kills the client).
        self.statusDict = {
            'start.done': 'start',
            'completed.done': BaseWindowsControl.killProcess,
        }
        self.queue = queue
        # Cooperative shutdown flag read by autoMonitorControl().
        self.exit = False
def autoMonitorControl(self, path):
startResultExists = False
completedResultExists = False
while 1:
self.autoMonitorControlFlag.wait()
if self.exit:
self.exit = False
break
if not startResultExists:
PRETTYPRINT.pPrint('Auto Monitor Control 等待 start result 文件中')
self.logObj.logHandler().info('Auto Monitor Control waits in the start result file.')
elif not completedResultExists:
PRETTYPRINT.pPrint('Auto Monitor Control 等待 completed result 文件中')
self.logObj.logHandler().info('Auto Monitor Control waits in the completed result file.')
for file in os.listdir(path):
if file.endswith('.done'):
result = self.statusDict.get(file, None)
PRETTYPRINT.pPrint('获取到 result 文件,状态更新')
self.logObj.logHandler('Get the result file, status update.')
if callable(result):
result(self.processName)
self.queue.put('completed')
PRETTYPRINT.pPrint('识别到 lua case 已经执行完成,游戏退出,标识符已推入线程队列(D-G-P)')
self.logObj.logHandler().info('It is recognized that the lua case has been executed, the game exits, and the identifier has been pushed into the thread queue (D-G-P).')
completedResultExists = True
else:
self.queue.put(result)
PRETTYPRINT.pPrint('识别到 result 文件,result 值为: {},已推入线程队列 (D-G-P)'.format(result))
self.logObj.logHandler().info('The result file is recognized, the result value is: {}, which has been pushed into the thread queue (D-G-P).'.format(result))
startResultExists = True
newFile = '{}.scanned'.format(file)
os.rename(
os.path.join(path, file),
os.path.join(path, newFile)
)
PRETTYPRINT.pPrint('结果文件名更换: {} -> {}'.format(file, newFile))
self.logObj.logHandler().info('Result file name replacement: {} -> {}'.format(file, newFile))
time.sleep(2)
def semiAutoMaticDebugControl(self):
i = 0
while 1:
self.logObj.logHandler().info('SEMI-AUTOMATIC DEBUG - Game Control.')
PRETTYPRINT.pPrint('=========================SEMI-AUTOMATIC DEBUG - 游戏内操作=========================')
PRETTYPRINT.pPrint('客户端已存活时间(秒): {},案例时间: {}'.format(i, self.sumiAutoCaseTime))
self.logObj.logHandler().info('Client alive time (seconds): {}, case time: {}'.format(i, self.sumiAutoCaseTime))
if i >= self.sumiAutoCaseTime:
break
i += 1
time.sleep(1)
PRETTYPRINT.pPrint('客户端存活时间结束,尝试结束游戏')
self.logObj.logHandler('Client survival time is over, try to end the game.')
BaseWindowsControl.killProcess(self.processName)
def _createNewThread(self, func, name, path, *args, **kwargs) -> Thread:
print(*args)
t = Thread(target=func, name=name, args=(path, ))
self.logObj.logHandler().info('gameControl.py - Child thread object has been generated: {}, child process name: {}'.format(t, name))
return t
    def _startAutoMonitorControlFlag(self):
        """Resume autoMonitorControl(): wake the Event it waits on."""
        self.autoMonitorControlFlag.set()

    def _pauseAutoMonitorControlFlag(self):
        """Pause autoMonitorControl() at the top of its loop."""
        self.autoMonitorControlFlag.clear()

    def _stopAutoMonitorControlFlag(self):
        """Ask autoMonitorControl() to exit.

        Only takes effect once the flag is set so its wait() returns.
        """
        self.exit = True
def dispatch(self, path):
monitorThread = self._createNewThread(self.autoMonitorControl, name='IDFileMonitoringThread', path=path)
monitorThread.start()
class DEBUGGAMECONTROL():
    """Debug stand-ins that simulate the game side of the control flow."""

    def debugGameControl(self):
        """Fake an in-game session: mark ready at 5s, kill the client at 10s."""
        i = 0
        while 1:
            PRETTYPRINT.pPrint('=========================DEBUG - 游戏内操作 -> {}========================='.format(i))
            if i == 5:
                with open(r'..\caches\GameStatus.json', 'w', encoding='utf-8') as f:
                    data = {'orReady': 1}
                    json.dump(data, f, indent=4)
            if i == 10:
                # Close the game client.
                processName = 'JX3ClientX64.exe'
                PRETTYPRINT.pPrint('尝试结束游戏')
                BaseWindowsControl.killProcess(processName)
                break
            i += 1
            time.sleep(1)

    @staticmethod
    def debugCreateEndFile(path):
        """Drop a ``completed.done`` marker file into *path*."""
        endFile = os.path.join(path, 'completed.done')
        with open(endFile, 'w', encoding='utf-8') as f:
            # BUG FIX: was `f.writable('sb')` -- writable() takes no argument
            # and only reports stream capability; write() puts the payload in
            # the file.
            f.write('sb')

    @staticmethod
    def debugCreateStartFile(path):
        """Drop a ``start.done`` marker file into *path*, then linger a minute
        to mimic a running case."""
        startFile = os.path.join(path, 'start.done')
        with open(startFile, 'w', encoding='utf-8') as f:
            # BUG FIX: was `f.writable('sb')` (see debugCreateEndFile).
            f.write('sb')
        time.sleep(60)
if __name__ == '__main__':
BaseWindowsControl.killProcess('JX3ClientX64.exe') | scripts/game/gameControl.py | import sys
import os
sys.path.append(os.getcwd())
import json
import time
from threading import Thread, Event
from scripts.prettyCode.prettyPrint import PrettyPrint
from scripts.windows.windows import BaseWindowsControl, FindTheFile
from scripts.windows.journalist import BasicLogs
PRETTYPRINT = PrettyPrint()
class GameControl():
def __init__(self, queue, *args, **kwargs) -> None:
logName = kwargs.get('logName', None)
assert logName, 'Can not find logname.'
self.logObj = BasicLogs.handler(logName=logName, mark='dispatch')
self.logObj.logHandler().info('Initialize GameControl(gameControl) class instance.')
with open(r'..\config\case.json', 'r', encoding='utf-8') as f:
self.controlConfig = json.load(f)
self.sumiAutoCaseTime = self.controlConfig.get('Debug').get('ClientSurvivalTime')
self.processName = 'JX3ClientX64.exe'
self.autoMonitorControlFlag = Event()
self.statusDict = {
'start.done': 'start',
'completed.done': BaseWindowsControl.killProcess,
}
self.queue = queue
self.exit = False
def autoMonitorControl(self, path):
startResultExists = False
completedResultExists = False
while 1:
self.autoMonitorControlFlag.wait()
if self.exit:
self.exit = False
break
if not startResultExists:
PRETTYPRINT.pPrint('Auto Monitor Control 等待 start result 文件中')
self.logObj.logHandler().info('Auto Monitor Control waits in the start result file.')
elif not completedResultExists:
PRETTYPRINT.pPrint('Auto Monitor Control 等待 completed result 文件中')
self.logObj.logHandler().info('Auto Monitor Control waits in the completed result file.')
for file in os.listdir(path):
if file.endswith('.done'):
result = self.statusDict.get(file, None)
PRETTYPRINT.pPrint('获取到 result 文件,状态更新')
self.logObj.logHandler('Get the result file, status update.')
if callable(result):
result(self.processName)
self.queue.put('completed')
PRETTYPRINT.pPrint('识别到 lua case 已经执行完成,游戏退出,标识符已推入线程队列(D-G-P)')
self.logObj.logHandler().info('It is recognized that the lua case has been executed, the game exits, and the identifier has been pushed into the thread queue (D-G-P).')
completedResultExists = True
else:
self.queue.put(result)
PRETTYPRINT.pPrint('识别到 result 文件,result 值为: {},已推入线程队列 (D-G-P)'.format(result))
self.logObj.logHandler().info('The result file is recognized, the result value is: {}, which has been pushed into the thread queue (D-G-P).'.format(result))
startResultExists = True
newFile = '{}.scanned'.format(file)
os.rename(
os.path.join(path, file),
os.path.join(path, newFile)
)
PRETTYPRINT.pPrint('结果文件名更换: {} -> {}'.format(file, newFile))
self.logObj.logHandler().info('Result file name replacement: {} -> {}'.format(file, newFile))
time.sleep(2)
def semiAutoMaticDebugControl(self):
i = 0
while 1:
self.logObj.logHandler().info('SEMI-AUTOMATIC DEBUG - Game Control.')
PRETTYPRINT.pPrint('=========================SEMI-AUTOMATIC DEBUG - 游戏内操作=========================')
PRETTYPRINT.pPrint('客户端已存活时间(秒): {},案例时间: {}'.format(i, self.sumiAutoCaseTime))
self.logObj.logHandler().info('Client alive time (seconds): {}, case time: {}'.format(i, self.sumiAutoCaseTime))
if i >= self.sumiAutoCaseTime:
break
i += 1
time.sleep(1)
PRETTYPRINT.pPrint('客户端存活时间结束,尝试结束游戏')
self.logObj.logHandler('Client survival time is over, try to end the game.')
BaseWindowsControl.killProcess(self.processName)
def _createNewThread(self, func, name, path, *args, **kwargs) -> Thread:
print(*args)
t = Thread(target=func, name=name, args=(path, ))
self.logObj.logHandler().info('gameControl.py - Child thread object has been generated: {}, child process name: {}'.format(t, name))
return t
def _startAutoMonitorControlFlag(self):
self.autoMonitorControlFlag.set()
def _pauseAutoMonitorControlFlag(self):
self.autoMonitorControlFlag.clear()
def _stopAutoMonitorControlFlag(self):
self.exit = True
def dispatch(self, path):
monitorThread = self._createNewThread(self.autoMonitorControl, name='IDFileMonitoringThread', path=path)
monitorThread.start()
class DEBUGGAMECONTROL():
def debugGameControl(self, ):
i = 0
while 1:
PRETTYPRINT.pPrint('=========================DEBUG - 游戏内操作 -> {}========================='.format(i))
if i == 5:
with open(r'..\caches\GameStatus.json', 'w', encoding='utf-8') as f:
data = {'orReady': 1}
json.dump(data, f, indent=4)
if i == 10:
# 关闭游戏
processName = 'JX3ClientX64.exe'
PRETTYPRINT.pPrint('尝试结束游戏')
BaseWindowsControl.killProcess(processName)
break
i += 1
time.sleep(1)
@staticmethod
def debugCreateEndFile(path):
endFile = os.path.join(path, 'completed.done')
with open(endFile, 'w', encoding='utf-8') as f:
f.writable('sb')
@staticmethod
def debugCreateStartFile(path):
startFile = os.path.join(path, 'start.done')
with open(startFile, 'w', encoding='utf-8') as f:
f.writable('sb')
time.sleep(60)
if __name__ == '__main__':
BaseWindowsControl.killProcess('JX3ClientX64.exe') | 0.196788 | 0.066873 |
import numpy as np
from cascade.core.form import (
Form,
BoolField,
IntField,
FloatField,
StrField,
StringListField,
ListField,
OptionField,
FormList,
Dummy,
)
from cascade.model import priors
from cascade.core.log import getLoggers
CODELOG, MATHLOG = getLoggers(__name__)
class SmoothingPrior(Form):
    """One prior statement (value, age-difference, or time-difference) over a
    rectangle of the age-time grid.

    Successful validation stores the constructed ``cascade.model.priors``
    object on ``self.prior_object``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.prior_object = None

    prior_type = OptionField(["dage", "dtime", "value"])
    age_lower = FloatField(nullable=True, display="Age lower")
    age_upper = FloatField(nullable=True, display="Age upper")
    time_lower = FloatField(nullable=True, display="Time lower")
    time_upper = FloatField(nullable=True, display="Time upper")
    born_lower = FloatField(nullable=True, display="Born lower")
    born_upper = FloatField(nullable=True, display="Born upper")
    density = OptionField(
        ["uniform", "gaussian", "laplace", "students", "log_gaussian", "log_laplace", "log_students"], display="Density"
    )
    min = FloatField(nullable=True, default=float("-inf"), display="Min")
    mean = FloatField(nullable=True, display="Mean")
    max = FloatField(nullable=True, default=float("inf"), display="Max")
    std = FloatField(nullable=True, display="Std")
    nu = FloatField(nullable=True)
    eta = FloatField(nullable=True)

    def _full_form_validation(self, root):  # noqa: C901 too complex
        """Check bound ordering and build the prior object for this density.

        :param root: Configuration root form; supplies eta/nu fallbacks
        :return: list of error strings; empty when valid
        """
        errors = []
        # BUG FIX: both conditions previously re-tested the *_lower field
        # twice ("age_lower"/"time_lower"), so the bound comparison could run
        # against an unset upper field.
        if not self.is_field_unset("age_lower") and not self.is_field_unset("age_upper"):
            if self.age_lower > self.age_upper:
                errors.append("age_lower must be less than or equal to age_upper")
        if not self.is_field_unset("time_lower") and not self.is_field_unset("time_upper"):
            if self.time_lower > self.time_upper:
                errors.append("time_lower must be less than or equal to time_upper")
        try:
            lower = self.min
            upper = self.max
            mean = self.mean
            # Unset mean on a half-open interval defaults to a finite
            # representative point (0, or the lower bound if positive).
            if mean is None and (np.isinf(lower) or np.isinf(upper)):
                mean = max(lower, 0)  # builtin max(), not the field above
            std = self.std
            # nu falls back to the global (log_)students_dof setting.
            if self.nu is None:
                if self.density == "students" and not root.is_field_unset("students_dof"):
                    nu = root.students_dof.priors
                elif self.density == "log_students" and not root.is_field_unset("log_students_dof"):
                    nu = root.log_students_dof.priors
                else:
                    nu = None
            else:
                nu = self.nu
            # eta falls back to the global eta for priors.
            if self.eta is None:
                if not root.is_field_unset("eta"):
                    eta = root.eta.priors
                else:
                    eta = None
            else:
                eta = self.eta
            if self.density == "uniform":
                self.prior_object = priors.Uniform(lower, upper, mean)
            elif self.density == "gaussian":
                self.prior_object = priors.Gaussian(mean, std, lower, upper)
            elif self.density == "laplace":
                self.prior_object = priors.Laplace(mean, std, lower, upper)
            elif self.density == "students":
                self.prior_object = priors.StudentsT(mean, std, nu, lower, upper)
            elif self.density == "log_gaussian":
                self.prior_object = priors.LogGaussian(mean, std, eta, lower, upper)
            elif self.density == "log_laplace":
                self.prior_object = priors.LogLaplace(mean, std, eta, lower, upper)
            elif self.density == "log_students":
                self.prior_object = priors.LogStudentsT(mean, std, nu, eta, lower, upper)
            else:
                errors.append(f"Unknown density '{self.density}'")
        except priors.PriorError as e:
            errors.append(f"Parameters incompatible with density '{self.density}': {str(e)}")
        return errors
class SmoothingPriorGroup(Form):
    """The dage/dtime/value prior triple attached to a smoothing."""
    dage = SmoothingPrior(name_field="prior_type", nullable=True, display="Age diff")
    dtime = SmoothingPrior(name_field="prior_type", nullable=True, display="Time diff")
    value = SmoothingPrior(name_field="prior_type", nullable=True, display="Values")
class Smoothing(Form):
    """A rate's smoothing: its age/time grid plus priors, with validation."""
    rate = OptionField(["pini", "iota", "rho", "chi", "omega"], "Rate")
    location = IntField(nullable=True)
    age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
    time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
    default = SmoothingPriorGroup(display="Defaults")
    mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
    detail = FormList(SmoothingPrior, nullable=True, display="Detail")
    age_time_specific = IntField(display="Age and Time specific", nullable=True)
    custom_age_grid = Dummy()
    custom_time_grid = Dummy()

    def _full_form_validation(self, root):
        """Validate grid extents against priors and rate sign constraints.

        :param root: Configuration root, used for default grid fallbacks
        :return: list of error strings; empty when valid
        """
        errors = []
        if self.rate == "pini":
            # Prevalence-at-birth is a point condition, not an age curve.
            if not self.is_field_unset("age_grid") and len(self.age_grid) != 1:
                errors.append("Pini must have exactly one age point")
        else:
            age_grid = self.age_grid or root.model.default_age_grid
            if len(age_grid) > 1 and self.default.is_field_unset("dage"):
                errors.append("You must supply a default age diff prior if the smoothing has extent over age")
        time_grid = self.time_grid or root.model.default_time_grid
        if len(time_grid) > 1 and self.default.is_field_unset("dtime"):
            errors.append("You must supply a default time diff prior if the smoothing has extent over time")
        if self._container._name == "rate":
            # This validation only makes sense for Fixed Effects not Random Effects
            # TODO This repeats validation logic in cascade.model.rates but I don't see a good way to bring that in here
            is_negative = True
            is_positive = True
            for prior in [self.default.value] + [p for p in self.detail or [] if p.prior_type == "value"]:
                # "negative" here means constrained to exactly zero;
                # "positive" means bounded strictly above zero.
                is_negative = is_negative and prior.min == 0 and prior.max == 0
                is_positive = is_positive and prior.min > 0
                if prior.min < 0:
                    errors.append("Rates must be constrained to be >= 0 at all points. Add or correct the lower bound")
                    break
            if self.rate in ["iota", "rho"]:
                if not (is_negative or is_positive):
                    errors.append(f"Rate {self.rate} must be either fully positive or constrained to zero")
        return errors
class StudyCovariate(Form):
    """A study covariate multiplier and the smoothing over its effect."""
    # Haven't seen if this is a string or an ID for the column in the bundle.
    study_covariate_id = IntField(display="Covariate")
    measure_id = IntField(display="Measure")
    # Whether the covariate multiplies a rate, the measured value, or its std.
    mulcov_type = OptionField(["rate_value", "meas_value", "meas_std"], display="Multiplier type")
    transformation = IntField(display="Transformation")
    age_time_specific = IntField(display="Age and Time specific")
    age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
    time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
    default = SmoothingPriorGroup(display="Defaults")
    mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
    detail = FormList(SmoothingPrior, nullable=True, display="Detail")
    custom_age_grid = Dummy()
    custom_time_grid = Dummy()
class CountryCovariate(Form):
    """A country covariate multiplier and the smoothing over its effect."""
    country_covariate_id = IntField(display="Covariate")
    measure_id = IntField(display="Measure")
    # Whether the covariate multiplies a rate, the measured value, or its std.
    mulcov_type = OptionField(["rate_value", "meas_value", "meas_std"], display="Multiplier type")
    transformation = IntField(display="Transformation")
    age_time_specific = IntField(display="Age and Time specific")
    age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
    time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
    default = SmoothingPriorGroup(display="Defaults")
    mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
    detail = FormList(SmoothingPrior, nullable=True, display="Detail")
    custom_age_grid = Dummy()
    custom_time_grid = Dummy()
class Model(Form):
    """Top-level model settings: identity, drill scope, grids, solver knobs."""
    modelable_entity_id = IntField()
    model_version_id = IntField(nullable=True)
    random_seed = IntField()
    minimum_meas_cv = FloatField(nullable=True, display="Data CV floor")
    add_csmr_cause = IntField(nullable=True, display="CSMR cause")
    title = StrField(nullable=True, display="Title")
    description = StrField(nullable=True, display="Description")
    bundle_id = IntField(nullable=True, display="Data bundle")
    # "cascade" runs the full location hierarchy; "drill" a single path.
    drill = OptionField(["cascade", "drill"], display="Drill")
    drill_location = IntField(display="Drill location", nullable=True)
    drill_location_start = IntField(display="Drill location start", nullable=True)
    drill_location_end = IntField(display="Drill location end", nullable=True)
    drill_sex = OptionField([1, 2], constructor=int, nullable=True, display="Drill sex")
    birth_prev = OptionField([0, 1], constructor=int, nullable=True, default=0, display="Prevalence at birth")
    default_age_grid = StringListField(constructor=float, display="(Cascade) Age grid")
    default_time_grid = StringListField(constructor=float, display="(Cascade) Time grid")
    constrain_omega = OptionField([0, 1], constructor=int, nullable=False, display="Constrain other cause mortality")
    exclude_data_for_param = ListField(constructor=int, nullable=True, display="Exclude data for parameter")
    ode_step_size = FloatField(display="ODE step size")
    additional_ode_steps = StringListField(constructor=float, nullable=True,
                                           display="Advanced additional ODE steps")
    split_sex = OptionField(["most_detailed", "1", "2", "3", "4", "5"], display="Split sex (Being used as Drill Start)")
    quasi_fixed = OptionField([0, 1], default=0, constructor=int, nullable=True)
    zero_sum_random = ListField(nullable=True, display="Zero-sum random effects")
    bound_frac_fixed = FloatField(
        default=1e-2, nullable=True,
        display="allowed modification to point to move it within bounds"
    )
    bound_random = FloatField(
        nullable=True,
        display="allowed modification to point to move it within bounds"
    )
    rate_case = Dummy()
    data_density = StrField(nullable=True, display="Data density")

    def _full_form_validation(self, root):
        """Cross-field check: a drill run must say which sex to drill.

        :param root: Configuration root form (unused here)
        :return: list of error strings; empty when valid
        """
        errors = []
        if self.drill == "drill":
            if self.is_field_unset("drill_sex"):
                errors.append("For a drill, please specify Drill sex.")
        return errors
class Eta(Form):
    """Global eta values, split between priors and the data likelihood."""
    priors = FloatField(nullable=True)
    data = FloatField(nullable=True)


class DataEta(Form):
    """Per-integrand override of the data eta."""
    integrand_measure_id = IntField(nullable=True)
    value = FloatField(nullable=True)


class DataDensity(Form):
    """Per-integrand override of the data density name."""
    value = StrField(nullable=True)
    integrand_measure_id = IntField(nullable=True)


class StudentsDOF(Form):
    """Degrees of freedom for the (log-)Students densities, for priors and data."""
    priors = FloatField(nullable=True, default=5)
    data = FloatField(nullable=True, default=5)
class DerivativeTest(Form):
    """Derivative-test mode, separately for the fixed and random optimizations."""
    fixed = OptionField(
        ["none", "first-order", "second-order", "only-second-order",
         "adaptive", "trace-adaptive"],
        default="none",
        display="test for these derivatives",
        nullable=True
    )
    random = OptionField(
        ["none", "first-order", "second-order", "only-second-order",
         "adaptive", "trace-adaptive"],
        default="none",
        display="test for these derivatives",
        nullable=True
    )


class FixedRandomInt(Form):
    """An integer option given separately for fixed and random fits."""
    fixed = IntField(nullable=True)
    random = IntField(nullable=True)


class FixedRandomFloat(Form):
    """A float option given separately for fixed and random fits."""
    fixed = FloatField(nullable=True)
    random = FloatField(nullable=True)


class RandomEffectBound(Form):
    """A per-location bound value for random effects."""
    location = IntField(nullable=True)
    value = FloatField(nullable=True)
class Policies(Form):
    """Site-wide defaults and switches that are policy rather than model math."""
    estimate_emr_from_prevalence = OptionField(
        [0, 1], constructor=int, default=0, display="Estimate EMR from prevalance", nullable=True
    )
    use_weighted_age_group_midpoints = OptionField([1, 0], default=1, constructor=int, nullable=True)
    number_of_fixed_effect_samples = IntField(default=30, nullable=True)
    with_hiv = BoolField(default=True, nullable=True, display="Whether to get ASDR with HIV deaths.")
    age_group_set_id = IntField(default=12, nullable=True, display="Age groups for analysis work.")
    exclude_relative_risk = OptionField([1, 0], default=1, constructor=int, nullable=True)
    meas_std_effect = OptionField(
        ["add_std_scale_all", "add_std_scale_log", "add_var_scale_all", "add_var_scale_log"],
        default="add_var_scale_log",
        display="Measurement standard deviation effect",
        nullable=True
    )
    limited_memory_max_history_fixed = IntField(
        default=30, nullable=True,
        display="number of most recent iterations taken into account for quasi-Newton"
    )
    # BUG FIX: the options are strings, but this field declared
    # constructor=int, so any explicit setting would go through int("fit")
    # and raise ValueError.  String options take no constructor here
    # (cf. meas_std_effect above).
    fit_strategy = OptionField(["fit", "fit_fixed_then_fit"], default="fit", nullable=True)
    decomp_step = StrField(nullable=True, default="step1")
    gbd_round_id = IntField(nullable=True, default=6)
class Configuration(Form):
    """The root Form of the whole configuration tree.

    Example::

        >>> input_data = json.loads(json_blob)
        >>> form = Configuration(input_data)
        >>> errors = form.validate_and_normalize()
        >>> if errors:
        ...     print(errors)
        ...     raise Exception("Woops")
        ... else:
        ...     print(f"Ready to configure a model for {form.model.modelable_entity_id}")
    """
    model = Model(display="Model", validation_priority=5)
    policies = Policies(display="Policies")
    gbd_round_id = IntField(display="GBD Round ID")
    random_effect = FormList(Smoothing, nullable=True, display="Random effects")
    rate = FormList(Smoothing, display="Rates")
    study_covariate = FormList(StudyCovariate, display="Study covariates")
    country_covariate = FormList(CountryCovariate, display="Country covariates")
    eta = Eta(validation_priority=5)
    students_dof = StudentsDOF(validation_priority=5)
    log_students_dof = StudentsDOF(validation_priority=5)
    csmr_cod_output_version_id = IntField()
    # Unclear how this differs from csmr_cod_output_version_id. Has same value.
    csmr_mortality_output_version_id = Dummy()
    location_set_version_id = IntField(default=429, nullable=True)
    min_cv = FormList(Dummy)
    min_cv_by_rate = FormList(Dummy)
    re_bound_location = FormList(RandomEffectBound)
    derivative_test = DerivativeTest(display="Derivative test")
    max_num_iter = FixedRandomInt(display="Max ipopt iterations")
    print_level = FixedRandomInt(display="Print level")
    accept_after_max_steps = FixedRandomInt(display="Max backtracking")
    tolerance = FixedRandomFloat(display="Desired relative convergence tolerance")
    data_eta_by_integrand = FormList(DataEta)
    data_density_by_integrand = FormList(DataDensity)
config_version = IntField(nullable=True, display="Settings version") | src/cascade/input_data/configuration/form.py | import numpy as np
from cascade.core.form import (
Form,
BoolField,
IntField,
FloatField,
StrField,
StringListField,
ListField,
OptionField,
FormList,
Dummy,
)
from cascade.model import priors
from cascade.core.log import getLoggers
CODELOG, MATHLOG = getLoggers(__name__)
class SmoothingPrior(Form):
    """Priors for smoothing.

    Validates one prior specification (density, bounds, moments) and, as a
    side effect of successful validation, builds the corresponding
    ``cascade.model.priors`` object into ``self.prior_object``.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Filled in by _full_form_validation once the fields check out.
        self.prior_object = None
    prior_type = OptionField(["dage", "dtime", "value"])
    age_lower = FloatField(nullable=True, display="Age lower")
    age_upper = FloatField(nullable=True, display="Age upper")
    time_lower = FloatField(nullable=True, display="Time lower")
    time_upper = FloatField(nullable=True, display="Time upper")
    born_lower = FloatField(nullable=True, display="Born lower")
    born_upper = FloatField(nullable=True, display="Born upper")
    density = OptionField(
        ["uniform", "gaussian", "laplace", "students", "log_gaussian", "log_laplace", "log_students"], display="Density"
    )
    min = FloatField(nullable=True, default=float("-inf"), display="Min")
    mean = FloatField(nullable=True, display="Mean")
    max = FloatField(nullable=True, default=float("inf"), display="Max")
    std = FloatField(nullable=True, display="Std")
    nu = FloatField(nullable=True)
    eta = FloatField(nullable=True)
    def _full_form_validation(self, root):  # noqa: C901 too complex
        """Cross-field validation; returns a list of error message strings.

        Falls back to root-level defaults for ``nu`` (Students-t dof) and
        ``eta`` (log-density offset), then constructs ``self.prior_object``.
        """
        errors = []
        # BUG FIX: the second operand previously re-tested "age_lower" /
        # "time_lower", so the bounds comparison also ran when only the lower
        # bound was set and compared a float against an unset upper bound.
        if not self.is_field_unset("age_lower") and not self.is_field_unset("age_upper"):
            if self.age_lower > self.age_upper:
                errors.append("age_lower must be less than or equal to age_upper")
        if not self.is_field_unset("time_lower") and not self.is_field_unset("time_upper"):
            if self.time_lower > self.time_upper:
                errors.append("time_lower must be less than or equal to time_upper")
        try:
            lower = self.min
            upper = self.max
            mean = self.mean
            if mean is None and (np.isinf(lower) or np.isinf(upper)):
                # NOTE(review): defaults the mean only when a bound is
                # infinite -- confirm downstream priors accept a None mean
                # for finite-bounded specifications.
                mean = max(lower, 0)
            std = self.std
            if self.nu is None:
                # Students-t degrees of freedom fall back to the root form.
                if self.density == "students" and not root.is_field_unset("students_dof"):
                    nu = root.students_dof.priors
                elif self.density == "log_students" and not root.is_field_unset("log_students_dof"):
                    nu = root.log_students_dof.priors
                else:
                    nu = None
            else:
                nu = self.nu
            if self.eta is None:
                # Offset for log densities falls back to the root-level eta.
                if not root.is_field_unset("eta"):
                    eta = root.eta.priors
                else:
                    eta = None
            else:
                eta = self.eta
            if self.density == "uniform":
                self.prior_object = priors.Uniform(lower, upper, mean)
            elif self.density == "gaussian":
                self.prior_object = priors.Gaussian(mean, std, lower, upper)
            elif self.density == "laplace":
                self.prior_object = priors.Laplace(mean, std, lower, upper)
            elif self.density == "students":
                self.prior_object = priors.StudentsT(mean, std, nu, lower, upper)
            elif self.density == "log_gaussian":
                self.prior_object = priors.LogGaussian(mean, std, eta, lower, upper)
            elif self.density == "log_laplace":
                self.prior_object = priors.LogLaplace(mean, std, eta, lower, upper)
            elif self.density == "log_students":
                self.prior_object = priors.LogStudentsT(mean, std, nu, eta, lower, upper)
            else:
                errors.append(f"Unknown density '{self.density}'")
        except priors.PriorError as e:
            errors.append(f"Parameters incompatible with density '{self.density}': {str(e)}")
        return errors
class SmoothingPriorGroup(Form):
    """Bundle of the three prior kinds attached to one smoothing grid."""
    # Each sub-form is tagged with its kind via the shared prior_type field.
    dage = SmoothingPrior(name_field="prior_type", nullable=True, display="Age diff")
    dtime = SmoothingPrior(name_field="prior_type", nullable=True, display="Time diff")
    value = SmoothingPrior(name_field="prior_type", nullable=True, display="Values")
class Smoothing(Form):
    """One smoothing grid for a rate or random effect: age/time grid plus priors."""
    rate = OptionField(["pini", "iota", "rho", "chi", "omega"], "Rate")
    location = IntField(nullable=True)
    age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
    time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
    default = SmoothingPriorGroup(display="Defaults")
    mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
    detail = FormList(SmoothingPrior, nullable=True, display="Detail")
    age_time_specific = IntField(display="Age and Time specific", nullable=True)
    custom_age_grid = Dummy()
    custom_time_grid = Dummy()
    def _full_form_validation(self, root):
        """Check grid/prior consistency; returns a list of error strings."""
        errors = []
        if self.rate == "pini":
            # Initial prevalence is defined at a single age only.
            if not self.is_field_unset("age_grid") and len(self.age_grid) != 1:
                errors.append("Pini must have exactly one age point")
        else:
            # Fall back to the model-wide default grids when unset.
            age_grid = self.age_grid or root.model.default_age_grid
            if len(age_grid) > 1 and self.default.is_field_unset("dage"):
                errors.append("You must supply a default age diff prior if the smoothing has extent over age")
            time_grid = self.time_grid or root.model.default_time_grid
            if len(time_grid) > 1 and self.default.is_field_unset("dtime"):
                errors.append("You must supply a default time diff prior if the smoothing has extent over time")
        if self._container._name == "rate":
            # This validation only makes sense for Fixed Effects not Random Effects
            # TODO This repeats validation logic in cascade.model.rates but I don't see a good way to bring that in here
            # "is_negative" here means constrained to exactly zero at every
            # value prior; "is_positive" means strictly positive everywhere.
            is_negative = True
            is_positive = True
            for prior in [self.default.value] + [p for p in self.detail or [] if p.prior_type == "value"]:
                is_negative = is_negative and prior.min == 0 and prior.max == 0
                is_positive = is_positive and prior.min > 0
                if prior.min < 0:
                    errors.append("Rates must be constrained to be >= 0 at all points. Add or correct the lower bound")
                    break
            if self.rate in ["iota", "rho"]:
                if not (is_negative or is_positive):
                    errors.append(f"Rate {self.rate} must be either fully positive or constrained to zero")
        return errors
class StudyCovariate(Form):
    """Settings for one study-covariate multiplier (grid plus priors)."""
    # Haven't seen if this is a string or an ID for the column in the bundle.
    study_covariate_id = IntField(display="Covariate")
    measure_id = IntField(display="Measure")
    mulcov_type = OptionField(["rate_value", "meas_value", "meas_std"], display="Multiplier type")
    transformation = IntField(display="Transformation")
    age_time_specific = IntField(display="Age and Time specific")
    age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
    time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
    default = SmoothingPriorGroup(display="Defaults")
    mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
    detail = FormList(SmoothingPrior, nullable=True, display="Detail")
    custom_age_grid = Dummy()
    custom_time_grid = Dummy()
class CountryCovariate(Form):
    """Settings for one country-covariate multiplier (grid plus priors)."""
    country_covariate_id = IntField(display="Covariate")
    measure_id = IntField(display="Measure")
    mulcov_type = OptionField(["rate_value", "meas_value", "meas_std"], display="Multiplier type")
    transformation = IntField(display="Transformation")
    age_time_specific = IntField(display="Age and Time specific")
    age_grid = StringListField(constructor=float, nullable=True, display="Age grid")
    time_grid = StringListField(constructor=float, nullable=True, display="Time grid")
    default = SmoothingPriorGroup(display="Defaults")
    mulstd = SmoothingPriorGroup(nullable=True, display="MulStd")
    detail = FormList(SmoothingPrior, nullable=True, display="Detail")
    custom_age_grid = Dummy()
    custom_time_grid = Dummy()
class Model(Form):
    """Top-level model settings: identifiers, drill options, grids, ODE knobs."""
    modelable_entity_id = IntField()
    model_version_id = IntField(nullable=True)
    random_seed = IntField()
    minimum_meas_cv = FloatField(nullable=True, display="Data CV floor")
    add_csmr_cause = IntField(nullable=True, display="CSMR cause")
    title = StrField(nullable=True, display="Title")
    description = StrField(nullable=True, display="Description")
    bundle_id = IntField(nullable=True, display="Data bundle")
    drill = OptionField(["cascade", "drill"], display="Drill")
    drill_location = IntField(display="Drill location", nullable=True)
    drill_location_start = IntField(display="Drill location start", nullable=True)
    drill_location_end = IntField(display="Drill location end", nullable=True)
    drill_sex = OptionField([1, 2], constructor=int, nullable=True, display="Drill sex")
    birth_prev = OptionField([0, 1], constructor=int, nullable=True, default=0, display="Prevalence at birth")
    default_age_grid = StringListField(constructor=float, display="(Cascade) Age grid")
    default_time_grid = StringListField(constructor=float, display="(Cascade) Time grid")
    constrain_omega = OptionField([0, 1], constructor=int, nullable=False, display="Constrain other cause mortality")
    exclude_data_for_param = ListField(constructor=int, nullable=True, display="Exclude data for parameter")
    ode_step_size = FloatField(display="ODE step size")
    additional_ode_steps = StringListField(constructor=float, nullable=True,
                                           display="Advanced additional ODE steps")
    split_sex = OptionField(["most_detailed", "1", "2", "3", "4", "5"], display="Split sex (Being used as Drill Start)")
    quasi_fixed = OptionField([0, 1], default=0, constructor=int, nullable=True)
    zero_sum_random = ListField(nullable=True, display="Zero-sum random effects")
    bound_frac_fixed = FloatField(
        default=1e-2, nullable=True,
        display="allowed modification to point to move it within bounds"
    )
    bound_random = FloatField(
        nullable=True,
        display="allowed modification to point to move it within bounds"
    )
    rate_case = Dummy()
    data_density = StrField(nullable=True, display="Data density")
    def _full_form_validation(self, root):
        """Drill mode requires an explicit sex; returns error strings."""
        errors = []
        if self.drill == "drill":
            if self.is_field_unset("drill_sex"):
                errors.append("For a drill, please specify Drill sex.")
        return errors
class Eta(Form):
    """Eta offsets, supplied separately for priors and for data densities."""
    priors = FloatField(nullable=True)
    data = FloatField(nullable=True)
class DataEta(Form):
    """Per-integrand eta override for data."""
    integrand_measure_id = IntField(nullable=True)
    value = FloatField(nullable=True)
class DataDensity(Form):
    """Per-integrand density-name override for data."""
    value = StrField(nullable=True)
    integrand_measure_id = IntField(nullable=True)
class StudentsDOF(Form):
    """Degrees of freedom for Students-t densities (priors and data)."""
    priors = FloatField(nullable=True, default=5)
    data = FloatField(nullable=True, default=5)
class DerivativeTest(Form):
    """Derivative-test mode, chosen separately for fixed and random effects."""
    fixed = OptionField(
        ["none", "first-order", "second-order", "only-second-order",
         "adaptive", "trace-adaptive"],
        default="none",
        display="test for these derivatives",
        nullable=True
    )
    random = OptionField(
        ["none", "first-order", "second-order", "only-second-order",
         "adaptive", "trace-adaptive"],
        default="none",
        display="test for these derivatives",
        nullable=True
    )
class FixedRandomInt(Form):
    """An integer option supplied separately for fixed and random effects."""
    fixed = IntField(nullable=True)
    random = IntField(nullable=True)
class FixedRandomFloat(Form):
    """A float option supplied separately for fixed and random effects."""
    fixed = FloatField(nullable=True)
    random = FloatField(nullable=True)
class RandomEffectBound(Form):
    """Bound on random effects for one location."""
    location = IntField(nullable=True)
    value = FloatField(nullable=True)
class Policies(Form):
    """Site-wide modeling policy knobs, with defaults suitable for production."""
    estimate_emr_from_prevalence = OptionField(
        [0, 1], constructor=int, default=0, display="Estimate EMR from prevalance", nullable=True
    )
    use_weighted_age_group_midpoints = OptionField([1, 0], default=1, constructor=int, nullable=True)
    number_of_fixed_effect_samples = IntField(default=30, nullable=True)
    with_hiv = BoolField(default=True, nullable=True, display="Whether to get ASDR with HIV deaths.")
    age_group_set_id = IntField(default=12, nullable=True, display="Age groups for analysis work.")
    exclude_relative_risk = OptionField([1, 0], default=1, constructor=int, nullable=True)
    meas_std_effect = OptionField(
        ["add_std_scale_all", "add_std_scale_log", "add_var_scale_all", "add_var_scale_log"],
        default="add_var_scale_log",
        display="Measurement standard deviation effect",
        nullable=True
    )
    limited_memory_max_history_fixed = IntField(
        default=30, nullable=True,
        display="number of most recent iterations taken into account for quasi-Newton"
    )
    # BUG FIX: the options here are strings, so the previous constructor=int
    # (likely copy-pasted from the integer OptionFields above) would raise
    # ValueError on any supplied value. Other string OptionFields in this
    # file (e.g. meas_std_effect) correctly omit the constructor.
    fit_strategy = OptionField(["fit", "fit_fixed_then_fit"], default="fit", nullable=True)
    decomp_step = StrField(nullable=True, default="step1")
    gbd_round_id = IntField(nullable=True, default=6)
class Configuration(Form):
    """The root Form of the whole configuration tree.

    Example:
        >>> input_data = json.loads(json_blob)
        >>> form = Configuration(input_data)
        >>> errors = form.validate_and_normalize()
        >>> if errors:
        ...     print(errors)
        ...     raise Exception("Woops")
        ... else:
        ...     print(f"Ready to configure a model for {form.model.modelable_entity_id}")
    """
    # validation_priority=5 makes these sub-forms validate before the rest,
    # since other sections' validation reads their values via `root`.
    model = Model(display="Model", validation_priority=5)
    policies = Policies(display="Policies")
    gbd_round_id = IntField(display="GBD Round ID")
    random_effect = FormList(Smoothing, nullable=True, display="Random effects")
    rate = FormList(Smoothing, display="Rates")
    study_covariate = FormList(StudyCovariate, display="Study covariates")
    country_covariate = FormList(CountryCovariate, display="Country covariates")
    eta = Eta(validation_priority=5)
    students_dof = StudentsDOF(validation_priority=5)
    log_students_dof = StudentsDOF(validation_priority=5)
    csmr_cod_output_version_id = IntField()
    # Unclear how this differs from csmr_cod_output_version_id. Has same value.
    csmr_mortality_output_version_id = Dummy()
    location_set_version_id = IntField(default=429, nullable=True)
    min_cv = FormList(Dummy)
    min_cv_by_rate = FormList(Dummy)
    re_bound_location = FormList(RandomEffectBound)
    derivative_test = DerivativeTest(display="Derivative test")
    max_num_iter = FixedRandomInt(display="Max ipopt iterations")
    print_level = FixedRandomInt(display="Print level")
    accept_after_max_steps = FixedRandomInt(display="Max backtracking")
    tolerance = FixedRandomFloat(display="Desired relative convergence tolerance")
    data_eta_by_integrand = FormList(DataEta)
    data_density_by_integrand = FormList(DataDensity)
    # BUG FIX: this line previously carried trailing non-Python residue
    # ("| 0.61231 | 0.241411") from a data-extraction artifact, which broke
    # the module's syntax; the residue has been removed.
    config_version = IntField(nullable=True, display="Settings version")
from django.db import models
from imagekit.models import ImageSpecField
from pilkit.processors import ResizeToFit
from django.utils.safestring import mark_safe
from django.db.models.signals import post_save, pre_save, pre_delete
from social_network.core.models import (cleaning_files_pre_save, cleaning_files_pre_delete,
validate_image, make_upload_path)
from django.contrib.auth.models import User
from slugify import slugify
from django.urls import reverse
class Post(models.Model):
    """
    Managing posts of users
    """
    user = models.ForeignKey(User,
                             verbose_name='User',
                             related_name='posts',
                             on_delete=models.CASCADE)
    image = models.ImageField(verbose_name='Image',
                              validators=[validate_image],
                              upload_to=make_upload_path,
                              blank=True,
                              null=True)
    title = models.CharField(verbose_name='Title',
                             max_length=50,
                             default='',
                             help_text='Title post.')
    message = models.TextField(verbose_name='Message',
                               max_length=1000,
                               default='',
                               help_text='Your new message.')
    # Vote counters; "rating" is denormalized as like - unlike in save().
    like = models.PositiveIntegerField(verbose_name='Like', blank=True, default=0)
    unlike = models.PositiveIntegerField(verbose_name='Unlike', blank=True, default=0)
    rating = models.IntegerField(verbose_name='Rating', blank=True, default=0)
    # Slug is assigned by the post_add_slug post_save signal handler.
    slug = models.SlugField(max_length=100, blank=True, null=True)
    is_disable = models.BooleanField('Is disable?', blank=True, default=False)
    created_at = models.DateTimeField(verbose_name='Publication date', auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name='Updated', auto_now=True)
    # Resized renditions of "image" (imagekit; not DB columns).
    thumbnail = ImageSpecField([ResizeToFit(height=60, width=60, upscale=True)], source='image')
    middle = ImageSpecField([ResizeToFit(height=180, width=180, upscale=True)], source='image')
    def __str__(self):
        return self.title
    @property
    def upload_dir(self):
        # Subdirectory used by make_upload_path for uploaded images.
        return 'posts/images'
    class Meta:
        ordering = ('-pk',)
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
    def save(self, *args, **kwargs):
        # Keep the denormalized rating in sync with the vote counters.
        like = self.like
        unlike = self.unlike
        self.rating = like - unlike
        super(Post, self).save(*args, **kwargs)
    def admin_thumbnail(self):
        # Inline image preview for the Django admin.
        if self.image:
            return mark_safe('<img src="{}" />'.format(self.thumbnail.url))
        else:
            return ''
    admin_thumbnail.short_description = 'Image'
    admin_thumbnail.allow_tags = True
    def get_absolute_url(self):
        return reverse('web_posts:view_post', kwargs={'slug': self.slug})
class Comment(models.Model):
    """A user comment attached to a Post."""
    post = models.ForeignKey(Post,
                             verbose_name='Post',
                             related_name='comments',
                             on_delete=models.CASCADE)
    # Keep the comment if its author is deleted (user becomes NULL).
    user = models.ForeignKey(User,
                             verbose_name='User',
                             related_name='+',
                             null=True,
                             on_delete=models.SET_NULL,
                             db_index=False)
    text = models.TextField(verbose_name='Message', max_length=200, default="")
    is_disable = models.BooleanField('Is disable?', blank=True, default=False)
    created_at = models.DateTimeField(verbose_name='Publication date', auto_now_add=True)
    def __str__(self):
        return self.text
# Signals
def post_add_slug(instance, **kwargs):
    """Ensure the saved Post's slug is '<pk>-<slugified title>'."""
    new_slug = '{0}-{1}'.format(instance.pk, slugify(instance.title))
    if instance.slug != new_slug:
        # Equality guard stops the recursion caused by save() re-firing
        # this post_save handler.
        instance.slug = new_slug
        instance.save()
# Add slug
post_save.connect(post_add_slug, sender=Post)
# Cleaning files
pre_save.connect(cleaning_files_pre_save, sender=Post)
# BUG FIX: this line previously carried trailing non-Python residue
# ("| posts/models.py |") from a data-extraction artifact; removed.
pre_delete.connect(cleaning_files_pre_delete, sender=Post)
from django.db import models
from imagekit.models import ImageSpecField
from pilkit.processors import ResizeToFit
from django.utils.safestring import mark_safe
from django.db.models.signals import post_save, pre_save, pre_delete
from social_network.core.models import (cleaning_files_pre_save, cleaning_files_pre_delete,
validate_image, make_upload_path)
from django.contrib.auth.models import User
from slugify import slugify
from django.urls import reverse
class Post(models.Model):
    """
    Managing posts of users
    """
    user = models.ForeignKey(User,
                             verbose_name='User',
                             related_name='posts',
                             on_delete=models.CASCADE)
    image = models.ImageField(verbose_name='Image',
                              validators=[validate_image],
                              upload_to=make_upload_path,
                              blank=True,
                              null=True)
    title = models.CharField(verbose_name='Title',
                             max_length=50,
                             default='',
                             help_text='Title post.')
    message = models.TextField(verbose_name='Message',
                               max_length=1000,
                               default='',
                               help_text='Your new message.')
    # Vote counters; "rating" is denormalized as like - unlike in save().
    like = models.PositiveIntegerField(verbose_name='Like', blank=True, default=0)
    unlike = models.PositiveIntegerField(verbose_name='Unlike', blank=True, default=0)
    rating = models.IntegerField(verbose_name='Rating', blank=True, default=0)
    # Slug is assigned by the post_add_slug post_save signal handler.
    slug = models.SlugField(max_length=100, blank=True, null=True)
    is_disable = models.BooleanField('Is disable?', blank=True, default=False)
    created_at = models.DateTimeField(verbose_name='Publication date', auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name='Updated', auto_now=True)
    # Resized renditions of "image" (imagekit; not DB columns).
    thumbnail = ImageSpecField([ResizeToFit(height=60, width=60, upscale=True)], source='image')
    middle = ImageSpecField([ResizeToFit(height=180, width=180, upscale=True)], source='image')
    def __str__(self):
        return self.title
    @property
    def upload_dir(self):
        # Subdirectory used by make_upload_path for uploaded images.
        return 'posts/images'
    class Meta:
        ordering = ('-pk',)
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
    def save(self, *args, **kwargs):
        # Keep the denormalized rating in sync with the vote counters.
        like = self.like
        unlike = self.unlike
        self.rating = like - unlike
        super(Post, self).save(*args, **kwargs)
    def admin_thumbnail(self):
        # Inline image preview for the Django admin.
        if self.image:
            return mark_safe('<img src="{}" />'.format(self.thumbnail.url))
        else:
            return ''
    admin_thumbnail.short_description = 'Image'
    admin_thumbnail.allow_tags = True
    def get_absolute_url(self):
        return reverse('web_posts:view_post', kwargs={'slug': self.slug})
class Comment(models.Model):
    """A user comment attached to a Post."""
    post = models.ForeignKey(Post,
                             verbose_name='Post',
                             related_name='comments',
                             on_delete=models.CASCADE)
    # Keep the comment if its author is deleted (user becomes NULL).
    user = models.ForeignKey(User,
                             verbose_name='User',
                             related_name='+',
                             null=True,
                             on_delete=models.SET_NULL,
                             db_index=False)
    text = models.TextField(verbose_name='Message', max_length=200, default="")
    is_disable = models.BooleanField('Is disable?', blank=True, default=False)
    created_at = models.DateTimeField(verbose_name='Publication date', auto_now_add=True)
    def __str__(self):
        return self.text
# Signals
def post_add_slug(instance, **kwargs):
    """Ensure the saved Post's slug is '<pk>-<slugified title>'."""
    new_slug = '{0}-{1}'.format(instance.pk, slugify(instance.title))
    if instance.slug != new_slug:
        # Equality guard stops the recursion caused by save() re-firing
        # this post_save handler.
        instance.slug = new_slug
        instance.save()
# Add slug
post_save.connect(post_add_slug, sender=Post)
# Cleaning files
pre_save.connect(cleaning_files_pre_save, sender=Post)
# BUG FIX: this line previously carried trailing non-Python residue
# ("| 0.666931 | 0.128607") from a data-extraction artifact; removed.
pre_delete.connect(cleaning_files_pre_delete, sender=Post)
# Hyperparameter search for fMRI denoising with DeepInterpolation:
# one training generator per nifti file, a shared held-out test generator,
# and a Bayesian search over the flexible fMRI network architecture.
import deepinterpolation as de
import sys
from shutil import copyfile
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
from typing import Any, Dict
from kerastuner.tuners import RandomSearch, BayesianOptimization
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import pickle

# Timestamp uniquely names this run's output directory and model files.
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")

training_param = {}
generator_param = {}
network_param = {}
generator_test_param = {}

steps_per_epoch = 150

# Validation generator: a single held-out perception-test scan.
generator_test_param["type"] = "generator"
generator_test_param["name"] = "FmriGenerator"
generator_test_param["pre_post_x"] = 3
generator_test_param["pre_post_y"] = 3
generator_test_param["pre_post_z"] = 3
generator_test_param["pre_post_t"] = 2
generator_test_param['center_omission_size'] = 4
generator_test_param[
    "train_path"
] = "/home/ec2-user/fmri_data/meta_testing/sub-01:ses-perceptionTest01:func:sub-01_ses-perceptionTest01_task-perception_run-01_bold.nii.gz"
generator_test_param["batch_size"] = 1000
generator_test_param["start_frame"] = 5
generator_test_param["end_frame"] = 160
generator_test_param["total_nb_block"] = 50000
generator_test_param["steps_per_epoch"] = steps_per_epoch

# '/home/ec2-user/fmri_data/training'
local_train_path = '/home/ec2-user/fmri_data/meta_training'#'/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/studyimagenet/tmp/train'
train_paths = os.listdir(local_train_path)

# One generator configuration per training file.
generator_param_list = []
for indiv_path in train_paths:
    generator_param = {}
    generator_param["type"] = "generator"
    generator_param["name"] = "FmriGenerator"
    generator_param["pre_post_x"] = 3
    generator_param["pre_post_y"] = 3
    generator_param["pre_post_z"] = 3
    generator_param["pre_post_t"] = 2
    generator_param["train_path"] = os.path.join(local_train_path, indiv_path)
    generator_param["batch_size"] = 1000
    generator_param["start_frame"] = 5
    generator_param["end_frame"] = 160
    generator_param["total_nb_block"] = 150000
    generator_param["steps_per_epoch"] = steps_per_epoch
    generator_param["center_omission_size"] = 4
    generator_param_list.append(generator_param)

network_param["type"] = "network"
network_param["name"] = "fmri_flexible_architecture"

training_param["type"] = "trainer"
training_param["name"] = "core_trainer"
training_param["run_uid"] = run_uid
training_param["batch_size"] = generator_test_param["batch_size"]
training_param["steps_per_epoch"] = steps_per_epoch
training_param["period_save"] = 1000
training_param["nb_gpus"] = 0
training_param["apply_learning_decay"] = 0
training_param["initial_learning_rate"] = 0.0001
training_param["epochs_drop"] = 50
training_param["nb_times_through_data"] = 1
training_param["learning_rate"] = 0.0001
training_param["loss"] = "mean_absolute_error"
training_param["model_string"] = (
    network_param["name"]
    + "_"
    + training_param["loss"]
    + "_"
    + training_param["run_uid"]
)

jobdir = (
    "/home/ec2-user/trained_fmri_models/"
    + training_param["model_string"]
    + "_"
    + run_uid
)
training_param["output_dir"] = jobdir

try:
    os.mkdir(jobdir)
except FileExistsError:
    # BUG FIX: was a bare "except:" that silenced every failure (permissions,
    # missing parent directory, ...); only a pre-existing folder is benign.
    print("folder already exists")

path_training = os.path.join(jobdir, "training.json")
json_obj = JsonSaver(training_param)
json_obj.save_json(path_training)

list_train_generator = []
for local_index, indiv_generator in enumerate(generator_param_list):
    # Only the first generator draws the random block list; the rest reuse it.
    if local_index == 0:
        indiv_generator["initialize_list"] = 1
    else:
        indiv_generator["initialize_list"] = 0
    path_generator = os.path.join(
        jobdir, "generator" + str(local_index) + ".json")
    json_obj = JsonSaver(indiv_generator)
    json_obj.save_json(path_generator)
    generator_obj = ClassLoader(path_generator)
    train_generator = generator_obj.find_and_build()(path_generator)
    # we don't need to set a random set of points for all 100 or so
    if local_index == 0:
        keep_generator = train_generator
    else:
        train_generator.x_list = keep_generator.x_list
        train_generator.y_list = keep_generator.y_list
        train_generator.z_list = keep_generator.z_list
        train_generator.t_list = keep_generator.t_list
    list_train_generator.append(train_generator)

path_test_generator = os.path.join(jobdir, "test_generator.json")
json_obj = JsonSaver(generator_test_param)
json_obj.save_json(path_test_generator)

path_network = os.path.join(jobdir, "network.json")
json_obj = JsonSaver(network_param)
json_obj.save_json(path_network)

generator_obj = ClassLoader(path_generator)
generator_test_obj = ClassLoader(path_test_generator)
network_obj = ClassLoader(path_network)
trainer_obj = ClassLoader(path_training)

train_generator = generator_obj.find_and_build()(path_generator)
global_train_generator = de.generator_collection.CollectorGenerator(
    list_train_generator
)
test_generator = generator_test_obj.find_and_build()(path_test_generator)
network_callback = network_obj.find_and_build()(path_network)

# We initialize the trainer as usual except without compiling
training_class = trainer_obj.find_and_build()(
    global_train_generator, test_generator, network_callback, path_training, auto_compile=False
)

# We build the hyperparameter training class
def build_model(hp):
    """Build and compile the model for one hyperparameter-search trial."""
    # We allow learning rate to change
    training_class.optimizer = RMSprop(
        lr=hp.Choice('learning_rate', [1e-3, 1e-4]))  # , 1e-4, 1e-5]))
    local_size = training_class.local_generator.get_input_size()
    input_img = Input(shape=local_size)
    training_class.local_model = Model(
        input_img, training_class.network_obj(input_img, hp))
    training_class.compile()
    return training_class.local_model

# This is where we set the searching strategy
tuner = BayesianOptimization(
    build_model,
    objective='val_mae',
    seed=40,
    max_trials=500,
    executions_per_trial=1,
    directory=jobdir)

tuner.search_space_summary()
training_class.cache_validation()

# replacement for model.fit
tuner.search(training_class.local_generator,
             validation_data=training_class.local_test_generator,
             steps_per_epoch=training_class.steps_per_epoch,
             epochs=training_class.epochs,
             max_queue_size=4,  # 32,
             workers=training_class.workers,
             shuffle=False,
             use_multiprocessing=True,
             callbacks=training_class.callbacks_list,
             initial_epoch=0,)

tuner.results_summary()
# BUG FIX: close the output file deterministically (was an unclosed open();
# this line also carried trailing non-Python data-extraction residue).
with open(os.path.join(jobdir, "result.pkl"), "wb") as result_file:
    pickle.dump(tuner, result_file)
# Hyperparameter search for fMRI denoising with DeepInterpolation:
# one training generator per nifti file, a shared held-out test generator,
# and a Bayesian search over the flexible fMRI network architecture.
# NOTE: restores the leading "import deepinterpolation as de" line that was
# lost into the preceding line's data-extraction residue.
import deepinterpolation as de
import sys
from shutil import copyfile
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
from typing import Any, Dict
from kerastuner.tuners import RandomSearch, BayesianOptimization
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import pickle

# Timestamp uniquely names this run's output directory and model files.
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")

training_param = {}
generator_param = {}
network_param = {}
generator_test_param = {}

steps_per_epoch = 150

# Validation generator: a single held-out perception-test scan.
generator_test_param["type"] = "generator"
generator_test_param["name"] = "FmriGenerator"
generator_test_param["pre_post_x"] = 3
generator_test_param["pre_post_y"] = 3
generator_test_param["pre_post_z"] = 3
generator_test_param["pre_post_t"] = 2
generator_test_param['center_omission_size'] = 4
generator_test_param[
    "train_path"
] = "/home/ec2-user/fmri_data/meta_testing/sub-01:ses-perceptionTest01:func:sub-01_ses-perceptionTest01_task-perception_run-01_bold.nii.gz"
generator_test_param["batch_size"] = 1000
generator_test_param["start_frame"] = 5
generator_test_param["end_frame"] = 160
generator_test_param["total_nb_block"] = 50000
generator_test_param["steps_per_epoch"] = steps_per_epoch

# '/home/ec2-user/fmri_data/training'
local_train_path = '/home/ec2-user/fmri_data/meta_training'#'/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/studyimagenet/tmp/train'
train_paths = os.listdir(local_train_path)

# One generator configuration per training file.
generator_param_list = []
for indiv_path in train_paths:
    generator_param = {}
    generator_param["type"] = "generator"
    generator_param["name"] = "FmriGenerator"
    generator_param["pre_post_x"] = 3
    generator_param["pre_post_y"] = 3
    generator_param["pre_post_z"] = 3
    generator_param["pre_post_t"] = 2
    generator_param["train_path"] = os.path.join(local_train_path, indiv_path)
    generator_param["batch_size"] = 1000
    generator_param["start_frame"] = 5
    generator_param["end_frame"] = 160
    generator_param["total_nb_block"] = 150000
    generator_param["steps_per_epoch"] = steps_per_epoch
    generator_param["center_omission_size"] = 4
    generator_param_list.append(generator_param)

network_param["type"] = "network"
network_param["name"] = "fmri_flexible_architecture"

training_param["type"] = "trainer"
training_param["name"] = "core_trainer"
training_param["run_uid"] = run_uid
training_param["batch_size"] = generator_test_param["batch_size"]
training_param["steps_per_epoch"] = steps_per_epoch
training_param["period_save"] = 1000
training_param["nb_gpus"] = 0
training_param["apply_learning_decay"] = 0
training_param["initial_learning_rate"] = 0.0001
training_param["epochs_drop"] = 50
training_param["nb_times_through_data"] = 1
training_param["learning_rate"] = 0.0001
training_param["loss"] = "mean_absolute_error"
training_param["model_string"] = (
    network_param["name"]
    + "_"
    + training_param["loss"]
    + "_"
    + training_param["run_uid"]
)

jobdir = (
    "/home/ec2-user/trained_fmri_models/"
    + training_param["model_string"]
    + "_"
    + run_uid
)
training_param["output_dir"] = jobdir

try:
    os.mkdir(jobdir)
except FileExistsError:
    # BUG FIX: was a bare "except:" that silenced every failure (permissions,
    # missing parent directory, ...); only a pre-existing folder is benign.
    print("folder already exists")

path_training = os.path.join(jobdir, "training.json")
json_obj = JsonSaver(training_param)
json_obj.save_json(path_training)

list_train_generator = []
for local_index, indiv_generator in enumerate(generator_param_list):
    # Only the first generator draws the random block list; the rest reuse it.
    if local_index == 0:
        indiv_generator["initialize_list"] = 1
    else:
        indiv_generator["initialize_list"] = 0
    path_generator = os.path.join(
        jobdir, "generator" + str(local_index) + ".json")
    json_obj = JsonSaver(indiv_generator)
    json_obj.save_json(path_generator)
    generator_obj = ClassLoader(path_generator)
    train_generator = generator_obj.find_and_build()(path_generator)
    # we don't need to set a random set of points for all 100 or so
    if local_index == 0:
        keep_generator = train_generator
    else:
        train_generator.x_list = keep_generator.x_list
        train_generator.y_list = keep_generator.y_list
        train_generator.z_list = keep_generator.z_list
        train_generator.t_list = keep_generator.t_list
    list_train_generator.append(train_generator)

path_test_generator = os.path.join(jobdir, "test_generator.json")
json_obj = JsonSaver(generator_test_param)
json_obj.save_json(path_test_generator)

path_network = os.path.join(jobdir, "network.json")
json_obj = JsonSaver(network_param)
json_obj.save_json(path_network)

generator_obj = ClassLoader(path_generator)
generator_test_obj = ClassLoader(path_test_generator)
network_obj = ClassLoader(path_network)
trainer_obj = ClassLoader(path_training)

train_generator = generator_obj.find_and_build()(path_generator)
global_train_generator = de.generator_collection.CollectorGenerator(
    list_train_generator
)
test_generator = generator_test_obj.find_and_build()(path_test_generator)
network_callback = network_obj.find_and_build()(path_network)

# We initialize the trainer as usual except without compiling
training_class = trainer_obj.find_and_build()(
    global_train_generator, test_generator, network_callback, path_training, auto_compile=False
)

# We build the hyperparameter training class
def build_model(hp):
    """Build and compile the model for one hyperparameter-search trial."""
    # We allow learning rate to change
    training_class.optimizer = RMSprop(
        lr=hp.Choice('learning_rate', [1e-3, 1e-4]))  # , 1e-4, 1e-5]))
    local_size = training_class.local_generator.get_input_size()
    input_img = Input(shape=local_size)
    training_class.local_model = Model(
        input_img, training_class.network_obj(input_img, hp))
    training_class.compile()
    return training_class.local_model

# This is where we set the searching strategy
tuner = BayesianOptimization(
    build_model,
    objective='val_mae',
    seed=40,
    max_trials=500,
    executions_per_trial=1,
    directory=jobdir)

tuner.search_space_summary()
training_class.cache_validation()

# replacement for model.fit
tuner.search(training_class.local_generator,
             validation_data=training_class.local_test_generator,
             steps_per_epoch=training_class.steps_per_epoch,
             epochs=training_class.epochs,
             max_queue_size=4,  # 32,
             workers=training_class.workers,
             shuffle=False,
             use_multiprocessing=True,
             callbacks=training_class.callbacks_list,
             initial_epoch=0,)

tuner.results_summary()
# BUG FIX: close the output file deterministically (was an unclosed open();
# this line also carried trailing non-Python data-extraction residue).
with open(os.path.join(jobdir, "result.pkl"), "wb") as result_file:
    pickle.dump(tuner, result_file)
import os
from . import __file__
from .data import GHS_HAZARDS
__version__ = [0, 1, 0]
class UnknownHazard(Exception):
    """
    Exception raised when no Hazard is found
    """
    # Default human-readable message for the error.
    message = "Unknown hazard"
class Hazard:
    """
    One GHS hazard record, looked up case-insensitively by its code.
    """
    # Class-level defaults; populated per-instance in __init__.
    code = None
    name = None
    hazard_type = None
    usage = None
    non_usage = None
    example = None
    pictogram = None
    note = None

    def __init__(self, code: str):
        """
        Initialize Hazard

        :param code: Code of the Hazard (matched case-insensitively)
        :raises UnknownHazard: if no GHS_HAZARDS record matches the code
        """
        try:
            hazard = [g for g in GHS_HAZARDS
                      if g['code'].lower() == code.lower()][0]
            self.code = code
            self.name = hazard['name']
            self.usage = hazard['usage']
            self.hazard_type = hazard['hazard_type']
            self.non_usage = hazard.get('non_usage', '')
            self.example = hazard.get('example', '')
            self.pictogram = hazard['pictogram']
            self.note = hazard.get('note', '')
        except IndexError as e:
            raise UnknownHazard() from e

    @classmethod
    def all(cls) -> list:
        # BUG FIX: the return annotation was the literal "[]", not a type.
        """
        Return all hazards

        :return: list of Hazard
        """
        return [cls(h.get('code')) for h in GHS_HAZARDS]

    @classmethod
    def search(cls, term: str) -> list:
        """
        Search for Hazards on code, name, usage, non_usage, example, note

        Search is case insensitive and checks if attribute contains the term.

        :param term: string to look for
        :return: list of matching Hazards, each hazard reported once
        """
        # BUG FIX: a hazard matching several keys used to appear multiple
        # times in the result; deduplicate by code, preserving first-match
        # order. Also hoist term.lower() out of the nested loop.
        needle = term.lower()
        results = []
        seen = set()
        for key in ['code', 'name', 'usage', 'non_usage', 'example', 'note']:
            for hazard in GHS_HAZARDS:
                hazard_code = hazard.get('code')
                if hazard_code not in seen and needle in hazard.get(key, '').lower():
                    seen.add(hazard_code)
                    results.append(cls(hazard_code))
        return results

    def get_pictogram(self):
        """Return the filesystem path of this hazard's pictogram image."""
        return os.path.join(os.path.dirname(__file__), 'pictograms',
                            self.pictogram)
from . import __file__
from .data import GHS_HAZARDS
__version__ = [0, 1, 0]
class UnknownHazard(Exception):
    """Raised when a hazard code cannot be resolved to any known GHS hazard."""

    # Default human-readable message; runtime value must stay "Unknown hazard".
    message = "Unknown hazard"
class Hazard:
    """A single GHS hazard statement, looked up in the bundled GHS_HAZARDS data."""

    # Class-level defaults; instances populate these from the matching record.
    code = None          # hazard statement code, e.g. "H200"
    name = None          # short hazard name
    hazard_type = None   # hazard category
    usage = None         # where the statement applies
    non_usage = None     # where it does not apply (may be '')
    example = None       # example text (may be '')
    pictogram = None     # pictogram file name under pictograms/
    note = None          # free-form note (may be '')

    def __init__(self, code: str):
        """
        Initialize Hazard from its code (case-insensitive).

        :param code: Code of the Hazard
        :raises UnknownHazard: if no GHS_HAZARDS record has that code
        """
        try:
            # Case-insensitive lookup; IndexError means nothing matched.
            hazard = [g for g in GHS_HAZARDS
                      if g['code'].lower() == code.lower()][0]
            self.code = code
            self.name = hazard['name']
            self.usage = hazard['usage']
            self.hazard_type = hazard['hazard_type']
            self.non_usage = hazard.get('non_usage', '')
            self.example = hazard.get('example', '')
            self.pictogram = hazard['pictogram']
            self.note = hazard.get('note', '')
        except IndexError as e:
            raise UnknownHazard() from e

    @classmethod
    def all(cls) -> list:
        """
        Return all hazards.

        :return: list of Hazard instances, one per GHS_HAZARDS record
        """
        return [cls(h.get('code')) for h in GHS_HAZARDS]

    @classmethod
    def search(cls, term: str) -> list:
        """
        Search for Hazards on code, name, usage, non_usage, example, note.

        Search is case insensitive and checks if an attribute contains the
        term.  Each matching hazard is returned at most once, even when the
        term occurs in several of its fields (the previous implementation
        returned one duplicate instance per matching field).

        :param term: string to look for
        :return: list of matching Hazard instances
        """
        results = []
        seen_codes = set()
        term_lower = term.lower()
        for key in ['code', 'name', 'usage', 'non_usage', 'example', 'note']:
            for hazard in GHS_HAZARDS:
                code = hazard.get('code')
                if code in seen_codes:
                    continue  # already matched on an earlier field
                if term_lower in hazard.get(key, '').lower():
                    seen_codes.add(code)
                    results.append(cls(code))
        return results

    def get_pictogram(self):
        """Return the absolute path of this hazard's pictogram image file."""
        return os.path.join(os.path.dirname(__file__), 'pictograms',
                            self.pictogram)
def fibSumThree(n0):
    """
    Sum the Fibonacci terms divisible by three which do not exceed n0.

    Created on Mon Aug 23 07:40:53 2021
    @author: Ezra

    Input:  n0 - the largest natural number considered
    Output: the sum of the Fibonacci terms divisible by 3 that do not
            exceed n0.
    """
    a = 0
    b = 1
    fibSum3 = 0
    # "Do not exceed n0" means b <= n0; the original used `b < n0` and
    # dropped a Fibonacci term exactly equal to n0.
    while b <= n0:
        if b % 3 == 0:
            fibSum3 = fibSum3 + b
        a, b = b, a + b
    # The original printed the literal string "fibSum3" instead of a label
    # and value pair.
    print("fibSum3 =", fibSum3)
    return fibSum3
fibSumThree(500000000)
def fibSumThreeTracker(n0):
    """
    Sum the Fibonacci terms divisible by three which do not exceed n0,
    keeping track of intermediate values.

    Created on Mon Aug 23 07:40:53 2021
    @author: Ezra

    Input:  n0 - the largest natural number considered
    Output: x - Fibonacci numbers that are divisible by 3
            y - running sums of the Fibonacci terms divisible by 3
                that do not exceed n0
    """
    a = 0
    b = 1
    fibSum3 = 0
    x = []
    y = []
    # "Do not exceed n0" means b <= n0 (the original used `<`); the
    # original's print/second-return after the first `return` were
    # unreachable dead code and have been removed.
    while b <= n0:
        if b % 3 == 0:
            fibSum3 = fibSum3 + b
            x.append(b)
            y.append(fibSum3)
        a, b = b, a + b
    return x, y
# Collect the Fibonacci-divisible-by-3 terms and their running sums.
x,y = fibSumThreeTracker(500000000)
print(x)
print(y)
import matplotlib.pyplot as plt
# Figure 0: running sum on a linear scale.
plt.figure(0)
plt.plot(x,y)
plt.title(" A neat plot that doesn't really convey anything")
plt.xlabel("Fibonacci number")
plt.ylabel("fibSumThree")
plt.grid()
plt.show()
# Figure 1: same data, log-scaled y-axis (Fibonacci growth is exponential,
# so this is closer to a straight line).
plt.figure(1)
plt.plot(x,y)
plt.title(" A neat plot that doesn't really convey anything")
plt.xlabel("Fibonacci number")
plt.ylabel("fibSumThree")
plt.grid()
plt.yscale('log')
plt.show() | Tech lab 1.py | def fibSumThree(n0):
"""
Created on Mon Aug 23 07:40:53 2021
@author: Ezra
fibSumThree(n0) function to find the sum of all the terms in the
Fibonacci sequence divisible by three whih do not exceed n0.
Input: n0 is the largest natural number considered
Output: fibSumThree- the sum of the Fibonacci terms divisible by 3 that do
not exceed n0.
"""
a=0
b=1
fibSum3 = 0
while b < n0:
if b % 3 == 0 :
fibSum3 = fibSum3 + b
c =a+b
a=b
b=c
print("b=", "fibSum3",fibSum3)
return fibSum3
fibSumThree(500000000)
def fibSumThreeTracker(n0):
"""
Created on Mon Aug 23 07:40:53 2021
@author: Ezra
fibSumThree(n0) function to find the sum of all the terms in the
Fibonacci sequence divisible by three whih do not exceed n0.
Input: n0 is the largest natural number considered
Output: fibSumThreeTracker-
x: Fibonacci numbers taht are divisible by 3
y: The sum of the Fibonacci terms divisible by 3
that do not exceed n0
the sum of the Fibonacci terms divisible by 3 that do
not exceed n0. keeps track of intermeiate values.
"""
a=0
b=1
fibSum3 = 0
x=[]
y=[]
while b < n0:
if b % 3 == 0 :
fibSum3 = fibSum3 + b
x.append(b)
y.append(fibSum3)
c =a+b
a=b
b=c
return x,y
print("b=", "fibSum3",fibSum3)
return fibSum3
x,y = fibSumThreeTracker(500000000)
print(x)
print(y)
import matplotlib.pyplot as plt
plt.figure(0)
plt.plot(x,y)
plt.title(" A neat plot that doesn't really convey anything")
plt.xlabel("Fibonacci number")
plt.ylabel("fibSumThree")
plt.grid()
plt.show()
plt.figure(1)
plt.plot(x,y)
plt.title(" A neat plot that doesn't really convey anything")
plt.xlabel("Fibonacci number")
plt.ylabel("fibSumThree")
plt.grid()
plt.yscale('log')
plt.show() | 0.529507 | 0.514644 |
import unittest
import sys
import pandas as pd
from CovidVoting.add_data import (add_data_csv)
sys.path.append('..')
# Define all states
all_states = ["Maryland", "Iowa", "Delaware", "Ohio",
"Pennsylvania", "Nebraska", "Washington",
"Alabama", "Arkansas", "New Mexico", "Texas",
"California", "Kentucky", "Georgia", "Wisconsin",
"Oregon", "Missouri", "Virginia", "Tennessee",
"Louisiana", "New York", "Michigan", "Idaho",
"Florida", "Illinois", "Montana", "Minnesota",
"Indiana", "Massachusetts", "Kansas", "Nevada", "Vermont",
"Connecticut", "New Jersey", "District of Columbia",
"North Carolina", "Utah", "North Dakota", "South Carolina",
"Mississippi", "Colorado", "South Dakota", "Oklahoma", "Wyoming",
"West Virginia", "Maine", "New Hampshire", "Arizona",
"Rhode Island"]
class TestAddData(unittest.TestCase):
    """
    This class defines the tests for add_data_csv

    All fixture paths are relative ("./data/..."), so the tests must be run
    from the directory that contains the data folder.
    """

    def test_smoke_add_data_csv(self):
        """smoke test: the merge runs and returns a non-None result"""
        base_data = "./data/raw_2_covid_latest.csv"
        new_data = "./data/use_election.csv"
        base_state_col = 'State/Territory'
        new_state_col = 'state'
        use_state = all_states
        how_join = 'right'
        df_covid_election = add_data_csv(base_data, new_data, base_state_col,
                                         new_state_col, use_state, how_join)
        self.assertIsNotNone(df_covid_election)

    def test_oneshot_add_data_csv(self):
        """oneshot test: compare the merged frame against an expected frame"""
        base_data = "./data/raw_2_covid_latest.csv"
        new_data = "./data/use_election.csv"
        base_state_col = 'State/Territory'
        new_state_col = 'state'
        use_state = all_states
        how_join = 'right'
        df_covid_election = add_data_csv(base_data,
                                         new_data, base_state_col,
                                         new_state_col, use_state,
                                         how_join)
        # NOTE(review): `merge_covid_election` is not defined anywhere in
        # this module, so this assertion raises NameError when the test is
        # run.  The expected fixture frame needs to be defined or loaded
        # first — TODO confirm the intended fixture.
        pd.testing.assert_frame_equal(df_covid_election, merge_covid_election)

    def test_edge_add_data_csv(self):
        """Edge Tests

        A bad base-table column name must surface as a KeyError.

        Args:
            self
        Returns:
            True: Test passed
            False: Test failed
        """
        base_data = "./data/raw_2_covid_latest.csv"
        new_data = "./data/use_election.csv"
        base_state_col = "wrongname"
        new_state_col = 'state'
        use_state = all_states
        how_join = 'right'
        with self.assertRaises(KeyError):
            add_data_csv(base_data, new_data, base_state_col,
                         new_state_col, use_state, how_join)
if __name__ == '__main__':
unittest.main() | CovidVoting/test/test_add_data.py | import unittest
import sys
import pandas as pd
from CovidVoting.add_data import (add_data_csv)
sys.path.append('..')
# Define all states
all_states = ["Maryland", "Iowa", "Delaware", "Ohio",
"Pennsylvania", "Nebraska", "Washington",
"Alabama", "Arkansas", "New Mexico", "Texas",
"California", "Kentucky", "Georgia", "Wisconsin",
"Oregon", "Missouri", "Virginia", "Tennessee",
"Louisiana", "New York", "Michigan", "Idaho",
"Florida", "Illinois", "Montana", "Minnesota",
"Indiana", "Massachusetts", "Kansas", "Nevada", "Vermont",
"Connecticut", "New Jersey", "District of Columbia",
"North Carolina", "Utah", "North Dakota", "South Carolina",
"Mississippi", "Colorado", "South Dakota", "Oklahoma", "Wyoming",
"West Virginia", "Maine", "New Hampshire", "Arizona",
"Rhode Island"]
class TestAddData(unittest.TestCase):
"""
This class defines the tests for add_data_csv
"""
def test_smoke_add_data_csv(self):
"""smoke test"""
base_data = "./data/raw_2_covid_latest.csv"
new_data = "./data/use_election.csv"
base_state_col = 'State/Territory'
new_state_col = 'state'
use_state = all_states
how_join = 'right'
df_covid_election = add_data_csv(base_data, new_data, base_state_col,
new_state_col, use_state, how_join)
self.assertIsNotNone(df_covid_election)
def test_oneshot_add_data_csv(self):
"""oneshot test"""
base_data = "./data/raw_2_covid_latest.csv"
new_data = "./data/use_election.csv"
base_state_col = 'State/Territory'
new_state_col = 'state'
use_state = all_states
how_join = 'right'
df_covid_election = add_data_csv(base_data,
new_data, base_state_col,
new_state_col, use_state,
how_join)
pd.testing.assert_frame_equal(df_covid_election, merge_covid_election)
def test_edge_add_data_csv(self):
"""Edge Tests
Args:
self
Returns:
True: Test passed
False: Test failed
"""
base_data = "./data/raw_2_covid_latest.csv"
new_data = "./data/use_election.csv"
base_state_col = "wrongname"
new_state_col = 'state'
use_state = all_states
how_join = 'right'
with self.assertRaises(KeyError):
add_data_csv(base_data, new_data, base_state_col,
new_state_col, use_state, how_join)
if __name__ == '__main__':
unittest.main() | 0.347537 | 0.343672 |
from flask import Flask, render_template, url_for, Response, stream_with_context
from datetime import datetime as dt
import threading, cv2, time, imutils, datetime
import numpy as np
from imutils.video import VideoStream
import time
outputFrame = None
lock = threading.Lock()
isCamOn = False
cam = None
class piCam(object):
    """Camera worker for the Flask MJPEG stream.

    Captures from the default video device, flips each frame, tracks the
    largest red object (HSV hue ~161-179) and draws its bounding box and a
    crosshair onto the frame that get_frame() serves.
    """

    def __init__(self):
        # Tracking state shared with the update() thread.
        self.contours = []
        self.x_medium = 0          # x of the tracked object's centre
        self.y_medium = 0          # y of the tracked object's centre
        self.obj_dimensions = {}   # last bounding-box geometry
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        # flipCode=-1 flips both axes (camera presumably mounted upside
        # down — confirm against the hardware).
        self.frame = cv2.flip(self.frame,flipCode=-1)
        # Background thread annotates frames forever.  There is no lock
        # between update() and get_frame(), so a reader can see a frame
        # mid-annotation.  NOTE(review): confirm tearing is acceptable for
        # this preview stream.
        threading.Thread(target=self.update, args=()).start()

    def __del__(self):
        # Release the capture device on garbage collection (streamVideo()
        # relies on this via `del cam`).
        self.video.release()

    def get_frame(self):
        # Encode the latest annotated frame as JPEG bytes for streaming.
        image = self.frame
        ret, jpeg = cv2.imencode('.jpg',image)
        return jpeg.tobytes()

    def update(self):
        # Worker loop: annotate the current frame, then grab the next one.
        while True:
            self.frame = cv2.flip(self.frame,flipCode=-1)
            self.hsv_frame = cv2.cvtColor(self.frame,cv2.COLOR_BGR2HSV)
            # HSV range for "red" near the upper hue wrap-around.
            self.low_red = np.array([161,155,84])
            self.high_red = np.array([179,255,255])
            self.red_mask = cv2.inRange(self.hsv_frame, self.low_red, self.high_red)
            # findContours returns 2 values on OpenCV 4.x and 3 values on
            # OpenCV 3.x; the except branch handles the older signature.
            try:
                self.contours, _ = cv2.findContours(self.red_mask,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                self.contours = sorted(self.contours, key=lambda x:cv2.contourArea(x), reverse = True)
            except Exception:
                _, self.contours, _ = cv2.findContours(self.red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                self.contours = sorted(self.contours, key=lambda x:cv2.contourArea(x), reverse = True)
            if len(self.contours) > 0:
                # Contours are sorted largest-first; only the largest one
                # is annotated (the loop breaks after one iteration).
                for cnt in self.contours:
                    (x, y, w, h) = cv2.boundingRect(cnt)
                    self.x_medium = int((x + x + w)/2)
                    self.y_medium = int((y + y + h)/2)
                    cv2.rectangle(self.frame, (x,y), (x+w, y+h), (0,255,0), 2)
                    cv2.circle(self.frame,(int(self.x_medium),int(self.y_medium)), int((x+w)/16), (0,255,0),2)
                    self.obj_dimensions['min_x'] = x
                    self.obj_dimensions['max_x'] = x+w
                    self.obj_dimensions['width'] = w
                    # NOTE(review): min_y gets the larger coordinate (y+h)
                    # and max_y the smaller — looks swapped; confirm what
                    # consumers of obj_dimensions expect.
                    self.obj_dimensions['min_y'] = y+h
                    self.obj_dimensions['max_y'] = y
                    self.obj_dimensions['height'] = h
                    self.obj_dimensions['area'] = w*h
                    # Property 3 is CAP_PROP_FRAME_WIDTH (capture width).
                    self.frame_xcenter = self.video.get(3)/2
                    self.object_xcenter = x + w/2
                    break
            # Crosshair through the last known object centre.
            cv2.line(self.frame, (self.x_medium,0), (self.x_medium,480), (0,255,0), 2)
            cv2.line(self.frame, (0,self.y_medium), (960,self.y_medium), (0,255,0), 2)
            (self.grabbed, self.frame) = self.video.read()
app = Flask(__name__)
def gen(cam):
    """Yield an endless multipart/x-mixed-replace stream of JPEG frames from *cam*."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    trailer = b'\r\n\r\n'
    while True:
        yield header + cam.get_frame() + trailer
@app.route('/')
def index():
    # Serve the dashboard page (presumably it embeds the /vid_feed stream —
    # confirm against the template).
    return render_template('picar_dash.html')
@app.route('/vid_feed')
def streamVideo():
    """Start (or restart) the camera and return an MJPEG streaming response."""
    global cam
    # Identity comparison is the correct test for None; the original used
    # `cam == None`.
    if cam is None:
        cam = piCam()
    else:
        # Recreate the camera for each new viewer; the short sleep gives
        # piCam.__del__ time to release the capture device first.
        del cam
        time.sleep(0.1)
        cam = piCam()
    return Response(gen(cam), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
# adding host '0.0.0.0' & a port, this can serve as a local network server when running.
app.run(host="0.0.0.0",port=81,debug=True) | Flask_Projects/Bots_Projects/picar_app.py | from flask import Flask, render_template, url_for, Response, stream_with_context
from datetime import datetime as dt
import threading, cv2, time, imutils, datetime
import numpy as np
from imutils.video import VideoStream
import time
outputFrame = None
lock = threading.Lock()
isCamOn = False
cam = None
class piCam(object):
def __init__(self):
self.contours = []
self.x_medium = 0
self.y_medium = 0
self.obj_dimensions = {}
self.video = cv2.VideoCapture(0)
(self.grabbed, self.frame) = self.video.read()
self.frame = cv2.flip(self.frame,flipCode=-1)
threading.Thread(target=self.update, args=()).start()
def __del__(self):
self.video.release()
def get_frame(self):
image = self.frame
ret, jpeg = cv2.imencode('.jpg',image)
return jpeg.tobytes()
def update(self):
while True:
self.frame = cv2.flip(self.frame,flipCode=-1)
self.hsv_frame = cv2.cvtColor(self.frame,cv2.COLOR_BGR2HSV)
self.low_red = np.array([161,155,84])
self.high_red = np.array([179,255,255])
self.red_mask = cv2.inRange(self.hsv_frame, self.low_red, self.high_red)
try:
self.contours, _ = cv2.findContours(self.red_mask,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.contours = sorted(self.contours, key=lambda x:cv2.contourArea(x), reverse = True)
except Exception:
_, self.contours, _ = cv2.findContours(self.red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.contours = sorted(self.contours, key=lambda x:cv2.contourArea(x), reverse = True)
if len(self.contours) > 0:
for cnt in self.contours:
(x, y, w, h) = cv2.boundingRect(cnt)
self.x_medium = int((x + x + w)/2)
self.y_medium = int((y + y + h)/2)
cv2.rectangle(self.frame, (x,y), (x+w, y+h), (0,255,0), 2)
cv2.circle(self.frame,(int(self.x_medium),int(self.y_medium)), int((x+w)/16), (0,255,0),2)
self.obj_dimensions['min_x'] = x
self.obj_dimensions['max_x'] = x+w
self.obj_dimensions['width'] = w
self.obj_dimensions['min_y'] = y+h
self.obj_dimensions['max_y'] = y
self.obj_dimensions['height'] = h
self.obj_dimensions['area'] = w*h
self.frame_xcenter = self.video.get(3)/2
self.object_xcenter = x + w/2
break
cv2.line(self.frame, (self.x_medium,0), (self.x_medium,480), (0,255,0), 2)
cv2.line(self.frame, (0,self.y_medium), (960,self.y_medium), (0,255,0), 2)
(self.grabbed, self.frame) = self.video.read()
app = Flask(__name__)
def gen(cam):
while (True):
frame = cam.get_frame()
yield(b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/')
def index():
return render_template('picar_dash.html')
@app.route('/vid_feed')
def streamVideo():
global cam
if cam == None:
cam = piCam()
else:
del cam
time.sleep(0.1)
cam = piCam()
return Response(gen(cam), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
# adding host '0.0.0.0' & a port, this can serve as a local network server when running.
app.run(host="0.0.0.0",port=81,debug=True) | 0.295332 | 0.167049 |
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from ._http_status_codes import HTTP_STATUS_CODES
from ._model import Model
class HttpResponse(Model):
    """HTTP Error

    Result wrapper used throughout the SDK: ``code`` is the HTTP status
    (with sentinels: 0 for connection failures, -1 for client-side errors
    that never produced an HTTP status) and ``content_type`` tags how
    ``content`` should be read ("message", "location", "error" or
    "no_content").

    Properties:
        code: (code) OPTIONAL int
        content_type: (content_type) OPTIONAL str
        content: (content) OPTIONAL Any
    """

    code: int
    content_type: str
    content: Any

    def __str__(self):
        # e.g. "[400] error: Token not found."
        return f"[{self.code}] {self.content_type}: {str(self.content)}"

    def is_error(self) -> bool:
        return self.content_type == "error"

    def is_no_content(self) -> bool:
        return self.content_type == "no_content"

    def get_query_params(self) -> dict:
        # Only redirect-style responses carry a URL in `content`; parse its
        # query string.  Anything else returns an empty dict.
        if self.content_type == "location":
            return urllib.parse.parse_qs(urllib.parse.urlparse(self.content).query)
        return {}

    # ---- generic factories ----------------------------------------------

    @classmethod
    def create(cls, code: int, message: str):
        """Build a plain "message" response."""
        instance = cls()
        instance.code = code
        instance.content_type = "message"
        instance.content = message
        return instance

    @classmethod
    def create_redirect(cls, code: int, location: str):
        """Build a redirect ("location") response wrapping the target URL."""
        instance = cls()
        instance.code = code
        instance.content_type = "location"
        instance.content = location
        return instance

    @classmethod
    def create_error(cls, code: int, error: str):
        """Build an "error" response with the given status code."""
        instance = cls()
        instance.code = code
        instance.content_type = "error"
        instance.content = error
        return instance

    @classmethod
    def create_connection_error(cls):
        """Error used when the request never reached the server (code 0)."""
        instance = cls()
        instance.code = 0
        instance.content_type = "error"
        instance.content = "Connection Error"
        return instance

    @classmethod
    def create_undocumented_response(cls, code: int, content: Any):
        """Wrap a response whose status code is not documented for the
        operation; returns None when the code is not a known HTTP status.

        NOTE(review): every code other than an empty 200 or a 204 is tagged
        "error" — including other 2xx codes with content.  Confirm this is
        intended.
        """
        if code not in HTTP_STATUS_CODES:
            return None
        content_type = "error"
        if code == 200 and not content:
            content_type = "no_content"
            content = None
        if code == 204:
            content_type = "no_content"
            content = None
        instance = cls()
        instance.code = code
        instance.content_type = content_type
        instance.content = content
        return instance

    @classmethod
    def create_unexpected_content_type_error(
        cls,
        actual: Optional[str] = None,
        expected: Optional[Union[str, List[str]]] = None,
    ):
        """Error for a Content-Type mismatch; the message mentions
        actual/expected values when provided (code -1: no HTTP status)."""
        content = "Unexpected Content-Type Error"
        if actual is not None and expected is not None:
            content += f" (actual: {actual} expected one in: {expected})"
        elif actual is not None:
            content += f" (actual: {actual})"
        elif expected is not None:
            content += f" (expected: {expected})"
        instance = cls()
        instance.code = -1
        instance.content_type = "error"
        instance.content = content
        return instance

    # ---- fixed client-side errors (code -1 or 400, message differs) -----

    @classmethod
    def create_unhandled_error(cls):
        instance = cls()
        instance.code = -1
        instance.content_type = "error"
        instance.content = "Unhandled Error"
        return instance

    @classmethod
    def create_base_url_not_set_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "Base URL not set."
        return instance

    @classmethod
    def create_client_not_registered_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "Client not registered."
        return instance

    @classmethod
    def create_token_not_found_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "Token not found."
        return instance

    @classmethod
    def create_config_repo_not_found_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "Config repository not found."
        return instance

    @classmethod
    def create_token_repo_not_found_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "Token repository not found."
        return instance

    @classmethod
    def create_http_client_not_found_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "HTTP client not found."
        return instance

    @classmethod
    def create_failed_to_resolve_security_error(cls):
        instance = cls()
        instance.code = 400
        instance.content_type = "error"
        instance.content = "Failed to resolve security."
        return instance

    @classmethod
    def try_create_undocumented_response(cls, code: int, content: Any):
        """Like create_undocumented_response, but returns (ok, response)
        instead of None for unknown codes.

        NOTE(review): unlike the non-try variant, an empty 200 is NOT
        mapped to "no_content" here — confirm whether the two should agree.
        """
        if code not in HTTP_STATUS_CODES:
            return False, None
        content_type = "error"
        if code == 204:
            content_type = "no_content"
            content = None
        instance = cls()
        instance.code = code
        instance.content_type = content_type
        instance.content = content
        return True, instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"code": "code",
"content_type": "content_type",
"content": "content",
} | accelbyte_py_sdk/core/_http_response.py |
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from ._http_status_codes import HTTP_STATUS_CODES
from ._model import Model
class HttpResponse(Model):
"""HTTP Error
Properties:
code: (code) OPTIONAL int
content_type: (content_type) OPTIONAL str
content: (content) OPTIONAL Any
"""
code: int
content_type: str
content: Any
def __str__(self):
return f"[{self.code}] {self.content_type}: {str(self.content)}"
def is_error(self) -> bool:
return self.content_type == "error"
def is_no_content(self) -> bool:
return self.content_type == "no_content"
def get_query_params(self) -> dict:
if self.content_type == "location":
return urllib.parse.parse_qs(urllib.parse.urlparse(self.content).query)
return {}
@classmethod
def create(cls, code: int, message: str):
instance = cls()
instance.code = code
instance.content_type = "message"
instance.content = message
return instance
@classmethod
def create_redirect(cls, code: int, location: str):
instance = cls()
instance.code = code
instance.content_type = "location"
instance.content = location
return instance
@classmethod
def create_error(cls, code: int, error: str):
instance = cls()
instance.code = code
instance.content_type = "error"
instance.content = error
return instance
@classmethod
def create_connection_error(cls):
instance = cls()
instance.code = 0
instance.content_type = "error"
instance.content = "Connection Error"
return instance
@classmethod
def create_undocumented_response(cls, code: int, content: Any):
if code not in HTTP_STATUS_CODES:
return None
content_type = "error"
if code == 200 and not content:
content_type = "no_content"
content = None
if code == 204:
content_type = "no_content"
content = None
instance = cls()
instance.code = code
instance.content_type = content_type
instance.content = content
return instance
@classmethod
def create_unexpected_content_type_error(
cls,
actual: Optional[str] = None,
expected: Optional[Union[str, List[str]]] = None,
):
content = "Unexpected Content-Type Error"
if actual is not None and expected is not None:
content += f" (actual: {actual} expected one in: {expected})"
elif actual is not None:
content += f" (actual: {actual})"
elif expected is not None:
content += f" (expected: {expected})"
instance = cls()
instance.code = -1
instance.content_type = "error"
instance.content = content
return instance
@classmethod
def create_unhandled_error(cls):
instance = cls()
instance.code = -1
instance.content_type = "error"
instance.content = "Unhandled Error"
return instance
@classmethod
def create_base_url_not_set_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "Base URL not set."
return instance
@classmethod
def create_client_not_registered_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "Client not registered."
return instance
@classmethod
def create_token_not_found_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "Token not found."
return instance
@classmethod
def create_config_repo_not_found_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "Config repository not found."
return instance
@classmethod
def create_token_repo_not_found_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "Token repository not found."
return instance
@classmethod
def create_http_client_not_found_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "HTTP client not found."
return instance
@classmethod
def create_failed_to_resolve_security_error(cls):
instance = cls()
instance.code = 400
instance.content_type = "error"
instance.content = "Failed to resolve security."
return instance
@classmethod
def try_create_undocumented_response(cls, code: int, content: Any):
if code not in HTTP_STATUS_CODES:
return False, None
content_type = "error"
if code == 204:
content_type = "no_content"
content = None
instance = cls()
instance.code = code
instance.content_type = content_type
instance.content = content
return True, instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"code": "code",
"content_type": "content_type",
"content": "content",
} | 0.8288 | 0.160463 |
import requests
import json
import os
import time
from time import sleep
def dateTime():
    # Timestamp for log lines, e.g. "2021/08/23 07:40:53" (local time).
    fmt = "%Y/%m/%d %H:%M:%S"
    return time.strftime(fmt)
# Read token and owner id from untracked credentials file.
# We wouldn't want this on Github now, would we?
f = open("creds.txt")
lines = f.readlines()
f.close
token = lines[0].strip()
ownerId = lines[1].strip()
print "Token and owner id loaded"
last_update = 0
url = 'https://api.telegram.org/bot%s/' % token
bot_name = "@unifr_mensabot"
pollCreator = "null"
participants_id = []
participants_name = []
hasConnection = True
while True:
try:
if (last_update == 0):
get_updates = json.loads(requests.post(url + 'getUpdates',
params=dict(timeout=20),
timeout=40).content)
else:
get_updates = json.loads(
requests.post(url + 'getUpdates',
params=dict(offset=last_update + 1,
timeout=20),
timeout=40).content)
if not hasConnection:
print dateTime() + " regained connection"
hasConnection = True
except:
if hasConnection:
print dateTime() + " lost connection"
hasConnection = False
get_updates['result'] = []
sleep(25)
for update in get_updates['result']:
hasMsg = False
try:
msg = update['message']['text']
hasMsg = True
except:
hasMsg = False
if hasMsg:
senderId = update['message']['from']['id']
senderName = update['message']['from']['first_name']
out = "Got message: " + update['message']['text']
out += " from " + senderName
reply = "null"
msg = msg.replace(bot_name, "")
if msg == "/newpoll":
if pollCreator == "null":
pollCreator = senderId
reply = "Who's also having lunch today?"
out = senderName + " started a poll"
else:
reply = "There's already a poll running"
out = senderName + " tried to start another poll"
elif msg == "/me":
if pollCreator != "null":
if pollCreator != senderId:
if senderId in participants_id:
out = senderName + " tried to add themselves again"
else:
participants_id.append(senderId)
participants_name.append(senderName)
out = senderName + " joins for lunch"
reply = "Got it"
else:
out = "The poll creator tried to add themselves"
reply = "No need for that, you know you're coming"
else:
out = senderName + " tried to add themselves - "
out += "no poll running"
elif msg == "/result":
if pollCreator != "null":
if pollCreator == senderId or senderId == int(ownerId):
if len(participants_name) > 2:
reply = ""
count = len(participants_name)
for i in range(count - 2):
reply += participants_name[i] + ", "
reply += participants_name[count - 2]
reply += " and " + participants_name[count - 1]
reply += " are joining you today.\n\n"
reply += "Be sure to save " + str(count)
reply += " more seats."
elif len(participants_name) == 2:
reply = participants_name[0] + " and "
reply += participants_name[1]
reply += " are joining you today.\n\n"
reply += "Be sure to save two more seats."
elif len(participants_name) == 1:
reply = participants_name[0]
reply += " is joining you today.\n\n"
reply += "Be sure to save one more seat."
else:
reply = "Looks like nobody is coming today"
pollCreator = "null"
participants_id = []
participants_name = []
out = senderName + " finished poll. Result:\n\n"
out += reply + "\n"
else:
out = senderName
out += " tried to finish a poll they didn't start"
reply = "Only the creator can finish a poll"
else:
out = senderName
out += " tried to finish a poll - there is none running"
reply = "There is no poll running"
print out
last_update = update['update_id']
if reply != "null":
requests.post(
url + 'sendMessage',
params=dict(chat_id=update['message']['chat']['id'],
text=reply)) | mensabot.py | import requests
import json
import os
import time
from time import sleep
def dateTime():
return time.strftime("%Y/%m/%d %H:%M:%S")
# Read token and owner id from untracked credentials file.
# We wouldn't want this on Github now, would we?
f = open("creds.txt")
lines = f.readlines()
f.close
token = lines[0].strip()
ownerId = lines[1].strip()
print "Token and owner id loaded"
last_update = 0
url = 'https://api.telegram.org/bot%s/' % token
bot_name = "@unifr_mensabot"
pollCreator = "null"
participants_id = []
participants_name = []
hasConnection = True
while True:
try:
if (last_update == 0):
get_updates = json.loads(requests.post(url + 'getUpdates',
params=dict(timeout=20),
timeout=40).content)
else:
get_updates = json.loads(
requests.post(url + 'getUpdates',
params=dict(offset=last_update + 1,
timeout=20),
timeout=40).content)
if not hasConnection:
print dateTime() + " regained connection"
hasConnection = True
except:
if hasConnection:
print dateTime() + " lost connection"
hasConnection = False
get_updates['result'] = []
sleep(25)
for update in get_updates['result']:
hasMsg = False
try:
msg = update['message']['text']
hasMsg = True
except:
hasMsg = False
if hasMsg:
senderId = update['message']['from']['id']
senderName = update['message']['from']['first_name']
out = "Got message: " + update['message']['text']
out += " from " + senderName
reply = "null"
msg = msg.replace(bot_name, "")
if msg == "/newpoll":
if pollCreator == "null":
pollCreator = senderId
reply = "Who's also having lunch today?"
out = senderName + " started a poll"
else:
reply = "There's already a poll running"
out = senderName + " tried to start another poll"
elif msg == "/me":
if pollCreator != "null":
if pollCreator != senderId:
if senderId in participants_id:
out = senderName + " tried to add themselves again"
else:
participants_id.append(senderId)
participants_name.append(senderName)
out = senderName + " joins for lunch"
reply = "Got it"
else:
out = "The poll creator tried to add themselves"
reply = "No need for that, you know you're coming"
else:
out = senderName + " tried to add themselves - "
out += "no poll running"
elif msg == "/result":
if pollCreator != "null":
if pollCreator == senderId or senderId == int(ownerId):
if len(participants_name) > 2:
reply = ""
count = len(participants_name)
for i in range(count - 2):
reply += participants_name[i] + ", "
reply += participants_name[count - 2]
reply += " and " + participants_name[count - 1]
reply += " are joining you today.\n\n"
reply += "Be sure to save " + str(count)
reply += " more seats."
elif len(participants_name) == 2:
reply = participants_name[0] + " and "
reply += participants_name[1]
reply += " are joining you today.\n\n"
reply += "Be sure to save two more seats."
elif len(participants_name) == 1:
reply = participants_name[0]
reply += " is joining you today.\n\n"
reply += "Be sure to save one more seat."
else:
reply = "Looks like nobody is coming today"
pollCreator = "null"
participants_id = []
participants_name = []
out = senderName + " finished poll. Result:\n\n"
out += reply + "\n"
else:
out = senderName
out += " tried to finish a poll they didn't start"
reply = "Only the creator can finish a poll"
else:
out = senderName
out += " tried to finish a poll - there is none running"
reply = "There is no poll running"
print out
last_update = update['update_id']
if reply != "null":
requests.post(
url + 'sendMessage',
params=dict(chat_id=update['message']['chat']['id'],
text=reply)) | 0.049854 | 0.062588 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import random
import re
import threading
import librosa
import pretty_midi
import numpy as np
import tensorflow as tf
sys.path.append(os.path.join(os.getcwd(), os.pardir))
from utils import find_files, roll_encode, roll_decode, get_roll_index
def randomize_files(files):
    """Yield len(files) files drawn uniformly at random, with replacement."""
    # One draw per element of the input list; an empty list yields nothing.
    for _ in files:
        pick = random.randint(0, (len(files) - 1))
        yield files[pick]
def load_piece_data(directory,
                    audio_sr,
                    velocity,
                    fac,
                    valid=True):
    '''Loader that reads tune from directory and yields audio
    waveform and encoded piano roll as tuple of 3 arrays:
    (W, T, I). If more audio files represent single midi file,
    one is chosen randomly.

    Args:
        directory: folder holding paired *.mid and *.wav files
        audio_sr: sample rate passed to librosa.load; the piano roll is
            sampled at audio_sr / fac frames per second
        velocity: if falsy, roll values are binarised to {0, 1};
            otherwise kept as velocities scaled into <0;1>
        fac: compression factor between audio rate and roll rate
        valid: if True, iterate the MIDI files in random order with
            replacement (via randomize_files); if False, iterate each
            file exactly once in discovery order
    Yields:
        (audio, table, indices) — presumably waveform, roll codebook and
        per-frame codebook indices as produced by roll_encode; confirm
        against utils.roll_encode.
    '''
    midi_files = find_files(directory, '*.mid')
    randomized_midi_files = randomize_files(midi_files)
    for midi_filename in randomized_midi_files if valid else midi_files:
        # load piano roll from midi file
        proll = pretty_midi.PrettyMIDI(
            midi_filename).get_piano_roll(fs=int(audio_sr/fac))
        proll /= 127 # velocity to <0;1>
        if not velocity:
            proll[proll > 0] = 1
        # encode piano roll
        table, indices = roll_encode(proll, fac)
        # add 0-roll if not present (we will need it later for padding)
        if get_roll_index(table, np.zeros(128)).shape[0] == 0:
            table = np.concatenate((table, np.zeros(shape=(1, 128))))
        # get respective audio file names and choose 1 randomly;
        # `base` is the MIDI file name with the '.mid' suffix stripped
        base = midi_filename.rsplit('/', 1)[-1]
        base = re.sub(r'(.*)%s$' % re.escape('.mid'), r'\1', base)
        audio_files = find_files(directory, base+'*.wav')
        if not audio_files:
            raise ValueError('No files found for \'{}\'.'.format(base+'*.wav'))
        audio_filename = random.choice(audio_files)
        # load audio waveform
        audio, _ = librosa.load(audio_filename, sr=audio_sr, mono=True)
        yield audio, table, indices
def sequence_samples(audio, table, indices, reader):
    '''Generator that yields batch samples as a tuple
    of numpy arrays (wave, roll) with shapes:
    wave.shape = (sample_size + receptive_field - 1, 1)
    roll.shape = (sample_size, 128)
    where sample_size is length of slice to which piece
    is cut. Last slice of a tune may have shape with
    length < sample_size.

    Args:
        audio: 1-D waveform of one tune.
        table: roll table from roll_encode; must contain an all-zero
            row (see load_piece_data), used as the roll padding value.
        indices: per-audio-sample indices into `table`.
        reader: object with `receptive_field` and `sample_size`
            attributes (e.g. a WavMidReader instance).
    '''
    # Split the receptive-field context around the current sample;
    # ceil/floor give the left side the extra sample for even fields.
    left = np.ceil((reader.receptive_field - 1) / 2).astype(int)
    right = np.floor((reader.receptive_field - 1) / 2).astype(int)
    # Ensure len(audio) == len(indices)
    if (audio.shape[0] < indices.shape[0]):
        # Cut piano roll down to length of audio sequence
        indices = indices[:audio.shape[0]]
    else:
        # Pad piano roll up to length of audio sequence, since this is
        # usually longer due to sustain of last notes; the pad value is
        # the table index of the all-zero roll.
        indices = np.pad(indices,
                         [0, audio.shape[0] - indices.shape[0]],
                         'constant',
                         constant_values=get_roll_index(table, np.zeros(128))[0])
    # Pad audio sequence from left and right to provide context
    # to each estimate, receptive field is therefore centered
    # to time sample being calculated
    audio = np.pad(audio,
                   [left, right],
                   'constant').reshape(-1, 1)
    if reader.sample_size:
        # Cut tune into sequences of size sample_size +
        # receptive_field - 1 with overlap = receptive_field - 1
        # NOTE(review): a trailing remainder of len(audio) <=
        # receptive_field (at most one real sample plus padding) is
        # silently dropped here -- confirm this is intended.
        while len(audio) > reader.receptive_field:
            wave = audio[:(left + reader.sample_size + right), :]
            roll = roll_decode(table, indices[:reader.sample_size])
            yield wave, roll
            audio = audio[reader.sample_size:, :]
            indices = indices[reader.sample_size:]
    else:
        yield audio, roll_decode(table, indices)
class WavMidReader(object):
    '''Generic background music data reader that preprocesses audio files
    and enqueues them into a TensorFlow queue.

    Two queue stages are used:
      * a 'tune' FIFO queue holding whole preprocessed pieces as
        (waveform, roll table, roll indices) triples, filled by
        `thread_loader`;
      * a 'batch' padding FIFO queue holding (wave, roll) samples cut
        from those pieces, filled by `thread_generator`.
    '''

    def __init__(self,
                 data_dir,
                 coord,
                 audio_sample_rate,
                 receptive_field,
                 velocity,
                 sample_size,
                 queues_size,
                 compress_fac=10):
        '''Build queues/placeholders and validate the dataset directory.

        Args:
            data_dir: directory with '*.mid' files and matching '*.wav'
                renditions.
            coord: coordinator object (tf.train.Coordinator-like) used
                to stop the worker threads.
            audio_sample_rate: sample rate used when loading audio.
            receptive_field: model receptive field in samples; used to
                pad/cut sequences so each estimate gets centered context.
            velocity: keep midi velocities if true, else binarize rolls.
            sample_size: slice length tunes are cut into; falsy keeps
                whole tunes.
            queues_size: two-element sequence with the capacities of the
                tune and batch queues.
            compress_fac: compression factor between the audio rate and
                the piano roll rate.

        Raises:
            ValueError: if `data_dir` contains no midi files.
        '''
        self.data_dir = data_dir
        self.audio_sample_rate = audio_sample_rate
        self.compress_fac = compress_fac
        self.coord = coord
        self.receptive_field = receptive_field
        self.velocity = velocity
        self.sample_size = sample_size
        self.threads = []
        # Init queues and placeholders.
        self.queues = {'tune': {}, 'batch': {}}
        self.audio_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,))
        self.table_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,
                                                                         128))
        self.indices_placeholder = tf.placeholder(dtype=tf.int32, shape=(None,))
        self.queues['tune']['Q'] = tf.FIFOQueue(queues_size[0],
                                                ['float32', 'float32', 'int32'])
        self.queues['tune']['enQ'] = self.queues['tune']['Q'].enqueue(
            [self.audio_placeholder,
             self.table_placeholder,
             self.indices_placeholder])
        self.wave_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, 1))
        self.roll_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, 128))
        self.queues['batch']['Q'] = tf.PaddingFIFOQueue(
            queues_size[1], ['float32', 'float32'],
            shapes=[(None, 1), (None, 128)])
        self.queues['batch']['enQ'] = self.queues['batch']['Q'].enqueue(
            [self.wave_placeholder, self.roll_placeholder])
        # BUG FIX: the progress counter is bookkeeping state, not a model
        # parameter -- it must not be collected by optimizers, so create
        # it with trainable=False (was True).
        self.file_counter = tf.Variable(0, trainable=False)
        self.increment_file_counter = tf.assign(
            self.file_counter, self.file_counter+1)
        files = find_files(data_dir, '*.mid')
        if not files:
            raise ValueError('No midi files found in \'{}\'.'.format(data_dir))

    def dequeue(self, num_elements):
        '''Return a TF op that dequeues `num_elements` (wave, roll)
        samples from the batch queue.
        '''
        output = self.queues['batch']['Q'].dequeue_many(num_elements)
        return output

    def thread_loader(self, sess):
        '''Worker loop: repeatedly load random tunes from data_dir and
        enqueue them on the tune queue until the coordinator stops us.
        '''
        stop = False
        # Count tune data files
        n_midi_files = len(find_files(self.data_dir, '*.mid'))
        if n_midi_files == 0:
            # BUG FIX: the original referenced an undefined name
            # `directory` here, raising NameError instead of ValueError.
            raise ValueError('No files found for \'{}\'.'.format(
                self.data_dir+'/*.mid'))
        one_percent = int(np.ceil(n_midi_files/100))
        print('files length: {}'.format(n_midi_files))
        # Go through the dataset repeatedly until stopped
        while not stop:
            # Randomly iterate over files and fetch tune data
            file_iterator = load_piece_data(self.data_dir,
                                            self.audio_sample_rate,
                                            self.velocity,
                                            self.compress_fac)
            for audio, table, indices in file_iterator:
                sess.run(self.queues['tune']['enQ'],
                         feed_dict={self.audio_placeholder: audio,
                                    self.table_placeholder: table,
                                    self.indices_placeholder: indices})
                # Track and report progress
                sess.run(self.increment_file_counter)
                file_counter = sess.run(self.file_counter)
                if file_counter % one_percent == 0:
                    print('Training progress: {:.02f} epochs '
                          '(file {} of {})'.format(file_counter/n_midi_files,
                                                   file_counter, n_midi_files))
                if self.coord.should_stop():
                    stop = True
                    break

    def thread_generator(self, sess):
        '''Worker loop: dequeue tunes, cut them into samples and enqueue
        the samples on the batch queue until the coordinator stops us.
        '''
        stop = False
        # Go through the dataset repeatedly until stopped
        while not stop:
            # Dequeue tune data
            audio, table, indices = sess.run(self.queues['tune']['Q'].dequeue())
            # Fetch samples from the tune
            sample_iterator = sequence_samples(audio, table, indices, self)
            for wave, roll in sample_iterator:
                sess.run(self.queues['batch']['enQ'],
                         feed_dict={self.wave_placeholder: wave,
                                    self.roll_placeholder: roll})
                if self.coord.should_stop():
                    stop = True
                    break

    def single_pass(self, sess, data_dir):
        '''Yield (wave, roll) samples for every tune in `data_dir`
        exactly once, in find_files order (no shuffling).
        '''
        for audio, table, indices in load_piece_data(data_dir,
                                                     self.audio_sample_rate,
                                                     self.velocity,
                                                     self.compress_fac,
                                                     valid=False):
            if self.coord.should_stop():
                break
            for wave, roll in sequence_samples(audio,
                                               table,
                                               indices,
                                               self):
                if self.coord.should_stop():
                    break
                # Add a leading batch dimension of size 1.
                wave = np.expand_dims(wave, axis=0)
                yield wave, roll

    def start_threads(self, sess, n_threads=1):
        '''Start one loader thread and `n_threads` generator threads as
        daemons and return the list of started threads.
        '''
        def _add_daemon_thread(reader, thread_func, sess):
            thread = threading.Thread(target=thread_func, args=(sess,))
            thread.daemon = True  # Thread will close when parent quits.
            reader.threads.append(thread)
        # Single loader will suffice to possibly multiple generators
        _add_daemon_thread(self, self.thread_loader, sess)
        for _ in range(n_threads):
            _add_daemon_thread(self, self.thread_generator, sess)
        for thread in self.threads:
            thread.start()
        return self.threads
from __future__ import division
from __future__ import print_function
import os, sys
import random
import re
import threading
import librosa
import pretty_midi
import numpy as np
import tensorflow as tf
sys.path.append(os.path.join(os.getcwd(), os.pardir))
from utils import find_files, roll_encode, roll_decode, get_roll_index
def randomize_files(files):
    '''Yield len(files) entries drawn uniformly at random, with
    replacement, from `files` (a lazy shuffling-with-repetition pass).
    '''
    for _ in files:
        chosen = random.randint(0, (len(files) - 1))
        yield files[chosen]
def load_piece_data(directory,
                    audio_sr,
                    velocity,
                    fac,
                    valid=True):
    '''Loader that reads tune from directory and yields audio
    waveform and encoded piano roll as tuple of 3 arrays:
    (W, T, I). If more audio files represent single midi file,
    one is chosen randomly.

    Args:
        directory: path searched for '*.mid' files and their matching
            '*.wav' renditions.
        audio_sr: audio sample rate; the midi roll is sampled at
            audio_sr / fac.
        velocity: if falsy, the roll is binarized to note on/off;
            otherwise velocities are kept, scaled to <0;1>.
        fac: compression factor between audio rate and roll rate.
        valid: when True, iterate files in random order with
            replacement (via randomize_files); when False, iterate each
            file exactly once in find_files order.
    '''
    midi_files = find_files(directory, '*.mid')
    randomized_midi_files = randomize_files(midi_files)
    for midi_filename in randomized_midi_files if valid else midi_files:
        # load piano roll from midi file
        proll = pretty_midi.PrettyMIDI(
            midi_filename).get_piano_roll(fs=int(audio_sr/fac))
        proll /= 127 # velocity to <0;1>
        if not velocity:
            proll[proll > 0] = 1
        # encode piano roll
        table, indices = roll_encode(proll, fac)
        # add 0-roll if not present (we will need it later for padding)
        if get_roll_index(table, np.zeros(128)).shape[0] == 0:
            table = np.concatenate((table, np.zeros(shape=(1, 128))))
        # get respective audio file names and choose 1 randomly
        base = midi_filename.rsplit('/', 1)[-1]
        # strip the trailing '.mid' suffix to build the wav glob
        base = re.sub(r'(.*)%s$' % re.escape('.mid'), r'\1', base)
        audio_files = find_files(directory, base+'*.wav')
        if not audio_files:
            raise ValueError('No files found for \'{}\'.'.format(base+'*.wav'))
        audio_filename = random.choice(audio_files)
        # load audio waveform
        audio, _ = librosa.load(audio_filename, sr=audio_sr, mono=True)
        yield audio, table, indices
def sequence_samples(audio, table, indices, reader):
    '''Generator that yields batch samples as a tuple
    of numpy arrays (wave, roll) with shapes:
    wave.shape = (sample_size + receptive_field - 1, 1)
    roll.shape = (sample_size, 128)
    where sample_size is length of slice to which piece
    is cut. Last slice of a tune may have shape with
    length < sample_size.

    Args:
        audio: 1-D waveform of one tune.
        table: roll table from roll_encode; must contain an all-zero
            row (see load_piece_data), used as the roll padding value.
        indices: per-audio-sample indices into `table`.
        reader: object with `receptive_field` and `sample_size`
            attributes (e.g. a WavMidReader instance).
    '''
    # Split the receptive-field context around the current sample;
    # ceil/floor give the left side the extra sample for even fields.
    left = np.ceil((reader.receptive_field - 1) / 2).astype(int)
    right = np.floor((reader.receptive_field - 1) / 2).astype(int)
    # Ensure len(audio) == len(indices)
    if (audio.shape[0] < indices.shape[0]):
        # Cut piano roll down to length of audio sequence
        indices = indices[:audio.shape[0]]
    else:
        # Pad piano roll up to length of audio sequence, since this is
        # usually longer due to sustain of last notes; the pad value is
        # the table index of the all-zero roll.
        indices = np.pad(indices,
                         [0, audio.shape[0] - indices.shape[0]],
                         'constant',
                         constant_values=get_roll_index(table, np.zeros(128))[0])
    # Pad audio sequence from left and right to provide context
    # to each estimate, receptive field is therefore centered
    # to time sample being calculated
    audio = np.pad(audio,
                   [left, right],
                   'constant').reshape(-1, 1)
    if reader.sample_size:
        # Cut tune into sequences of size sample_size +
        # receptive_field - 1 with overlap = receptive_field - 1
        # NOTE(review): a trailing remainder of len(audio) <=
        # receptive_field (at most one real sample plus padding) is
        # silently dropped here -- confirm this is intended.
        while len(audio) > reader.receptive_field:
            wave = audio[:(left + reader.sample_size + right), :]
            roll = roll_decode(table, indices[:reader.sample_size])
            yield wave, roll
            audio = audio[reader.sample_size:, :]
            indices = indices[reader.sample_size:]
    else:
        yield audio, roll_decode(table, indices)
class WavMidReader(object):
    '''Generic background music data reader that preprocesses audio files
    and enqueues them into a TensorFlow queue.

    Two queue stages are used:
      * a 'tune' FIFO queue holding whole preprocessed pieces as
        (waveform, roll table, roll indices) triples, filled by
        `thread_loader`;
      * a 'batch' padding FIFO queue holding (wave, roll) samples cut
        from those pieces, filled by `thread_generator`.
    '''

    def __init__(self,
                 data_dir,
                 coord,
                 audio_sample_rate,
                 receptive_field,
                 velocity,
                 sample_size,
                 queues_size,
                 compress_fac=10):
        '''Build queues/placeholders and validate the dataset directory.

        Args:
            data_dir: directory with '*.mid' files and matching '*.wav'
                renditions.
            coord: coordinator object (tf.train.Coordinator-like) used
                to stop the worker threads.
            audio_sample_rate: sample rate used when loading audio.
            receptive_field: model receptive field in samples; used to
                pad/cut sequences so each estimate gets centered context.
            velocity: keep midi velocities if true, else binarize rolls.
            sample_size: slice length tunes are cut into; falsy keeps
                whole tunes.
            queues_size: two-element sequence with the capacities of the
                tune and batch queues.
            compress_fac: compression factor between the audio rate and
                the piano roll rate.

        Raises:
            ValueError: if `data_dir` contains no midi files.
        '''
        self.data_dir = data_dir
        self.audio_sample_rate = audio_sample_rate
        self.compress_fac = compress_fac
        self.coord = coord
        self.receptive_field = receptive_field
        self.velocity = velocity
        self.sample_size = sample_size
        self.threads = []
        # Init queues and placeholders.
        self.queues = {'tune': {}, 'batch': {}}
        self.audio_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,))
        self.table_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,
                                                                         128))
        self.indices_placeholder = tf.placeholder(dtype=tf.int32, shape=(None,))
        self.queues['tune']['Q'] = tf.FIFOQueue(queues_size[0],
                                                ['float32', 'float32', 'int32'])
        self.queues['tune']['enQ'] = self.queues['tune']['Q'].enqueue(
            [self.audio_placeholder,
             self.table_placeholder,
             self.indices_placeholder])
        self.wave_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, 1))
        self.roll_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, 128))
        self.queues['batch']['Q'] = tf.PaddingFIFOQueue(
            queues_size[1], ['float32', 'float32'],
            shapes=[(None, 1), (None, 128)])
        self.queues['batch']['enQ'] = self.queues['batch']['Q'].enqueue(
            [self.wave_placeholder, self.roll_placeholder])
        # BUG FIX: the progress counter is bookkeeping state, not a model
        # parameter -- it must not be collected by optimizers, so create
        # it with trainable=False (was True).
        self.file_counter = tf.Variable(0, trainable=False)
        self.increment_file_counter = tf.assign(
            self.file_counter, self.file_counter+1)
        files = find_files(data_dir, '*.mid')
        if not files:
            raise ValueError('No midi files found in \'{}\'.'.format(data_dir))

    def dequeue(self, num_elements):
        '''Return a TF op that dequeues `num_elements` (wave, roll)
        samples from the batch queue.
        '''
        output = self.queues['batch']['Q'].dequeue_many(num_elements)
        return output

    def thread_loader(self, sess):
        '''Worker loop: repeatedly load random tunes from data_dir and
        enqueue them on the tune queue until the coordinator stops us.
        '''
        stop = False
        # Count tune data files
        n_midi_files = len(find_files(self.data_dir, '*.mid'))
        if n_midi_files == 0:
            # BUG FIX: the original referenced an undefined name
            # `directory` here, raising NameError instead of ValueError.
            raise ValueError('No files found for \'{}\'.'.format(
                self.data_dir+'/*.mid'))
        one_percent = int(np.ceil(n_midi_files/100))
        print('files length: {}'.format(n_midi_files))
        # Go through the dataset repeatedly until stopped
        while not stop:
            # Randomly iterate over files and fetch tune data
            file_iterator = load_piece_data(self.data_dir,
                                            self.audio_sample_rate,
                                            self.velocity,
                                            self.compress_fac)
            for audio, table, indices in file_iterator:
                sess.run(self.queues['tune']['enQ'],
                         feed_dict={self.audio_placeholder: audio,
                                    self.table_placeholder: table,
                                    self.indices_placeholder: indices})
                # Track and report progress
                sess.run(self.increment_file_counter)
                file_counter = sess.run(self.file_counter)
                if file_counter % one_percent == 0:
                    print('Training progress: {:.02f} epochs '
                          '(file {} of {})'.format(file_counter/n_midi_files,
                                                   file_counter, n_midi_files))
                if self.coord.should_stop():
                    stop = True
                    break

    def thread_generator(self, sess):
        '''Worker loop: dequeue tunes, cut them into samples and enqueue
        the samples on the batch queue until the coordinator stops us.
        '''
        stop = False
        # Go through the dataset repeatedly until stopped
        while not stop:
            # Dequeue tune data
            audio, table, indices = sess.run(self.queues['tune']['Q'].dequeue())
            # Fetch samples from the tune
            sample_iterator = sequence_samples(audio, table, indices, self)
            for wave, roll in sample_iterator:
                sess.run(self.queues['batch']['enQ'],
                         feed_dict={self.wave_placeholder: wave,
                                    self.roll_placeholder: roll})
                if self.coord.should_stop():
                    stop = True
                    break

    def single_pass(self, sess, data_dir):
        '''Yield (wave, roll) samples for every tune in `data_dir`
        exactly once, in find_files order (no shuffling).
        '''
        for audio, table, indices in load_piece_data(data_dir,
                                                     self.audio_sample_rate,
                                                     self.velocity,
                                                     self.compress_fac,
                                                     valid=False):
            if self.coord.should_stop():
                break
            for wave, roll in sequence_samples(audio,
                                               table,
                                               indices,
                                               self):
                if self.coord.should_stop():
                    break
                # Add a leading batch dimension of size 1.
                wave = np.expand_dims(wave, axis=0)
                yield wave, roll

    def start_threads(self, sess, n_threads=1):
        '''Start one loader thread and `n_threads` generator threads as
        daemons and return the list of started threads.
        '''
        def _add_daemon_thread(reader, thread_func, sess):
            thread = threading.Thread(target=thread_func, args=(sess,))
            thread.daemon = True  # Thread will close when parent quits.
            reader.threads.append(thread)
        # Single loader will suffice to possibly multiple generators
        _add_daemon_thread(self, self.thread_loader, sess)
        for _ in range(n_threads):
            _add_daemon_thread(self, self.thread_generator, sess)
        for thread in self.threads:
            thread.start()
        return self.threads
import numpy as np
_EXTREMUM_SEARCH_NUM_POINTS = 5
_EXTREMUM_SEARCH_EPSILON = 0.5 / 86400. # A half-second fraction of a Julian day
def find_extremum(t_0, t_1, extremum, function, epsilon=_EXTREMUM_SEARCH_EPSILON,
num=_EXTREMUM_SEARCH_NUM_POINTS):
"""Find a global extremum for a function with a domain of skyfield.api.Time
for values between t_0 and t_1.
Assumes well-behaved functions with a global extremum and no other
local extrema.
Args:
t_0: Start time for period to be searched.
t_1: End time for period to be searched.
extremum: function that computes an extremum.
function: a function that takes time as its single argument, returning
a numeric value.
epsilon: a float defining the distance less than which two times will
be treated as equal.
Returns:
Two values, the first being the time of the extremum, and the second
being the value of the extremum.
"""
timescale, jd_0, jd_1 = t_0.ts, t_0.tt, t_1.tt
while jd_1 - jd_0 > epsilon:
date = np.linspace(jd_0, jd_1, 5)
time = timescale.tt(jd=date)
i = extremum(function(time))
jd_0, jd_1 = date[np.max([0, i-1])], date[np.min([i+1, num-1])]
return timescale.tt(jd=jd_0), function(timescale.tt(jd=jd_0))
def find_minimum(t_0, t_1, function, epsilon=_EXTREMUM_SEARCH_EPSILON,
                 num=_EXTREMUM_SEARCH_NUM_POINTS):
    """Convenience wrapper around find_extremum locating the global
    minimum of `function` between t_0 and t_1.

    Args:
        t_0: Start time for period to be searched.
        t_1: End time for period to be searched.
        function: a function that takes time as its single argument, returning
            a numeric value.
        epsilon: a float defining the distance less than which two times will
            be treated as equal.
        num: number of sample points per search step.

    Returns:
        Two values: the time of the minimum and the value of the minimum.
    """
    return find_extremum(t_0, t_1, np.argmin, function,
                         epsilon=epsilon, num=num)
def find_maximum(t_0, t_1, function, epsilon=_EXTREMUM_SEARCH_EPSILON,
                 num=_EXTREMUM_SEARCH_NUM_POINTS):
    """Find a global maximum for a function with a domain of skyfield.api.Time
    for values between t_0 and t_1.

    Args:
        t_0: Start time for period to be searched.
        t_1: End time for period to be searched.
        function: a function that takes time as its single argument, returning
            a numeric value.
        epsilon: a float defining the distance less than which two times will
            be treated as equal.
        num: number of sample points per search step.

    Returns:
        Two values, the first being the time of the maximum, and the second
        being the value of the maximum.
    """
    return find_extremum(t_0, t_1, np.argmax, function, epsilon, num)
_EXTREMUM_SEARCH_NUM_POINTS = 5
_EXTREMUM_SEARCH_EPSILON = 0.5 / 86400. # A half-second fraction of a Julian day
def find_extremum(t_0, t_1, extremum, function, epsilon=_EXTREMUM_SEARCH_EPSILON,
num=_EXTREMUM_SEARCH_NUM_POINTS):
"""Find a global extremum for a function with a domain of skyfield.api.Time
for values between t_0 and t_1.
Assumes well-behaved functions with a global extremum and no other
local extrema.
Args:
t_0: Start time for period to be searched.
t_1: End time for period to be searched.
extremum: function that computes an extremum.
function: a function that takes time as its single argument, returning
a numeric value.
epsilon: a float defining the distance less than which two times will
be treated as equal.
Returns:
Two values, the first being the time of the extremum, and the second
being the value of the extremum.
"""
timescale, jd_0, jd_1 = t_0.ts, t_0.tt, t_1.tt
while jd_1 - jd_0 > epsilon:
date = np.linspace(jd_0, jd_1, 5)
time = timescale.tt(jd=date)
i = extremum(function(time))
jd_0, jd_1 = date[np.max([0, i-1])], date[np.min([i+1, num-1])]
return timescale.tt(jd=jd_0), function(timescale.tt(jd=jd_0))
def find_minimum(t_0, t_1, function, epsilon=_EXTREMUM_SEARCH_EPSILON,
                 num=_EXTREMUM_SEARCH_NUM_POINTS):
    """Convenience wrapper around find_extremum locating the global
    minimum of `function` between t_0 and t_1.

    Args:
        t_0: Start time for period to be searched.
        t_1: End time for period to be searched.
        function: a function that takes time as its single argument, returning
            a numeric value.
        epsilon: a float defining the distance less than which two times will
            be treated as equal.
        num: number of sample points per search step.

    Returns:
        Two values: the time of the minimum and the value of the minimum.
    """
    return find_extremum(t_0, t_1, np.argmin, function,
                         epsilon=epsilon, num=num)
def find_maximum(t_0, t_1, function, epsilon=_EXTREMUM_SEARCH_EPSILON,
                 num=_EXTREMUM_SEARCH_NUM_POINTS):
    """Find a global maximum for a function with a domain of skyfield.api.Time
    for values between t_0 and t_1.

    Args:
        t_0: Start time for period to be searched.
        t_1: End time for period to be searched.
        function: a function that takes time as its single argument, returning
            a numeric value.
        epsilon: a float defining the distance less than which two times will
            be treated as equal.
        num: number of sample points per search step.

    Returns:
        Two values, the first being the time of the maximum, and the second
        being the value of the maximum.
    """
    return find_extremum(t_0, t_1, np.argmax, function, epsilon, num)
from unittest import TestCase
from link_parser import LinkParser
valid_url = LinkParser.valid_url
normalize_url = LinkParser.normalize_url
extract_name = LinkParser.extract_name
class Tests(TestCase):
    """Tests for LinkParser's URL classification and normalization."""

    def test_valid_url(self):
        # Article URLs in the simple.wikipedia.org/wiki/ namespace are valid.
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Main_Page"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Weather"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Blu-ray"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Family_(biology)"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/BAFTA_Academy_Fellowship_Award#cite_note-off-6"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/A.S._Fortis_Trani"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Janet.#mw-head"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Ender%27s_Game"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/ISO_3166-2:BR"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/List_of_record_labels:_I%E2%80%93Q"))
        # Other hosts, special pages, files and categories are rejected.
        self.assertFalse(valid_url("https://commons.wikimedia.org/wiki/Main_Page"))
        self.assertFalse(
            valid_url("https://simple.wikipedia.org/w/index.php?title=Special:UserLogin&returnto=Main+Page"))
        self.assertFalse(valid_url("http://en.wikiversity.org/?uselang=mk"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/Special:RecentChangesLinked/Summer"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/File:Science-symbol-2.svg"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/Category:All_articles_with_dead_external_links"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/Wikipedia:Simple_start"))

    def test_normalize_url(self):
        # Normalization strips subpages and fragments, per the cases below.
        self.assertEqual(normalize_url("https://simple.wikipedia.org/wiki/Main_Page"),
                         "https://simple.wikipedia.org/wiki/Main_Page")
        self.assertEqual(normalize_url("https://simple.wikipedia.org/wiki/Main_Page/Something"),
                         "https://simple.wikipedia.org/wiki/Main_Page")
        self.assertEqual(normalize_url("https://simple.wikipedia.org/wiki/Janet.#mw-head"),
                         "https://simple.wikipedia.org/wiki/Janet.")

    def test_extract_name(self):
        # extract_name returns the article title portion of the URL.
        self.assertEqual(extract_name("https://simple.wikipedia.org/wiki/Main_Page"), "Main_Page")
from link_parser import LinkParser
valid_url = LinkParser.valid_url
normalize_url = LinkParser.normalize_url
extract_name = LinkParser.extract_name
class Tests(TestCase):
    """Tests for LinkParser's URL classification and normalization."""

    def test_valid_url(self):
        # Article URLs in the simple.wikipedia.org/wiki/ namespace are valid.
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Main_Page"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Weather"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Blu-ray"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Family_(biology)"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/BAFTA_Academy_Fellowship_Award#cite_note-off-6"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/A.S._Fortis_Trani"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Janet.#mw-head"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/Ender%27s_Game"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/ISO_3166-2:BR"))
        self.assertTrue(valid_url("https://simple.wikipedia.org/wiki/List_of_record_labels:_I%E2%80%93Q"))
        # Other hosts, special pages, files and categories are rejected.
        self.assertFalse(valid_url("https://commons.wikimedia.org/wiki/Main_Page"))
        self.assertFalse(
            valid_url("https://simple.wikipedia.org/w/index.php?title=Special:UserLogin&returnto=Main+Page"))
        self.assertFalse(valid_url("http://en.wikiversity.org/?uselang=mk"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/Special:RecentChangesLinked/Summer"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/File:Science-symbol-2.svg"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/Category:All_articles_with_dead_external_links"))
        self.assertFalse(valid_url("https://simple.wikipedia.org/wiki/Wikipedia:Simple_start"))

    def test_normalize_url(self):
        # Normalization strips subpages and fragments, per the cases below.
        self.assertEqual(normalize_url("https://simple.wikipedia.org/wiki/Main_Page"),
                         "https://simple.wikipedia.org/wiki/Main_Page")
        self.assertEqual(normalize_url("https://simple.wikipedia.org/wiki/Main_Page/Something"),
                         "https://simple.wikipedia.org/wiki/Main_Page")
        self.assertEqual(normalize_url("https://simple.wikipedia.org/wiki/Janet.#mw-head"),
                         "https://simple.wikipedia.org/wiki/Janet.")

    def test_extract_name(self):
        # extract_name returns the article title portion of the URL.
        self.assertEqual(extract_name("https://simple.wikipedia.org/wiki/Main_Page"), "Main_Page")
import json
""" Module for Losant API DataTableRows wrapper class """
# pylint: disable=C0301
class DataTableRows(object):
""" Class containing all the actions for the Data Table Rows Resource """
    def __init__(self, client):
        # `client` is the Losant API client object used to perform the
        # actual HTTP requests for each action in this resource.
        self.client = client
def delete(self, **kwargs):
"""
Delete rows from a data table
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Application, all.Organization, all.User, dataTableRows.*, or dataTableRows.delete.
Parameters:
* {string} applicationId - ID associated with the application
* {string} dataTableId - ID associated with the data table
* {hash} query - Query to apply to filter the data table (https://api.losant.com/#/definitions/advancedQuery)
* {string} limit - Limit number of rows to delete from data table
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - If request successfully deletes a set of Data Table rows (https://api.losant.com/#/definitions/dataTableRowsDelete)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if data table was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "applicationId" in kwargs:
path_params["applicationId"] = kwargs["applicationId"]
if "dataTableId" in kwargs:
path_params["dataTableId"] = kwargs["dataTableId"]
if "query" in kwargs:
body = kwargs["query"]
if "limit" in kwargs:
query_params["limit"] = kwargs["limit"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/applications/{applicationId}/data-tables/{dataTableId}/rows/delete".format(**path_params)
return self.client.request("POST", path, params=query_params, headers=headers, body=body)
def export(self, **kwargs):
"""
Request an export of the data table's data
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Application, all.Application.read, all.Organization, all.Organization.read, all.User, all.User.read, dataTableRows.*, or dataTableRows.export.
Parameters:
* {string} applicationId - ID associated with the application
* {string} dataTableId - ID associated with the data table
* {hash} exportData - Object containing export specifications (https://api.losant.com/#/definitions/dataTableRowsExport)
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - If request was successfully queued (https://api.losant.com/#/definitions/success)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if data table was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "applicationId" in kwargs:
path_params["applicationId"] = kwargs["applicationId"]
if "dataTableId" in kwargs:
path_params["dataTableId"] = kwargs["dataTableId"]
if "exportData" in kwargs:
body = kwargs["exportData"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/applications/{applicationId}/data-tables/{dataTableId}/rows/export".format(**path_params)
return self.client.request("POST", path, params=query_params, headers=headers, body=body)
def get(self, **kwargs):
"""
Returns the rows for a data table
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Application, all.Application.cli, all.Application.read, all.Organization, all.Organization.read, all.User, all.User.cli, all.User.read, dataTableRows.*, or dataTableRows.get.
Parameters:
* {string} applicationId - ID associated with the application
* {string} dataTableId - ID associated with the data table
* {string} sortColumn - Column to sort the rows by
* {string} sortDirection - Direction to sort the rows by. Accepted values are: asc, desc
* {string} limit - How many rows to return
* {string} offset - How many rows to skip
* {string} includeFields - Comma-separated list of fields to include in resulting rows. When not provided, returns all fields.
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - Collection of data table rows (https://api.losant.com/#/definitions/dataTableRows)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if data table was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "applicationId" in kwargs:
path_params["applicationId"] = kwargs["applicationId"]
if "dataTableId" in kwargs:
path_params["dataTableId"] = kwargs["dataTableId"]
if "sortColumn" in kwargs:
query_params["sortColumn"] = kwargs["sortColumn"]
if "sortDirection" in kwargs:
query_params["sortDirection"] = kwargs["sortDirection"]
if "limit" in kwargs:
query_params["limit"] = kwargs["limit"]
if "offset" in kwargs:
query_params["offset"] = kwargs["offset"]
if "includeFields" in kwargs:
query_params["includeFields"] = kwargs["includeFields"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/applications/{applicationId}/data-tables/{dataTableId}/rows".format(**path_params)
return self.client.request("GET", path, params=query_params, headers=headers, body=body)
def post(self, **kwargs):
"""
Inserts a new row(s) into a data table
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Application, all.Organization, all.User, dataTableRows.*, or dataTableRows.post.
Parameters:
* {string} applicationId - ID associated with the application
* {string} dataTableId - ID associated with the data table
* {hash} dataTableRow - The row(s) to insert (https://api.losant.com/#/definitions/dataTableRowInsert)
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 201 - Successfully created data table row, or bulk insert count (https://api.losant.com/#/definitions/dataTableRowInsertResult)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if data table was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "applicationId" in kwargs:
path_params["applicationId"] = kwargs["applicationId"]
if "dataTableId" in kwargs:
path_params["dataTableId"] = kwargs["dataTableId"]
if "dataTableRow" in kwargs:
body = kwargs["dataTableRow"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/applications/{applicationId}/data-tables/{dataTableId}/rows".format(**path_params)
return self.client.request("POST", path, params=query_params, headers=headers, body=body)
def query(self, **kwargs):
    """
    Queries for rows from a data table using an advanced query body.

    Authentication: token must carry one of all.Application,
    all.Application.read, all.Organization, all.Organization.read,
    all.User, all.User.read, dataTableRows.*, or dataTableRows.query.

    Parameters:
    *  {string} applicationId - ID associated with the application
    *  {string} dataTableId - ID associated with the data table
    *  {string} sortColumn - Column to sort the rows by
    *  {string} sortDirection - Direction to sort the rows by. Accepted values are: asc, desc
    *  {string} limit - How many rows to return
    *  {string} offset - How many rows to skip
    *  {string} includeFields - Comma-separated list of fields to include in resulting rows. When not provided, returns all fields.
    *  {hash} query - Query to apply to filter the data table (https://api.losant.com/#/definitions/advancedQuery)
    *  {string} losantdomain - Domain scope of request (rarely needed)
    *  {boolean} _actions - Return resource actions in response
    *  {boolean} _links - Return resource link in response
    *  {boolean} _embedded - Return embedded resources in response

    Responses:
    *  200 - Collection of data table rows (https://api.losant.com/#/definitions/dataTableRows)

    Errors:
    *  400 - Error if malformed request (https://api.losant.com/#/definitions/error)
    *  404 - Error if data table was not found (https://api.losant.com/#/definitions/error)
    """
    # API default values for the presentation flags; caller kwargs override.
    query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
    path_params = {k: kwargs[k] for k in ("applicationId", "dataTableId") if k in kwargs}
    headers = {}
    if "losantdomain" in kwargs:
        headers["losantdomain"] = kwargs["losantdomain"]
    for key in ("sortColumn", "sortDirection", "limit", "offset", "includeFields",
                "_actions", "_links", "_embedded"):
        if key in kwargs:
            query_params[key] = kwargs[key]
    # The advanced-query filter travels in the request body, not the query string.
    body = kwargs.get("query")
    path = "/applications/{applicationId}/data-tables/{dataTableId}/rows/query".format(**path_params)
    return self.client.request("POST", path, params=query_params, headers=headers, body=body)
def truncate(self, **kwargs):
    """
    Deletes every row in the data table.

    Does NOT fire workflow data-table-deletion triggers.

    Authentication: token must carry one of all.Application,
    all.Organization, all.User, dataTableRows.*, or dataTableRows.truncate.

    Parameters:
    *  {string} applicationId - ID associated with the application
    *  {string} dataTableId - ID associated with the data table
    *  {string} losantdomain - Domain scope of request (rarely needed)
    *  {boolean} _actions - Return resource actions in response
    *  {boolean} _links - Return resource link in response
    *  {boolean} _embedded - Return embedded resources in response

    Responses:
    *  200 - If request successfully deleted **all** rows in the data table (https://api.losant.com/#/definitions/success)

    Errors:
    *  400 - Error if malformed request (https://api.losant.com/#/definitions/error)
    *  404 - Error if data table was not found (https://api.losant.com/#/definitions/error)
    """
    # API default values for the presentation flags; caller kwargs override.
    query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
    path_params = {k: kwargs[k] for k in ("applicationId", "dataTableId") if k in kwargs}
    headers = {}
    if "losantdomain" in kwargs:
        headers["losantdomain"] = kwargs["losantdomain"]
    for flag in ("_actions", "_links", "_embedded"):
        if flag in kwargs:
            query_params[flag] = kwargs[flag]
    path = "/applications/{applicationId}/data-tables/{dataTableId}/rows/truncate".format(**path_params)
    # Truncate takes no request body.
    return self.client.request("POST", path, params=query_params, headers=headers, body=None)
""" Module for Losant API DataTableRows wrapper class """
# pylint: disable=C0301
class DataTableRows(object):
    """Wrapper exposing the Losant Data Table Rows API actions.

    Every public method takes its parameters as keyword arguments and
    forwards the assembled request to the configured API client, which
    must expose ``request(method, path, params=, headers=, body=)``.

    The six generated actions previously each repeated the same
    kwargs-unpacking boilerplate; it is factored into ``_build_request``.
    """

    # Base route shared by all row actions; sub-actions append a suffix.
    _ROWS_PATH = "/applications/{applicationId}/data-tables/{dataTableId}/rows"

    def __init__(self, client):
        # client: the Losant API client used to issue HTTP requests.
        self.client = client

    def _build_request(self, kwargs, query_keys=(), body_key=None):
        """Split *kwargs* into (query_params, path_params, headers, body).

        * ``applicationId`` / ``dataTableId`` become URL path parameters.
        * Names in *query_keys*, plus the ``_actions``/``_links``/``_embedded``
          flags, become query-string parameters (the flags carry the API's
          defaults when the caller omits them).
        * *body_key*, when given and present in kwargs, selects the body.
        * ``losantdomain`` is sent as a request header.
        """
        query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
        path_params = {}
        headers = {}
        body = None
        for key in ("applicationId", "dataTableId"):
            if key in kwargs:
                path_params[key] = kwargs[key]
        for key in query_keys:
            if key in kwargs:
                query_params[key] = kwargs[key]
        if body_key is not None and body_key in kwargs:
            body = kwargs[body_key]
        if "losantdomain" in kwargs:
            headers["losantdomain"] = kwargs["losantdomain"]
        for key in ("_actions", "_links", "_embedded"):
            if key in kwargs:
                query_params[key] = kwargs[key]
        return query_params, path_params, headers, body

    def delete(self, **kwargs):
        """
        Delete rows from a data table.

        Token scopes: all.Application, all.Organization, all.User,
        dataTableRows.*, or dataTableRows.delete.

        Parameters:
        *  {string} applicationId - ID associated with the application
        *  {string} dataTableId - ID associated with the data table
        *  {hash} query - Query to apply to filter the data table (https://api.losant.com/#/definitions/advancedQuery)
        *  {string} limit - Limit number of rows to delete from data table
        *  {string} losantdomain - Domain scope of request (rarely needed)
        *  {boolean} _actions / _links / _embedded - response shaping flags

        Responses:
        *  200 - If request successfully deletes a set of Data Table rows (https://api.losant.com/#/definitions/dataTableRowsDelete)

        Errors:
        *  400 - malformed request; 404 - data table not found
        """
        query_params, path_params, headers, body = self._build_request(
            kwargs, query_keys=("limit",), body_key="query")
        path = (self._ROWS_PATH + "/delete").format(**path_params)
        return self.client.request("POST", path, params=query_params, headers=headers, body=body)

    def export(self, **kwargs):
        """
        Request an export of the data table's data.

        Token scopes: all.Application, all.Application.read, all.Organization,
        all.Organization.read, all.User, all.User.read, dataTableRows.*, or
        dataTableRows.export.

        Parameters:
        *  {string} applicationId - ID associated with the application
        *  {string} dataTableId - ID associated with the data table
        *  {hash} exportData - Object containing export specifications (https://api.losant.com/#/definitions/dataTableRowsExport)
        *  {string} losantdomain - Domain scope of request (rarely needed)
        *  {boolean} _actions / _links / _embedded - response shaping flags

        Responses:
        *  200 - If request was successfully queued (https://api.losant.com/#/definitions/success)

        Errors:
        *  400 - malformed request; 404 - data table not found
        """
        query_params, path_params, headers, body = self._build_request(
            kwargs, body_key="exportData")
        path = (self._ROWS_PATH + "/export").format(**path_params)
        return self.client.request("POST", path, params=query_params, headers=headers, body=body)

    def get(self, **kwargs):
        """
        Return the rows for a data table.

        Token scopes: all.Application, all.Application.cli, all.Application.read,
        all.Organization, all.Organization.read, all.User, all.User.cli,
        all.User.read, dataTableRows.*, or dataTableRows.get.

        Parameters:
        *  {string} applicationId - ID associated with the application
        *  {string} dataTableId - ID associated with the data table
        *  {string} sortColumn - Column to sort the rows by
        *  {string} sortDirection - Direction to sort the rows by (asc, desc)
        *  {string} limit - How many rows to return
        *  {string} offset - How many rows to skip
        *  {string} includeFields - Comma-separated list of fields to include in resulting rows
        *  {string} losantdomain - Domain scope of request (rarely needed)
        *  {boolean} _actions / _links / _embedded - response shaping flags

        Responses:
        *  200 - Collection of data table rows (https://api.losant.com/#/definitions/dataTableRows)

        Errors:
        *  400 - malformed request; 404 - data table not found
        """
        query_params, path_params, headers, body = self._build_request(
            kwargs,
            query_keys=("sortColumn", "sortDirection", "limit", "offset", "includeFields"))
        path = self._ROWS_PATH.format(**path_params)
        return self.client.request("GET", path, params=query_params, headers=headers, body=body)

    def post(self, **kwargs):
        """
        Insert a new row(s) into a data table.

        Token scopes: all.Application, all.Organization, all.User,
        dataTableRows.*, or dataTableRows.post.

        Parameters:
        *  {string} applicationId - ID associated with the application
        *  {string} dataTableId - ID associated with the data table
        *  {hash} dataTableRow - The row(s) to insert (https://api.losant.com/#/definitions/dataTableRowInsert)
        *  {string} losantdomain - Domain scope of request (rarely needed)
        *  {boolean} _actions / _links / _embedded - response shaping flags

        Responses:
        *  201 - Successfully created data table row, or bulk insert count (https://api.losant.com/#/definitions/dataTableRowInsertResult)

        Errors:
        *  400 - malformed request; 404 - data table not found
        """
        query_params, path_params, headers, body = self._build_request(
            kwargs, body_key="dataTableRow")
        path = self._ROWS_PATH.format(**path_params)
        return self.client.request("POST", path, params=query_params, headers=headers, body=body)

    def query(self, **kwargs):
        """
        Query for rows from a data table using an advanced query body.

        Token scopes: all.Application, all.Application.read, all.Organization,
        all.Organization.read, all.User, all.User.read, dataTableRows.*, or
        dataTableRows.query.

        Parameters:
        *  {string} applicationId - ID associated with the application
        *  {string} dataTableId - ID associated with the data table
        *  {string} sortColumn / sortDirection / limit / offset / includeFields - paging and shaping
        *  {hash} query - Query to apply to filter the data table (https://api.losant.com/#/definitions/advancedQuery)
        *  {string} losantdomain - Domain scope of request (rarely needed)
        *  {boolean} _actions / _links / _embedded - response shaping flags

        Responses:
        *  200 - Collection of data table rows (https://api.losant.com/#/definitions/dataTableRows)

        Errors:
        *  400 - malformed request; 404 - data table not found
        """
        query_params, path_params, headers, body = self._build_request(
            kwargs,
            query_keys=("sortColumn", "sortDirection", "limit", "offset", "includeFields"),
            body_key="query")
        path = (self._ROWS_PATH + "/query").format(**path_params)
        return self.client.request("POST", path, params=query_params, headers=headers, body=body)

    def truncate(self, **kwargs):
        """
        Delete all data in the data table.

        Does NOT fire workflow data-table-deletion triggers.

        Token scopes: all.Application, all.Organization, all.User,
        dataTableRows.*, or dataTableRows.truncate.

        Parameters:
        *  {string} applicationId - ID associated with the application
        *  {string} dataTableId - ID associated with the data table
        *  {string} losantdomain - Domain scope of request (rarely needed)
        *  {boolean} _actions / _links / _embedded - response shaping flags

        Responses:
        *  200 - If request successfully deleted **all** rows in the data table (https://api.losant.com/#/definitions/success)

        Errors:
        *  400 - malformed request; 404 - data table not found
        """
        query_params, path_params, headers, body = self._build_request(kwargs)
        path = (self._ROWS_PATH + "/truncate").format(**path_params)
        return self.client.request("POST", path, params=query_params, headers=headers, body=body)
import os
import yaml
import string
import re
import wx
import wx.adv
from datetime import date, timedelta
from cash_flow.transaction import Transaction
from cash_flow.transaction_store import TransactionStore
from cash_flow.cash_flow import CashFlow
def wxDate2pyDate(wxdate):
    """Convert a wx.DateTime into a datetime.date.

    wx.DateTime months run 0-11 while datetime.date expects 1-12,
    so the month is shifted up by one.
    """
    year, month, day = wxdate.GetYear(), wxdate.GetMonth() + 1, wxdate.GetDay()
    return date(year, month, day)
def pyDate2wxDate(pyDate):
    """Convert a datetime.date into the equivalent wx.DateTime.

    wx.DateTime months run 0-11, so the month is shifted down by one.
    """
    day, month, year = pyDate.day, pyDate.month - 1, pyDate.year
    return wx.DateTime(day, month, year)
class AppSettings():
    """Plain settings holder persisted to YAML by MainFrame.

    Attributes default to: today's date, a '0.00' balance string,
    a 100.00 low-balance warning threshold, and no data file.
    """

    def __init__(self, startDate=None, startBalance=None, warning=None, dataFile=None):
        # Use explicit None checks so falsy-but-valid values pass through.
        self.startDate = date.today() if startDate is None else startDate
        self.startBalance = '0.00' if startBalance is None else startBalance
        self.warning = 100.00 if warning is None else warning
        self.dataFile = "" if dataFile is None else dataFile
class CashFlowDisplay(wx.Panel):
    """Notebook page projecting the account balance over the next 365 days.

    The top row holds a starting-date picker and a starting-balance field;
    below them a wx.ListCtrl lists each day that has transactions, the
    day's closing balance, and the individual transactions for that day.
    """

    def __init__(self, parent, ts, settings):
        super().__init__(parent)
        self.ts = ts  # TransactionStore shared with the rest of the app
        self.settings = settings  # shared AppSettings instance
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        # Controls at top
        self.control_sizer = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, label='Starting Date')
        self.control_sizer.Add(label, 0)
        self.date_picker = wx.adv.DatePickerCtrl(self)
        wxDate = pyDate2wxDate(self.settings.startDate)
        self.date_picker.SetValue(wxDate)
        # Re-render the projection whenever either control changes.
        self.date_picker.Bind(wx.adv.EVT_DATE_CHANGED, self.handleSettingsChange)
        self.control_sizer.Add(self.date_picker, 0)
        label = wx.StaticText(self, label="Starting Balance $")
        self.control_sizer.Add(label, 0)
        self.starting_balance = wx.TextCtrl(self, value=self.settings.startBalance)
        self.starting_balance.Bind(wx.EVT_TEXT, self.handleSettingsChange)
        self.control_sizer.Add(self.starting_balance, 0)
        self.main_sizer.Add(self.control_sizer, 0)
        # List of transactions
        self.list_sizer = wx.BoxSizer(wx.VERTICAL)
        self.main_sizer.Add(self.list_sizer, 0, wx.EXPAND)
        self.SetSizer(self.main_sizer)
        self.updateList()

    def handleSettingsChange(self, event):
        """Rebuild the projection, then mirror control values into settings."""
        self.updateList()
        self.updateSettings()

    def updateList(self):
        """Recompute the cash flow and repopulate the list control."""
        start_date = wxDate2pyDate(self.date_picker.GetValue())
        starting_balance = self.starting_balance.GetValue()
        # Strip everything except digits and the decimal point from user input.
        allow = string.digits + "."
        starting_balance = re.sub('[^%s]' % allow, '', starting_balance)
        cf = CashFlow(start_date, starting_balance, self.ts)
        # Generator yielding (date, balance, transaction list) one day at a time.
        day = cf.getTodaysTransactions()
        self.list_sizer.Clear(delete_windows=True)
        listCtrl = wx.ListCtrl(self, style=wx.LC_REPORT)
        listCtrl.InsertColumn(0, "Date")
        listCtrl.InsertColumn(1, "Balance")
        listCtrl.InsertColumn(2, "Transaction")
        listCtrl.InsertColumn(3, "Amount")
        listCtrl.SetColumnWidth(0, 100)
        listCtrl.SetColumnWidth(1, 100)
        listCtrl.SetColumnWidth(2, 200)
        listCtrl.SetColumnWidth(3, 75)
        # Project one year ahead; only days with activity get rows.
        for i in range(0, 365):
            (d, bal, t_list) = next(day)
            if t_list:
                # Add daily summary
                index = listCtrl.InsertItem(listCtrl.GetItemCount(), str(d))
                listCtrl.SetItem(index, 1, str(bal))
                # Highlight low balances yellow and negative balances red.
                # NOTE(review): assumes bal is numeric-comparable — confirm
                # the type CashFlow yields.
                if bal < self.settings.warning:
                    listCtrl.SetItemBackgroundColour(index, wx.Colour(255, 255, 0))
                if bal < 0:
                    listCtrl.SetItemBackgroundColour(index, wx.Colour(255, 0, 0))
                # Add individual transactions
                for t in t_list:
                    index = listCtrl.InsertItem(listCtrl.GetItemCount(), "")
                    listCtrl.SetItem(index, 2, str(t.description))
                    listCtrl.SetItem(index, 3, str(t.amount))
                # label = f'{d} {t.description} {t.amount} {bal}'
                # txt = wx.StaticText(self, label=label)
                # self.list_sizer.Add(txt,0)
        self.list_sizer.Add(listCtrl, 0, wx.EXPAND)
        self.main_sizer.Layout()

    def updateSettings(self):
        """Copy the current control values into the shared settings object."""
        self.settings.startDate = wxDate2pyDate(self.date_picker.GetValue())
        self.settings.startBalance = self.starting_balance.GetValue()
        # TODO: set warning once control is exposed

    def loadSettings(self):
        """Push values from the shared settings object into the controls."""
        wxDate = pyDate2wxDate(self.settings.startDate)
        self.date_picker.SetValue(wxDate)
        self.starting_balance.SetValue(self.settings.startBalance)
        # TODO: set warning once control is exposed
class TransactionManagement(wx.Panel):
    """Notebook page for creating, editing and deleting transactions.

    Left column: one button per transaction plus a "New Transaction"
    button. Clicking a button opens an EditTransactionPanel on the right.
    """

    def __init__(self, parent, ts, settings):
        super().__init__(parent)
        self.ts = ts  # TransactionStore shared with the rest of the app
        self.settings = settings  # shared AppSettings instance
        self.editPane1 = None  # currently open EditTransactionPanel, if any
        self.transaction_buttons = {}  # Transaction -> wx.Button
        self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.left_side_sizer = wx.BoxSizer(wx.VERTICAL)
        self.t_list_sizer = wx.BoxSizer(wx.VERTICAL)
        self.left_side_sizer.Add(self.t_list_sizer, 0)
        btn = wx.Button(self, label='New Transaction')
        btn.Bind(wx.EVT_BUTTON, self.newTransaction)
        self.left_side_sizer.Add(btn, 0)
        self.main_sizer.Add(self.left_side_sizer, 0)
        self.SetSizer(self.main_sizer)

    def redraw(self):
        """Close any open editor and rebuild the transaction button list."""
        self.clearEditPane()
        self.rebuildTransactionButtons()

    def loadSettings(self):
        """No settings currently apply to this page."""
        pass

    def clearEditPane(self):
        """Remove and destroy the edit pane, if one is showing."""
        # Sizer slot 1 is the edit pane (slot 0 is the left-side column).
        if self.main_sizer.GetItemCount() > 1:
            self.main_sizer.Remove(1)
        if self.editPane1:
            self.editPane1.Destroy()
            self.editPane1 = None
        self.main_sizer.Layout()

    def rebuildTransactionButtons(self):
        """Recreate one button per transaction in the store."""
        self.t_list_sizer.Clear(delete_windows=True)
        self.transaction_buttons = {}
        for t in self.ts.getTransactions():
            self.updateButtonForTransaction(t)
        self.main_sizer.Layout()

    def editTransaction(self, event, trans):
        """Open an edit pane for *trans*, replacing any existing pane."""
        self.clearEditPane()
        self.editPane1 = EditTransactionPanel(self, trans)
        self.main_sizer.Add(self.editPane1, 0)
        self.main_sizer.Layout()

    def newTransaction(self, event):
        """Create a blank Transaction and open it for editing."""
        t = Transaction()
        self.editTransaction(event, t)

    def deleteTransaction(self, trans):
        """Remove *trans* from the store and refresh the button list."""
        self.ts.removeTransactions(trans)
        self.rebuildTransactionButtons()

    def updateButtonForTransaction(self, t):
        """Create or relabel the button for transaction *t*.

        Side effect: if *t* is not yet in the store it is added, which is
        how transactions created via "New Transaction" get persisted.
        """
        label = f'{t.description} {t.amount} {t.start}'
        if t in self.transaction_buttons:
            btn = self.transaction_buttons[t]
            btn.SetLabel(label)
        else:
            btn = wx.Button(self, label=label)
            # trans=t binds the current transaction as a default argument,
            # avoiding the late-binding closure pitfall.
            btn.Bind(wx.EVT_BUTTON, lambda evt, trans=t: self.editTransaction(evt, trans))
            self.t_list_sizer.Add(btn, 0)
            self.transaction_buttons[t] = btn
        if t not in self.ts.getTransactions():
            self.ts.addTransactions(t)
        self.t_list_sizer.Layout()
class EditTransactionPanel(wx.Panel):
    """Form for editing a single Transaction.

    Edits are held in the widgets until Save is pressed; Cancel discards
    them and Reset restores the widget values from the transaction.
    The parent (TransactionManagement) owns pane lifecycle and deletion.
    """

    def __init__(self, parent, trans):
        super().__init__(parent)
        self.parent = parent  # TransactionManagement page that opened us
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.transaction = trans  # the Transaction being edited
        # Description
        self.description = wx.TextCtrl(self)
        self.main_sizer.Add(self.description, 0, wx.EXPAND)
        # Original Start Date
        label = wx.StaticText(self, label='Original Start Date', size=(50, -1))
        self.orig_start = wx.adv.DatePickerCtrl(self)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.orig_start, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Current Start Date
        label = wx.StaticText(self, label='Current Start Date', size=(50, -1))
        self.start = wx.adv.DatePickerCtrl(self)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.start, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Amount
        label = wx.StaticText(self, label='Amount', size=(50, -1))
        self.amount = wx.TextCtrl(self)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.amount, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Frequency (one of Transaction.INTERVALS)
        label = wx.StaticText(self, label='Frequency', size=(50, -1))
        self.frequency = wx.Choice(self, choices=Transaction.INTERVALS)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.frequency, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Scheduled
        self.scheduled = wx.CheckBox(self, label='Scheduled', style=wx.CHK_2STATE | wx.ALIGN_RIGHT)
        self.main_sizer.Add(self.scheduled, 0)
        # Cleared
        self.cleared = wx.CheckBox(self, label='Cleared', style=wx.CHK_2STATE | wx.ALIGN_RIGHT)
        self.main_sizer.Add(self.cleared, 0)
        # Action Buttons
        action_button_sizer = wx.BoxSizer()
        cancel = wx.Button(self, label="Cancel")
        cancel.Bind(wx.EVT_BUTTON, self.cancelEdit)
        action_button_sizer.Add(cancel, 0)
        reset = wx.Button(self, label="Reset")
        reset.Bind(wx.EVT_BUTTON, self.resetEdit)
        action_button_sizer.Add(reset, 0)
        save = wx.Button(self, label="Save")
        save.Bind(wx.EVT_BUTTON, self.saveEdit)
        action_button_sizer.Add(save, 0)
        self.main_sizer.Add(action_button_sizer, 0)
        # Delete Button (kept separate from the edit actions)
        delete_button_sizer = wx.BoxSizer()
        delete = wx.Button(self, label="Delete")
        delete.Bind(wx.EVT_BUTTON, self.deleteTransaction)
        delete_button_sizer.Add(delete, 0)
        self.main_sizer.Add(delete_button_sizer, 0)
        self.setValues()
        self.SetSizer(self.main_sizer)

    def setValues(self):
        """Load every widget from the current transaction state."""
        self.description.SetValue(self.transaction.description)
        self.orig_start.SetValue(pyDate2wxDate(self.transaction.original_start))
        self.start.SetValue(pyDate2wxDate(self.transaction.start))
        self.amount.SetValue(str(self.transaction.amount))
        self.frequency.SetSelection(Transaction.INTERVALS.index(self.transaction.frequency))
        self.scheduled.SetValue(self.transaction.scheduled)
        self.cleared.SetValue(self.transaction.cleared)

    def cancelEdit(self, event):
        """Discard pending edits and close the pane."""
        self.parent.clearEditPane()

    def resetEdit(self, event):
        """Revert the widgets to the transaction's stored values."""
        self.setValues()

    def saveEdit(self, event):
        """Write widget values back to the transaction and close the pane."""
        self.transaction.description = self.description.GetValue()
        self.transaction.original_start = wxDate2pyDate(self.orig_start.GetValue())
        self.transaction.start = wxDate2pyDate(self.start.GetValue())
        # updateAmount presumably parses/validates the text — confirm in Transaction.
        self.transaction.updateAmount(self.amount.GetValue())
        self.transaction.frequency = Transaction.INTERVALS[self.frequency.GetCurrentSelection()]
        self.transaction.scheduled = self.scheduled.GetValue()
        self.transaction.cleared = self.cleared.GetValue()
        self.parent.updateButtonForTransaction(self.transaction)
        self.parent.clearEditPane()

    def deleteTransaction(self, event):
        """Delete the transaction via the parent and close the pane."""
        self.parent.deleteTransaction(self.transaction)
        self.parent.clearEditPane()
class MainFrame(wx.Frame):
    """Top-level window: a notebook with the management and cash-flow pages.

    Persists UI settings to a YAML file under ./data and loads/saves the
    transaction store from the data file named in those settings.
    """

    WILDCARD = "YAML (*.yml)|*.yml|" \
               "All files (*.*)|*.*"

    def __init__(self):
        super().__init__(parent=None, title='Cash Flow Calculator')
        self.settingsFile = os.getcwd()+'/data/'+'.cash_flow_settings.yml'
        self.settings = AppSettings()
        self.ts = TransactionStore()
        self.defaultDir = os.getcwd()+'/data'
        self.notebook = wx.Notebook(self)
        self.notebook.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.handleNotebookChange)
        self.transactionManagement = TransactionManagement(self.notebook, self.ts, self.settings)
        self.notebook.AddPage(self.transactionManagement, "Transaction Management")
        self.cashFlowDisplay = CashFlowDisplay(self.notebook, self.ts, self.settings)
        self.notebook.AddPage(self.cashFlowDisplay, "Cash Flow")
        self.SetInitialSize(wx.Size(650, 650))
        self.create_menu()
        self.loadSettings()
        self.loadTransactions(self.settings.dataFile)
        self.Show()

    def handleNotebookChange(self, event):
        """Refresh both pages when the user switches tabs."""
        self.updateChildren()
        event.Skip()

    def updateChildren(self):
        """Push current settings and data into both notebook pages."""
        self.transactionManagement.loadSettings()
        self.transactionManagement.redraw()
        self.cashFlowDisplay.loadSettings()
        self.cashFlowDisplay.updateList()

    def create_menu(self):
        """Build the File menu (New/Open/Save/Save As) and bind handlers."""
        menu_bar = wx.MenuBar()
        file_menu = wx.Menu()
        menu_spec = (
            ("New File", "Create a new file", self.on_new_file),
            ("Open...", "Open a file", self.on_open_file),
            ("Save", "Save to current file", self.on_save),
            ("Save As", "Save file with new name", self.on_save_as),
        )
        for label, help_text, handler in menu_spec:
            item = file_menu.Append(wx.ID_ANY, label, help_text)
            self.Bind(event=wx.EVT_MENU, handler=handler, source=item)
        menu_bar.Append(file_menu, "&File")
        self.SetMenuBar(menu_bar)

    def on_new_file(self, event):
        """Start a fresh, unsaved transaction set."""
        self.loadSettings()
        self.settings.dataFile = None
        self.saveSettings()
        self.loadTransactions()

    def on_open_file(self, event):
        """Prompt for a YAML data file and load it."""
        dlg = wx.FileDialog(
            self, message="Choose a file",
            defaultDir=self.defaultDir,
            defaultFile="",
            wildcard=MainFrame.WILDCARD,
            style=wx.FD_OPEN |
            wx.FD_CHANGE_DIR | wx.FD_FILE_MUST_EXIST |
            wx.FD_PREVIEW
        )
        if dlg.ShowModal() == wx.ID_OK:
            self.loadSettings()
            self.settings.dataFile = dlg.GetPath()
            self.loadTransactions(self.settings.dataFile)
            self.saveSettings()
        dlg.Destroy()

    def on_save(self, event):
        """Save to the current data file, or fall back to Save As."""
        # Truthiness check: the AppSettings default dataFile is "" (not None),
        # and neither "" nor None is a usable save target.
        if self.settings.dataFile:
            self.saveTransactions()
            self.saveSettings()
        else:
            self.on_save_as(event)

    def on_save_as(self, event):
        """Prompt for a file name and save transactions to it."""
        if self.settings.dataFile:
            defaultDir = os.path.dirname(self.settings.dataFile)
            defaultFile = os.path.basename(self.settings.dataFile)
        else:
            defaultDir = self.defaultDir
            defaultFile = ""
        dlg = wx.FileDialog(
            self, message="Save file as ...", defaultDir=defaultDir,
            defaultFile=defaultFile, wildcard=MainFrame.WILDCARD, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
        )
        if dlg.ShowModal() == wx.ID_OK:
            self.settings.dataFile = dlg.GetPath()
            self.saveTransactions(self.settings.dataFile)
            self.saveSettings()
        dlg.Destroy()

    def loadTransactions(self, file=None):
        """Replace the transaction store, loading from *file* when given."""
        self.ts = TransactionStore()
        # Treat both None and "" (the AppSettings default) as "no file yet".
        if file:
            self.ts.loadTransactions(file)
        self.transactionManagement.ts = self.ts
        self.cashFlowDisplay.ts = self.ts
        self.updateChildren()

    def saveTransactions(self, file=None):
        """Save the store to *file* (default: the current data file)."""
        if file is None:
            file = self.settings.dataFile
        self.settings.dataFile = file
        self.ts.saveTransactions(file)

    def saveSettings(self):
        """Persist the AppSettings object to the YAML settings file."""
        try:
            with open(self.settingsFile, "w") as f:
                yaml.dump(self.settings, f)
        except Exception as err:  # narrowed from bare except; report the cause
            print(f"Can't save settings: {err}")

    def loadSettings(self):
        """Load settings from the YAML file, falling back to defaults."""
        try:
            with open(self.settingsFile, "r") as f:
                # NOTE: yaml.Loader can instantiate arbitrary Python objects
                # (needed here to rebuild AppSettings) — only ever load the
                # app's own trusted settings file with it.
                self.settings = yaml.load(f, Loader=yaml.Loader)
            self.transactionManagement.settings = self.settings
            self.cashFlowDisplay.settings = self.settings
            self.updateChildren()
        except Exception as err:  # narrowed from bare except; report the cause
            print(f"Can't load settings file ({err}). Using defaults.")
if __name__ == '__main__':
    # Create the wx application, show the main window, and run the event loop.
    app = wx.App()
    frame = MainFrame()
    app.MainLoop()
import yaml
import string
import re
import wx
import wx.adv
from datetime import date, timedelta
from cash_flow.transaction import Transaction
from cash_flow.transaction_store import TransactionStore
from cash_flow.cash_flow import CashFlow
def wxDate2pyDate(wxdate):
    """Convert a wx.DateTime into a datetime.date.

    wx.DateTime months run 0-11 while datetime.date expects 1-12,
    so the month is shifted up by one.
    """
    year, month, day = wxdate.GetYear(), wxdate.GetMonth() + 1, wxdate.GetDay()
    return date(year, month, day)
def pyDate2wxDate(pyDate):
    """Convert a datetime.date into the equivalent wx.DateTime.

    wx.DateTime months run 0-11, so the month is shifted down by one.
    """
    day, month, year = pyDate.day, pyDate.month - 1, pyDate.year
    return wx.DateTime(day, month, year)
class AppSettings():
    """Plain settings holder persisted to YAML by MainFrame.

    Attributes default to: today's date, a '0.00' balance string,
    a 100.00 low-balance warning threshold, and no data file.
    """

    def __init__(self, startDate=None, startBalance=None, warning=None, dataFile=None):
        # Use explicit None checks so falsy-but-valid values pass through.
        self.startDate = date.today() if startDate is None else startDate
        self.startBalance = '0.00' if startBalance is None else startBalance
        self.warning = 100.00 if warning is None else warning
        self.dataFile = "" if dataFile is None else dataFile
class CashFlowDisplay(wx.Panel):
    """Notebook page projecting the daily account balance 365 days out."""

    def __init__(self, parent, ts, settings):
        """parent: containing window; ts: shared TransactionStore;
        settings: shared AppSettings instance."""
        super().__init__(parent)
        self.ts = ts
        self.settings = settings
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        # Controls at top
        self.control_sizer = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, label='Starting Date')
        self.control_sizer.Add(label, 0)
        self.date_picker = wx.adv.DatePickerCtrl(self)
        wxDate = pyDate2wxDate(self.settings.startDate)
        self.date_picker.SetValue(wxDate)
        self.date_picker.Bind(wx.adv.EVT_DATE_CHANGED, self.handleSettingsChange)
        self.control_sizer.Add(self.date_picker, 0)
        label = wx.StaticText(self, label="Starting Balance $")
        self.control_sizer.Add(label, 0)
        self.starting_balance = wx.TextCtrl(self, value=self.settings.startBalance)
        self.starting_balance.Bind(wx.EVT_TEXT, self.handleSettingsChange)
        self.control_sizer.Add(self.starting_balance, 0)
        self.main_sizer.Add(self.control_sizer, 0)
        # List of transactions (rebuilt wholesale by updateList)
        self.list_sizer = wx.BoxSizer(wx.VERTICAL)
        self.main_sizer.Add(self.list_sizer, 0, wx.EXPAND)
        self.SetSizer(self.main_sizer)
        self.updateList()

    def handleSettingsChange(self, event):
        """Any edit to the date/balance controls: rebuild the projection and
        mirror the new values into the shared settings object."""
        self.updateList()
        self.updateSettings()

    def updateList(self):
        """Rebuild the 365-day projection list from the current control values."""
        start_date = wxDate2pyDate(self.date_picker.GetValue())
        starting_balance = self.starting_balance.GetValue()
        # Strip everything but digits and '.' so partially-typed input
        # (e.g. "$1,0") doesn't break the CashFlow arithmetic.
        allow = string.digits + "."
        starting_balance = re.sub('[^%s]' % allow, '', starting_balance)
        cf = CashFlow(start_date, starting_balance, self.ts)
        # generator yielding (date, running balance, that day's transactions)
        day = cf.getTodaysTransactions()
        self.list_sizer.Clear(delete_windows=True)
        listCtrl = wx.ListCtrl(self, style=wx.LC_REPORT)
        listCtrl.InsertColumn(0, "Date")
        listCtrl.InsertColumn(1, "Balance")
        listCtrl.InsertColumn(2, "Transaction")
        listCtrl.InsertColumn(3, "Amount")
        listCtrl.SetColumnWidth(0, 100)
        listCtrl.SetColumnWidth(1, 100)
        listCtrl.SetColumnWidth(2, 200)
        listCtrl.SetColumnWidth(3, 75)
        for i in range(0, 365):
            (d, bal, t_list) = next(day)
            if t_list:
                # Add daily summary row; colour-code low (yellow) and
                # negative (red) balances.
                index = listCtrl.InsertItem(listCtrl.GetItemCount(), str(d))
                listCtrl.SetItem(index, 1, str(bal))
                if bal < self.settings.warning:
                    listCtrl.SetItemBackgroundColour(index, wx.Colour(255, 255, 0))
                if bal < 0:
                    listCtrl.SetItemBackgroundColour(index, wx.Colour(255, 0, 0))
                # Add individual transactions beneath the summary row
                for t in t_list:
                    index = listCtrl.InsertItem(listCtrl.GetItemCount(), "")
                    listCtrl.SetItem(index, 2, str(t.description))
                    listCtrl.SetItem(index, 3, str(t.amount))
                # label = f'{d} {t.description} {t.amount} {bal}'
                # txt = wx.StaticText(self, label=label)
                # self.list_sizer.Add(txt,0)
        self.list_sizer.Add(listCtrl, 0, wx.EXPAND)
        self.main_sizer.Layout()

    def updateSettings(self):
        """Push the current control values into the shared settings object."""
        self.settings.startDate = wxDate2pyDate(self.date_picker.GetValue())
        self.settings.startBalance = self.starting_balance.GetValue()
        # TODO: set warning once control is exposed

    def loadSettings(self):
        """Pull control values from the shared settings object (inverse of
        updateSettings)."""
        wxDate = pyDate2wxDate(self.settings.startDate)
        self.date_picker.SetValue(wxDate)
        self.starting_balance.SetValue(self.settings.startBalance)
        # TODO: set warning once control is exposed
class TransactionManagement(wx.Panel):
    """Notebook page: list of transaction buttons on the left, optional
    edit pane (EditTransactionPanel) on the right."""

    def __init__(self, parent, ts, settings):
        """parent: containing window; ts: shared TransactionStore;
        settings: shared AppSettings instance."""
        super().__init__(parent)
        self.ts = ts
        self.settings = settings
        self.editPane1 = None              # currently open EditTransactionPanel, if any
        self.transaction_buttons = {}      # Transaction -> wx.Button
        self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.left_side_sizer = wx.BoxSizer(wx.VERTICAL)
        self.t_list_sizer = wx.BoxSizer(wx.VERTICAL)
        self.left_side_sizer.Add(self.t_list_sizer, 0)
        btn = wx.Button(self, label='New Transaction')
        btn.Bind(wx.EVT_BUTTON, self.newTransaction)
        self.left_side_sizer.Add(btn, 0)
        self.main_sizer.Add(self.left_side_sizer, 0)
        self.SetSizer(self.main_sizer)

    def redraw(self):
        """Discard any open edit pane and rebuild the button list."""
        self.clearEditPane()
        self.rebuildTransactionButtons()

    def loadSettings(self):
        # Nothing to do: this panel has no settings-backed controls.
        pass

    def clearEditPane(self):
        """Remove and destroy the edit pane, if one is showing."""
        if self.main_sizer.GetItemCount() > 1:
            self.main_sizer.Remove(1)
        if self.editPane1:
            self.editPane1.Destroy()
            self.editPane1 = None
        self.main_sizer.Layout()

    def rebuildTransactionButtons(self):
        """Rebuild one button per transaction in the store."""
        self.t_list_sizer.Clear(delete_windows=True)
        self.transaction_buttons = {}
        for t in self.ts.getTransactions():
            self.updateButtonForTransaction(t)
        self.main_sizer.Layout()

    def editTransaction(self, event, trans):
        """Open (or replace) the edit pane for *trans*."""
        self.clearEditPane()
        self.editPane1 = EditTransactionPanel(self, trans)
        self.main_sizer.Add(self.editPane1, 0)
        self.main_sizer.Layout()

    def newTransaction(self, event):
        """Open the edit pane on a brand-new Transaction; it is only added
        to the store when saved (see updateButtonForTransaction)."""
        t = Transaction()
        self.editTransaction(event, t)

    def deleteTransaction(self, trans):
        """Remove *trans* from the store and refresh the button list."""
        self.ts.removeTransactions(trans)
        self.rebuildTransactionButtons()

    def updateButtonForTransaction(self, t):
        """Create or relabel the button for *t*; also adds *t* to the
        store if it is not there yet (the save path for new transactions)."""
        label = f'{t.description} {t.amount} {t.start}'
        if t in self.transaction_buttons:
            btn = self.transaction_buttons[t]
            btn.SetLabel(label)
        else:
            btn = wx.Button(self, label=label)
            # bind t as a default arg so each button edits its own transaction
            btn.Bind(wx.EVT_BUTTON, lambda evt, trans=t: self.editTransaction(evt, trans))
            self.t_list_sizer.Add(btn, 0)
            self.transaction_buttons[t] = btn
        if t not in self.ts.getTransactions():
            self.ts.addTransactions(t)
        self.t_list_sizer.Layout()
class EditTransactionPanel(wx.Panel):
    """Form for editing a single Transaction; hosted inside
    TransactionManagement, which it calls back into on save/cancel/delete."""

    def __init__(self, parent, trans):
        """parent: the TransactionManagement panel; trans: Transaction to edit."""
        super().__init__(parent)
        self.parent = parent
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.transaction = trans
        # Description
        self.description = wx.TextCtrl(self)
        self.main_sizer.Add(self.description, 0, wx.EXPAND)
        # Original Start Date
        label = wx.StaticText(self, label='Original Start Date', size=(50, -1))
        self.orig_start = wx.adv.DatePickerCtrl(self)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.orig_start, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Current Start Date
        label = wx.StaticText(self, label='Current Start Date', size=(50, -1))
        self.start = wx.adv.DatePickerCtrl(self)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.start, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Amount
        label = wx.StaticText(self, label='Amount', size=(50, -1))
        self.amount = wx.TextCtrl(self)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.amount, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Frequency (choices come from Transaction.INTERVALS)
        label = wx.StaticText(self, label='Frequency', size=(50, -1))
        self.frequency = wx.Choice(self, choices=Transaction.INTERVALS)
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(self.frequency, 1, wx.ALL, 5)
        self.main_sizer.Add(row_sizer, 0)
        # Scheduled
        self.scheduled = wx.CheckBox(self, label='Scheduled', style=wx.CHK_2STATE | wx.ALIGN_RIGHT)
        self.main_sizer.Add(self.scheduled, 0)
        # Cleared
        self.cleared = wx.CheckBox(self, label='Cleared', style=wx.CHK_2STATE | wx.ALIGN_RIGHT)
        self.main_sizer.Add(self.cleared, 0)
        # Action Buttons
        action_button_sizer = wx.BoxSizer()
        cancel = wx.Button(self, label="Cancel")
        cancel.Bind(wx.EVT_BUTTON, self.cancelEdit)
        action_button_sizer.Add(cancel, 0)
        reset = wx.Button(self, label="Reset")
        reset.Bind(wx.EVT_BUTTON, self.resetEdit)
        action_button_sizer.Add(reset, 0)
        save = wx.Button(self, label="Save")
        save.Bind(wx.EVT_BUTTON, self.saveEdit)
        action_button_sizer.Add(save, 0)
        self.main_sizer.Add(action_button_sizer, 0)
        # Delete Button (kept on its own row, away from the others)
        delete_button_sizer = wx.BoxSizer()
        delete = wx.Button(self, label="Delete")
        delete.Bind(wx.EVT_BUTTON, self.deleteTransaction)
        delete_button_sizer.Add(delete, 0)
        self.main_sizer.Add(delete_button_sizer, 0)
        self.setValues()
        self.SetSizer(self.main_sizer)

    def setValues(self):
        """Populate all form controls from the transaction (also serves as
        the "Reset" action)."""
        self.description.SetValue(self.transaction.description)
        self.orig_start.SetValue(pyDate2wxDate(self.transaction.original_start))
        self.start.SetValue(pyDate2wxDate(self.transaction.start))
        self.amount.SetValue(str(self.transaction.amount))
        self.frequency.SetSelection(Transaction.INTERVALS.index(self.transaction.frequency))
        self.scheduled.SetValue(self.transaction.scheduled)
        self.cleared.SetValue(self.transaction.cleared)

    def cancelEdit(self, event):
        """Close the pane, discarding any edits."""
        self.parent.clearEditPane()

    def resetEdit(self, event):
        """Revert all controls to the transaction's current values."""
        self.setValues()

    def saveEdit(self, event):
        """Copy control values back into the transaction, then close."""
        self.transaction.description = self.description.GetValue()
        self.transaction.original_start = wxDate2pyDate(self.orig_start.GetValue())
        self.transaction.start = wxDate2pyDate(self.start.GetValue())
        self.transaction.updateAmount(self.amount.GetValue())
        self.transaction.frequency = Transaction.INTERVALS[self.frequency.GetCurrentSelection()]
        self.transaction.scheduled = self.scheduled.GetValue()
        self.transaction.cleared = self.cleared.GetValue()
        # parent relabels the button and, for new transactions, adds to the store
        self.parent.updateButtonForTransaction(self.transaction)
        self.parent.clearEditPane()

    def deleteTransaction(self, event):
        """Remove this transaction from the store and close the pane."""
        self.parent.deleteTransaction(self.transaction)
        self.parent.clearEditPane()
class MainFrame(wx.Frame):
    """Top-level window: notebook with the transaction editor and the
    cash-flow projection, plus File-menu load/save plumbing."""

    WILDCARD = "YAML (*.yml)|*.yml|" \
               "All files (*.*)|*.*"

    def __init__(self):
        super().__init__(parent=None, title='Cash Flow Calculator')
        # settings live in a hidden YAML file under ./data
        self.settingsFile = os.getcwd()+'/data/'+'.cash_flow_settings.yml'
        self.settings = AppSettings()
        self.ts = TransactionStore()
        self.defaultDir = os.getcwd()+'/data'
        self.notebook = wx.Notebook(self)
        self.notebook.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.handleNotebookChange)
        self.transactionManagement = TransactionManagement(self.notebook, self.ts, self.settings)
        self.notebook.AddPage(self.transactionManagement, "Transaction Management")
        self.cashFlowDisplay = CashFlowDisplay(self.notebook, self.ts, self.settings)
        self.notebook.AddPage(self.cashFlowDisplay, "Cash Flow")
        self.SetInitialSize(wx.Size(650, 650))
        self.create_menu()
        self.loadSettings()
        self.loadTransactions(self.settings.dataFile)
        self.Show()

    def handleNotebookChange(self, event):
        """Refresh both pages whenever the user switches tabs."""
        self.updateChildren()
        event.Skip()

    def updateChildren(self):
        """Push settings into both panels and redraw them."""
        self.transactionManagement.loadSettings()
        self.transactionManagement.redraw()
        self.cashFlowDisplay.loadSettings()
        self.cashFlowDisplay.updateList()

    def create_menu(self):
        """Build the File menu (New/Open/Save/Save As) and bind handlers."""
        menu_bar = wx.MenuBar()
        file_menu = wx.Menu()
        new_file_menu_item = file_menu.Append(
            wx.ID_ANY, "New File", "Create a new file"
        )
        open_file_menu_item = file_menu.Append(
            wx.ID_ANY, "Open...", "Open a file"
        )
        save_menu_item = file_menu.Append(
            wx.ID_ANY, "Save", "Save to current file"
        )
        save_as_menu_item = file_menu.Append(
            wx.ID_ANY, "Save As", "Save file with new name"
        )
        menu_bar.Append(file_menu, "&File")
        self.Bind(
            event=wx.EVT_MENU,
            handler=self.on_new_file,
            source=new_file_menu_item,
        )
        self.Bind(
            event=wx.EVT_MENU,
            handler=self.on_open_file,
            source=open_file_menu_item,
        )
        self.Bind(
            event=wx.EVT_MENU,
            handler=self.on_save,
            source=save_menu_item,
        )
        self.Bind(
            event=wx.EVT_MENU,
            handler=self.on_save_as,
            source=save_as_menu_item,
        )
        self.SetMenuBar(menu_bar)

    def on_new_file(self, event):
        """Start a fresh, unnamed transaction store.

        dataFile is set to None (not "") so on_save knows to prompt
        for a filename via Save As.
        """
        self.loadSettings()
        self.settings.dataFile = None
        self.saveSettings()
        self.loadTransactions()

    def on_open_file(self, event):
        """Prompt for an existing YAML file and load it."""
        dlg = wx.FileDialog(
            self, message="Choose a file",
            defaultDir=self.defaultDir,
            defaultFile="",
            wildcard=MainFrame.WILDCARD,
            style=wx.FD_OPEN |
            wx.FD_CHANGE_DIR | wx.FD_FILE_MUST_EXIST |
            wx.FD_PREVIEW
        )
        if dlg.ShowModal() == wx.ID_OK:
            self.loadSettings()
            self.settings.dataFile = dlg.GetPath()
            self.loadTransactions(self.settings.dataFile)
            self.saveSettings()
        dlg.Destroy()

    def on_save(self, event):
        """Save to the current file, or fall through to Save As if unnamed."""
        if self.settings.dataFile is not None:
            self.saveTransactions()
            self.saveSettings()
        else:
            self.on_save_as(event)

    def on_save_as(self, event):
        """Prompt for a destination file and save, remembering the path."""
        if self.settings.dataFile is not None:
            defaultDir = os.path.dirname(self.settings.dataFile)
            defaultFile = os.path.basename(self.settings.dataFile)
        else:
            defaultDir = self.defaultDir
            defaultFile = ""
        dlg = wx.FileDialog(
            self, message="Save file as ...", defaultDir=defaultDir,
            defaultFile=defaultFile, wildcard=MainFrame.WILDCARD, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
        )
        if dlg.ShowModal() == wx.ID_OK:
            self.settings.dataFile = dlg.GetPath()
            self.saveTransactions(self.settings.dataFile)
            self.saveSettings()
        dlg.Destroy()

    def loadTransactions(self, file=None):
        """Replace the transaction store, loading from *file* unless None."""
        self.ts = TransactionStore()
        if file is not None:
            self.ts.loadTransactions(file)
        self.transactionManagement.ts = self.ts
        self.cashFlowDisplay.ts = self.ts
        self.updateChildren()

    def saveTransactions(self, file=None):
        """Write the store to *file* (default: settings.dataFile), and
        record the path used."""
        if file is None:
            file = self.settings.dataFile
        self.settings.dataFile = file
        self.ts.saveTransactions(file)

    def saveSettings(self):
        """Best-effort serialization of settings to the YAML settings file."""
        try:
            with open(self.settingsFile, "w") as f:
                yaml.dump(self.settings, f)
        except Exception:
            # Narrowed from a bare ``except:``; KeyboardInterrupt/SystemExit
            # now propagate.
            print("Can't save settings for some reason.")

    def loadSettings(self):
        """Best-effort load of settings; missing file keeps the defaults."""
        try:
            with open(self.settingsFile, "r") as f:
                self.settings = yaml.load(f, Loader=yaml.Loader)
            self.transactionManagement.settings = self.settings
            self.cashFlowDisplay.settings = self.settings
            self.updateChildren()
        except Exception:
            # Narrowed from a bare ``except:``; first-run missing file is
            # expected and intentionally non-fatal.
            print("Can't load settings file. Using defaults.")
if __name__ == '__main__':
app = wx.App()
frame = MainFrame()
app.MainLoop() | 0.285472 | 0.0729 |
"""Test utilities."""
from __future__ import (
absolute_import, division, print_function, unicode_literals,
)
import logging
import os
import re
import sys
from threading import RLock
from traitlets.config.application import LevelFormatter
from traitlets.traitlets import default
try:
from notebook.tests.test_notebookapp import raise_on_bad_version
except ImportError:
pep440re = re.compile(
r'^'
r'([1-9]\d*!)?(0|[1-9]\d*)(.(0|[1-9]\d*))*'
r'((a|b|rc)(0|[1-9]\d*))?'
r'(\.post(0|[1-9]\d*))?'
r'(\.dev(0|[1-9]\d*))?'
r'$'
)
def raise_on_bad_version(version):
if not pep440re.match(version):
raise ValueError(
"Versions String apparently does not match Pep 440 "
"specification, which might lead to sdist and wheel being "
"seen as 2 different release. "
"E.g: do not use dots for beta/alpha/rc markers."
)
def stringify_env(env):
    """Return *env* with keys/values coerced to byte-str on Windows Python 2.

    Python 2 on Windows rejects unicode objects in a subprocess
    environment even when they are ASCII-convertible, which breaks
    modules importing ``unicode_literals`` from future. Everywhere
    else the mapping is returned untouched (same object).
    """
    needs_coercion = sys.version_info[0] < 3 and os.name == 'nt'
    if not needs_coercion:
        return env
    return {str(key): str(val) for key, val in env.iteritems()}
class GlobalMemoryHandler(logging.Handler):
    """
    A MemoryHandler which uses a single buffer across all instances.
    In addition, will only flush logs when explicitly called to.

    Records are buffered as (record, target_handler) pairs in the
    class-wide ``_buffer``; ``flush_to_target`` later replays each record
    into the handler it was captured for.
    """
    _buffer = None  # used as a class-wide attribute
    _lock = None  # used as a class-wide attribute

    @classmethod
    def _setup_class(cls):
        # Lazily create the shared lock and buffer on first instantiation.
        if cls._lock is None:
            cls._lock = RLock()
        if cls._buffer is None:
            with cls._lock:
                cls._buffer = []

    def __init__(self, target):
        # target: the handler that buffered records are eventually sent to
        logging.Handler.__init__(self)
        self.target = target
        self._setup_class()

    def emit(self, record):
        """
        Emit a record.
        Append the record and its target to the buffer.
        Don't check shouldFlush like regular MemoryHandler does.
        """
        self.__class__._buffer.append((record, self.target))

    @classmethod
    def flush_to_target(cls):
        """
        Sending the buffered records to their respective targets.
        The class-wide record buffer is also cleared by this operation.
        """
        with cls._lock:
            for record, target in cls._buffer:
                target.handle(record)
            cls.clear_buffer()

    @classmethod
    def clear_buffer(cls):
        # Drop all buffered records without delivering them.
        with cls._lock:
            cls._buffer = []

    @classmethod
    def rotate_buffer(cls, num_places=1):
        # Move the last num_places records to the front of the buffer.
        with cls._lock:
            cls._buffer = cls._buffer[-num_places:] + cls._buffer[:-num_places]

    def close(self):
        """Close the handler."""
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
def wrap_logger_handlers(logger):
    """Re-attach each of *logger*'s handlers wrapped in a GlobalMemoryHandler.

    Handlers that are already GlobalMemoryHandler instances are re-added
    as-is rather than double-wrapped. Returns the (mutated) logger.
    """
    existing = logger.handlers
    logger.handlers = []
    for original in existing:
        if isinstance(original, GlobalMemoryHandler):
            logger.addHandler(original)
        else:
            logger.addHandler(GlobalMemoryHandler(target=original))
    return logger
def get_logger(name=__name__, log_level=logging.DEBUG):
    """
    Return a logger with a default StreamHandler.
    Adapted from
    traitlets.config.application.Application._log_default
    """
    log = logging.getLogger(name)
    log.setLevel(log_level)
    log.propagate = False
    # Walk up the logger hierarchy; if any ancestor already has handlers
    # (and propagation is not cut off) reuse the logger as-is.
    _log = log  # copied from Logger.hasHandlers() (new in Python 3.2)
    while _log:
        if _log.handlers:
            return log
        if not _log.propagate:
            break
        else:
            _log = _log.parent
    if sys.executable.endswith('pythonw.exe'):
        # this should really go to a file, but file-logging is only
        # hooked up in parallel applications
        _log_handler = logging.StreamHandler(open(os.devnull, 'w'))
    else:
        _log_handler = logging.StreamHandler()
    _log_formatter = LevelFormatter(
        fmt='[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s',
        datefmt='%H:%M:%S')
    _log_handler.setFormatter(_log_formatter)
    log.addHandler(_log_handler)
    return log
def get_wrapped_logger(*args, **kwargs):
    """Return a logger with StreamHandler wrapped in a GlobalMemoryHandler.

    Arguments are forwarded to :func:`get_logger`.
    """
    return wrap_logger_handlers(get_logger(*args, **kwargs))
def patch_traitlets_app_logs(klass):
"""
Patch an App's default log method for use in nose tests.
This is for use on subclasses of tratilets.config.application.Application
and essentially removes handlers from the default logger, then sets it to
propagate so that nose can capture the logs.
"""
@default('log')
def new_default_log(self):
logger = super(klass, self)._log_default()
# clear log handlers and propagate to root for nose to capture
logger.propagate = True
logger.handlers = []
return logger
klass._log_default = new_default_log | jupyter_contrib_core/testing_utils/__init__.py | """Test utilities."""
from __future__ import (
absolute_import, division, print_function, unicode_literals,
)
import logging
import os
import re
import sys
from threading import RLock
from traitlets.config.application import LevelFormatter
from traitlets.traitlets import default
try:
from notebook.tests.test_notebookapp import raise_on_bad_version
except ImportError:
pep440re = re.compile(
r'^'
r'([1-9]\d*!)?(0|[1-9]\d*)(.(0|[1-9]\d*))*'
r'((a|b|rc)(0|[1-9]\d*))?'
r'(\.post(0|[1-9]\d*))?'
r'(\.dev(0|[1-9]\d*))?'
r'$'
)
def raise_on_bad_version(version):
if not pep440re.match(version):
raise ValueError(
"Versions String apparently does not match Pep 440 "
"specification, which might lead to sdist and wheel being "
"seen as 2 different release. "
"E.g: do not use dots for beta/alpha/rc markers."
)
def stringify_env(env):
"""
Convert environment vars dict to str: str (not unicode) for py2 on Windows.
Python 2 on Windows doesn't handle Unicode objects in environment, even if
they can be converted to ASCII string, which can cause problems for
subprocess calls in modules importing unicode_literals from future.
"""
if sys.version_info[0] < 3 and os.name == 'nt':
return {str(key): str(val) for key, val in env.iteritems()}
return env
class GlobalMemoryHandler(logging.Handler):
"""
A MemoryHandler which uses a single buffer across all instances.
In addition, will only flush logs when explicitly called to.
"""
_buffer = None # used as a class-wide attribute
_lock = None # used as a class-wide attribute
@classmethod
def _setup_class(cls):
if cls._lock is None:
cls._lock = RLock()
if cls._buffer is None:
with cls._lock:
cls._buffer = []
def __init__(self, target):
logging.Handler.__init__(self)
self.target = target
self._setup_class()
def emit(self, record):
"""
Emit a record.
Append the record and its target to the buffer.
Don't check shouldFlush like regular MemoryHandler does.
"""
self.__class__._buffer.append((record, self.target))
@classmethod
def flush_to_target(cls):
"""
Sending the buffered records to their respective targets.
The class-wide record buffer is also cleared by this operation.
"""
with cls._lock:
for record, target in cls._buffer:
target.handle(record)
cls.clear_buffer()
@classmethod
def clear_buffer(cls):
with cls._lock:
cls._buffer = []
@classmethod
def rotate_buffer(cls, num_places=1):
with cls._lock:
cls._buffer = cls._buffer[-num_places:] + cls._buffer[:-num_places]
def close(self):
"""Close the handler."""
try:
self.flush()
finally:
logging.Handler.close(self)
def wrap_logger_handlers(logger):
"""Wrap a logging handler in a GlobalMemoryHandler."""
# clear original log handlers, saving a copy
handlers_to_wrap = logger.handlers
logger.handlers = []
# wrap each one
for handler in handlers_to_wrap:
if isinstance(handler, GlobalMemoryHandler):
wrapping_handler = handler
else:
wrapping_handler = GlobalMemoryHandler(target=handler)
logger.addHandler(wrapping_handler)
return logger
def get_logger(name=__name__, log_level=logging.DEBUG):
"""
Return a logger with a default StreamHandler.
Adapted from
tratilets.config.application.Application._log_default
"""
log = logging.getLogger(name)
log.setLevel(log_level)
log.propagate = False
_log = log # copied from Logger.hasHandlers() (new in Python 3.2)
while _log:
if _log.handlers:
return log
if not _log.propagate:
break
else:
_log = _log.parent
if sys.executable.endswith('pythonw.exe'):
# this should really go to a file, but file-logging is only
# hooked up in parallel applications
_log_handler = logging.StreamHandler(open(os.devnull, 'w'))
else:
_log_handler = logging.StreamHandler()
_log_formatter = LevelFormatter(
fmt='[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s',
datefmt='%H:%M:%S')
_log_handler.setFormatter(_log_formatter)
log.addHandler(_log_handler)
return log
def get_wrapped_logger(*args, **kwargs):
"""Return a logger with StreamHandler wrapped in a GlobalMemoryHandler."""
return wrap_logger_handlers(get_logger(*args, **kwargs))
def patch_traitlets_app_logs(klass):
"""
Patch an App's default log method for use in nose tests.
This is for use on subclasses of tratilets.config.application.Application
and essentially removes handlers from the default logger, then sets it to
propagate so that nose can capture the logs.
"""
@default('log')
def new_default_log(self):
logger = super(klass, self)._log_default()
# clear log handlers and propagate to root for nose to capture
logger.propagate = True
logger.handlers = []
return logger
klass._log_default = new_default_log | 0.654343 | 0.158077 |
from netapp.netapp_object import NetAppObject
class LunStatsInfo(NetAppObject):
"""
Stats for a LUN.
"""
_last_zeroed = None
@property
def last_zeroed(self):
"""
Total number of seconds since the statistics
for this lun were last zeroed.
"""
return self._last_zeroed
@last_zeroed.setter
def last_zeroed(self, val):
if val != None:
self.validate('last_zeroed', val)
self._last_zeroed = val
_block_size = None
@property
def block_size(self):
"""
Disk block size for this LUN in bytes.
This attribute is unavailable when the LUN is fenced for
a restore operation.
"""
return self._block_size
@block_size.setter
def block_size(self, val):
if val != None:
self.validate('block_size', val)
self._block_size = val
_scsi_errors = None
@property
def scsi_errors(self):
"""
Total number of SCSI errors.
"""
return self._scsi_errors
@scsi_errors.setter
def scsi_errors(self, val):
if val != None:
self.validate('scsi_errors', val)
self._scsi_errors = val
_write_ops = None
@property
def write_ops(self):
"""
Total number of SCSI write ops executed.
"""
return self._write_ops
@write_ops.setter
def write_ops(self, val):
if val != None:
self.validate('write_ops', val)
self._write_ops = val
_write_blocks = None
@property
def write_blocks(self):
"""
Number of disk blocks written.
This attribute is unavailable when the LUN is fenced for
a restore operation.
"""
return self._write_blocks
@write_blocks.setter
def write_blocks(self, val):
if val != None:
self.validate('write_blocks', val)
self._write_blocks = val
_vserver = None
@property
def vserver(self):
"""
Vserver containing the lun
"""
return self._vserver
@vserver.setter
def vserver(self, val):
if val != None:
self.validate('vserver', val)
self._vserver = val
_other_ops = None
@property
def other_ops(self):
"""
Total number of other SCSI ops executed.
"""
return self._other_ops
@other_ops.setter
def other_ops(self, val):
if val != None:
self.validate('other_ops', val)
self._other_ops = val
_path = None
@property
def path(self):
"""
path of the LUN.
(for example, "/vol/vol0/lun1")
"""
return self._path
@path.setter
def path(self, val):
if val != None:
self.validate('path', val)
self._path = val
_read_blocks = None
@property
def read_blocks(self):
"""
Number of disk blocks read.
This attribute is unavailable when the LUN is fenced for
a restore operation.
"""
return self._read_blocks
@read_blocks.setter
def read_blocks(self, val):
if val != None:
self.validate('read_blocks', val)
self._read_blocks = val
_read_ops = None
@property
def read_ops(self):
"""
Total number of SCSI read ops executed.
"""
return self._read_ops
@read_ops.setter
def read_ops(self, val):
if val != None:
self.validate('read_ops', val)
self._read_ops = val
@staticmethod
def get_api_name():
return "lun-stats-info"
@staticmethod
def get_desired_attrs():
return [
'last-zeroed',
'block-size',
'scsi-errors',
'write-ops',
'write-blocks',
'vserver',
'other-ops',
'path',
'read-blocks',
'read-ops',
]
def describe_properties(self):
return {
'last_zeroed': { 'class': int, 'is_list': False, 'required': 'optional' },
'block_size': { 'class': int, 'is_list': False, 'required': 'optional' },
'scsi_errors': { 'class': int, 'is_list': False, 'required': 'required' },
'write_ops': { 'class': int, 'is_list': False, 'required': 'required' },
'write_blocks': { 'class': int, 'is_list': False, 'required': 'optional' },
'vserver': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'other_ops': { 'class': int, 'is_list': False, 'required': 'required' },
'path': { 'class': basestring, 'is_list': False, 'required': 'required' },
'read_blocks': { 'class': int, 'is_list': False, 'required': 'optional' },
'read_ops': { 'class': int, 'is_list': False, 'required': 'required' },
} | generated-libraries/python/netapp/lun/lun_stats_info.py | from netapp.netapp_object import NetAppObject
class LunStatsInfo(NetAppObject):
"""
Stats for a LUN.
"""
_last_zeroed = None
    @property
    def last_zeroed(self):
        """
        Total number of seconds since the statistics
        for this lun were last zeroed.
        """
        return self._last_zeroed

    @last_zeroed.setter
    def last_zeroed(self, val):
        # Validate before storing; None bypasses validation and clears it.
        if val != None:
            self.validate('last_zeroed', val)
        self._last_zeroed = val
_block_size = None
@property
def block_size(self):
"""
Disk block size for this LUN in bytes.
This attribute is unavailable when the LUN is fenced for
a restore operation.
"""
return self._block_size
@block_size.setter
def block_size(self, val):
if val != None:
self.validate('block_size', val)
self._block_size = val
_scsi_errors = None
@property
def scsi_errors(self):
"""
Total number of SCSI errors.
"""
return self._scsi_errors
@scsi_errors.setter
def scsi_errors(self, val):
if val != None:
self.validate('scsi_errors', val)
self._scsi_errors = val
_write_ops = None
@property
def write_ops(self):
"""
Total number of SCSI write ops executed.
"""
return self._write_ops
@write_ops.setter
def write_ops(self, val):
if val != None:
self.validate('write_ops', val)
self._write_ops = val
_write_blocks = None
@property
def write_blocks(self):
"""
Number of disk blocks written.
This attribute is unavailable when the LUN is fenced for
a restore operation.
"""
return self._write_blocks
@write_blocks.setter
def write_blocks(self, val):
if val != None:
self.validate('write_blocks', val)
self._write_blocks = val
_vserver = None
@property
def vserver(self):
"""
Vserver containing the lun
"""
return self._vserver
@vserver.setter
def vserver(self, val):
if val != None:
self.validate('vserver', val)
self._vserver = val
_other_ops = None
@property
def other_ops(self):
"""
Total number of other SCSI ops executed.
"""
return self._other_ops
@other_ops.setter
def other_ops(self, val):
if val != None:
self.validate('other_ops', val)
self._other_ops = val
_path = None
@property
def path(self):
"""
path of the LUN.
(for example, "/vol/vol0/lun1")
"""
return self._path
@path.setter
def path(self, val):
if val != None:
self.validate('path', val)
self._path = val
_read_blocks = None
@property
def read_blocks(self):
"""
Number of disk blocks read.
This attribute is unavailable when the LUN is fenced for
a restore operation.
"""
return self._read_blocks
@read_blocks.setter
def read_blocks(self, val):
if val != None:
self.validate('read_blocks', val)
self._read_blocks = val
_read_ops = None
@property
def read_ops(self):
"""
Total number of SCSI read ops executed.
"""
return self._read_ops
@read_ops.setter
def read_ops(self, val):
if val != None:
self.validate('read_ops', val)
self._read_ops = val
    @staticmethod
    def get_api_name():
        # ONTAP API element name this object maps to.
        return "lun-stats-info"
    @staticmethod
    def get_desired_attrs():
        # API attribute names (dash-separated, matching the wire format)
        # requested for this object.
        return [
            'last-zeroed',
            'block-size',
            'scsi-errors',
            'write-ops',
            'write-blocks',
            'vserver',
            'other-ops',
            'path',
            'read-blocks',
            'read-ops',
        ]
def describe_properties(self):
return {
'last_zeroed': { 'class': int, 'is_list': False, 'required': 'optional' },
'block_size': { 'class': int, 'is_list': False, 'required': 'optional' },
'scsi_errors': { 'class': int, 'is_list': False, 'required': 'required' },
'write_ops': { 'class': int, 'is_list': False, 'required': 'required' },
'write_blocks': { 'class': int, 'is_list': False, 'required': 'optional' },
'vserver': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'other_ops': { 'class': int, 'is_list': False, 'required': 'required' },
'path': { 'class': basestring, 'is_list': False, 'required': 'required' },
'read_blocks': { 'class': int, 'is_list': False, 'required': 'optional' },
'read_ops': { 'class': int, 'is_list': False, 'required': 'required' },
} | 0.766468 | 0.196421 |
from unittest import TestCase
import shellmacros
class TestMacroEngine(TestCase):
    def setup1(self):
        """Build the MacroEngine fixture shared by the A/B/C/NEG tests.

        Defines chained macros so ${${${parent}_son}_${what}} resolves to
        'dolly', plus one keep macro and one external macro.
        """
        e = shellmacros.MacroEngine()
        e.add('zack_dog', 'dolly')
        e.add('what','${pet}')
        e.add('pet','dog')
        e.add('parent', 'duane')
        e.add('duane_son', 'zack')
        e.add_keep('keep','_keep_me')
        e.add_external('extern')
        e.add('EXTERN','${extern}')
        return e
    def test_A010_simple(self):
        """Empty input resolves successfully to the empty string."""
        e = self.setup1()
        r = e.resolve_text('')
        self.assertTrue(r.ok)
        self.assertEqual(r.result , '' )
    def test_A020_simple(self):
        """A single, non-nested macro reference expands to its value."""
        e = self.setup1()
        r = e.resolve_text('${parent}')
        self.assertTrue(r.ok)
        self.assertEqual(r.result,'duane')
def test_A030_simple(self):
e = self.setup1()
r=e.resolve_text( '${${${parent}_son}_${what}}' )
assert(r.ok)
self.assertEqual(r.result,'dolly')
    def test_B010_ext(self):
        """Macros marked external or keep only expand under full resolution."""
        def permutation( rr, ee, kk, full ):
            # rr: expected result; ee: macro to mark external (or None);
            # kk: macro to mark keep (or None); full: request full resolution
            e = self.setup1()
            if ee:
                e.mark_macro_external(ee)
            if kk:
                e.mark_macro_keep(kk)
            r = e.resolve_text('${zack_dog}',full)
            self.assertTrue( r.ok )
            if( r.result != rr ):
                # here so we can set a breakpoint
                self.assertEqual( r.result , rr )
        permutation( 'dolly', None, None, False )
        permutation( 'dolly', None, None, True )
        permutation( '${zack_dog}', 'zack_dog', None, False )
        permutation( 'dolly' , 'zack_dog', None, True )
        permutation( '${zack_dog}', None , 'zack_dog', False )
        permutation( 'dolly' , None , 'zack_dog', True )
    def test_B020_find_a_keep(self):
        """A keep-marked macro stays as ${name} in normal resolution but
        expands when full resolution is requested."""
        e = self.setup1()
        e.mark_macro_keep('zack_dog')
        r = e.resolve_text('${${${parent}_son}_${what}}')
        self.assertTrue(r.ok)
        self.assertEqual(r.result,'${zack_dog}')
        # test that we can resolve this
        r = e.resolve_text('${${${parent}_son}_${what}}',True)
        self.assertTrue(r.ok)
        self.assertEqual(r.result,'dolly')
    def test_B030_expand_an_extern(self):
        """Keeping an intermediate macro makes partial (non-full) resolution
        fail, while full resolution still succeeds."""
        e = self.setup1()
        e.mark_macro_keep('duane_son')
        r = e.resolve_text('${${${parent}_son}_${what}}',True)
        self.assertTrue(r.ok)
        self.assertEqual(r.result, 'dolly')
        r = e.resolve_text('${${${parent}_son}_${what}}', False)
        self.assertFalse( r.ok)
        self.assertIsNone (r.result)
    def test_C010_extern(self):
        """An external macro expands one level but leaves the external
        reference itself untouched in non-full resolution."""
        e =self.setup1()
        r = e.resolve_text('abc ${EXTERN} xyz',False)
        self.assertTrue(r.ok)
        self.assertEqual( r.result , 'abc ${extern} xyz' )
def test_C020_transfors(self):
e = shellmacros.MacroEngine()
input=r'//Server\MixedCase'
e.add( 'a', input )
# no change
r = e.resolve_text('${a}')
self.assertTrue(r.ok)
self.assertEqual( r.result, input )
# lower
r = e.resolve_text('${a_lc}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.lower())
# upper
r = e.resolve_text('${a_uc}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.upper())
# DOS
r = e.resolve_text('${a_dos}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.replace('/','\\'))
# Unix
r = e.resolve_text('${a_unix}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.replace('/', '\\'))
def test_NEG_010_syntax(self):
e = self.setup1()
s = '${noclose'
r = e.resolve_text(s)
self.assertTrue(r.ok)
self.assertEqual( r.result, s)
s = 'noopen}'
r = e.resolve_text(s)
self.assertTrue(r.ok)
self.assertEqual(r.result, s)
e.add('A', '${B}')
e.add('B', '${A}')
r = e.resolve_text('${A}')
self.assertFalse( r.ok )
self.assertIsInstance(r.error,shellmacros.MacroRecursionError)
def order_test_setup(self):
e = shellmacros.MacroEngine()
# goal: ${${abc}} -> ${${a}_{b}_{c}}
# a=a, b=dogs, c=lunch
# ${a_dogs_lunch} => is_not_tasty
e.add('a', 'a')
e.add('b', 'dogs')
e.add('c', 'lunch')
e.add('abc', '${a}_${b}_${c}')
e.add('a_dogs_lunch', 'is_not_tasty')
e.add('foo', '${${abc}}')
return e
def test_E010_depends(self):
e = self.order_test_setup()
r = e.resolve_text( '${foo}', e.RESOLVE_REFERENCES )
r = e.output_order()
correct = ['c', 'b', 'a_dogs_lunch', 'a', 'abc', 'foo']
self.assertEqual( len(r) , len(correct) )
for x in range(0,len(r)):
self.assertEqual( correct[x] , r[x] )
# Done.
def test_E020_make(self):
e = self.order_test_setup()
j = e.json_macros_str()
# this is not easy to test.. so we eyeball it
print('JSON result:\n---------\n%s\n---------' % j)
print("")
def test_E030_bash(self):
e = self.order_test_setup()
b = e.bash_fragment_str()
# this is not easy so we eyeball it
print('BASH result:\n---------\n%s\n---------' % b)
print("")
def test_E040_bash(self):
e = self.order_test_setup()
m = e.make_fragment_str()
# this is not easy so we eyeball it
print('MAKE result:\n---------\n%s\n---------' % m)
print("")
def test_E050_quoted_keeps(self):
e = shellmacros.MacroEngine()
e.add_keep( "CC", "${CROSS_COMPILE}gcc" )
e.add_keep( "CROSS_COMPILE", "arm-none-eabi-")
e.add_external("WORKSPACE_LOC")
m=e.add("SOMEDIR", r"C:\path with\spaces in\path")
m.quoted = True
e.add_makefle_dynamic_vars()
e.add( "cmd", "${CC} -o I${WORKSPACE_LOC}/foo -I${SOMEDIR} -o ${@} ${<}" )
s=e.make_fragment_str()
print("MAKE RESULT\n-----\n%s\n------\n" % s)
s=e.bash_fragment_str()
print("BASH RESULT\n-----\n%s\n------\n" % s )
print("") | tests/test_engine.py | from unittest import TestCase
import shellmacros
class TestMacroEngine(TestCase):
def setup1(self):
e = shellmacros.MacroEngine()
e.add('zack_dog', 'dolly')
e.add('what','${pet}')
e.add('pet','dog')
e.add('parent', 'duane')
e.add('duane_son', 'zack')
e.add_keep('keep','_keep_me')
e.add_external('extern')
e.add('EXTERN','${extern}')
return e
def test_A010_simple(self):
e = self.setup1()
r = e.resolve_text('')
self.assertTrue(r.ok)
self.assertEqual(r.result , '' )
def test_A020_simple(self):
e = self.setup1()
r = e.resolve_text('${parent}')
self.assertTrue(r.ok)
self.assertEqual(r.result,'duane')
def test_A030_simple(self):
e = self.setup1()
r=e.resolve_text( '${${${parent}_son}_${what}}' )
assert(r.ok)
self.assertEqual(r.result,'dolly')
def test_B010_ext(self):
def permutation( rr, ee, kk, full ):
e = self.setup1()
if ee:
e.mark_macro_external(ee)
if kk:
e.mark_macro_keep(kk)
r = e.resolve_text('${zack_dog}',full)
self.assertTrue( r.ok )
if( r.result != rr ):
# here so we can set a breakpoint
self.assertEqual( r.result , rr )
permutation( 'dolly', None, None, False )
permutation( 'dolly', None, None, True )
permutation( '${zack_dog}', 'zack_dog', None, False )
permutation( 'dolly' , 'zack_dog', None, True )
permutation( '${zack_dog}', None , 'zack_dog', False )
permutation( 'dolly' , None , 'zack_dog', True )
def test_B020_find_a_keep(self):
e = self.setup1()
e.mark_macro_keep('zack_dog')
r = e.resolve_text('${${${parent}_son}_${what}}')
self.assertTrue(r.ok)
self.assertEqual(r.result,'${zack_dog}')
# test that we can resolve this
r = e.resolve_text('${${${parent}_son}_${what}}',True)
self.assertTrue(r.ok)
self.assertEqual(r.result,'dolly')
def test_B030_expand_an_extern(self):
e = self.setup1()
e.mark_macro_keep('duane_son')
r = e.resolve_text('${${${parent}_son}_${what}}',True)
self.assertTrue(r.ok)
self.assertEqual(r.result, 'dolly')
r = e.resolve_text('${${${parent}_son}_${what}}', False)
self.assertFalse( r.ok)
self.assertIsNone (r.result)
def test_C010_extern(self):
e =self.setup1()
r = e.resolve_text('abc ${EXTERN} xyz',False)
self.assertTrue(r.ok)
self.assertEqual( r.result , 'abc ${extern} xyz' )
def test_C020_transfors(self):
e = shellmacros.MacroEngine()
input=r'//Server\MixedCase'
e.add( 'a', input )
# no change
r = e.resolve_text('${a}')
self.assertTrue(r.ok)
self.assertEqual( r.result, input )
# lower
r = e.resolve_text('${a_lc}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.lower())
# upper
r = e.resolve_text('${a_uc}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.upper())
# DOS
r = e.resolve_text('${a_dos}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.replace('/','\\'))
# Unix
r = e.resolve_text('${a_unix}')
self.assertTrue(r.ok)
self.assertEqual(r.result, input.replace('/', '\\'))
def test_NEG_010_syntax(self):
e = self.setup1()
s = '${noclose'
r = e.resolve_text(s)
self.assertTrue(r.ok)
self.assertEqual( r.result, s)
s = 'noopen}'
r = e.resolve_text(s)
self.assertTrue(r.ok)
self.assertEqual(r.result, s)
e.add('A', '${B}')
e.add('B', '${A}')
r = e.resolve_text('${A}')
self.assertFalse( r.ok )
self.assertIsInstance(r.error,shellmacros.MacroRecursionError)
def order_test_setup(self):
e = shellmacros.MacroEngine()
# goal: ${${abc}} -> ${${a}_{b}_{c}}
# a=a, b=dogs, c=lunch
# ${a_dogs_lunch} => is_not_tasty
e.add('a', 'a')
e.add('b', 'dogs')
e.add('c', 'lunch')
e.add('abc', '${a}_${b}_${c}')
e.add('a_dogs_lunch', 'is_not_tasty')
e.add('foo', '${${abc}}')
return e
def test_E010_depends(self):
e = self.order_test_setup()
r = e.resolve_text( '${foo}', e.RESOLVE_REFERENCES )
r = e.output_order()
correct = ['c', 'b', 'a_dogs_lunch', 'a', 'abc', 'foo']
self.assertEqual( len(r) , len(correct) )
for x in range(0,len(r)):
self.assertEqual( correct[x] , r[x] )
# Done.
def test_E020_make(self):
e = self.order_test_setup()
j = e.json_macros_str()
# this is not easy to test.. so we eyeball it
print('JSON result:\n---------\n%s\n---------' % j)
print("")
def test_E030_bash(self):
e = self.order_test_setup()
b = e.bash_fragment_str()
# this is not easy so we eyeball it
print('BASH result:\n---------\n%s\n---------' % b)
print("")
def test_E040_bash(self):
e = self.order_test_setup()
m = e.make_fragment_str()
# this is not easy so we eyeball it
print('MAKE result:\n---------\n%s\n---------' % m)
print("")
def test_E050_quoted_keeps(self):
e = shellmacros.MacroEngine()
e.add_keep( "CC", "${CROSS_COMPILE}gcc" )
e.add_keep( "CROSS_COMPILE", "arm-none-eabi-")
e.add_external("WORKSPACE_LOC")
m=e.add("SOMEDIR", r"C:\path with\spaces in\path")
m.quoted = True
e.add_makefle_dynamic_vars()
e.add( "cmd", "${CC} -o I${WORKSPACE_LOC}/foo -I${SOMEDIR} -o ${@} ${<}" )
s=e.make_fragment_str()
print("MAKE RESULT\n-----\n%s\n------\n" % s)
s=e.bash_fragment_str()
print("BASH RESULT\n-----\n%s\n------\n" % s )
print("") | 0.476092 | 0.388502 |
__author__ = "<NAME>"
__copyright__ = "OuroborosCoding"
__version__ = "1.0.0"
__email__ = "<EMAIL>"
__created__ = "2018-11-11"
def crop(w, h, bw, bh):
"""Crop
Makes sure one side fits and crops the other
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
dict
"""
# Init the return
dRet = {}
# Easier to work with floats
w = float(w)
h = float(h)
# If the image is already smaller, make it bigger
if w < bw or h < bh:
# Which is the side that needs to grow more?
if (bw / w) > (bh / h):
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
else:
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
# Else, make it smaller
else:
# Which is the side that needs to shrink less?
if (w / bw) > (h / bh):
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
else:
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
# Return the new width and height
return dRet
def fit(w, h, bw, bh):
"""Fit
Makes sure one side fits and makes the other smaller than necessary
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
list [w, h]
"""
# Init the return
dRet = {}
# Easier to work with floats
w = float(w)
h = float(h)
# If the image is already smaller, make it bigger
if w < bw and h < bh:
# Figure out the larger side
if (bw / w) > (bh / h):
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
else:
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
# Else, make it smaller
else:
# Figure out the larger side
if (w / bw) > (h / bh):
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
else:
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
# Return the new width and height
return dRet
def region(w, h, bw, bh):
"""Region
Returns a new set of region points based on a current width and height and
the bounding box
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
dict
"""
# Return
dRet = {}
# If the current width is larger than the bounds width
if w > bw:
dRet['x'] = int(round((w - bw) / 2.0))
dRet['y'] = 0
dRet['w'] = int(bw + round((w - bw) / 2.0))
dRet['h'] = bh
# Else if the current height is larger than the bounds height
else:
dRet['x'] = 0
dRet['y'] = int(round((h - bh) / 2.0))
dRet['w'] = bw
dRet['h'] = int(bh + round((h - bh) / 2.0))
# Return the region
return dRet | RestOC/Resize.py | __author__ = "<NAME>"
__copyright__ = "OuroborosCoding"
__version__ = "1.0.0"
__email__ = "<EMAIL>"
__created__ = "2018-11-11"
def crop(w, h, bw, bh):
"""Crop
Makes sure one side fits and crops the other
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
dict
"""
# Init the return
dRet = {}
# Easier to work with floats
w = float(w)
h = float(h)
# If the image is already smaller, make it bigger
if w < bw or h < bh:
# Which is the side that needs to grow more?
if (bw / w) > (bh / h):
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
else:
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
# Else, make it smaller
else:
# Which is the side that needs to shrink less?
if (w / bw) > (h / bh):
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
else:
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
# Return the new width and height
return dRet
def fit(w, h, bw, bh):
"""Fit
Makes sure one side fits and makes the other smaller than necessary
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
list [w, h]
"""
# Init the return
dRet = {}
# Easier to work with floats
w = float(w)
h = float(h)
# If the image is already smaller, make it bigger
if w < bw and h < bh:
# Figure out the larger side
if (bw / w) > (bh / h):
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
else:
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
# Else, make it smaller
else:
# Figure out the larger side
if (w / bw) > (h / bh):
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
else:
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
# Return the new width and height
return dRet
def region(w, h, bw, bh):
"""Region
Returns a new set of region points based on a current width and height and
the bounding box
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
dict
"""
# Return
dRet = {}
# If the current width is larger than the bounds width
if w > bw:
dRet['x'] = int(round((w - bw) / 2.0))
dRet['y'] = 0
dRet['w'] = int(bw + round((w - bw) / 2.0))
dRet['h'] = bh
# Else if the current height is larger than the bounds height
else:
dRet['x'] = 0
dRet['y'] = int(round((h - bh) / 2.0))
dRet['w'] = bw
dRet['h'] = int(bh + round((h - bh) / 2.0))
# Return the region
return dRet | 0.718199 | 0.180071 |
from interpreter import commands
#----------READER----------
class Reader:
pos = 0
data = ''
def eof(self):
return self.pos >= len(self.data)
def move_cursor(self, offset):
self.pos = self.pos + offset
def load_text(self, text):
self.data = text
self.pos = 0
def can_peek(self, n):
return (self.pos + n) < len(self.data)
def peek_next(self, offset=0):
if self.eof():
return None
else:
return self.data[self.pos+offset]
def read_next(self):
if self.eof():
return None
else:
c = self.data[self.pos]
self.pos += 1
return c
#----------TOKENIZER----------
class SpecialDelimiter:
IF='i'
ELSE='e'
STRING='"'
SEPARATOR=' '
CLOSE_BRACKET='}'
class TokenType:
LOOP = 'LOOP'
IF = 'IF'
ELSE = 'ELSE'
CODE = 'CODE'
FUNCTION = 'FUNC'
STRING = 'STR'
COMPRESSED_STRING = 'CSTR'
CLOSE_BRACKET = 'CLOSE'
NUMBER = 'NUM'
UNKNOWN = 'UNKN'
SINGLE_INSTRUCTION = 'INST1'
DOUBLE_INSTRUCTION = 'INST2'
WHISTESPACE = 'WSPACE'
class Token:
type = None
data = ''
def __init__(self, type, data):
self.type = type
self.data = data
def __str__(self):
return '({} : {})'.format(self.type, self.data)
def __repr__(self):
return '({} : {})'.format(self.type, self.data)
class Tokenizer:
def __init__(self):
self.reader = Reader()
def tokenize_string(self):
self.reader.read_next()
s = ''
c = ''
while c != SpecialDelimiter.STRING:
s += c
c = self.reader.read_next()
return Token(TokenType.STRING, s)
def tokenize_command(self):
c1 = self.reader.peek_next(0)
c2 = ''
if self.reader.can_peek(1):
c2 = self.reader.peek_next(0) + self.reader.peek_next(1)
if c2 in commands.keys():
self.reader.read_next()
self.reader.read_next()
return Token(TokenType.DOUBLE_INSTRUCTION, c2)
elif c1 in commands.keys():
self.reader.read_next()
return Token(TokenType.SINGLE_INSTRUCTION, c1)
else:
Token(TokenType.UNKNOWN, self.next_char())
def tokenize_number(self):
s = ''
c = ''
has_dot = False
while True:
if not self.reader.can_peek(0):
break
c = self.reader.read_next()
if c not in '0123456789.' or (c=='.' and has_dot):
self.reader.move_cursor(-1)
break
if c == '.':
has_dot = True
s += c
return Token(TokenType.NUMBER, s)
def load_text(self, text):
self.reader.load_text(text)
def next_char(self):
return self.reader.read_next()
def read_all(self):
tokens = []
while not self.reader.eof():
tokens.append(self.read_next())
return tokens
def read_next(self):
c = self.reader.peek_next()
if c == SpecialDelimiter.IF:
return Token(TokenType.IF, self.next_char())
elif c == SpecialDelimiter.STRING:
return self.tokenize_string()
elif c == SpecialDelimiter.CLOSE_BRACKET:
return Token(TokenType.CLOSE_BRACKET, self.next_char())
elif c == SpecialDelimiter.ELSE:
return Token(TokenType.ELSE, self.next_char())
elif c in '0123456789':
return self.tokenize_number()
elif c == SpecialDelimiter.SEPARATOR:
while self.reader.peek_next() == ' ':
self.next_char()
return Token(TokenType.WHISTESPACE, '')
else:
return self.tokenize_command()
#----------AST----------
class Node:
children = []
def execute(self):
for c in self.children:
c.execute()
class ConditionalNode(Node):
nif = Node()
nelse = Node()
class CommandNode(Node):
value = ''
class NumberNode(Node):
value = 0
class TextNode(Node):
value = ''
#----------AST Interpreter----------
class ASTInterpreter:
interpreter = None
root = None
def run_node(self,node,inter):
if node is TextNode or node is NumberNode:
inter.push(node.value)
elif node is CommandNode:
inter.execute(node.value)
elif node is ConditionalNode:
if inter.pop_truthy():
self.run_node(node.nif, inter)
else:
self.run_node(node.nelse, inter)
else:
for n in node.children:
self.run_node(n, inter) | src/interpreters/ysabel/python/ysabel_parser.py | from interpreter import commands
#----------READER----------
class Reader:
pos = 0
data = ''
def eof(self):
return self.pos >= len(self.data)
def move_cursor(self, offset):
self.pos = self.pos + offset
def load_text(self, text):
self.data = text
self.pos = 0
def can_peek(self, n):
return (self.pos + n) < len(self.data)
def peek_next(self, offset=0):
if self.eof():
return None
else:
return self.data[self.pos+offset]
def read_next(self):
if self.eof():
return None
else:
c = self.data[self.pos]
self.pos += 1
return c
#----------TOKENIZER----------
class SpecialDelimiter:
IF='i'
ELSE='e'
STRING='"'
SEPARATOR=' '
CLOSE_BRACKET='}'
class TokenType:
LOOP = 'LOOP'
IF = 'IF'
ELSE = 'ELSE'
CODE = 'CODE'
FUNCTION = 'FUNC'
STRING = 'STR'
COMPRESSED_STRING = 'CSTR'
CLOSE_BRACKET = 'CLOSE'
NUMBER = 'NUM'
UNKNOWN = 'UNKN'
SINGLE_INSTRUCTION = 'INST1'
DOUBLE_INSTRUCTION = 'INST2'
WHISTESPACE = 'WSPACE'
class Token:
type = None
data = ''
def __init__(self, type, data):
self.type = type
self.data = data
def __str__(self):
return '({} : {})'.format(self.type, self.data)
def __repr__(self):
return '({} : {})'.format(self.type, self.data)
class Tokenizer:
def __init__(self):
self.reader = Reader()
def tokenize_string(self):
self.reader.read_next()
s = ''
c = ''
while c != SpecialDelimiter.STRING:
s += c
c = self.reader.read_next()
return Token(TokenType.STRING, s)
def tokenize_command(self):
c1 = self.reader.peek_next(0)
c2 = ''
if self.reader.can_peek(1):
c2 = self.reader.peek_next(0) + self.reader.peek_next(1)
if c2 in commands.keys():
self.reader.read_next()
self.reader.read_next()
return Token(TokenType.DOUBLE_INSTRUCTION, c2)
elif c1 in commands.keys():
self.reader.read_next()
return Token(TokenType.SINGLE_INSTRUCTION, c1)
else:
Token(TokenType.UNKNOWN, self.next_char())
def tokenize_number(self):
s = ''
c = ''
has_dot = False
while True:
if not self.reader.can_peek(0):
break
c = self.reader.read_next()
if c not in '0123456789.' or (c=='.' and has_dot):
self.reader.move_cursor(-1)
break
if c == '.':
has_dot = True
s += c
return Token(TokenType.NUMBER, s)
def load_text(self, text):
self.reader.load_text(text)
def next_char(self):
return self.reader.read_next()
def read_all(self):
tokens = []
while not self.reader.eof():
tokens.append(self.read_next())
return tokens
def read_next(self):
c = self.reader.peek_next()
if c == SpecialDelimiter.IF:
return Token(TokenType.IF, self.next_char())
elif c == SpecialDelimiter.STRING:
return self.tokenize_string()
elif c == SpecialDelimiter.CLOSE_BRACKET:
return Token(TokenType.CLOSE_BRACKET, self.next_char())
elif c == SpecialDelimiter.ELSE:
return Token(TokenType.ELSE, self.next_char())
elif c in '0123456789':
return self.tokenize_number()
elif c == SpecialDelimiter.SEPARATOR:
while self.reader.peek_next() == ' ':
self.next_char()
return Token(TokenType.WHISTESPACE, '')
else:
return self.tokenize_command()
#----------AST----------
class Node:
children = []
def execute(self):
for c in self.children:
c.execute()
class ConditionalNode(Node):
nif = Node()
nelse = Node()
class CommandNode(Node):
value = ''
class NumberNode(Node):
value = 0
class TextNode(Node):
value = ''
#----------AST Interpreter----------
class ASTInterpreter:
interpreter = None
root = None
def run_node(self,node,inter):
if node is TextNode or node is NumberNode:
inter.push(node.value)
elif node is CommandNode:
inter.execute(node.value)
elif node is ConditionalNode:
if inter.pop_truthy():
self.run_node(node.nif, inter)
else:
self.run_node(node.nelse, inter)
else:
for n in node.children:
self.run_node(n, inter) | 0.417509 | 0.290427 |
import float32_convertors as f32cnv
import unittest
def plot_call_back(string):
pass
class TestFloatConversion(unittest.TestCase):
def setUp(self):
pass
def testInv(self):
""" """
# проверим обр. преобр
doubleOne = 1.0
doubleOneFromMCHIP = f32cnv.hex_mchip_f32_to_hex_ieee_f32("7F 00 00 00")
self.assertEqual(doubleOneFromMCHIP, doubleOne)
# проверим обр. преобр
doubleOne = 0.5
doubleOneFromMCHIP = f32cnv.hex_mchip_f32_to_hex_ieee_f32("7E 00 00 00")
self.assertEqual(doubleOneFromMCHIP, doubleOne)
def testInvIEEEOneValue(self):
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("3F 80 00 00"), 1.0)
def testInvMCHIPOneValue(self):
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32("7F 00 00 00"), 1.0)
def testInvIEEETwoValue(self):
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("40 00 00 00"), 2.0)
def testInvMCHIPTwoValue(self):
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32("80 00 00 00"), 2.0)
def test_ieee_one_value(self):
double_one = 1.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(double_one, plot_call_back)
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float(a), double_one)
def testIEEETwoValue(self):
doubleOne = 2.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float(a), doubleOne)
def testIEEEHalfValue(self):
doubleOne = 0.5
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float(a), doubleOne)
def testOneValue(self):
''' преобразование 1 и 2
ошибка 1 = 0.5
'''
doubleOne = 1.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32(doubleOneDirectCnvMCHIP), doubleOne)
def testTwoValue(self):
''' '''
doubleOne = 2.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32(doubleOneDirectCnvMCHIP), doubleOne)
def testHalfValue(self):
''' '''
doubleOne = 0.5
# проверка преобразования
message, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32(doubleOneDirectCnvMCHIP), doubleOne)
def testSimple(self):
''' Просто тест на работоспособность '''
# IEEE
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("43 1B A0 00"), 155.625)
# MCHIP
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("43 1B A0 00"), 155.625)
def testZero(self):
doubleOne = 0.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, None)
self.assertEqual(a[:-1], '00 00 00 00')
# Run tests
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestFloatConversion)
unittest.TextTestRunner(verbosity=2).run(suite) | matlab_ext/measurement/mc-assistant/source/py/convertors_simple_data_types/test_float32_convertors.py | import float32_convertors as f32cnv
import unittest
def plot_call_back(string):
pass
class TestFloatConversion(unittest.TestCase):
def setUp(self):
pass
def testInv(self):
""" """
# проверим обр. преобр
doubleOne = 1.0
doubleOneFromMCHIP = f32cnv.hex_mchip_f32_to_hex_ieee_f32("7F 00 00 00")
self.assertEqual(doubleOneFromMCHIP, doubleOne)
# проверим обр. преобр
doubleOne = 0.5
doubleOneFromMCHIP = f32cnv.hex_mchip_f32_to_hex_ieee_f32("7E 00 00 00")
self.assertEqual(doubleOneFromMCHIP, doubleOne)
def testInvIEEEOneValue(self):
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("3F 80 00 00"), 1.0)
def testInvMCHIPOneValue(self):
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32("7F 00 00 00"), 1.0)
def testInvIEEETwoValue(self):
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("40 00 00 00"), 2.0)
def testInvMCHIPTwoValue(self):
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32("80 00 00 00"), 2.0)
def test_ieee_one_value(self):
double_one = 1.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(double_one, plot_call_back)
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float(a), double_one)
def testIEEETwoValue(self):
doubleOne = 2.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float(a), doubleOne)
def testIEEEHalfValue(self):
doubleOne = 0.5
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float(a), doubleOne)
def testOneValue(self):
''' преобразование 1 и 2
ошибка 1 = 0.5
'''
doubleOne = 1.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32(doubleOneDirectCnvMCHIP), doubleOne)
def testTwoValue(self):
''' '''
doubleOne = 2.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32(doubleOneDirectCnvMCHIP), doubleOne)
def testHalfValue(self):
''' '''
doubleOne = 0.5
# проверка преобразования
message, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, plot_call_back)
self.assertEqual(f32cnv.hex_mchip_f32_to_hex_ieee_f32(doubleOneDirectCnvMCHIP), doubleOne)
def testSimple(self):
''' Просто тест на работоспособность '''
# IEEE
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("43 1B A0 00"), 155.625)
# MCHIP
self.assertEqual(f32cnv.hex_ieee_f32_str_to_float("43 1B A0 00"), 155.625)
def testZero(self):
doubleOne = 0.0
# проверка преобразования
m, a, doubleOneDirectCnvMCHIP = f32cnv.pack_f32_into_i32(doubleOne, None)
self.assertEqual(a[:-1], '00 00 00 00')
# Run tests
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestFloatConversion)
unittest.TextTestRunner(verbosity=2).run(suite) | 0.495117 | 0.338268 |
from idds.core import collections
def add_collection(scope, name, coll_type=None, request_id=None, transform_id=None,
relation_type=None, coll_size=0, status=None, total_files=0, retries=0,
expired_at=None, coll_metadata=None):
"""
Add a collection.
:param scope: The scope of the request data.
:param name: The name of the request data.
:param type: The type of dataset as dataset or container.
:param request_id: The request id related to this collection.
:param transform_id: The transform id related to this collection.
:param relation_type: The relation between this collection and its transform,
such as Input, Output, Log and so on.
:param size: The size of the collection.
:param status: The status.
:param total_files: Number of total files.
:param retries: Number of retries.
:param expired_at: The datetime when it expires.
:param coll_metadata: The metadata as json.
:returns: collection id.
"""
kwargs = {'scope': scope, 'name': name, 'coll_type': coll_type, 'request_id': request_id,
'transform_id': transform_id, 'relation_type': relation_type, 'coll_size': coll_size,
'status': status, 'total_files': total_files, 'retries': retries,
'expired_at': expired_at, 'coll_metadata': coll_metadata}
return collections.add_collection(**kwargs)
def get_collection(coll_id=None, transform_id=None, relation_type=None):
"""
Get a collection or raise a NoObject exception.
:param coll_id: The id of the collection.
:param transform_id: The transform id related to this collection.
:param relation_type: The relation between this collection and its transform,
such as Input, Output, Log and so on.
:returns: Collection.
"""
return collections.get_collection(coll_id=coll_id, transform_id=transform_id, relation_type=relation_type)
def update_collection(coll_id, parameters):
"""
update a collection.
:param coll_id: the collection id.
:param parameters: A dictionary of parameters.
"""
return collections.update_collection(coll_id=coll_id, parameters=parameters)
def delete_collection(coll_id=None):
"""
delete a collection.
:param request_id: The id of the request.
"""
return collections.delete_collection(coll_id=coll_id) | main/lib/idds/api/collections.py | from idds.core import collections
def add_collection(scope, name, coll_type=None, request_id=None, transform_id=None,
relation_type=None, coll_size=0, status=None, total_files=0, retries=0,
expired_at=None, coll_metadata=None):
"""
Add a collection.
:param scope: The scope of the request data.
:param name: The name of the request data.
:param type: The type of dataset as dataset or container.
:param request_id: The request id related to this collection.
:param transform_id: The transform id related to this collection.
:param relation_type: The relation between this collection and its transform,
such as Input, Output, Log and so on.
:param size: The size of the collection.
:param status: The status.
:param total_files: Number of total files.
:param retries: Number of retries.
:param expired_at: The datetime when it expires.
:param coll_metadata: The metadata as json.
:returns: collection id.
"""
kwargs = {'scope': scope, 'name': name, 'coll_type': coll_type, 'request_id': request_id,
'transform_id': transform_id, 'relation_type': relation_type, 'coll_size': coll_size,
'status': status, 'total_files': total_files, 'retries': retries,
'expired_at': expired_at, 'coll_metadata': coll_metadata}
return collections.add_collection(**kwargs)
def get_collection(coll_id=None, transform_id=None, relation_type=None):
"""
Get a collection or raise a NoObject exception.
:param coll_id: The id of the collection.
:param transform_id: The transform id related to this collection.
:param relation_type: The relation between this collection and its transform,
such as Input, Output, Log and so on.
:returns: Collection.
"""
return collections.get_collection(coll_id=coll_id, transform_id=transform_id, relation_type=relation_type)
def update_collection(coll_id, parameters):
"""
update a collection.
:param coll_id: the collection id.
:param parameters: A dictionary of parameters.
"""
return collections.update_collection(coll_id=coll_id, parameters=parameters)
def delete_collection(coll_id=None):
"""
delete a collection.
:param request_id: The id of the request.
"""
return collections.delete_collection(coll_id=coll_id) | 0.795975 | 0.265279 |
import random
import threading
import time
from datetime import datetime
import json
import os
from bson.json_util import dumps
from common.json_encoder import JSONFriendlyEncoder
from common.logger import get_logger
from common.timer import RepeatedTimer
from helpers.file_helper import FileHelper
from helpers.s3_helper import S3Helper
logger = get_logger(__name__)
class DocumentBatcher:
def __init__(self, cluster_name, namespace, database_name, collection_name, dynamo_helper):
self.__cluster_name = cluster_name
self.__namespace = namespace
self.__database_name = database_name
self.__collection_name = collection_name
self.__current_change = None
self.__previous_change = None
self.__resume_token = None
self.__batch_id = 0
self.__batch = []
self.__timer = None
self.__event = threading.Event()
self.__fh = FileHelper()
self.__dh = dynamo_helper
def initialize(self, token):
if token is not None:
logger.info("Initializing the document batcher with token: %s", json.dumps(token, cls=JSONFriendlyEncoder))
self.__batch_id = token["batch_id"] + 1 # use the next batch id
self.__previous_change = json.loads(token["validation_document"])
self.__resume_token = json.loads(token["resume_token"])
self.__timer = RepeatedTimer(10, self.__on_time_elapsed)
self.__timer.start()
self.__event.set()
def on_change_event(self, cluster_name, database_name, collection_name, change):
# full_document = change["fullDocument"]
# TODO: What are you doing with the clustrer_name and other input parameters
self.__event.wait()
self.__previous_change = self.__current_change
self.__current_change = change
self.__batch.append(change)
def __on_time_elapsed(self):
self.__event.clear()
# TODO: control passed wait in on_change_event, but not appended yet.
# poor man's hack to handle above scenario. sleep for upto 0.1 second
time.sleep(random.uniform(0.01, 0.1))
# TODO: Allow saving empty batch even to help track the heartbeats
s3_key_name = "null"
if len(self.__batch) > 0:
s3_key_name = "{}/{}/{}/{}-batch-{:06.0f}.json".format(
self.__cluster_name, self.__database_name,
self.__collection_name, self.__namespace, self.__batch_id)
self.__write_to_s3(s3_key_name)
self.__update_dynamodb(s3_key_name)
self.__batch_id = self.__batch_id + 1
self.__batch[:] = []
self.__event.set()
def __write_to_s3(self, s3_key_name):
# TODO: handle any failures
file_path = self.__create_local_batch_file()
self.__upload_to_s3(file_path, s3_key_name)
self.__fh.delete_file(file_path)
def __update_dynamodb(self, s3_key_name):
# TODO: handle any failures
# TODO: do it in transactions
# update watchers with namespace and current batch id, last token etc
# insert change_events with namespace
timestamp = datetime.utcnow().isoformat()
watcher_item = self.__get_watcher_item(timestamp)
change_event_item = self.__get_change_event_item(s3_key_name, timestamp)
self.__dh.save_watcher(watcher_item)
self.__dh.save_change_event(change_event_item)
def __get_watcher_item(self, timestamp):
token = None
if self.__previous_change is not None:
token = self.__previous_change["_id"]
else:
token = self.__resume_token
item = {
"watcher_id": "{}::{}".format(self.__cluster_name, self.__namespace),
"cluster_name": self.__cluster_name,
"namespace": self.__namespace,
"resume_token": dumps(token),
"validation_document": dumps(self.__current_change),
"batch_id": self.__batch_id,
"document_count": len(self.__batch),
"created_timestamp": timestamp}
return item
def __get_change_event_item(self, s3_link, timestamp):
token = None
if self.__previous_change is not None:
# TODO: possibly ["_id"] even on resume token
token = self.__previous_change["_id"]
else:
token = self.__resume_token
item = {
"watcher_id": "{}::{}".format(self.__cluster_name, self.__namespace),
"batch_status": "{}::{:06.0f}".format("false", self.__batch_id),
"cluster_name": self.__cluster_name,
"namespace": self.__namespace,
"batch_id": self.__batch_id,
"s3_link": s3_link,
"created_timestamp": timestamp,
"document_count": len(self.__batch),
"is_processed": False,
"resume_token": dumps(token),
"processed_timestamp": "9999-12-31T00:00:00.000000"}
return item
def __create_local_batch_file(self):
lines = []
for item in self.__batch:
lines.append("{}\n".format(dumps(item["fullDocument"])))
temp_file = self.__fh.create_file()
with open(temp_file.name, 'w') as stream:
stream.writelines(lines)
return temp_file.name
def __upload_to_s3(self, file_path, key_name):
s3h = S3Helper()
bucket_name = os.environ['S3_CHANGE_FEED_BUCKET_NAME']
s3h.upload(file_path, bucket_name, key_name)
    def close(self):
        """Stop the periodic flush timer and wait out any in-flight flush."""
        logger.info("Cleaning up the Document Batcher for namespace: %s", self.__namespace)
        if self.__timer is not None:
            self.__timer.stop()
        # wait until writing to s3/dynamo is done
self.__event.wait() | cosmos-db-migration-utility/src/migrator-app/helpers/document_batcher.py | import random
import threading
import time
from datetime import datetime
import json
import os
from bson.json_util import dumps
from common.json_encoder import JSONFriendlyEncoder
from common.logger import get_logger
from common.timer import RepeatedTimer
from helpers.file_helper import FileHelper
from helpers.s3_helper import S3Helper
logger = get_logger(__name__)
class DocumentBatcher:
    """Accumulates MongoDB change-stream events and, every 10 seconds,
    flushes them as a JSON-lines batch file to S3 while recording batch
    metadata and resume tokens in DynamoDB.
    """

    def __init__(self, cluster_name, namespace, database_name, collection_name, dynamo_helper):
        self.__cluster_name = cluster_name
        self.__namespace = namespace
        self.__database_name = database_name
        self.__collection_name = collection_name
        self.__current_change = None    # most recent change event seen
        self.__previous_change = None   # change seen before the current one
        self.__resume_token = None      # resume token restored from saved state
        self.__batch_id = 0
        self.__batch = []
        self.__timer = None
        # Gate between the consumer thread (on_change_event) and the flush
        # timer: cleared while a flush is in progress.
        self.__event = threading.Event()
        self.__fh = FileHelper()
        self.__dh = dynamo_helper

    def initialize(self, token):
        """Restore batching state from a saved watcher token (if any), then
        start the periodic flush timer.
        """
        if token is not None:
            logger.info("Initializing the document batcher with token: %s", json.dumps(token, cls=JSONFriendlyEncoder))
            self.__batch_id = token["batch_id"] + 1  # use the next batch id
            self.__previous_change = json.loads(token["validation_document"])
            self.__resume_token = json.loads(token["resume_token"])
        self.__timer = RepeatedTimer(10, self.__on_time_elapsed)
        self.__timer.start()
        self.__event.set()

    def on_change_event(self, cluster_name, database_name, collection_name, change):
        """Queue one change event; blocks while a flush is in progress."""
        # TODO: cluster_name/database_name/collection_name are currently unused.
        self.__event.wait()
        self.__previous_change = self.__current_change
        self.__current_change = change
        self.__batch.append(change)

    def __on_time_elapsed(self):
        """Timer callback: flush the accumulated batch to S3 and DynamoDB."""
        self.__event.clear()
        # TODO: a producer may have passed the wait() in on_change_event but
        # not appended yet; sleep briefly as a best-effort mitigation.
        time.sleep(random.uniform(0.01, 0.1))
        # TODO: Allow saving empty batch even to help track the heartbeats
        s3_key_name = "null"
        if len(self.__batch) > 0:
            s3_key_name = "{}/{}/{}/{}-batch-{:06.0f}.json".format(
                self.__cluster_name, self.__database_name,
                self.__collection_name, self.__namespace, self.__batch_id)
            self.__write_to_s3(s3_key_name)
            self.__update_dynamodb(s3_key_name)
            self.__batch_id = self.__batch_id + 1
            self.__batch[:] = []
        self.__event.set()

    def __write_to_s3(self, s3_key_name):
        """Write the batch to a local temp file, upload it, then delete it."""
        # TODO: handle any failures
        file_path = self.__create_local_batch_file()
        self.__upload_to_s3(file_path, s3_key_name)
        self.__fh.delete_file(file_path)

    def __update_dynamodb(self, s3_key_name):
        """Persist watcher state and the change-event record for this batch."""
        # TODO: handle any failures
        # TODO: do it in transactions
        # NOTE(review): utcnow() yields a naive timestamp -- confirm consumers
        # expect UTC-without-offset before switching to an aware datetime.
        timestamp = datetime.utcnow().isoformat()
        watcher_item = self.__get_watcher_item(timestamp)
        change_event_item = self.__get_change_event_item(s3_key_name, timestamp)
        self.__dh.save_watcher(watcher_item)
        self.__dh.save_change_event(change_event_item)

    def __get_resume_token(self):
        """Return the token to resume from: the previous change's _id when one
        exists, otherwise the stored resume token. (Previously duplicated in
        both item builders below.)
        """
        if self.__previous_change is not None:
            return self.__previous_change["_id"]
        return self.__resume_token

    def __get_watcher_item(self, timestamp):
        """Build the watchers-table item describing the current batch state."""
        token = self.__get_resume_token()
        item = {
            "watcher_id": "{}::{}".format(self.__cluster_name, self.__namespace),
            "cluster_name": self.__cluster_name,
            "namespace": self.__namespace,
            "resume_token": dumps(token),
            "validation_document": dumps(self.__current_change),
            "batch_id": self.__batch_id,
            "document_count": len(self.__batch),
            "created_timestamp": timestamp}
        return item

    def __get_change_event_item(self, s3_link, timestamp):
        """Build the change_events-table item pointing at the S3 batch file."""
        token = self.__get_resume_token()
        item = {
            "watcher_id": "{}::{}".format(self.__cluster_name, self.__namespace),
            "batch_status": "{}::{:06.0f}".format("false", self.__batch_id),
            "cluster_name": self.__cluster_name,
            "namespace": self.__namespace,
            "batch_id": self.__batch_id,
            "s3_link": s3_link,
            "created_timestamp": timestamp,
            "document_count": len(self.__batch),
            "is_processed": False,
            "resume_token": dumps(token),
            "processed_timestamp": "9999-12-31T00:00:00.000000"}
        return item

    def __create_local_batch_file(self):
        """Dump each change's fullDocument as one JSON line into a temp file."""
        lines = []
        for item in self.__batch:
            lines.append("{}\n".format(dumps(item["fullDocument"])))
        temp_file = self.__fh.create_file()
        with open(temp_file.name, 'w') as stream:
            stream.writelines(lines)
        return temp_file.name

    def __upload_to_s3(self, file_path, key_name):
        """Upload *file_path* to the change-feed bucket under *key_name*."""
        s3h = S3Helper()
        bucket_name = os.environ['S3_CHANGE_FEED_BUCKET_NAME']
        s3h.upload(file_path, bucket_name, key_name)

    def close(self):
        """Stop the periodic flush timer and wait out any in-flight flush."""
        logger.info("Cleaning up the Document Batcher for namespace: %s", self.__namespace)
        if self.__timer is not None:
            self.__timer.stop()
        # wait until writing to s3/dynamo is done
        self.__event.wait()
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from ppdet.core.workspace import register, serializable
from ..backbones.darknet import ConvBNLayer
import numpy as np
from ..shape_spec import ShapeSpec
__all__ = ['YOLOv3FPN', 'PPYOLOFPN']
class YoloDetBlock(nn.Layer):
    """YOLOv3 detection block: five alternating 1x1/3x3 ConvBN layers that
    produce a `route` feature (forwarded to the next FPN level) and a `tip`
    feature (fed to the YOLO head).
    """
    def __init__(self, ch_in, channel, norm_type, name):
        # ch_in: input channels; channel: internal width (must be even since
        # the 3x3 convs run at channel * 2); name: weight-name prefix.
        super(YoloDetBlock, self).__init__()
        self.ch_in = ch_in
        self.channel = channel
        assert channel % 2 == 0, \
            "channel {} cannot be divided by 2".format(channel)
        # [sublayer name, in_channels, out_channels, kernel, weight-name suffix]
        conv_def = [
            ['conv0', ch_in, channel, 1, '.0.0'],
            ['conv1', channel, channel * 2, 3, '.0.1'],
            ['conv2', channel * 2, channel, 1, '.1.0'],
            ['conv3', channel, channel * 2, 3, '.1.1'],
            ['route', channel * 2, channel, 1, '.2'],
        ]
        self.conv_module = nn.Sequential()
        for idx, (conv_name, ch_in, ch_out, filter_size,
                  post_name) in enumerate(conv_def):
            self.conv_module.add_sublayer(
                conv_name,
                ConvBNLayer(
                    ch_in=ch_in,
                    ch_out=ch_out,
                    filter_size=filter_size,
                    padding=(filter_size - 1) // 2,
                    norm_type=norm_type,
                    name=name + post_name))
        self.tip = ConvBNLayer(
            ch_in=channel,
            ch_out=channel * 2,
            filter_size=3,
            padding=1,
            norm_type=norm_type,
            name=name + '.tip')
    def forward(self, inputs):
        route = self.conv_module(inputs)
        tip = self.tip(route)
        return route, tip
class SPP(nn.Layer):
    """Spatial Pyramid Pooling: concatenates the input with several stride-1
    max-pools of different kernel sizes, then fuses the result with a conv.
    """
    def __init__(self, ch_in, ch_out, k, pool_size, norm_type, name):
        # ch_in must equal (len(pool_size) + 1) * channels of the input,
        # since forward() concatenates the input with every pooled copy.
        super(SPP, self).__init__()
        self.pool = []
        for size in pool_size:
            # BUG FIX: every pool used to be registered under the same
            # sublayer name '{}.pool1', so each add_sublayer call replaced
            # the previous registration; include the kernel size so each
            # pool gets a unique name (MaxPool2D has no parameters, so
            # state dicts are unaffected).
            pool = self.add_sublayer(
                '{}.pool{}'.format(name, size),
                nn.MaxPool2D(
                    kernel_size=size,
                    stride=1,
                    padding=size // 2,
                    ceil_mode=False))
            self.pool.append(pool)
        self.conv = ConvBNLayer(
            ch_in, ch_out, k, padding=k // 2, norm_type=norm_type, name=name)
    def forward(self, x):
        outs = [x]
        for pool in self.pool:
            outs.append(pool(x))
        y = paddle.concat(outs, axis=1)
        y = self.conv(y)
        return y
class DropBlock(nn.Layer):
    """DropBlock regularization: drops contiguous block_size x block_size
    regions of the feature map instead of independent activations.
    """
    def __init__(self, block_size, keep_prob, name):
        super(DropBlock, self).__init__()
        self.block_size = block_size
        self.keep_prob = keep_prob
        self.name = name
    def forward(self, x):
        # Identity at eval time or when nothing would be dropped.
        if not self.training or self.keep_prob == 1:
            return x
        else:
            # Seed probability chosen so the expected dropped fraction is
            # ~(1 - keep_prob); the loop compensates for blocks that would
            # straddle the spatial border.
            gamma = (1. - self.keep_prob) / (self.block_size**2)
            for s in x.shape[2:]:
                gamma *= s / (s - self.block_size + 1)
            # Bernoulli(gamma) seed mask; max-pooling dilates each seed into
            # a block_size x block_size dropped region.
            matrix = paddle.cast(paddle.rand(x.shape, x.dtype) < gamma, x.dtype)
            mask_inv = F.max_pool2d(
                matrix, self.block_size, stride=1, padding=self.block_size // 2)
            mask = 1. - mask_inv
            # Rescale so the expected activation magnitude stays unchanged.
            y = x * mask * (mask.numel() / mask.sum())
            return y
class CoordConv(nn.Layer):
    """Conv layer that appends normalized x/y coordinate channels to its
    input before convolving (CoordConv).
    """
    def __init__(self, ch_in, ch_out, filter_size, padding, norm_type, name):
        super(CoordConv, self).__init__()
        # +2 input channels for the gx/gy coordinate maps added in forward().
        self.conv = ConvBNLayer(
            ch_in + 2,
            ch_out,
            filter_size=filter_size,
            padding=padding,
            norm_type=norm_type,
            name=name)
    def forward(self, x):
        b = x.shape[0]
        h = x.shape[2]
        w = x.shape[3]
        # gx/gy: per-pixel x/y coordinates linearly scaled to [-1, 1],
        # broadcast to (b, 1, h, w); excluded from gradient computation.
        gx = paddle.arange(w, dtype='float32') / (w - 1.) * 2.0 - 1.
        gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])
        gx.stop_gradient = True
        gy = paddle.arange(h, dtype='float32') / (h - 1.) * 2.0 - 1.
        gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])
        gy.stop_gradient = True
        y = paddle.concat([x, gx, gy], axis=1)
        y = self.conv(y)
        return y
class PPYOLODetBlock(nn.Layer):
    """Detection block assembled from a declarative config.

    cfg is a list of [layer_name, layer_class, args, kwargs]; every entry
    except the last forms the sequential `route` branch, and the last entry
    builds the `tip` layer fed to the head.
    """
    def __init__(self, cfg, name):
        super(PPYOLODetBlock, self).__init__()
        self.conv_module = nn.Sequential()
        for idx, (conv_name, layer, args, kwargs) in enumerate(cfg[:-1]):
            # Inject the hierarchical weight name before instantiation.
            kwargs.update(name='{}.{}'.format(name, conv_name))
            self.conv_module.add_sublayer(conv_name, layer(*args, **kwargs))
        conv_name, layer, args, kwargs = cfg[-1]
        kwargs.update(name='{}.{}'.format(name, conv_name))
        self.tip = layer(*args, **kwargs)
    def forward(self, inputs):
        route = self.conv_module(inputs)
        tip = self.tip(route)
        return route, tip
@register
@serializable
class YOLOv3FPN(nn.Layer):
    """YOLOv3 FPN neck: one YoloDetBlock per backbone level (processed
    coarse to fine), with a 1x1 transition conv + 2x upsample feeding each
    level's route feature into the next finer level via channel concat.
    """
    __shared__ = ['norm_type']
    def __init__(self, in_channels=[256, 512, 1024], norm_type='bn'):
        # in_channels: backbone output channels, ordered fine -> coarse.
        super(YOLOv3FPN, self).__init__()
        assert len(in_channels) > 0, "in_channels length should > 0"
        self.in_channels = in_channels
        self.num_blocks = len(in_channels)
        self._out_channels = []
        self.yolo_blocks = []
        self.routes = []
        for i in range(self.num_blocks):
            name = 'yolo_block.{}'.format(i)
            in_channel = in_channels[-i - 1]
            if i > 0:
                # account for the upsampled route concatenated in forward()
                in_channel += 512 // (2**i)
            yolo_block = self.add_sublayer(
                name,
                YoloDetBlock(
                    in_channel,
                    channel=512 // (2**i),
                    norm_type=norm_type,
                    name=name))
            self.yolo_blocks.append(yolo_block)
            # tip layer output channel doubled
            self._out_channels.append(1024 // (2**i))
            if i < self.num_blocks - 1:
                name = 'yolo_transition.{}'.format(i)
                route = self.add_sublayer(
                    name,
                    ConvBNLayer(
                        ch_in=512 // (2**i),
                        ch_out=256 // (2**i),
                        filter_size=1,
                        stride=1,
                        padding=0,
                        norm_type=norm_type,
                        name=name))
                self.routes.append(route)
    def forward(self, blocks):
        # blocks: backbone features fine -> coarse; reversed so the coarsest
        # level is processed first.
        assert len(blocks) == self.num_blocks
        blocks = blocks[::-1]
        yolo_feats = []
        for i, block in enumerate(blocks):
            if i > 0:
                block = paddle.concat([route, block], axis=1)
            route, tip = self.yolo_blocks[i](block)
            yolo_feats.append(tip)
            if i < self.num_blocks - 1:
                route = self.routes[i](route)
                route = F.interpolate(route, scale_factor=2.)
        return yolo_feats
    @classmethod
    def from_config(cls, cfg, input_shape):
        return {'in_channels': [i.channels for i in input_shape], }
    @property
    def out_shape(self):
        # one ShapeSpec per detection level, coarse -> fine
        return [ShapeSpec(channels=c) for c in self._out_channels]
@register
@serializable
class PPYOLOFPN(nn.Layer):
    """PP-YOLO FPN neck: a YOLOv3-style pyramid whose per-level blocks are
    assembled from a declarative config, with optional CoordConv, SPP and
    DropBlock modules.
    """
    __shared__ = ['norm_type']

    def __init__(self, in_channels=[512, 1024, 2048], norm_type='bn', **kwargs):
        """
        Args:
            in_channels (list): backbone output channels, fine -> coarse.
            norm_type (str): normalization used inside the conv layers.
        Keyword Args:
            coord_conv (bool): use CoordConv instead of plain ConvBNLayer.
            drop_block (bool): insert a DropBlock into each detection block.
            block_size (int): DropBlock block size (only read when drop_block).
            keep_prob (float): DropBlock keep probability.
            spp (bool): add an SPP module on the coarsest level.
            conv_block_num (int): number of 1x1/3x3 conv pairs per level;
                only 0 and 2 are supported.
        """
        super(PPYOLOFPN, self).__init__()
        assert len(in_channels) > 0, "in_channels length should > 0"
        self.in_channels = in_channels
        self.num_blocks = len(in_channels)
        # parse kwargs
        self.coord_conv = kwargs.get('coord_conv', False)
        self.drop_block = kwargs.get('drop_block', False)
        if self.drop_block:
            self.block_size = kwargs.get('block_size', 3)
            self.keep_prob = kwargs.get('keep_prob', 0.9)
        self.spp = kwargs.get('spp', False)
        self.conv_block_num = kwargs.get('conv_block_num', 2)
        # Fail fast: the cfg assembly below only covers these two layouts
        # (previously an unsupported value left `cfg` unassigned and raised
        # UnboundLocalError further down).
        if self.conv_block_num not in (0, 2):
            raise ValueError(
                'conv_block_num only supports 0 or 2, got {}'.format(
                    self.conv_block_num))
        if self.coord_conv:
            ConvLayer = CoordConv
        else:
            ConvLayer = ConvBNLayer
        if self.drop_block:
            dropblock_cfg = [[
                'dropblock', DropBlock, [self.block_size, self.keep_prob],
                dict()
            ]]
        else:
            dropblock_cfg = []
        self._out_channels = []
        self.yolo_blocks = []
        self.routes = []
        for i, ch_in in enumerate(self.in_channels[::-1]):
            if i > 0:
                # account for the upsampled route concatenated in forward()
                ch_in += 512 // (2**i)
            channel = 64 * (2**self.num_blocks) // (2**i)
            base_cfg = []
            c_in, c_out = ch_in, channel
            for j in range(self.conv_block_num):
                base_cfg += [
                    [
                        'conv{}'.format(2 * j), ConvLayer, [c_in, c_out, 1],
                        dict(
                            padding=0, norm_type=norm_type)
                    ],
                    [
                        'conv{}'.format(2 * j + 1), ConvBNLayer,
                        [c_out, c_out * 2, 3], dict(
                            padding=1, norm_type=norm_type)
                    ],
                ]
                c_in, c_out = c_out * 2, c_out
            base_cfg += [[
                'route', ConvLayer, [c_in, c_out, 1], dict(
                    padding=0, norm_type=norm_type)
            ], [
                'tip', ConvLayer, [c_out, c_out * 2, 3], dict(
                    padding=1, norm_type=norm_type)
            ]]
            if self.conv_block_num == 2:
                if i == 0:
                    if self.spp:
                        spp_cfg = [[
                            'spp', SPP, [channel * 4, channel, 1], dict(
                                pool_size=[5, 9, 13], norm_type=norm_type)
                        ]]
                    else:
                        spp_cfg = []
                    # SPP goes after the first conv pair; DropBlock before
                    # the route/tip layers.
                    cfg = base_cfg[0:3] + spp_cfg + base_cfg[
                        3:4] + dropblock_cfg + base_cfg[4:6]
                else:
                    cfg = base_cfg[0:2] + dropblock_cfg + base_cfg[2:6]
            else:  # conv_block_num == 0 (validated above)
                if self.spp and i == 0:
                    spp_cfg = [[
                        'spp', SPP, [c_in * 4, c_in, 1], dict(
                            pool_size=[5, 9, 13], norm_type=norm_type)
                    ]]
                else:
                    spp_cfg = []
                cfg = spp_cfg + dropblock_cfg + base_cfg
            name = 'yolo_block.{}'.format(i)
            yolo_block = self.add_sublayer(name, PPYOLODetBlock(cfg, name))
            self.yolo_blocks.append(yolo_block)
            self._out_channels.append(channel * 2)
            if i < self.num_blocks - 1:
                name = 'yolo_transition.{}'.format(i)
                route = self.add_sublayer(
                    name,
                    ConvBNLayer(
                        ch_in=channel,
                        ch_out=256 // (2**i),
                        filter_size=1,
                        stride=1,
                        padding=0,
                        norm_type=norm_type,
                        name=name))
                self.routes.append(route)

    def forward(self, blocks):
        # blocks: backbone features fine -> coarse; tips returned coarse -> fine.
        assert len(blocks) == self.num_blocks
        blocks = blocks[::-1]
        yolo_feats = []
        for i, block in enumerate(blocks):
            if i > 0:
                block = paddle.concat([route, block], axis=1)
            route, tip = self.yolo_blocks[i](block)
            yolo_feats.append(tip)
            if i < self.num_blocks - 1:
                route = self.routes[i](route)
                route = F.interpolate(route, scale_factor=2.)
        return yolo_feats

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {'in_channels': [i.channels for i in input_shape], }

    @property
    def out_shape(self):
        # one ShapeSpec per detection level, coarse -> fine
        return [ShapeSpec(channels=c) for c in self._out_channels]
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from ppdet.core.workspace import register, serializable
from ..backbones.darknet import ConvBNLayer
import numpy as np
from ..shape_spec import ShapeSpec
__all__ = ['YOLOv3FPN', 'PPYOLOFPN']
class YoloDetBlock(nn.Layer):
    """YOLOv3 detection block: five alternating 1x1/3x3 ConvBN layers that
    produce a `route` feature (forwarded to the next FPN level) and a `tip`
    feature (fed to the YOLO head).
    """
    def __init__(self, ch_in, channel, norm_type, name):
        # ch_in: input channels; channel: internal width (must be even since
        # the 3x3 convs run at channel * 2); name: weight-name prefix.
        super(YoloDetBlock, self).__init__()
        self.ch_in = ch_in
        self.channel = channel
        assert channel % 2 == 0, \
            "channel {} cannot be divided by 2".format(channel)
        # [sublayer name, in_channels, out_channels, kernel, weight-name suffix]
        conv_def = [
            ['conv0', ch_in, channel, 1, '.0.0'],
            ['conv1', channel, channel * 2, 3, '.0.1'],
            ['conv2', channel * 2, channel, 1, '.1.0'],
            ['conv3', channel, channel * 2, 3, '.1.1'],
            ['route', channel * 2, channel, 1, '.2'],
        ]
        self.conv_module = nn.Sequential()
        for idx, (conv_name, ch_in, ch_out, filter_size,
                  post_name) in enumerate(conv_def):
            self.conv_module.add_sublayer(
                conv_name,
                ConvBNLayer(
                    ch_in=ch_in,
                    ch_out=ch_out,
                    filter_size=filter_size,
                    padding=(filter_size - 1) // 2,
                    norm_type=norm_type,
                    name=name + post_name))
        self.tip = ConvBNLayer(
            ch_in=channel,
            ch_out=channel * 2,
            filter_size=3,
            padding=1,
            norm_type=norm_type,
            name=name + '.tip')
    def forward(self, inputs):
        route = self.conv_module(inputs)
        tip = self.tip(route)
        return route, tip
class SPP(nn.Layer):
    """Spatial Pyramid Pooling: concatenates the input with several stride-1
    max-pools of different kernel sizes, then fuses the result with a conv.
    """
    def __init__(self, ch_in, ch_out, k, pool_size, norm_type, name):
        # ch_in must equal (len(pool_size) + 1) * channels of the input,
        # since forward() concatenates the input with every pooled copy.
        super(SPP, self).__init__()
        self.pool = []
        for size in pool_size:
            # NOTE(review): every pool is registered under the same sublayer
            # name '{}.pool1', so each add_sublayer call replaces the previous
            # registration; forward() still works through self.pool, and
            # MaxPool2D has no parameters, but the names look unintended.
            pool = self.add_sublayer(
                '{}.pool1'.format(name),
                nn.MaxPool2D(
                    kernel_size=size,
                    stride=1,
                    padding=size // 2,
                    ceil_mode=False))
            self.pool.append(pool)
        self.conv = ConvBNLayer(
            ch_in, ch_out, k, padding=k // 2, norm_type=norm_type, name=name)
    def forward(self, x):
        outs = [x]
        for pool in self.pool:
            outs.append(pool(x))
        y = paddle.concat(outs, axis=1)
        y = self.conv(y)
        return y
class DropBlock(nn.Layer):
    """DropBlock regularization: drops contiguous block_size x block_size
    regions of the feature map instead of independent activations.
    """
    def __init__(self, block_size, keep_prob, name):
        super(DropBlock, self).__init__()
        self.block_size = block_size
        self.keep_prob = keep_prob
        self.name = name
    def forward(self, x):
        # Identity at eval time or when nothing would be dropped.
        if not self.training or self.keep_prob == 1:
            return x
        else:
            # Seed probability chosen so the expected dropped fraction is
            # ~(1 - keep_prob); the loop compensates for blocks that would
            # straddle the spatial border.
            gamma = (1. - self.keep_prob) / (self.block_size**2)
            for s in x.shape[2:]:
                gamma *= s / (s - self.block_size + 1)
            # Bernoulli(gamma) seed mask; max-pooling dilates each seed into
            # a block_size x block_size dropped region.
            matrix = paddle.cast(paddle.rand(x.shape, x.dtype) < gamma, x.dtype)
            mask_inv = F.max_pool2d(
                matrix, self.block_size, stride=1, padding=self.block_size // 2)
            mask = 1. - mask_inv
            # Rescale so the expected activation magnitude stays unchanged.
            y = x * mask * (mask.numel() / mask.sum())
            return y
class CoordConv(nn.Layer):
    """Conv layer that appends normalized x/y coordinate channels to its
    input before convolving (CoordConv).
    """
    def __init__(self, ch_in, ch_out, filter_size, padding, norm_type, name):
        super(CoordConv, self).__init__()
        # +2 input channels for the gx/gy coordinate maps added in forward().
        self.conv = ConvBNLayer(
            ch_in + 2,
            ch_out,
            filter_size=filter_size,
            padding=padding,
            norm_type=norm_type,
            name=name)
    def forward(self, x):
        b = x.shape[0]
        h = x.shape[2]
        w = x.shape[3]
        # gx/gy: per-pixel x/y coordinates linearly scaled to [-1, 1],
        # broadcast to (b, 1, h, w); excluded from gradient computation.
        gx = paddle.arange(w, dtype='float32') / (w - 1.) * 2.0 - 1.
        gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])
        gx.stop_gradient = True
        gy = paddle.arange(h, dtype='float32') / (h - 1.) * 2.0 - 1.
        gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])
        gy.stop_gradient = True
        y = paddle.concat([x, gx, gy], axis=1)
        y = self.conv(y)
        return y
class PPYOLODetBlock(nn.Layer):
    """Detection block assembled from a declarative config.

    cfg is a list of [layer_name, layer_class, args, kwargs]; every entry
    except the last forms the sequential `route` branch, and the last entry
    builds the `tip` layer fed to the head.
    """
    def __init__(self, cfg, name):
        super(PPYOLODetBlock, self).__init__()
        self.conv_module = nn.Sequential()
        for idx, (conv_name, layer, args, kwargs) in enumerate(cfg[:-1]):
            # Inject the hierarchical weight name before instantiation.
            kwargs.update(name='{}.{}'.format(name, conv_name))
            self.conv_module.add_sublayer(conv_name, layer(*args, **kwargs))
        conv_name, layer, args, kwargs = cfg[-1]
        kwargs.update(name='{}.{}'.format(name, conv_name))
        self.tip = layer(*args, **kwargs)
    def forward(self, inputs):
        route = self.conv_module(inputs)
        tip = self.tip(route)
        return route, tip
@register
@serializable
class YOLOv3FPN(nn.Layer):
    """YOLOv3 FPN neck: one YoloDetBlock per backbone level (processed
    coarse to fine), with a 1x1 transition conv + 2x upsample feeding each
    level's route feature into the next finer level via channel concat.
    """
    __shared__ = ['norm_type']
    def __init__(self, in_channels=[256, 512, 1024], norm_type='bn'):
        # in_channels: backbone output channels, ordered fine -> coarse.
        super(YOLOv3FPN, self).__init__()
        assert len(in_channels) > 0, "in_channels length should > 0"
        self.in_channels = in_channels
        self.num_blocks = len(in_channels)
        self._out_channels = []
        self.yolo_blocks = []
        self.routes = []
        for i in range(self.num_blocks):
            name = 'yolo_block.{}'.format(i)
            in_channel = in_channels[-i - 1]
            if i > 0:
                # account for the upsampled route concatenated in forward()
                in_channel += 512 // (2**i)
            yolo_block = self.add_sublayer(
                name,
                YoloDetBlock(
                    in_channel,
                    channel=512 // (2**i),
                    norm_type=norm_type,
                    name=name))
            self.yolo_blocks.append(yolo_block)
            # tip layer output channel doubled
            self._out_channels.append(1024 // (2**i))
            if i < self.num_blocks - 1:
                name = 'yolo_transition.{}'.format(i)
                route = self.add_sublayer(
                    name,
                    ConvBNLayer(
                        ch_in=512 // (2**i),
                        ch_out=256 // (2**i),
                        filter_size=1,
                        stride=1,
                        padding=0,
                        norm_type=norm_type,
                        name=name))
                self.routes.append(route)
    def forward(self, blocks):
        # blocks: backbone features fine -> coarse; reversed so the coarsest
        # level is processed first.
        assert len(blocks) == self.num_blocks
        blocks = blocks[::-1]
        yolo_feats = []
        for i, block in enumerate(blocks):
            if i > 0:
                block = paddle.concat([route, block], axis=1)
            route, tip = self.yolo_blocks[i](block)
            yolo_feats.append(tip)
            if i < self.num_blocks - 1:
                route = self.routes[i](route)
                route = F.interpolate(route, scale_factor=2.)
        return yolo_feats
    @classmethod
    def from_config(cls, cfg, input_shape):
        return {'in_channels': [i.channels for i in input_shape], }
    @property
    def out_shape(self):
        # one ShapeSpec per detection level, coarse -> fine
        return [ShapeSpec(channels=c) for c in self._out_channels]
@register
@serializable
class PPYOLOFPN(nn.Layer):
    """PP-YOLO FPN neck: a YOLOv3-style pyramid whose per-level blocks are
    assembled from a declarative config, with optional CoordConv, SPP and
    DropBlock modules.
    """
    __shared__ = ['norm_type']

    def __init__(self, in_channels=[512, 1024, 2048], norm_type='bn', **kwargs):
        """
        Args:
            in_channels (list): backbone output channels, fine -> coarse.
            norm_type (str): normalization used inside the conv layers.
        Keyword Args:
            coord_conv (bool): use CoordConv instead of plain ConvBNLayer.
            drop_block (bool): insert a DropBlock into each detection block.
            block_size (int): DropBlock block size (only read when drop_block).
            keep_prob (float): DropBlock keep probability.
            spp (bool): add an SPP module on the coarsest level.
            conv_block_num (int): number of 1x1/3x3 conv pairs per level;
                only 0 and 2 are supported.
        """
        super(PPYOLOFPN, self).__init__()
        assert len(in_channels) > 0, "in_channels length should > 0"
        self.in_channels = in_channels
        self.num_blocks = len(in_channels)
        # parse kwargs
        self.coord_conv = kwargs.get('coord_conv', False)
        self.drop_block = kwargs.get('drop_block', False)
        if self.drop_block:
            self.block_size = kwargs.get('block_size', 3)
            self.keep_prob = kwargs.get('keep_prob', 0.9)
        self.spp = kwargs.get('spp', False)
        self.conv_block_num = kwargs.get('conv_block_num', 2)
        # Fail fast: the cfg assembly below only covers these two layouts
        # (previously an unsupported value left `cfg` unassigned and raised
        # UnboundLocalError further down).
        if self.conv_block_num not in (0, 2):
            raise ValueError(
                'conv_block_num only supports 0 or 2, got {}'.format(
                    self.conv_block_num))
        if self.coord_conv:
            ConvLayer = CoordConv
        else:
            ConvLayer = ConvBNLayer
        if self.drop_block:
            dropblock_cfg = [[
                'dropblock', DropBlock, [self.block_size, self.keep_prob],
                dict()
            ]]
        else:
            dropblock_cfg = []
        self._out_channels = []
        self.yolo_blocks = []
        self.routes = []
        for i, ch_in in enumerate(self.in_channels[::-1]):
            if i > 0:
                # account for the upsampled route concatenated in forward()
                ch_in += 512 // (2**i)
            channel = 64 * (2**self.num_blocks) // (2**i)
            base_cfg = []
            c_in, c_out = ch_in, channel
            for j in range(self.conv_block_num):
                base_cfg += [
                    [
                        'conv{}'.format(2 * j), ConvLayer, [c_in, c_out, 1],
                        dict(
                            padding=0, norm_type=norm_type)
                    ],
                    [
                        'conv{}'.format(2 * j + 1), ConvBNLayer,
                        [c_out, c_out * 2, 3], dict(
                            padding=1, norm_type=norm_type)
                    ],
                ]
                c_in, c_out = c_out * 2, c_out
            base_cfg += [[
                'route', ConvLayer, [c_in, c_out, 1], dict(
                    padding=0, norm_type=norm_type)
            ], [
                'tip', ConvLayer, [c_out, c_out * 2, 3], dict(
                    padding=1, norm_type=norm_type)
            ]]
            if self.conv_block_num == 2:
                if i == 0:
                    if self.spp:
                        spp_cfg = [[
                            'spp', SPP, [channel * 4, channel, 1], dict(
                                pool_size=[5, 9, 13], norm_type=norm_type)
                        ]]
                    else:
                        spp_cfg = []
                    # SPP goes after the first conv pair; DropBlock before
                    # the route/tip layers.
                    cfg = base_cfg[0:3] + spp_cfg + base_cfg[
                        3:4] + dropblock_cfg + base_cfg[4:6]
                else:
                    cfg = base_cfg[0:2] + dropblock_cfg + base_cfg[2:6]
            else:  # conv_block_num == 0 (validated above)
                if self.spp and i == 0:
                    spp_cfg = [[
                        'spp', SPP, [c_in * 4, c_in, 1], dict(
                            pool_size=[5, 9, 13], norm_type=norm_type)
                    ]]
                else:
                    spp_cfg = []
                cfg = spp_cfg + dropblock_cfg + base_cfg
            name = 'yolo_block.{}'.format(i)
            yolo_block = self.add_sublayer(name, PPYOLODetBlock(cfg, name))
            self.yolo_blocks.append(yolo_block)
            self._out_channels.append(channel * 2)
            if i < self.num_blocks - 1:
                name = 'yolo_transition.{}'.format(i)
                route = self.add_sublayer(
                    name,
                    ConvBNLayer(
                        ch_in=channel,
                        ch_out=256 // (2**i),
                        filter_size=1,
                        stride=1,
                        padding=0,
                        norm_type=norm_type,
                        name=name))
                self.routes.append(route)

    def forward(self, blocks):
        # blocks: backbone features fine -> coarse; tips returned coarse -> fine.
        assert len(blocks) == self.num_blocks
        blocks = blocks[::-1]
        yolo_feats = []
        for i, block in enumerate(blocks):
            if i > 0:
                block = paddle.concat([route, block], axis=1)
            route, tip = self.yolo_blocks[i](block)
            yolo_feats.append(tip)
            if i < self.num_blocks - 1:
                route = self.routes[i](route)
                route = F.interpolate(route, scale_factor=2.)
        return yolo_feats

    @classmethod
    def from_config(cls, cfg, input_shape):
        return {'in_channels': [i.channels for i in input_shape], }

    @property
    def out_shape(self):
        # one ShapeSpec per detection level, coarse -> fine
        return [ShapeSpec(channels=c) for c in self._out_channels]
import logging
from iptv_proxy.providers.iptv_provider.map import ProviderMap
logger = logging.getLogger(__name__)
class VaderStreamsMap(ProviderMap):
    """Provider map for VaderStreams.

    Binds the provider's concrete implementation classes onto class-level
    attributes. Every attribute is None until initialize() runs; the imports
    are deferred into initialize(), presumably to avoid circular imports at
    module load time (TODO confirm).
    """
    __slots__ = []
    # Populated by initialize(); None until then.
    _api_class = None
    _channel_class = None
    _configuration_class = None
    _configuration_json_api_class = None
    _constants_class = None
    _database_access_class = None
    _database_class = None
    _epg_class = None
    _epg_source_enum = None
    _html_template_engine_class = None
    _optional_settings_class = None
    _program_class = None
    _setting_class = None
    _validations_class = None

    @classmethod
    def initialize(cls):
        """Import and bind all VaderStreams implementation classes."""
        from iptv_proxy.providers.vaderstreams.api import VaderStreams
        from iptv_proxy.providers.vaderstreams.configuration import (
            VaderStreamsConfiguration,
        )
        from iptv_proxy.providers.vaderstreams.configuration import (
            VaderStreamsOptionalSettings,
        )
        from iptv_proxy.providers.vaderstreams.constants import VaderStreamsConstants
        from iptv_proxy.providers.vaderstreams.data_access import (
            VaderStreamsDatabaseAccess,
        )
        from iptv_proxy.providers.vaderstreams.data_model import VaderStreamsChannel
        from iptv_proxy.providers.vaderstreams.data_model import VaderStreamsProgram
        from iptv_proxy.providers.vaderstreams.data_model import VaderStreamsSetting
        from iptv_proxy.providers.vaderstreams.db import VaderStreamsDatabase
        from iptv_proxy.providers.vaderstreams.enums import VaderStreamsEPGSource
        from iptv_proxy.providers.vaderstreams.epg import VaderStreamsEPG
        from iptv_proxy.providers.vaderstreams.html_template_engine import (
            VaderStreamsHTMLTemplateEngine,
        )
        from iptv_proxy.providers.vaderstreams.json_api import (
            VaderStreamsConfigurationJSONAPI,
        )
        from iptv_proxy.providers.vaderstreams.validations import (
            VaderStreamsValidations,
        )
        cls._api_class = VaderStreams
        cls._channel_class = VaderStreamsChannel
        cls._configuration_class = VaderStreamsConfiguration
        cls._configuration_json_api_class = VaderStreamsConfigurationJSONAPI
        cls._constants_class = VaderStreamsConstants
        cls._database_access_class = VaderStreamsDatabaseAccess
        cls._database_class = VaderStreamsDatabase
        cls._epg_class = VaderStreamsEPG
        cls._epg_source_enum = VaderStreamsEPGSource
        cls._html_template_engine_class = VaderStreamsHTMLTemplateEngine
        cls._optional_settings_class = VaderStreamsOptionalSettings
        cls._program_class = VaderStreamsProgram
        cls._setting_class = VaderStreamsSetting
        cls._validations_class = VaderStreamsValidations
from iptv_proxy.providers.iptv_provider.map import ProviderMap
logger = logging.getLogger(__name__)
class VaderStreamsMap(ProviderMap):
    """Provider map for VaderStreams.

    Binds the provider's concrete implementation classes onto class-level
    attributes. Every attribute is None until initialize() runs; the imports
    are deferred into initialize(), presumably to avoid circular imports at
    module load time (TODO confirm).
    """
    __slots__ = []
    # Populated by initialize(); None until then.
    _api_class = None
    _channel_class = None
    _configuration_class = None
    _configuration_json_api_class = None
    _constants_class = None
    _database_access_class = None
    _database_class = None
    _epg_class = None
    _epg_source_enum = None
    _html_template_engine_class = None
    _optional_settings_class = None
    _program_class = None
    _setting_class = None
    _validations_class = None

    @classmethod
    def initialize(cls):
        """Import and bind all VaderStreams implementation classes."""
        from iptv_proxy.providers.vaderstreams.api import VaderStreams
        from iptv_proxy.providers.vaderstreams.configuration import (
            VaderStreamsConfiguration,
        )
        from iptv_proxy.providers.vaderstreams.configuration import (
            VaderStreamsOptionalSettings,
        )
        from iptv_proxy.providers.vaderstreams.constants import VaderStreamsConstants
        from iptv_proxy.providers.vaderstreams.data_access import (
            VaderStreamsDatabaseAccess,
        )
        from iptv_proxy.providers.vaderstreams.data_model import VaderStreamsChannel
        from iptv_proxy.providers.vaderstreams.data_model import VaderStreamsProgram
        from iptv_proxy.providers.vaderstreams.data_model import VaderStreamsSetting
        from iptv_proxy.providers.vaderstreams.db import VaderStreamsDatabase
        from iptv_proxy.providers.vaderstreams.enums import VaderStreamsEPGSource
        from iptv_proxy.providers.vaderstreams.epg import VaderStreamsEPG
        from iptv_proxy.providers.vaderstreams.html_template_engine import (
            VaderStreamsHTMLTemplateEngine,
        )
        from iptv_proxy.providers.vaderstreams.json_api import (
            VaderStreamsConfigurationJSONAPI,
        )
        from iptv_proxy.providers.vaderstreams.validations import (
            VaderStreamsValidations,
        )
        cls._api_class = VaderStreams
        cls._channel_class = VaderStreamsChannel
        cls._configuration_class = VaderStreamsConfiguration
        cls._configuration_json_api_class = VaderStreamsConfigurationJSONAPI
        cls._constants_class = VaderStreamsConstants
        cls._database_access_class = VaderStreamsDatabaseAccess
        cls._database_class = VaderStreamsDatabase
        cls._epg_class = VaderStreamsEPG
        cls._epg_source_enum = VaderStreamsEPGSource
        cls._html_template_engine_class = VaderStreamsHTMLTemplateEngine
        cls._optional_settings_class = VaderStreamsOptionalSettings
        cls._program_class = VaderStreamsProgram
        cls._setting_class = VaderStreamsSetting
        cls._validations_class = VaderStreamsValidations
# https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_face_detection.html
# On the Jetson Nano, OpenCV comes preinstalled
# Data files are in /usr/sharc/OpenCV
from __future__ import print_function, division
from PIL import Image
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Defaults to 1280x720 @ 30fps
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of the window on the screen
def gstreamer_pipeline(
    capture_width=3280,
    capture_height=2464,
    display_width=820,
    display_height=616,
    framerate=21,
    flip_method=0,
):
    """Build the GStreamer pipeline string for the Jetson CSI camera.

    Captures capture_width x capture_height frames at `framerate` fps from
    nvarguscamerasrc, flips/rotates via nvvidconv (`flip_method`; 0 = none,
    2 = 180 degrees), scales to display_width x display_height, and converts
    to BGR for an appsink consumer such as cv2.VideoCapture.
    """
    stages = [
        "nvarguscamerasrc",
        "video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1"
        % (capture_width, capture_height, framerate),
        "nvvidconv flip-method=%d" % flip_method,
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx"
        % (display_width, display_height),
        "videoconvert",
        "video/x-raw, format=(string)BGR",
        "appsink",
    ]
    return " ! ".join(stages)
def particle_detect():
    """Classify frames from the CSI camera with a fine-tuned ResNeXt-101.

    Loads the tuned 4-class checkpoint, reads up to 20 frames from the
    camera, and prints the predicted class index for each frame.
    """
    model_ft = models.resnext101_32x8d(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    # Replace the ImageNet head with a 4-way classifier matching the checkpoint.
    model_ft.fc = nn.Linear(num_ftrs, 4)
    model_ft.load_state_dict(torch.load('/model/ResNeXt-101_tuned'))
    model_ft.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_ft = model_ft.to(device)
    # NOTE(review): RandomResizedCrop/RandomHorizontalFlip are training-time
    # augmentations; a deterministic Resize + CenterCrop is usually wanted at
    # inference. Kept as-is to preserve existing behaviour.
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)
    count = 0  # BUG FIX: was the C-style declaration `int count = 0` (SyntaxError)
    if cap.isOpened():
        try:
            while count < 20:
                ret, img = cap.read()
                if not ret:
                    break  # camera stopped delivering frames
                # OpenCV yields BGR ndarrays; the PIL-based transforms above
                # expect an RGB PIL image.
                img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                img = transform(img).to(device)
                with torch.no_grad():  # inference only; skip autograd bookkeeping
                    outputs = model_ft(img.reshape(1, 3, 224, 224))
                _, preds = torch.max(outputs, 1)
                print(preds.item())
                count += 1
        finally:
            cap.release()  # release the camera even if inference raises
    else:
        print("Unable to open camera")
if __name__ == "__main__":
particle_detect() | particle_detect.py |
# https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_face_detection.html
# On the Jetson Nano, OpenCV comes preinstalled
# Data files are in /usr/sharc/OpenCV
from __future__ import print_function, division
from PIL import Image
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Defaults to 1280x720 @ 30fps
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of the window on the screen
def gstreamer_pipeline(
    capture_width=3280,
    capture_height=2464,
    display_width=820,
    display_height=616,
    framerate=21,
    flip_method=0,
):
    """Build the GStreamer pipeline string for the Jetson CSI camera.

    Captures capture_width x capture_height frames at `framerate` fps from
    nvarguscamerasrc, flips/rotates via nvvidconv (`flip_method`; 0 = none,
    2 = 180 degrees), scales to display_width x display_height, and converts
    to BGR for an appsink consumer such as cv2.VideoCapture.
    """
    stages = [
        "nvarguscamerasrc",
        "video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1"
        % (capture_width, capture_height, framerate),
        "nvvidconv flip-method=%d" % flip_method,
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx"
        % (display_width, display_height),
        "videoconvert",
        "video/x-raw, format=(string)BGR",
        "appsink",
    ]
    return " ! ".join(stages)
def particle_detect():
    """Classify 20 frames from the CSI camera with a fine-tuned ResNeXt-101.

    Loads the 4-class fine-tuned weights from /model/ResNeXt-101_tuned,
    switches the model to eval mode, and prints the predicted class index
    for each captured frame. Prints an error if the camera cannot be opened.
    """
    # pretrained=False: the ImageNet weights would be fully overwritten by
    # load_state_dict below, so downloading them is wasted work.
    model_ft = models.resnext101_32x8d(pretrained=False)
    num_ftrs = model_ft.fc.in_features
    # Final layer resized to the 4 particle classes of the fine-tuned weights.
    model_ft.fc = nn.Linear(num_ftrs, 4)
    model_ft.load_state_dict(torch.load('/model/ResNeXt-101_tuned'))
    model_ft.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model_ft = model_ft.to(device)
    # Deterministic eval-time preprocessing. The original used
    # RandomResizedCrop/RandomHorizontalFlip, which are training
    # augmentations and make inference results non-deterministic.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)
    count = 0  # was `int count = 0`, a SyntaxError in Python
    if cap.isOpened():
        try:
            with torch.no_grad():  # inference only; skip autograd bookkeeping
                while count < 20:
                    ret, frame = cap.read()
                    if not ret:
                        # Camera stopped delivering frames; bail out cleanly.
                        print("Frame capture failed")
                        break
                    # OpenCV delivers BGR ndarrays; torchvision transforms
                    # expect an RGB PIL image.
                    img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    img = transform(img).to(device)
                    outputs = model_ft(img.unsqueeze(0))
                    _, preds = torch.max(outputs, 1)
                    print(preds.item())
                    count += 1
        finally:
            cap.release()  # always free the camera, even on error
    else:
        print("Unable to open camera")
# Entry point (dataset score columns fused onto the call line made it a
# SyntaxError).
if __name__ == "__main__":
    particle_detect()
from twisted.cred.portal import Portal
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.conch.ssh.factory import SSHFactory
from twisted.internet import reactor
from twisted.conch.ssh.keys import Key
from twisted.conch.ssh import session, forwarding, filetransfer
from twisted.conch import checkers
from twisted.python.components import registerAdapter
from twisted.conch.interfaces import IConchUser
from twisted.conch.avatar import ConchUser
from twisted.conch.ssh.channel import SSHChannel
from twisted.conch.ssh.filetransfer import FileTransferServer, implementer, ISFTPServer, ISFTPFile
from twisted.conch.ssh.session import parseRequest_pty_req
from twisted.internet.protocol import Protocol
from twisted.conch.ssh.session import SSHSession, SSHSessionProcessProtocol, wrapProtocol
# SFTP open flags (SSH_FXF_* from the secsh-filexfer draft): how the client
# asked for a file to be opened. Combined bitwise in ServerFile.__init__.
FXF_READ = 0x00000001
FXF_WRITE = 0x00000002
FXF_APPEND = 0x00000004
FXF_CREAT = 0x00000008
FXF_TRUNC = 0x00000010
FXF_EXCL = 0x00000020
FXF_TEXT = 0x00000040
from twisted.python import log
import sys
from twisted.python.filepath import FilePath
# Command line: directory to serve, server key pair, the single allowed
# username and its public key, and the TCP port to listen on.
if len(sys.argv) < 7:
    print("Usage: txftp.py <directory> <privateKeyFile> <publicKeyFile> <username> <clientPublicKeyFile> <port>")
    raise SystemExit(1)
log.startLogging(sys.stderr)
port = int(sys.argv[6])
# sys.argv entries are already str on Python 3 (this file uses zero-arg
# super(), so it targets py3); the original .decode('charmap') raised
# AttributeError at startup. Conch credential usernames are bytes, so
# encode for the InMemorySSHKeyDB registered below.
username = sys.argv[4].encode('charmap')
target = FilePath(sys.argv[1])
@implementer(ISFTPFile)
class ServerFile(object):
    """An open file handle served over SFTP, backed by a twisted FilePath."""
    def __init__(self, fp, flags):
        self.filePath = fp;
        # Map the SFTP FXF_* bit flags onto a Python file-mode string.
        fm = ''
        if flags & FXF_READ:
            fm += 'r'
        if flags & FXF_WRITE:
            # read+write -> 'r+'; write-only -> 'w'
            fm += ('+' if fm else 'w')
        if flags & FXF_APPEND:
            # NOTE(review): APPEND/TRUNC overwrite whatever mode was built
            # above, and FXF_CREAT / FXF_EXCL are not honoured at all --
            # confirm this is acceptable for the intended clients.
            fm = 'a'
        if flags & FXF_TRUNC:
            fm = 'w'
        if not (flags & FXF_TEXT):
            # SFTP data is binary unless the client explicitly asks for text.
            fm += 'b'
        self._handle = fp.open(fm)
    def close(self):
        self._handle.close()
        # Drop the reference so further use fails loudly.
        self._handle = None
    def readChunk(self, offset, length):
        # SFTP reads are positional: seek, then read.
        self._handle.seek(offset)
        a = self._handle.read(length)
        if a:
            return a
        # An empty read signals end-of-file to the SFTP layer.
        raise EOFError("")
    def writeChunk(self, offset, data):
        # Positional write at the client-supplied offset.
        self._handle.seek(offset)
        self._handle.write(data)
    def getAttrs(self):
        return getStats(self.filePath)
    def setAttrs(self, attrs):
        # Attribute changes are accepted and silently ignored.
        return
def getStats(s):
    """Build an SFTP attrs dict for a FilePath-like object *s*.

    Keys (size, uid, gid, permissions, atime, mtime) follow the attrs
    format expected by the Conch filetransfer layer.
    """
    import os
    mode = os.stat(s.path).st_mode
    return {
        'size': s.getsize(),
        'uid': s.getUserID(),
        'gid': s.getGroupID(),
        'permissions': mode,
        'atime': s.getatime(),
        'mtime': s.getmtime(),
    }
class DirectoryIterator(object):
    """Lazily yields (shortname, longname, attrs) triples for a directory."""
    def __init__(self, d: FilePath):
        self._d = d
    def close(self):
        """Nothing to release; the listing is generated on the fly."""
    def __iter__(self):
        for child in self._d.children():
            name = child.basename()
            yield name, name, getStats(child)
@implementer(ISFTPServer)
class SSHFileServer(Protocol):
    """SFTP server rooted at the module-level ``target`` directory.

    Paths arrive from the client as bytes; they are decoded with 'charmap'
    and resolved beneath ``target`` via FilePath.descendant(), which keeps
    lookups inside the served tree.
    """
    def __init__(self, parent, avatar):
        print(81)  # debug trace left from development
        super().__init__()
        self._parent = parent
        self.avatar = avatar
    def connectionLost(self, reason):
        print('Connection lost', reason)
    def gotVersion(self, otherVersion, extData):
        """Return the server's extended-feature dict (none supported)."""
        return {}
    def openFile(self, filename, flags, attrs):
        """Open *filename* (bytes) beneath ``target`` with SFTP FXF_* flags."""
        return ServerFile(target.descendant(filename.decode('charmap').split('/')), flags)
    def removeFile(self, filename):
        target.descendant(filename.decode('charmap').split('/')).remove()
    def renameFile(self, oldpath, newpath):
        target.descendant(oldpath.decode('charmap').split('/')).moveTo(
            target.descendant(newpath.decode('charmap').split('/')), False)
    def makeDirectory(self, path, attrs):
        target.descendant(path.decode('charmap').split('/')).makedirs()
    def removeDirectory(self, path):
        # Only remove empty directories; non-empty ones are silently kept.
        t = target.descendant(path.decode('charmap').split('/'))
        if t.isdir() and not t.children():
            t.remove()
    def openDirectory(self, path):
        """Return an iterable of (filename, longname, attrs) triples for *path*.

        The returned object also provides close(), called when the client is
        done reading the listing.
        """
        print(140, path)  # debug trace
        return DirectoryIterator(target.descendant(path.decode('charmap').split('/')))
    def getAttrs(self, path, followLinks):
        """Return the attrs dict for *path* (followLinks is ignored)."""
        s = target.descendant(path.decode('charmap').split('/'))
        return getStats(s)
    def setAttrs(self, path, attrs):
        """Accept and ignore attribute changes (nothing is persisted)."""
    def readLink(self, path):
        # Fixed: the original omitted ``self``, so instance.readLink(p)
        # bound the instance to ``path`` and dropped the real argument.
        return path
    def makeLink(self, linkPath, targetPath):
        # Symlink creation is unsupported. Fixed: the original omitted
        # ``self`` and did ``raise NotImplemented()`` -- NotImplemented is a
        # constant, not an exception type, so that raised TypeError instead.
        raise NotImplementedError()
    def realPath(self, path):
        """Return *path* unchanged; clients get back what they sent."""
        return path
    def extendedRequest(self, extendedName, extendedData):
        # Fixed: also lacked ``self`` in the original.
        raise NotImplementedError()
# Load the server host key pair and the client's authorized public key
# from the paths given on the command line.
with open(sys.argv[2]) as privateBlobFile:
    privateBlob = privateBlobFile.read()
    privateKey = Key.fromString(data=privateBlob)
with open(sys.argv[3]) as publicBlobFile:
    publicBlob = publicBlobFile.read()
    publicKey = Key.fromString(data=publicBlob)
with open(sys.argv[5]) as clientBlobFile:
    clientBlob = clientBlobFile.read()
    clientKey = Key.fromString(data=clientBlob)
class EchoProtocol(Protocol):
    """Trivial shell protocol: greets on connect, then echoes a repr of input."""
    def connectionMade(self):
        # transport.write() requires bytes on Python 3; the original passed
        # str, which raises TypeError inside Twisted.
        self.transport.write(b"Echo protocol connected\r\n")
    def dataReceived(self, data):
        # (parameter renamed from ``bytes``, which shadowed the builtin)
        self.transport.write(("echo: " + repr(data) + "\r\n").encode("charmap"))
class SimpleSession(SSHSession):
    """Session channel that serves an echo 'shell' and logs everything else."""
    name = 'session'
    def requestReceived(self, *args):
        print(248, args)  # debug trace of raw channel requests
        return super().requestReceived(*args)
    def __getattr__(self, attr):
        # Log attribute lookups that miss before delegating.
        # NOTE(review): if SSHSession's MRO defines no __getattr__, the
        # super() call itself raises AttributeError -- confirm.
        print(attr)
        return super().__getattr__(attr)
    def request_shell(self, data):
        # Wire an EchoProtocol up as the "shell": channel bytes flow through
        # SSHSessionProcessProtocol into the protocol and back out.
        protocol = EchoProtocol()
        transport = SSHSessionProcessProtocol(self)
        protocol.makeConnection(transport)
        transport.makeConnection(wrapProtocol(protocol))
        self.client = transport
        return True
    def request_subsystem(self, *args):
        # Delegate subsystem requests (e.g. b'sftp') to the standard lookup.
        print(258, args)
        ret = super().request_subsystem(*args)
        print(ret)
        return ret
    def request_pty_req(self, *args):
        # PTY allocation is refused.
        return False
    def request_exec(self, data):
        # Command execution is refused.
        return False
    def request_window_change(self, *args):
        return
    def request_env(self, *args):
        print(args)
    def closed(self):
        print( 'closed')
    def closeReceived(self):
        print( 'closeReceived')
class SimpleUser(ConchUser):
    """Avatar for the single authorized account; adapted to ISFTPServer below."""
    def dataReceived(self, *args):
        print(282, args)  # debug trace
        # NOTE(review): ConchUser does not obviously define dataReceived;
        # this super() call may raise AttributeError if ever invoked -- confirm.
        return super().dataReceived(*args)
# Adapt SimpleUser avatars to ISFTPServer by wrapping them in SSHFileServer.
registerAdapter(lambda user: SSHFileServer(None, user), SimpleUser, ISFTPServer)
class SimpleRealm(object):
    """Portal realm: every login gets a SimpleUser with SFTP + echo session."""
    def requestAvatar(self, avatarId, mind, *interfaces):
        user = SimpleUser()
        user.subsystemLookup.update(
            {b"sftp": filetransfer.FileTransferServer})
        # user.subsystemLookup[b'sftp'] = SSHFileServer
        user.channelLookup[b'session'] = SimpleSession
        # Third element is the logout callable; ``print`` acts as a no-op
        # that logs whatever arguments logout is called with.
        return IConchUser, user, print
factory = SSHFactory()
factory.privateKeys = { b'ssh-rsa': privateKey }
factory.publicKeys = { b'ssh-rsa': publicKey }
# Load Diffie-Hellman group-exchange primes from the OpenSSH moduli file.
with open('/etc/ssh/moduli', 'r') as p:
    primes = factory.primes = {}
    for l in p:
        l = l.strip()
        if not l or l[0] == '#':
            continue
        # moduli columns: time, type, tests, tries, size, generator, modulus(hex)
        tim, typ, tst, tri, size, gen, mod = l.split()
        # NOTE(review): OpenSSH records the modulus size minus one, hence
        # the +1 to recover the full bit size -- confirm against the format.
        size = int(size) + 1
        gen = int(gen)
        mod = int(mod, 16)
        if not size in primes:
            primes[size] = []
        primes[size].append((gen, mod))
factory.portal = Portal(SimpleRealm())
# Only public-key auth, for the single configured user/key pair.
factory.portal.registerChecker(checkers.SSHPublicKeyChecker(checkers.InMemorySSHKeyDB({username:[clientKey]})))
print(307)
print(factory.portal.listCredentialsInterfaces())
# Start listening and run the reactor until interrupted. (The trailing
# dataset/repo-path artifact fused onto reactor.run() made it a SyntaxError.)
reactor.listenTCP(port, factory)
reactor.run()
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.conch.ssh.factory import SSHFactory
from twisted.internet import reactor
from twisted.conch.ssh.keys import Key
from twisted.conch.ssh import session, forwarding, filetransfer
from twisted.conch import checkers
from twisted.python.components import registerAdapter
from twisted.conch.interfaces import IConchUser
from twisted.conch.avatar import ConchUser
from twisted.conch.ssh.channel import SSHChannel
from twisted.conch.ssh.filetransfer import FileTransferServer, implementer, ISFTPServer, ISFTPFile
from twisted.conch.ssh.session import parseRequest_pty_req
from twisted.internet.protocol import Protocol
from twisted.conch.ssh.session import SSHSession, SSHSessionProcessProtocol, wrapProtocol
FXF_READ = 0x00000001
FXF_WRITE = 0x00000002
FXF_APPEND = 0x00000004
FXF_CREAT = 0x00000008
FXF_TRUNC = 0x00000010
FXF_EXCL = 0x00000020
FXF_TEXT = 0x00000040
from twisted.python import log
import sys
from twisted.python.filepath import FilePath
if len(sys.argv) < 7:
print("Usage: txftp.py <directory> <privateKeyFile> <publicKeyFile> <username> <clientPublicKeyFile> <port>")
raise SystemExit(1)
log.startLogging(sys.stderr)
port = int(sys.argv[6])
username = sys.argv[4].decode('charmap')
target = FilePath(sys.argv[1])
@implementer(ISFTPFile)
class ServerFile(object):
def __init__(self, fp, flags):
self.filePath = fp;
fm = ''
if flags & FXF_READ:
fm += 'r'
if flags & FXF_WRITE:
fm += ('+' if fm else 'w')
if flags & FXF_APPEND:
fm = 'a'
if flags & FXF_TRUNC:
fm = 'w'
if not (flags & FXF_TEXT):
fm += 'b'
self._handle = fp.open(fm)
def close(self):
self._handle.close()
self._handle = None
def readChunk(self, offset, length):
self._handle.seek(offset)
a = self._handle.read(length)
if a:
return a
raise EOFError("")
def writeChunk(self, offset, data):
self._handle.seek(offset)
self._handle.write(data)
def getAttrs(self):
return getStats(self.filePath)
def setAttrs(self, attrs):
return
def getStats(s):
import os
return dict(size=s.getsize(),
uid=s.getUserID(),
gid=s.getGroupID(),
permissions=os.stat(s.path).st_mode,
atime=s.getatime(),
mtime=s.getmtime())
class DirectoryIterator(object):
def __init__(self, d: FilePath):
self._d = d
def close(self):
pass
def __iter__(self):
for f in self._d.children():
yield f.basename(), f.basename(), getStats(f)
@implementer(ISFTPServer)
class SSHFileServer(Protocol):
    """SFTP server rooted at the module-level ``target`` directory.

    Paths arrive from the client as bytes; they are decoded with 'charmap'
    and resolved beneath ``target`` via FilePath.descendant(), which keeps
    lookups inside the served tree.
    """
    def __init__(self, parent, avatar):
        print(81)  # debug trace left from development
        super().__init__()
        self._parent = parent
        self.avatar = avatar
    def connectionLost(self, reason):
        print('Connection lost', reason)
    def gotVersion(self, otherVersion, extData):
        """Return the server's extended-feature dict (none supported)."""
        return {}
    def openFile(self, filename, flags, attrs):
        """Open *filename* (bytes) beneath ``target`` with SFTP FXF_* flags."""
        return ServerFile(target.descendant(filename.decode('charmap').split('/')), flags)
    def removeFile(self, filename):
        target.descendant(filename.decode('charmap').split('/')).remove()
    def renameFile(self, oldpath, newpath):
        target.descendant(oldpath.decode('charmap').split('/')).moveTo(
            target.descendant(newpath.decode('charmap').split('/')), False)
    def makeDirectory(self, path, attrs):
        target.descendant(path.decode('charmap').split('/')).makedirs()
    def removeDirectory(self, path):
        # Only remove empty directories; non-empty ones are silently kept.
        t = target.descendant(path.decode('charmap').split('/'))
        if t.isdir() and not t.children():
            t.remove()
    def openDirectory(self, path):
        """Return an iterable of (filename, longname, attrs) triples for *path*.

        The returned object also provides close(), called when the client is
        done reading the listing.
        """
        print(140, path)  # debug trace
        return DirectoryIterator(target.descendant(path.decode('charmap').split('/')))
    def getAttrs(self, path, followLinks):
        """Return the attrs dict for *path* (followLinks is ignored)."""
        s = target.descendant(path.decode('charmap').split('/'))
        return getStats(s)
    def setAttrs(self, path, attrs):
        """Accept and ignore attribute changes (nothing is persisted)."""
    def readLink(self, path):
        # Fixed: the original omitted ``self``, so instance.readLink(p)
        # bound the instance to ``path`` and dropped the real argument.
        return path
    def makeLink(self, linkPath, targetPath):
        # Symlink creation is unsupported. Fixed: the original omitted
        # ``self`` and did ``raise NotImplemented()`` -- NotImplemented is a
        # constant, not an exception type, so that raised TypeError instead.
        raise NotImplementedError()
    def realPath(self, path):
        """Return *path* unchanged; clients get back what they sent."""
        return path
    def extendedRequest(self, extendedName, extendedData):
        # Fixed: also lacked ``self`` in the original.
        raise NotImplementedError()
with open(sys.argv[2]) as privateBlobFile:
privateBlob = privateBlobFile.read()
privateKey = Key.fromString(data=privateBlob)
with open(sys.argv[3]) as publicBlobFile:
publicBlob = publicBlobFile.read()
publicKey = Key.fromString(data=publicBlob)
with open(sys.argv[5]) as clientBlobFile:
clientBlob = clientBlobFile.read()
clientKey = Key.fromString(data=clientBlob)
class EchoProtocol(Protocol):
    """Trivial shell protocol: greets on connect, then echoes a repr of input."""
    def connectionMade(self):
        # transport.write() requires bytes on Python 3; the original passed
        # str, which raises TypeError inside Twisted.
        self.transport.write(b"Echo protocol connected\r\n")
    def dataReceived(self, data):
        # (parameter renamed from ``bytes``, which shadowed the builtin)
        self.transport.write(("echo: " + repr(data) + "\r\n").encode("charmap"))
class SimpleSession(SSHSession):
name = 'session'
def requestReceived(self, *args):
print(248, args)
return super().requestReceived(*args)
def __getattr__(self, attr):
print(attr)
return super().__getattr__(attr)
def request_shell(self, data):
protocol = EchoProtocol()
transport = SSHSessionProcessProtocol(self)
protocol.makeConnection(transport)
transport.makeConnection(wrapProtocol(protocol))
self.client = transport
return True
def request_subsystem(self, *args):
print(258, args)
ret = super().request_subsystem(*args)
print(ret)
return ret
def request_pty_req(self, *args):
return False
def request_exec(self, data):
return False
def request_window_change(self, *args):
return
def request_env(self, *args):
print(args)
def closed(self):
print( 'closed')
def closeReceived(self):
print( 'closeReceived')
class SimpleUser(ConchUser):
def dataReceived(self, *args):
print(282, args)
return super().dataReceived(*args)
registerAdapter(lambda user: SSHFileServer(None, user), SimpleUser, ISFTPServer)
class SimpleRealm(object):
def requestAvatar(self, avatarId, mind, *interfaces):
user = SimpleUser()
user.subsystemLookup.update(
{b"sftp": filetransfer.FileTransferServer})
# user.subsystemLookup[b'sftp'] = SSHFileServer
user.channelLookup[b'session'] = SimpleSession
return IConchUser, user, print
factory = SSHFactory()
factory.privateKeys = { b'ssh-rsa': privateKey }
factory.publicKeys = { b'ssh-rsa': publicKey }
with open('/etc/ssh/moduli', 'r') as p:
primes = factory.primes = {}
for l in p:
l = l.strip()
if not l or l[0] == '#':
continue
tim, typ, tst, tri, size, gen, mod = l.split()
size = int(size) + 1
gen = int(gen)
mod = int(mod, 16)
if not size in primes:
primes[size] = []
primes[size].append((gen, mod))
factory.portal = Portal(SimpleRealm())
factory.portal.registerChecker(checkers.SSHPublicKeyChecker(checkers.InMemorySSHKeyDB({username:[clientKey]})))
print(307)
print(factory.portal.listCredentialsInterfaces())
# Start listening and run the reactor until interrupted. (The trailing
# dataset/repo-path artifact fused onto reactor.run() made it a SyntaxError.)
reactor.listenTCP(port, factory)
reactor.run()
import datetime
import random
import string
import typing
import pandas as pd
from audformat.core import define
from audformat.core.common import HeaderBase
class Scheme(HeaderBase):
    r"""A scheme defines valid values of an annotation.
    Allowed values for ``dtype`` are:
    ``'bool'``, ``'str'``, ``'int'``, ``'float'``, ``'time'``, and ``'date'``
    (see :class:`audformat.define.DataType`).
    Values can be restricted to a set of labels provided by a
    list or a dictionary.
    A continuous range can be limited by a minimum and
    maximum value.
    Args:
        dtype: if ``None`` derived from ``labels``, otherwise set to ``'str'``
        labels: list or dictionary with valid labels.
        minimum: minimum value
        maximum: maximum value
        description: scheme description
        meta: additional meta fields
    Raises:
        BadValueError: if an invalid ``dtype`` is passed
        ValueError: if ``labels`` are not passed as list or dictionary
        ValueError: if ``labels`` are not of same data type
        ValueError: ``dtype`` does not match type of ``labels``
    Example:
        >>> Scheme()
        {dtype: str}
        >>> Scheme(labels=['a', 'b', 'c'])
        dtype: str
        labels: [a, b, c]
        >>> Scheme(define.DataType.INTEGER)
        {dtype: int}
        >>> Scheme(float, minimum=0, maximum=1)
        {dtype: float, minimum: 0, maximum: 1}
    """
    # Mapping of accepted dtype spellings (strings and Python/pandas types)
    # onto the canonical define.DataType values.
    _dtypes = {
        'bool': define.DataType.BOOL,
        bool: define.DataType.BOOL,
        'str': define.DataType.STRING,
        str: define.DataType.STRING,
        'int': define.DataType.INTEGER,
        int: define.DataType.INTEGER,
        'float': define.DataType.FLOAT,
        float: define.DataType.FLOAT,
        'time': define.DataType.TIME,
        pd.Timedelta: define.DataType.TIME,
        'date': define.DataType.DATE,
        datetime.datetime: define.DataType.DATE,
    }
    def __init__(
            self,
            dtype: typing.Union[typing.Type, define.DataType] = None,
            *,
            labels: typing.Union[dict, list] = None,
            minimum: typing.Union[int, float] = None,
            maximum: typing.Union[int, float] = None,
            description: str = None,
            meta: dict = None,
    ):
        super().__init__(description=description, meta=meta)
        if dtype is not None:
            # Normalize type objects / string aliases, then validate.
            if dtype in self._dtypes:
                dtype = self._dtypes[dtype]
            define.DataType.assert_has_attribute_value(dtype)
        if dtype is None and labels is None:
            dtype = define.DataType.STRING
        if labels is not None:
            # dtype must agree with the (uniform) type of the labels.
            dtype_labels = self._dtype_from_labels(labels)
            if dtype is not None and dtype != dtype_labels:
                raise ValueError(
                    "Data type is set to "
                    f"'{dtype}', "
                    "but data type of labels is "
                    f"'{dtype_labels}'."
                )
            dtype = dtype_labels
        self.dtype = dtype
        r"""Data type"""
        self.labels = labels
        r"""List of labels"""
        # min/max only make sense for numeric schemes.
        self.minimum = minimum if self.is_numeric else None
        r"""Minimum value"""
        self.maximum = maximum if self.is_numeric else None
        r"""Maximum value"""
        # Back-references filled in when the scheme is added to a database.
        self._db = None
        self._id = None
    @property
    def is_numeric(self) -> bool:
        r"""Check if data type is numeric.
        Returns:
            ``True`` if data type is numeric.
        """
        return self.dtype in (define.DataType.INTEGER, define.DataType.FLOAT)
    def draw(
            self,
            n: int,
            *,
            str_len: int = 10,
            p_none: float = None,
    ) -> list:
        r"""Randomly draws values from scheme.
        Args:
            n: number of values
            str_len: string length if drawing from a string scheme without
                labels
            p_none: probability for drawing an invalid value
        Returns:
            list with values
        """
        # (p_none was annotated ``bool``; it is a probability in [0, 1])
        x = None
        if self.labels is None:
            if self.dtype == define.DataType.BOOL:
                x = [random.choice([False, True]) for _ in range(n)]
            elif self.dtype == define.DataType.DATE:
                x = [pd.to_datetime(round(random.random(), 2), unit='s')
                     for _ in range(n)]
            elif self.dtype == define.DataType.INTEGER:
                # NOTE(review): ``or`` treats a minimum/maximum of 0 as
                # unset; randrange also excludes ``maximum`` -- confirm
                # whether either matters for callers.
                minimum = self.minimum or 0
                maximum = self.maximum or minimum + 100
                x = [random.randrange(minimum, maximum)
                     for _ in range(n)]
            elif self.dtype == define.DataType.FLOAT:
                minimum = self.minimum or 0.0
                maximum = self.maximum or minimum + 1.0
                x = [random.uniform(minimum, maximum) for _ in range(n)]
            elif self.dtype == define.DataType.TIME:
                x = [pd.to_timedelta(round(random.random(), 2), unit='s')
                     for _ in range(n)]
            else:
                # Plain string scheme: random alphanumeric tokens.
                seq = string.ascii_letters + string.digits
                x = [''.join([random.choice(seq) for _ in range(str_len)])
                     for _ in range(n)]
        elif type(self.labels) in (list, dict):
            x = [random.choice(list(self.labels)) for _ in range(n)]
        if p_none is not None:
            # Replace a random subset with None to simulate missing values.
            for idx in range(len(x)):
                if random.random() <= p_none:
                    x[idx] = None
        return x
    def to_pandas_dtype(self) -> typing.Union[
        str, pd.api.types.CategoricalDtype,
    ]:
        r"""Convert data type to :mod:`pandas` data type.
        If ``labels`` is not ``None``, :class:`pandas.CategoricalDtype` is
        returned. Otherwise the following rules are applied:
        * ``str`` -> ``str``
        * ``int`` -> ``Int64`` (to allow NaN)
        * ``float`` -> ``float``
        * ``time`` -> ``timedelta64[ns]``
        * ``date`` -> ``datetime64[ns]``
        Returns:
            :mod:`pandas` data type
        """
        if self.labels is not None:
            labels = list(self.labels)
            if len(labels) > 0 and isinstance(labels[0], int):
                # allow nullable
                labels = pd.array(labels, dtype=pd.Int64Dtype())
            return pd.api.types.CategoricalDtype(
                categories=labels,
                ordered=False,
            )
        elif self.dtype == define.DataType.BOOL:
            return 'boolean'
        elif self.dtype == define.DataType.DATE:
            return 'datetime64[ns]'
        elif self.dtype == define.DataType.INTEGER:
            return 'Int64'
        elif self.dtype == define.DataType.TIME:
            return 'timedelta64[ns]'
        return self.dtype
    def replace_labels(
            self,
            labels: typing.Union[dict, list],
    ):
        r"""Replace labels.
        If scheme is part of a :class:`audformat.Database`
        the dtype of all :class:`audformat.Column` objects
        that reference the scheme will be updated.
        Removed labels are set to ``NaN``.
        Args:
            labels: new labels
        Raises:
            ValueError: if scheme does not define labels
            ValueError: if dtype of new labels does not match dtype of
                scheme
        Example:
            >>> speaker = Scheme(
            ...     labels={
            ...         0: {'gender': 'female'},
            ...         1: {'gender': 'male'},
            ...     }
            ... )
            >>> speaker
            dtype: int
            labels:
              0: {gender: female}
              1: {gender: male}
            >>> speaker.replace_labels(
            ...     {
            ...         1: {'gender': 'male', 'age': 33},
            ...         2: {'gender': 'female', 'age': 44},
            ...     }
            ... )
            >>> speaker
            dtype: int
            labels:
              1: {gender: male, age: 33}
              2: {gender: female, age: 44}
        """
        if self.labels is None:
            raise ValueError(
                'Cannot replace labels when '
                'scheme does not define labels.'
            )
        dtype_labels = self._dtype_from_labels(labels)
        if dtype_labels != self.dtype:
            raise ValueError(
                "Data type of labels must not change: \n"
                f"'{self.dtype}' \n"
                f"!=\n"
                f"'{dtype_labels}'"
            )
        self.labels = labels
        if self._db is not None and self._id is not None:
            # Propagate the new category set to every column that uses
            # this scheme.
            for table in self._db.tables.values():
                for column in table.columns.values():
                    if column.scheme_id == self._id:
                        # NOTE(review): ``inplace=True`` was deprecated and
                        # removed from Series.cat.set_categories in pandas
                        # 2.x; this will raise TypeError there and the
                        # result needs to be assigned back instead -- confirm
                        # the supported pandas range.
                        column.get(copy=False).cat.set_categories(
                            new_categories=self.labels,
                            ordered=False,
                            inplace=True,
                        )
    def _dtype_from_labels(
            self,
            labels: typing.Union[dict, list],
    ) -> str:
        r"""Derive dtype from labels."""
        if not isinstance(labels, (dict, list)):
            raise ValueError(
                'Labels must be passed as a dictionary or a list.'
            )
        if len(labels) > 0:
            dtype = type(list(labels)[0])
        else:
            dtype = 'str'
        if not all(isinstance(x, dtype) for x in list(labels)):
            raise ValueError(
                'All labels must be of the same data type.'
            )
        if dtype in self._dtypes:
            dtype = self._dtypes[dtype]
        define.DataType.assert_has_attribute_value(dtype)
        return dtype
    def __contains__(self, item: typing.Any) -> bool:
        r"""Check if scheme contains data type of item.
        ``None``, ``NaT`` and ``NaN`` always match
        Returns:
            ``True`` if item is covered by scheme
        """
        if item is not None and not pd.isna(item):
            if self.labels is not None:
                return item in self.labels
            if self.is_numeric:
                # Compare against None explicitly: a minimum/maximum of 0
                # is falsy and was previously skipped, so e.g. -3 passed a
                # Scheme(int, minimum=0) check.
                if self.minimum is not None and not item >= self.minimum:
                    return False
                if self.maximum is not None and not item <= self.maximum:
                    return False
        return True
import random
import string
import typing
import pandas as pd
from audformat.core import define
from audformat.core.common import HeaderBase
class Scheme(HeaderBase):
r"""A scheme defines valid values of an annotation.
Allowed values for ``dtype`` are:
``'bool'``, ``'str'``, ``'int'``, ``'float'``, ``'time'``, and ``'date'``
(see :class:`audformat.define.DataType`).
Values can be restricted to a set of labels provided by a
list or a dictionary.
A continuous range can be limited by a minimum and
maximum value.
Args:
dtype: if ``None`` derived from ``labels``, otherwise set to ``'str'``
labels: list or dictionary with valid labels.
minimum: minimum value
maximum: maximum value
description: scheme description
meta: additional meta fields
Raises:
BadValueError: if an invalid ``dtype`` is passed
ValueError: if ``labels`` are not passed as list or dictionary
ValueError: if ``labels`` are not of same data type
ValueError: ``dtype`` does not match type of ``labels``
Example:
>>> Scheme()
{dtype: str}
>>> Scheme(labels=['a', 'b', 'c'])
dtype: str
labels: [a, b, c]
>>> Scheme(define.DataType.INTEGER)
{dtype: int}
>>> Scheme(float, minimum=0, maximum=1)
{dtype: float, minimum: 0, maximum: 1}
"""
_dtypes = {
'bool': define.DataType.BOOL,
bool: define.DataType.BOOL,
'str': define.DataType.STRING,
str: define.DataType.STRING,
'int': define.DataType.INTEGER,
int: define.DataType.INTEGER,
'float': define.DataType.FLOAT,
float: define.DataType.FLOAT,
'time': define.DataType.TIME,
pd.Timedelta: define.DataType.TIME,
'date': define.DataType.DATE,
datetime.datetime: define.DataType.DATE,
}
def __init__(
self,
dtype: typing.Union[typing.Type, define.DataType] = None,
*,
labels: typing.Union[dict, list] = None,
minimum: typing.Union[int, float] = None,
maximum: typing.Union[int, float] = None,
description: str = None,
meta: dict = None,
):
super().__init__(description=description, meta=meta)
if dtype is not None:
if dtype in self._dtypes:
dtype = self._dtypes[dtype]
define.DataType.assert_has_attribute_value(dtype)
if dtype is None and labels is None:
dtype = define.DataType.STRING
if labels is not None:
dtype_labels = self._dtype_from_labels(labels)
if dtype is not None and dtype != dtype_labels:
raise ValueError(
"Data type is set to "
f"'{dtype}', "
"but data type of labels is "
f"'{dtype_labels}'."
)
dtype = dtype_labels
self.dtype = dtype
r"""Data type"""
self.labels = labels
r"""List of labels"""
self.minimum = minimum if self.is_numeric else None
r"""Minimum value"""
self.maximum = maximum if self.is_numeric else None
r"""Maximum value"""
self._db = None
self._id = None
@property
def is_numeric(self) -> bool:
r"""Check if data type is numeric.
Returns:
``True`` if data type is numeric.
"""
return self.dtype in (define.DataType.INTEGER, define.DataType.FLOAT)
def draw(
self,
n: int,
*,
str_len: int = 10,
p_none: bool = None,
) -> list:
r"""Randomly draws values from scheme.
Args:
n: number of values
str_len: string length if drawing from a string scheme without
labels
p_none: probability for drawing an invalid value
Returns:
list with values
"""
x = None
if self.labels is None:
if self.dtype == define.DataType.BOOL:
x = [random.choice([False, True]) for _ in range(n)]
elif self.dtype == define.DataType.DATE:
x = [pd.to_datetime(round(random.random(), 2), unit='s')
for _ in range(n)]
elif self.dtype == define.DataType.INTEGER:
minimum = self.minimum or 0
maximum = self.maximum or minimum + 100
x = [random.randrange(minimum, maximum)
for _ in range(n)]
elif self.dtype == define.DataType.FLOAT:
minimum = self.minimum or 0.0
maximum = self.maximum or minimum + 1.0
x = [random.uniform(minimum, maximum) for _ in range(n)]
elif self.dtype == define.DataType.TIME:
x = [pd.to_timedelta(round(random.random(), 2), unit='s')
for _ in range(n)]
else:
seq = string.ascii_letters + string.digits
x = [''.join([random.choice(seq) for _ in range(str_len)])
for _ in range(n)]
elif type(self.labels) in (list, dict):
x = [random.choice(list(self.labels)) for _ in range(n)]
if p_none is not None:
for idx in range(len(x)):
if random.random() <= p_none:
x[idx] = None
return x
def to_pandas_dtype(self) -> typing.Union[
str, pd.api.types.CategoricalDtype,
]:
r"""Convert data type to :mod:`pandas` data type.
If ``labels`` is not ``None``, :class:`pandas.CategoricalDtype` is
returned. Otherwise the following rules are applied:
* ``str`` -> ``str``
* ``int`` -> ``Int64`` (to allow NaN)
* ``float`` -> ``float``
* ``time`` -> ``timedelta64[ns]``
* ``date`` -> ``datetime64[ns]``
Returns:
:mod:`pandas` data type
"""
if self.labels is not None:
labels = list(self.labels)
if len(labels) > 0 and isinstance(labels[0], int):
# allow nullable
labels = pd.array(labels, dtype=pd.Int64Dtype())
return pd.api.types.CategoricalDtype(
categories=labels,
ordered=False,
)
elif self.dtype == define.DataType.BOOL:
return 'boolean'
elif self.dtype == define.DataType.DATE:
return 'datetime64[ns]'
elif self.dtype == define.DataType.INTEGER:
return 'Int64'
elif self.dtype == define.DataType.TIME:
return 'timedelta64[ns]'
return self.dtype
def replace_labels(
self,
labels: typing.Union[dict, list],
):
r"""Replace labels.
If scheme is part of a :class:`audformat.Database`
the dtype of all :class:`audformat.Column` objects
that reference the scheme will be updated.
Removed labels are set to ``NaN``.
Args:
labels: new labels
Raises:
ValueError: if scheme does not define labels
ValueError: if dtype of new labels does not match dtype of
scheme
Example:
>>> speaker = Scheme(
... labels={
... 0: {'gender': 'female'},
... 1: {'gender': 'male'},
... }
... )
>>> speaker
dtype: int
labels:
0: {gender: female}
1: {gender: male}
>>> speaker.replace_labels(
... {
... 1: {'gender': 'male', 'age': 33},
... 2: {'gender': 'female', 'age': 44},
... }
... )
>>> speaker
dtype: int
labels:
1: {gender: male, age: 33}
2: {gender: female, age: 44}
"""
if self.labels is None:
raise ValueError(
'Cannot replace labels when '
'scheme does not define labels.'
)
dtype_labels = self._dtype_from_labels(labels)
if dtype_labels != self.dtype:
raise ValueError(
"Data type of labels must not change: \n"
f"'{self.dtype}' \n"
f"!=\n"
f"'{dtype_labels}'"
)
self.labels = labels
if self._db is not None and self._id is not None:
for table in self._db.tables.values():
for column in table.columns.values():
if column.scheme_id == self._id:
column.get(copy=False).cat.set_categories(
new_categories=self.labels,
ordered=False,
inplace=True,
)
def _dtype_from_labels(
self,
labels: typing.Union[dict, list],
) -> str:
r"""Derive dtype from labels."""
if not isinstance(labels, (dict, list)):
raise ValueError(
'Labels must be passed as a dictionary or a list.'
)
if len(labels) > 0:
dtype = type(list(labels)[0])
else:
dtype = 'str'
if not all(isinstance(x, dtype) for x in list(labels)):
raise ValueError(
'All labels must be of the same data type.'
)
if dtype in self._dtypes:
dtype = self._dtypes[dtype]
define.DataType.assert_has_attribute_value(dtype)
return dtype
def __contains__(self, item: typing.Any) -> bool:
r"""Check if scheme contains data type of item.
``None``, ``NaT`` and ``NaN`` always match
Returns:
``True`` if item is covered by scheme
"""
if item is not None and not pd.isna(item):
if self.labels is not None:
return item in self.labels
if self.is_numeric:
if self.minimum and not item >= self.minimum:
return False
if self.maximum and not item <= self.maximum:
return False
return True | 0.893716 | 0.517388 |
import csv
from argparse import ArgumentParser
from collections import defaultdict
from itertools import product
from pathlib import Path
from typing import Set, Optional, Sequence
import librosa.display
import matplotlib.ticker as tckr
import matplotlib.pyplot as plt
import mir_eval
import numpy as np
from numpy import ndarray
from mpl_toolkits.axes_grid1 import make_axes_locatable
from hparams import hparams
def draw_mel_boundary(path_audio: Path, path_figure: Path,
score_out: ndarray, prediction: ndarray, truth: ndarray,
threshold: float,
draw_title=False,
draw_legend=True,
xlim: Optional[Sequence[float]] = None,
xticklabels: Optional[Sequence[float]] = None,
):
audio, _ = librosa.core.load(str(path_audio), sr=hparams.sample_rate)
mel_S = librosa.feature.melspectrogram(audio,
sr=hparams.sample_rate,
n_fft=hparams.fft_size,
hop_length=hparams.hop_size,
n_mels=hparams.num_mels)
t_axis = np.arange(len(score_out)) * hparams.hop_size / hparams.sample_rate
# figure
if xlim is not None:
duration = xlim[1] - xlim[0]
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(duration / 20 + 1.5, 5))
# ax for colorbar
ax_cbar = None
else:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 5))
# ax for colorbar
ax_cbar = make_axes_locatable(ax1).append_axes('right', size=0.1, pad=0.05)
ax_none = make_axes_locatable(ax2).append_axes('right', size=0.1, pad=0.05)
ax_none.set_visible(False)
c_vline_pred = 'C2'
c_vline_truth = 'C9'
# ax1: mel spectrogram
librosa.display.specshow(librosa.power_to_db(mel_S, ref=np.max),
x_axis='time', y_axis='mel',
sr=hparams.sample_rate,
hop_length=hparams.hop_size,
ax=ax1,
)
# prediction and target boundary
ax1.vlines(x=prediction,
ymin=4000, ymax=16000,
colors=c_vline_pred, label='prediction', zorder=2)
ax1.vlines(x=truth,
ymin=0, ymax=600,
colors=c_vline_truth, label='truth', zorder=2)
if ax_cbar:
fig.colorbar(ax1.collections[0], format='%+2.0f dB', cax=ax_cbar)
if draw_title:
ax1.set_title('mel spectrogram')
x_formatter = ax1.xaxis.get_major_formatter()
ax1.xaxis.set_major_locator(tckr.MultipleLocator(30))
ax1.xaxis.set_minor_locator(tckr.MultipleLocator(10))
ax1.set_xlabel('time (min:sec)')
# ax2: boundary score
ax2.plot(t_axis, score_out,
color='C1', zorder=1, label='estimated boundary score',
linewidth=0.75)
# prediction and target boundary
ylim = [-0.3, 1.3]
ax2.vlines(x=prediction,
ymin=0.9, ymax=ylim[1],
colors=c_vline_pred, label='predicted boundary', zorder=2)
ax2.vlines(x=truth,
ymin=ylim[0], ymax=0.1,
colors=c_vline_truth, label='target boundary', zorder=2)
if draw_legend:
ax2.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), ncol=3)
ax2.set_xlim(ax1.get_xlim())
ax2.xaxis.set_major_formatter(x_formatter)
ax2.xaxis.set_major_locator(ax1.xaxis.get_major_locator())
ax2.xaxis.set_minor_locator(ax1.xaxis.get_minor_locator())
ax2.set_xlabel('time (min:sec)')
ax2.set_ylim(*ylim)
ax2.set_yticks([0, 1])
ax2.set_yticks([threshold], minor=True)
ax2.set_yticklabels(['threshold'], minor=True)
ax2.grid(True, which='major', axis='y')
ax2.grid(True, which='minor', axis='y', linestyle='--', linewidth=1)
if xlim is not None:
ax1.set_xlim(*xlim)
ax2.set_xlim(*xlim)
ax1.set_xticks(xlim)
ax2.set_xticks(xlim)
ax1.set_xticklabels(xticklabels)
ax2.set_xticklabels(xticklabels)
fig.tight_layout()
fig.savefig(path_figure, dpi=600)
def main(test_epoch: int, ids_drawn: Set[int], tol: float):
"""
:param test_epoch:
:param ids_drawn: song ids to be plotted in mel and boundary.
:param tol: hit rate tolerance
:return:
"""
# test_eval: precision, recall, fscore
path_test = Path(hparams.logdir, f'test_{test_epoch}')
if not path_test.exists():
raise FileNotFoundError(path_test)
path_metadata = hparams.path_dataset['test'] / 'metadata/metadata.csv'
# Take the genres of each song in id order
ids = []
id_genre = [] # k: id, v: genre
i_col_genre = 3
with path_metadata.open('r', encoding='utf-8') as f:
read = csv.reader(f)
for idx, line in enumerate(read):
if idx == 0:
i_col_genre = line.index('GENRE')
continue
id_ = line[0]
if (path_test / f'{id_}_pred.npy').exists():
ids.append(int(id_))
id_genre.append(line[i_col_genre])
# measure
all_results = [] # k: id, v: float(precision, recall, F1, F0.58)
for i_id, id_ in enumerate(ids):
item_truth = np.load(path_test / f'{id_}_truth.npy')
item_pred = np.load(path_test / f'{id_}_pred.npy')
prec, recall, f1 = mir_eval.segment.detection(item_truth, item_pred, trim=True, window=tol)
_, _, f058 = mir_eval.segment.detection(item_truth, item_pred, beta=0.58, trim=True,
window=tol)
all_results.append(np.array((prec, recall, f1, f058)))
# total mean / min / max
all_results = np.stack(all_results, axis=0) # (N, 4)
total_mean = np.mean(all_results, axis=0) # (4,)
total_min = np.min(all_results, axis=0) # (4,)
total_max = np.max(all_results, axis=0) # (4,)
ids_drawn.add(int(ids[np.argmin(all_results, axis=0)[2]]))
ids_drawn.add(int(ids[np.argmax(all_results, axis=0)[2]]))
total_min_err = total_mean - total_min
total_max_err = total_max - total_mean
total_errs = np.stack((total_min_err, total_max_err), axis=0) # 2, 4
total_stacked = np.stack((total_mean, total_min, total_max), axis=-1) # (4, 3)
# mean / min / max per genres
genre_result = defaultdict(list) # k: genre, v: list
for i_id, g in enumerate(id_genre):
genre_result[g].append(all_results[i_id])
all_genres = list(genre_result.keys())
num_genres = len(genre_result)
xs = np.arange(num_genres + 1)
genre_mean = np.zeros((num_genres, 4))
genre_max = np.zeros((num_genres, 4))
genre_min = np.zeros((num_genres, 4))
for idx, g in enumerate(all_genres):
genre_mean[idx] = np.mean(genre_result[g], axis=0)
genre_max[idx] = np.max(genre_result[g], axis=0)
genre_min[idx] = np.min(genre_result[g], axis=0)
genre_min_err = genre_mean - genre_min
genre_max_err = genre_max - genre_mean
genre_errs = np.stack((genre_min_err, genre_max_err), axis=0) # 2, num_genres, 4
genre_stacked = np.stack((genre_mean.T, genre_min.T, genre_max.T), axis=-1) # 4, genre, 3
# figure
fig, ax = plt.subplots()
common_ebar_kwargs = dict(elinewidth=0.75, capsize=3, linestyle='', marker='o')
ax.errorbar(xs[:-1], genre_mean[:, 2], yerr=genre_errs[:, :, 2],
**common_ebar_kwargs)
ax.errorbar(xs[-1], total_mean[2], yerr=total_errs[:, 2:3],
color='black',
**common_ebar_kwargs)
for x, y in zip(xs[:-1], genre_mean[:, 2]):
ax.text(x + 0.1, y, f'{y:.3f}')
ax.text(xs[-1] + 0.1, total_mean[2], f'{total_mean[2]:.3f}')
ax.set_xticks(xs)
ax.set_xticklabels([*all_genres, 'Total'], rotation='vertical')
ax.set_xlim(xs[0] - 0.7, xs[-1] + 0.9)
# ax.set_ylim(0, ax.get_ylim()[1])
ax.set_ylim(0, 1)
ax.set_ylabel('F1 Score')
ax.grid(True, axis='y')
fig.tight_layout()
fig.savefig(path_test / 'test_genre.png', dpi=300)
# genre(precision, recall, F1, F0.58), total -> CSV
with open(path_test / 'test.csv', 'w', encoding='utf-8', newline='') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(
['GENRE',
*list(product(('PRECISION', 'RECALL', 'F1', 'F0.58'),
('mean', 'min', 'max'))),
]
)
for idx, g in enumerate(all_genres):
writer.writerow([g, *genre_stacked[:, idx, :].flatten().tolist()])
writer.writerow(['TOTAL', *total_stacked.flatten().tolist()])
# Draw mel-spectrogram and boundary detection result
try:
thresholds = dict(**np.load(path_test / 'thresholds.npz'))
except IOError:
thresholds = None
for id_ in ids_drawn:
score_out = np.load(path_test / f'{id_}.npy')
prediction = np.load(path_test / f'{id_}_pred.npy')[:, 0]
truth = np.load(path_test / f'{id_}_truth.npy')[:, 0]
draw_mel_boundary(hparams.path_dataset['test'] / f'audio/{id_}.mp3',
path_test / f'test_boundary_{id_}.png',
score_out,
prediction,
truth,
thresholds[str(id_)] if thresholds else 0.5,
# draw_legend=False if id_ == 18 else True,
# xlim=(130, 140) if id_ == 18 else None,
# xticklabels=('2:10', '2:20') if id_ == 18 else None,
)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('epoch', type=int)
parser.add_argument('--song', default='set()')
parser.add_argument('--tol', default=0.5)
args = hparams.parse_argument(parser, print_argument=False)
plt.rc('font', family='Arial', size=12)
s_songs = eval(args.song)
assert isinstance(s_songs, set)
main(args.epoch, s_songs, args.tol) | analyze_test.py | import csv
from argparse import ArgumentParser
from collections import defaultdict
from itertools import product
from pathlib import Path
from typing import Set, Optional, Sequence
import librosa.display
import matplotlib.ticker as tckr
import matplotlib.pyplot as plt
import mir_eval
import numpy as np
from numpy import ndarray
from mpl_toolkits.axes_grid1 import make_axes_locatable
from hparams import hparams
def draw_mel_boundary(path_audio: Path, path_figure: Path,
score_out: ndarray, prediction: ndarray, truth: ndarray,
threshold: float,
draw_title=False,
draw_legend=True,
xlim: Optional[Sequence[float]] = None,
xticklabels: Optional[Sequence[float]] = None,
):
audio, _ = librosa.core.load(str(path_audio), sr=hparams.sample_rate)
mel_S = librosa.feature.melspectrogram(audio,
sr=hparams.sample_rate,
n_fft=hparams.fft_size,
hop_length=hparams.hop_size,
n_mels=hparams.num_mels)
t_axis = np.arange(len(score_out)) * hparams.hop_size / hparams.sample_rate
# figure
if xlim is not None:
duration = xlim[1] - xlim[0]
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(duration / 20 + 1.5, 5))
# ax for colorbar
ax_cbar = None
else:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 5))
# ax for colorbar
ax_cbar = make_axes_locatable(ax1).append_axes('right', size=0.1, pad=0.05)
ax_none = make_axes_locatable(ax2).append_axes('right', size=0.1, pad=0.05)
ax_none.set_visible(False)
c_vline_pred = 'C2'
c_vline_truth = 'C9'
# ax1: mel spectrogram
librosa.display.specshow(librosa.power_to_db(mel_S, ref=np.max),
x_axis='time', y_axis='mel',
sr=hparams.sample_rate,
hop_length=hparams.hop_size,
ax=ax1,
)
# prediction and target boundary
ax1.vlines(x=prediction,
ymin=4000, ymax=16000,
colors=c_vline_pred, label='prediction', zorder=2)
ax1.vlines(x=truth,
ymin=0, ymax=600,
colors=c_vline_truth, label='truth', zorder=2)
if ax_cbar:
fig.colorbar(ax1.collections[0], format='%+2.0f dB', cax=ax_cbar)
if draw_title:
ax1.set_title('mel spectrogram')
x_formatter = ax1.xaxis.get_major_formatter()
ax1.xaxis.set_major_locator(tckr.MultipleLocator(30))
ax1.xaxis.set_minor_locator(tckr.MultipleLocator(10))
ax1.set_xlabel('time (min:sec)')
# ax2: boundary score
ax2.plot(t_axis, score_out,
color='C1', zorder=1, label='estimated boundary score',
linewidth=0.75)
# prediction and target boundary
ylim = [-0.3, 1.3]
ax2.vlines(x=prediction,
ymin=0.9, ymax=ylim[1],
colors=c_vline_pred, label='predicted boundary', zorder=2)
ax2.vlines(x=truth,
ymin=ylim[0], ymax=0.1,
colors=c_vline_truth, label='target boundary', zorder=2)
if draw_legend:
ax2.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), ncol=3)
ax2.set_xlim(ax1.get_xlim())
ax2.xaxis.set_major_formatter(x_formatter)
ax2.xaxis.set_major_locator(ax1.xaxis.get_major_locator())
ax2.xaxis.set_minor_locator(ax1.xaxis.get_minor_locator())
ax2.set_xlabel('time (min:sec)')
ax2.set_ylim(*ylim)
ax2.set_yticks([0, 1])
ax2.set_yticks([threshold], minor=True)
ax2.set_yticklabels(['threshold'], minor=True)
ax2.grid(True, which='major', axis='y')
ax2.grid(True, which='minor', axis='y', linestyle='--', linewidth=1)
if xlim is not None:
ax1.set_xlim(*xlim)
ax2.set_xlim(*xlim)
ax1.set_xticks(xlim)
ax2.set_xticks(xlim)
ax1.set_xticklabels(xticklabels)
ax2.set_xticklabels(xticklabels)
fig.tight_layout()
fig.savefig(path_figure, dpi=600)
def main(test_epoch: int, ids_drawn: Set[int], tol: float):
"""
:param test_epoch:
:param ids_drawn: song ids to be plotted in mel and boundary.
:param tol: hit rate tolerance
:return:
"""
# test_eval: precision, recall, fscore
path_test = Path(hparams.logdir, f'test_{test_epoch}')
if not path_test.exists():
raise FileNotFoundError(path_test)
path_metadata = hparams.path_dataset['test'] / 'metadata/metadata.csv'
# Take the genres of each song in id order
ids = []
id_genre = [] # k: id, v: genre
i_col_genre = 3
with path_metadata.open('r', encoding='utf-8') as f:
read = csv.reader(f)
for idx, line in enumerate(read):
if idx == 0:
i_col_genre = line.index('GENRE')
continue
id_ = line[0]
if (path_test / f'{id_}_pred.npy').exists():
ids.append(int(id_))
id_genre.append(line[i_col_genre])
# measure
all_results = [] # k: id, v: float(precision, recall, F1, F0.58)
for i_id, id_ in enumerate(ids):
item_truth = np.load(path_test / f'{id_}_truth.npy')
item_pred = np.load(path_test / f'{id_}_pred.npy')
prec, recall, f1 = mir_eval.segment.detection(item_truth, item_pred, trim=True, window=tol)
_, _, f058 = mir_eval.segment.detection(item_truth, item_pred, beta=0.58, trim=True,
window=tol)
all_results.append(np.array((prec, recall, f1, f058)))
# total mean / min / max
all_results = np.stack(all_results, axis=0) # (N, 4)
total_mean = np.mean(all_results, axis=0) # (4,)
total_min = np.min(all_results, axis=0) # (4,)
total_max = np.max(all_results, axis=0) # (4,)
ids_drawn.add(int(ids[np.argmin(all_results, axis=0)[2]]))
ids_drawn.add(int(ids[np.argmax(all_results, axis=0)[2]]))
total_min_err = total_mean - total_min
total_max_err = total_max - total_mean
total_errs = np.stack((total_min_err, total_max_err), axis=0) # 2, 4
total_stacked = np.stack((total_mean, total_min, total_max), axis=-1) # (4, 3)
# mean / min / max per genres
genre_result = defaultdict(list) # k: genre, v: list
for i_id, g in enumerate(id_genre):
genre_result[g].append(all_results[i_id])
all_genres = list(genre_result.keys())
num_genres = len(genre_result)
xs = np.arange(num_genres + 1)
genre_mean = np.zeros((num_genres, 4))
genre_max = np.zeros((num_genres, 4))
genre_min = np.zeros((num_genres, 4))
for idx, g in enumerate(all_genres):
genre_mean[idx] = np.mean(genre_result[g], axis=0)
genre_max[idx] = np.max(genre_result[g], axis=0)
genre_min[idx] = np.min(genre_result[g], axis=0)
genre_min_err = genre_mean - genre_min
genre_max_err = genre_max - genre_mean
genre_errs = np.stack((genre_min_err, genre_max_err), axis=0) # 2, num_genres, 4
genre_stacked = np.stack((genre_mean.T, genre_min.T, genre_max.T), axis=-1) # 4, genre, 3
# figure
fig, ax = plt.subplots()
common_ebar_kwargs = dict(elinewidth=0.75, capsize=3, linestyle='', marker='o')
ax.errorbar(xs[:-1], genre_mean[:, 2], yerr=genre_errs[:, :, 2],
**common_ebar_kwargs)
ax.errorbar(xs[-1], total_mean[2], yerr=total_errs[:, 2:3],
color='black',
**common_ebar_kwargs)
for x, y in zip(xs[:-1], genre_mean[:, 2]):
ax.text(x + 0.1, y, f'{y:.3f}')
ax.text(xs[-1] + 0.1, total_mean[2], f'{total_mean[2]:.3f}')
ax.set_xticks(xs)
ax.set_xticklabels([*all_genres, 'Total'], rotation='vertical')
ax.set_xlim(xs[0] - 0.7, xs[-1] + 0.9)
# ax.set_ylim(0, ax.get_ylim()[1])
ax.set_ylim(0, 1)
ax.set_ylabel('F1 Score')
ax.grid(True, axis='y')
fig.tight_layout()
fig.savefig(path_test / 'test_genre.png', dpi=300)
# genre(precision, recall, F1, F0.58), total -> CSV
with open(path_test / 'test.csv', 'w', encoding='utf-8', newline='') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(
['GENRE',
*list(product(('PRECISION', 'RECALL', 'F1', 'F0.58'),
('mean', 'min', 'max'))),
]
)
for idx, g in enumerate(all_genres):
writer.writerow([g, *genre_stacked[:, idx, :].flatten().tolist()])
writer.writerow(['TOTAL', *total_stacked.flatten().tolist()])
# Draw mel-spectrogram and boundary detection result
try:
thresholds = dict(**np.load(path_test / 'thresholds.npz'))
except IOError:
thresholds = None
for id_ in ids_drawn:
score_out = np.load(path_test / f'{id_}.npy')
prediction = np.load(path_test / f'{id_}_pred.npy')[:, 0]
truth = np.load(path_test / f'{id_}_truth.npy')[:, 0]
draw_mel_boundary(hparams.path_dataset['test'] / f'audio/{id_}.mp3',
path_test / f'test_boundary_{id_}.png',
score_out,
prediction,
truth,
thresholds[str(id_)] if thresholds else 0.5,
# draw_legend=False if id_ == 18 else True,
# xlim=(130, 140) if id_ == 18 else None,
# xticklabels=('2:10', '2:20') if id_ == 18 else None,
)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('epoch', type=int)
parser.add_argument('--song', default='set()')
parser.add_argument('--tol', default=0.5)
args = hparams.parse_argument(parser, print_argument=False)
plt.rc('font', family='Arial', size=12)
s_songs = eval(args.song)
assert isinstance(s_songs, set)
main(args.epoch, s_songs, args.tol) | 0.817028 | 0.306047 |
import os
BANNED_FILES = set(['.DS_Store'])
OK_CHARS = list(map(ord, list('!@#$%^&*()-_+=`~[]{}|;:\',.<>/? ')))
for i in range(26):
if i < 10: OK_CHARS.append(ord('0') + i)
c = ord('a') + i
OK_CHARS.append(c)
OK_CHARS.append(c - ord('a') + ord('A'))
OK_CHARS = set(OK_CHARS)
escape_lookup = {
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
'"': '\\"',
"\\": "\\\\",
}
ESCAPE_CHARS = {}
for ec in escape_lookup.keys():
ESCAPE_CHARS[ord(ec)] = escape_lookup[ec]
def get_string_chunks(text):
text = text.replace("\r\n", "\n").replace("\r", "\n")
chars = list(map(ord, list(text)))
if str(chars[:3]) == '[239, 187, 191]':
chars = chars[3:]
chunks = []
current_chunk = []
current_length = 0
for c in chars:
if c == 0:
print("Invalid char code: 0")
return None
if current_length >= 100:
chunks.append(current_chunk)
current_chunk = []
current_length = 0
if c in OK_CHARS:
current_chunk.append(c)
current_length += 1
elif ESCAPE_CHARS.get(c) != None:
current_chunk.append(c)
current_length += 2
else:
print("Invalid char code:", c)
return None
if current_length > 0:
if current_length < 20 and len(chunks) > 0:
chunks[-1] += current_chunk
else:
chunks.append(current_chunk)
output = []
for chunk in chunks:
sb = ['"']
for char in chunk:
if char in OK_CHARS:
sb.append(chr(char))
else:
sb.append(ESCAPE_CHARS[char])
sb.append('"')
output.append(''.join(sb))
return output
def main():
files = {}
dir = os.path.join('src', 'resources', 'files')
for file in os.listdir(dir):
if file not in BANNED_FILES:
full_path = os.path.join(dir, file)
c = open(full_path, 'rt')
text = c.read()
c.close()
files[file] = text
file_names = list(files.keys())
file_names.sort()
generated_code = []
for file_name in file_names:
chunks = get_string_chunks(files[file_name])
generated_code.append(' name = new_string("' + file_name + '");')
generated_code.append(' sb = new_string_builder();')
for chunk in chunks:
generated_code.append(' string_builder_append_chars(sb, ' + chunk + ');')
generated_code.append(' dictionary_set(dict, name, string_builder_to_string_and_free(sb));')
generated_code.append('')
generated_code.pop()
resource_file_path = os.path.join('src', 'resources', 'resources.h')
c = open(resource_file_path, 'rt')
src = c.read()
c.close()
lines = src.split("\n")
start_index = -1
end_index = -1
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith('//'):
if 'GEN_BEGIN' in line:
start_index = i
elif 'GEN_END' in line:
end_index = i
header = lines[:start_index + 1]
footer = lines[end_index:]
new_src = "\n".join(header + generated_code + footer)
c = open(resource_file_path, 'wt')
c.write(new_src)
c.close()
if __name__ == "__main__":
main() | resourcegen.py | import os
BANNED_FILES = set(['.DS_Store'])
OK_CHARS = list(map(ord, list('!@#$%^&*()-_+=`~[]{}|;:\',.<>/? ')))
for i in range(26):
if i < 10: OK_CHARS.append(ord('0') + i)
c = ord('a') + i
OK_CHARS.append(c)
OK_CHARS.append(c - ord('a') + ord('A'))
OK_CHARS = set(OK_CHARS)
escape_lookup = {
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
'"': '\\"',
"\\": "\\\\",
}
ESCAPE_CHARS = {}
for ec in escape_lookup.keys():
ESCAPE_CHARS[ord(ec)] = escape_lookup[ec]
def get_string_chunks(text):
text = text.replace("\r\n", "\n").replace("\r", "\n")
chars = list(map(ord, list(text)))
if str(chars[:3]) == '[239, 187, 191]':
chars = chars[3:]
chunks = []
current_chunk = []
current_length = 0
for c in chars:
if c == 0:
print("Invalid char code: 0")
return None
if current_length >= 100:
chunks.append(current_chunk)
current_chunk = []
current_length = 0
if c in OK_CHARS:
current_chunk.append(c)
current_length += 1
elif ESCAPE_CHARS.get(c) != None:
current_chunk.append(c)
current_length += 2
else:
print("Invalid char code:", c)
return None
if current_length > 0:
if current_length < 20 and len(chunks) > 0:
chunks[-1] += current_chunk
else:
chunks.append(current_chunk)
output = []
for chunk in chunks:
sb = ['"']
for char in chunk:
if char in OK_CHARS:
sb.append(chr(char))
else:
sb.append(ESCAPE_CHARS[char])
sb.append('"')
output.append(''.join(sb))
return output
def main():
files = {}
dir = os.path.join('src', 'resources', 'files')
for file in os.listdir(dir):
if file not in BANNED_FILES:
full_path = os.path.join(dir, file)
c = open(full_path, 'rt')
text = c.read()
c.close()
files[file] = text
file_names = list(files.keys())
file_names.sort()
generated_code = []
for file_name in file_names:
chunks = get_string_chunks(files[file_name])
generated_code.append(' name = new_string("' + file_name + '");')
generated_code.append(' sb = new_string_builder();')
for chunk in chunks:
generated_code.append(' string_builder_append_chars(sb, ' + chunk + ');')
generated_code.append(' dictionary_set(dict, name, string_builder_to_string_and_free(sb));')
generated_code.append('')
generated_code.pop()
resource_file_path = os.path.join('src', 'resources', 'resources.h')
c = open(resource_file_path, 'rt')
src = c.read()
c.close()
lines = src.split("\n")
start_index = -1
end_index = -1
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith('//'):
if 'GEN_BEGIN' in line:
start_index = i
elif 'GEN_END' in line:
end_index = i
header = lines[:start_index + 1]
footer = lines[end_index:]
new_src = "\n".join(header + generated_code + footer)
c = open(resource_file_path, 'wt')
c.write(new_src)
c.close()
if __name__ == "__main__":
main() | 0.052479 | 0.078749 |
from pyb import Pin, SPI
import time
class SX1239():
def __init__(self):
super().__init__()
self.SPI = SPI(1, SPI.MASTER, baudrate=600000, polarity=0, phase=0, crc=None)
self.NRST = Pin('Y6', Pin.OUT_PP)
self.NSS = Pin('X5', Pin.OUT_PP)
self.DIO0 = Pin('Y5', Pin.IN, Pin.PULL_UP)
self.DIO1_DCLK = Pin('X12', Pin.IN, Pin.PULL_UP)
self.DIO2_DATA = Pin('X11', Pin.IN, Pin.PULL_UP)
self.Setup()
def Setup(self):
self.NRST.low()
time.sleep(.005)
self.NSS.high()
time.sleep(.01)
time.sleep(.01)
self.Write(0x01, 0b00010000) #RegOpMode - SX1239_SEQUENCER_AUTO + SX1239_LISTEN_DIS + SX1239_MODE_RX
self.Write(0x02, 0b00000000) #RegDataModul - SX1239_DATAMODE_CONTINUOUS_NO_SYNC + SX1239_MODULATION_FSK
self.Write(0x03, 0x0D) #RegBitrateMsb
self.Write(0x04, 0x05) #RegBitrateLsb
#self.Write(0x07, 0xE5) #RegFrfMsb - 916MHz
#self.Write(0x08, 0x21) #RegFrfMid - 916MHz
#self.Write(0x09, 0xC9) #RegFrfLsb - 916MHz
#self.Write(0x07, 0xD9) #RegFrfMsb - 868.3Hz
#self.Write(0x08, 0x33) #RegFrfMid - 868.3Hz
#self.Write(0x09, 0x3A) #RegFrfLsb - 868.3Hz
#self.Write(0x0D, 0b10010001) #RegListen1
self.Write(0x18, 0b00001001) #RegLna
self.Write(0x19, 0b01010011) #RegRxBw - SX1239_BW_DCCFREQ_DEFAULT + SX1239_BW_MANT_24 + SX1239_BW_EXP_0
#self.Write(0x1B, 0b10000000) #RegOokPeak - SX1239_THRESH_PEAK_TYPE_AVERAGE + SX1239_THRESH_PEAK_STEP_05dB + SX1239_THRESH_PEAK_DEC_0
self.Write(0x29, 0xB0) #RegRssiThresh - SX1239_RSSITHRESH_DEFAULT
self.Write(0x2E, 0b10011000) #RegSyncConfig - SX1239_SYNC_DIS
self.Write(0x2F, 0x69) #RegSyncValue1 -
self.Write(0x30, 0x81) #RegSyncValue2 -
self.Write(0x31, 0x7E) #RegSyncValue3 -
self.Write(0x32, 0x96) #RegSyncValue4 -
self.Write(0x37, 0b10000010) #RegPacketConfig1 -
self.Write(0x38, 24) #RegPayloadLength -
self.Write(0x39, 0x64) #RegNodeAdrs -
self.Write(0x3A, 0x64) #RegNodeAdrs -
self.Write(0x3B, 0b00100010) #RegAutoModes -
self.Write(0x58, 0x2D) #RegTestLna -
def Read(self, address = 0x00, numberOfByte = 1):
self.NSS.low()
self.SPI.send(address & 0x7F) # & 0x7F
data = self.SPI.recv(numberOfByte)
self.NSS.high()
return data
def Write(self, address = 0x00, data = bytearray(0)):
sendDataBuffer = bytearray((address | 0x80, data))
self.NSS.low()
self.SPI.send(sendDataBuffer)
self.NSS.high() | MAIN/STM32F405/V00/SX1239.py | from pyb import Pin, SPI
import time
class SX1239():
def __init__(self):
super().__init__()
self.SPI = SPI(1, SPI.MASTER, baudrate=600000, polarity=0, phase=0, crc=None)
self.NRST = Pin('Y6', Pin.OUT_PP)
self.NSS = Pin('X5', Pin.OUT_PP)
self.DIO0 = Pin('Y5', Pin.IN, Pin.PULL_UP)
self.DIO1_DCLK = Pin('X12', Pin.IN, Pin.PULL_UP)
self.DIO2_DATA = Pin('X11', Pin.IN, Pin.PULL_UP)
self.Setup()
def Setup(self):
self.NRST.low()
time.sleep(.005)
self.NSS.high()
time.sleep(.01)
time.sleep(.01)
self.Write(0x01, 0b00010000) #RegOpMode - SX1239_SEQUENCER_AUTO + SX1239_LISTEN_DIS + SX1239_MODE_RX
self.Write(0x02, 0b00000000) #RegDataModul - SX1239_DATAMODE_CONTINUOUS_NO_SYNC + SX1239_MODULATION_FSK
self.Write(0x03, 0x0D) #RegBitrateMsb
self.Write(0x04, 0x05) #RegBitrateLsb
#self.Write(0x07, 0xE5) #RegFrfMsb - 916MHz
#self.Write(0x08, 0x21) #RegFrfMid - 916MHz
#self.Write(0x09, 0xC9) #RegFrfLsb - 916MHz
#self.Write(0x07, 0xD9) #RegFrfMsb - 868.3Hz
#self.Write(0x08, 0x33) #RegFrfMid - 868.3Hz
#self.Write(0x09, 0x3A) #RegFrfLsb - 868.3Hz
#self.Write(0x0D, 0b10010001) #RegListen1
self.Write(0x18, 0b00001001) #RegLna
self.Write(0x19, 0b01010011) #RegRxBw - SX1239_BW_DCCFREQ_DEFAULT + SX1239_BW_MANT_24 + SX1239_BW_EXP_0
#self.Write(0x1B, 0b10000000) #RegOokPeak - SX1239_THRESH_PEAK_TYPE_AVERAGE + SX1239_THRESH_PEAK_STEP_05dB + SX1239_THRESH_PEAK_DEC_0
self.Write(0x29, 0xB0) #RegRssiThresh - SX1239_RSSITHRESH_DEFAULT
self.Write(0x2E, 0b10011000) #RegSyncConfig - SX1239_SYNC_DIS
self.Write(0x2F, 0x69) #RegSyncValue1 -
self.Write(0x30, 0x81) #RegSyncValue2 -
self.Write(0x31, 0x7E) #RegSyncValue3 -
self.Write(0x32, 0x96) #RegSyncValue4 -
self.Write(0x37, 0b10000010) #RegPacketConfig1 -
self.Write(0x38, 24) #RegPayloadLength -
self.Write(0x39, 0x64) #RegNodeAdrs -
self.Write(0x3A, 0x64) #RegNodeAdrs -
self.Write(0x3B, 0b00100010) #RegAutoModes -
self.Write(0x58, 0x2D) #RegTestLna -
def Read(self, address = 0x00, numberOfByte = 1):
self.NSS.low()
self.SPI.send(address & 0x7F) # & 0x7F
data = self.SPI.recv(numberOfByte)
self.NSS.high()
return data
def Write(self, address = 0x00, data = bytearray(0)):
sendDataBuffer = bytearray((address | 0x80, data))
self.NSS.low()
self.SPI.send(sendDataBuffer)
self.NSS.high() | 0.328529 | 0.132767 |
import json
import pytest
from tornado.httpclient import HTTPError, HTTPRequest
from beer_garden.api.http.authentication import issue_token_pair
from beer_garden.db.mongo.models import Garden, Role, RoleAssignment, User
@pytest.fixture(autouse=True)
def garden():
garden = Garden(name="somegarden", connection_type="LOCAL").save()
yield garden
garden.delete()
@pytest.fixture
def operation_data():
yield {"operation_type": "GARDEN_READ", "garden_name": "somegarden"}
@pytest.fixture
def event_forward_role():
role = Role(
name="event_forward",
permissions=["event:forward"],
).save()
yield role
role.delete()
@pytest.fixture
def user_with_permission(event_forward_role):
role_assignment = RoleAssignment(
role=event_forward_role,
domain={
"scope": "Global",
},
)
user = User(username="testuser", role_assignments=[role_assignment]).save()
yield user
user.delete()
@pytest.fixture
def user_without_permission(event_forward_role):
user = User(username="testuser").save()
yield user
user.delete()
@pytest.fixture
def access_token_permitted(user_with_permission):
yield issue_token_pair(user_with_permission)["access"]
@pytest.fixture
def access_token_not_permitted(user_without_permission):
yield issue_token_pair(user_without_permission)["access"]
class TestGardenAPI:
@pytest.mark.gen_test
def test_auth_enabled_allows_forward_with_global_permission(
self,
http_client,
base_url,
app_config_auth_enabled,
access_token_permitted,
operation_data,
):
url = f"{base_url}/api/v1/forward/"
headers = {"Authorization": f"Bearer {access_token_permitted}"}
request = HTTPRequest(
url,
method="POST",
headers=headers,
body=json.dumps(operation_data),
)
response = yield http_client.fetch(request)
assert response.code == 204
@pytest.mark.gen_test
def test_auth_enabled_rejects_forward_without_global_permission(
self,
http_client,
base_url,
app_config_auth_enabled,
access_token_not_permitted,
operation_data,
):
url = f"{base_url}/api/v1/forward/"
headers = {"Authorization": f"Bearer {access_token_not_permitted}"}
request = HTTPRequest(
url, method="POST", headers=headers, body=json.dumps(operation_data)
)
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(request)
assert excinfo.value.code == 403 | src/app/test/api/http/unit/handlers/v1/forward_test.py | import json
import pytest
from tornado.httpclient import HTTPError, HTTPRequest
from beer_garden.api.http.authentication import issue_token_pair
from beer_garden.db.mongo.models import Garden, Role, RoleAssignment, User
@pytest.fixture(autouse=True)
def garden():
garden = Garden(name="somegarden", connection_type="LOCAL").save()
yield garden
garden.delete()
@pytest.fixture
def operation_data():
yield {"operation_type": "GARDEN_READ", "garden_name": "somegarden"}
@pytest.fixture
def event_forward_role():
role = Role(
name="event_forward",
permissions=["event:forward"],
).save()
yield role
role.delete()
@pytest.fixture
def user_with_permission(event_forward_role):
role_assignment = RoleAssignment(
role=event_forward_role,
domain={
"scope": "Global",
},
)
user = User(username="testuser", role_assignments=[role_assignment]).save()
yield user
user.delete()
@pytest.fixture
def user_without_permission(event_forward_role):
user = User(username="testuser").save()
yield user
user.delete()
@pytest.fixture
def access_token_permitted(user_with_permission):
yield issue_token_pair(user_with_permission)["access"]
@pytest.fixture
def access_token_not_permitted(user_without_permission):
yield issue_token_pair(user_without_permission)["access"]
class TestGardenAPI:
@pytest.mark.gen_test
def test_auth_enabled_allows_forward_with_global_permission(
self,
http_client,
base_url,
app_config_auth_enabled,
access_token_permitted,
operation_data,
):
url = f"{base_url}/api/v1/forward/"
headers = {"Authorization": f"Bearer {access_token_permitted}"}
request = HTTPRequest(
url,
method="POST",
headers=headers,
body=json.dumps(operation_data),
)
response = yield http_client.fetch(request)
assert response.code == 204
@pytest.mark.gen_test
def test_auth_enabled_rejects_forward_without_global_permission(
self,
http_client,
base_url,
app_config_auth_enabled,
access_token_not_permitted,
operation_data,
):
url = f"{base_url}/api/v1/forward/"
headers = {"Authorization": f"Bearer {access_token_not_permitted}"}
request = HTTPRequest(
url, method="POST", headers=headers, body=json.dumps(operation_data)
)
with pytest.raises(HTTPError) as excinfo:
yield http_client.fetch(request)
assert excinfo.value.code == 403 | 0.455441 | 0.222658 |
# This script sets up a simple loop for periodical attestation of Pyth data
from pyth_utils import *
from http.client import HTTPConnection
import json
import os
import subprocess
import time
import threading
P2W_ADDRESS = "P2WH424242424242424242424242424242424242424"
P2W_ATTEST_INTERVAL = float(os.environ.get("P2W_ATTEST_INTERVAL", 5))
P2W_OWNER_KEYPAIR = os.environ.get(
"P2W_OWNER_KEYPAIR", f"/usr/src/solana/keys/p2w_owner.json")
PYTH_ACCOUNTS_HOST = "pyth"
PYTH_ACCOUNTS_PORT = 4242
WORMHOLE_ADDRESS = "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
# Get actor pubkeys
P2W_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", P2W_OWNER_KEYPAIR], capture_output=True).stdout.strip()
PYTH_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", PYTH_PROGRAM_KEYPAIR], capture_output=True).stdout.strip()
# Top up pyth2wormhole owner
sol_run_or_die("airdrop", [
str(SOL_AIRDROP_AMT),
"--keypair", P2W_OWNER_KEYPAIR,
"--commitment", "finalized",
], capture_output=True)
# Initialize pyth2wormhole
init_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"init",
"--wh-prog", WORMHOLE_ADDRESS,
"--owner", P2W_OWNER_ADDRESS,
"--pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True, die=False)
if init_result.returncode != 0:
print("NOTE: pyth2wormhole-client init failed, retrying with set_config")
run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"set-config",
"--owner", P2W_OWNER_KEYPAIR,
"--new-owner", P2W_OWNER_ADDRESS,
"--new-wh-prog", WORMHOLE_ADDRESS,
"--new-pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True)
# Retrieve current price/product pubkeys from the pyth publisher
conn = HTTPConnection(PYTH_ACCOUNTS_HOST, PYTH_ACCOUNTS_PORT)
conn.request("GET", "/")
res = conn.getresponse()
pyth_accounts = None
if res.getheader("Content-Type") == "application/json":
pyth_accounts = json.load(res)
else:
print(f"Bad Content type {res.getheader('Content-Type')}", file=sys.stderr)
sys.exit(1)
price_addr = pyth_accounts["price"]
product_addr = pyth_accounts["product"]
nonce = 0
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
print("p2w_autoattest ready to roll.")
print(f"ACCOUNTS: {pyth_accounts}")
print(f"Attest Interval: {P2W_ATTEST_INTERVAL}")
# Let k8s know the service is up
readiness_thread = threading.Thread(target=readiness, daemon=True)
readiness_thread.start()
nonce = 1
while True:
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
time.sleep(P2W_ATTEST_INTERVAL)
nonce += 1
readiness_thread.join() | third_party/pyth/p2w_autoattest.py |
# This script sets up a simple loop for periodical attestation of Pyth data
from pyth_utils import *
from http.client import HTTPConnection
import json
import os
import subprocess
import time
import threading
P2W_ADDRESS = "P2WH424242424242424242424242424242424242424"
P2W_ATTEST_INTERVAL = float(os.environ.get("P2W_ATTEST_INTERVAL", 5))
P2W_OWNER_KEYPAIR = os.environ.get(
"P2W_OWNER_KEYPAIR", f"/usr/src/solana/keys/p2w_owner.json")
PYTH_ACCOUNTS_HOST = "pyth"
PYTH_ACCOUNTS_PORT = 4242
WORMHOLE_ADDRESS = "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
# Get actor pubkeys
P2W_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", P2W_OWNER_KEYPAIR], capture_output=True).stdout.strip()
PYTH_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", PYTH_PROGRAM_KEYPAIR], capture_output=True).stdout.strip()
# Top up pyth2wormhole owner
sol_run_or_die("airdrop", [
str(SOL_AIRDROP_AMT),
"--keypair", P2W_OWNER_KEYPAIR,
"--commitment", "finalized",
], capture_output=True)
# Initialize pyth2wormhole
init_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"init",
"--wh-prog", WORMHOLE_ADDRESS,
"--owner", P2W_OWNER_ADDRESS,
"--pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True, die=False)
if init_result.returncode != 0:
print("NOTE: pyth2wormhole-client init failed, retrying with set_config")
run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"set-config",
"--owner", P2W_OWNER_KEYPAIR,
"--new-owner", P2W_OWNER_ADDRESS,
"--new-wh-prog", WORMHOLE_ADDRESS,
"--new-pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True)
# Retrieve current price/product pubkeys from the pyth publisher
conn = HTTPConnection(PYTH_ACCOUNTS_HOST, PYTH_ACCOUNTS_PORT)
conn.request("GET", "/")
res = conn.getresponse()
pyth_accounts = None
if res.getheader("Content-Type") == "application/json":
pyth_accounts = json.load(res)
else:
print(f"Bad Content type {res.getheader('Content-Type')}", file=sys.stderr)
sys.exit(1)
price_addr = pyth_accounts["price"]
product_addr = pyth_accounts["product"]
nonce = 0
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
print("p2w_autoattest ready to roll.")
print(f"ACCOUNTS: {pyth_accounts}")
print(f"Attest Interval: {P2W_ATTEST_INTERVAL}")
# Let k8s know the service is up
readiness_thread = threading.Thread(target=readiness, daemon=True)
readiness_thread.start()
nonce = 1
while True:
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
time.sleep(P2W_ATTEST_INTERVAL)
nonce += 1
readiness_thread.join() | 0.451085 | 0.175962 |
import json
import random
from datetime import datetime, timedelta
from django.db.models import Sum, Avg, Max
from django.shortcuts import render
from rest_framework.authtoken.models import Token
from .models import UserData, Profile
def home_page(request):
return render(request, 'Data/home_page.html', context={})
def ranking(request):
# User token:
# ----------------------------------------------------
token = Token.objects.get(user=request.user) if request.user.is_authenticated else None
# Best Contributors table:
# ----------------------------------------------------
# Get best first 25 contributors from db
best_friends = Profile.objects.order_by('-score')[:25]
# Format data to json for frontend
bffs = [{'user': profile.user, 'score': profile.score, 'position': i + 1} for i, profile in enumerate(best_friends)]
# Graph data:
# ----------------------------------------------------
# Creating list of days of this week
days_this_week = []
today = datetime.today().date()
for i in range(8):
date = (today + timedelta(days=-i))
days_this_week.append(str(date))
# Creating list of scores from this week
score_this_week = []
for i in range(8):
score = sum([obj.score for obj in
UserData.objects.filter(uploaded_at__date=datetime.today().date() - timedelta(days=i))])
score_this_week.append(score)
# Zipping scores and dates into one dict
data = dict(zip(days_this_week, score_this_week))
# Progress Bar data:
# ----------------------------------------------------
score_sum = Profile.objects.aggregate(Sum('score'))['score__sum']
score_sum = score_sum if score_sum is not None else 0
# Percent of individual help
total_time_played = round(score_sum / 3600, 2)
if request.user.is_authenticated and score_sum > 0:
help_percent = round(100 * (Profile.objects.get(user=request.user).score) / score_sum, 1)
else:
help_percent = 0
# Data Submitted:
# ----------------------------------------------------
if request.user.is_authenticated:
uploads = UserData.objects.filter(user=request.user).order_by('-uploaded_at')
user_data = []
for upload in uploads:
date = upload.uploaded_at.strftime('%Y-%m-%d %H:%M:%S')
user_data.append({"score": upload.score, "id": upload.id, "uploaded_at": date})
else:
user_data = {}
# Number of users:
# ----------------------------------------------------
n_users = Profile.objects.all().count()
# Average number of frames per user
# ----------------------------------------------------
avg_user_score = Profile.objects.aggregate(Avg('score'))['score__avg']
avg_user_score = round(avg_user_score) if avg_user_score is not None else 0
# Average number of sessions per user
# ----------------------------------------------------
avg_session_score = UserData.objects.aggregate(Avg('score'))['score__avg']
avg_session_score = round(avg_session_score) if avg_session_score is not None else 0
avg_session_time = round(avg_session_score / 60, 2) if avg_session_score is not None else 0
# Top 3 users
# ----------------------------------------------------
top_3_score_sum = Profile.objects.order_by('-score')[:3].aggregate(Sum('score'))['score__sum']
if top_3_score_sum is not None and score_sum > 0:
top_3_score_percent = round(100 * top_3_score_sum / score_sum, 2)
else:
top_3_score_percent = 0
# Longest fishing session
# ----------------------------------------------------
max_score = UserData.objects.aggregate(Max('score'))['score__max']
max_score_users = UserData.objects.filter(score=max_score)
if max_score_users is not None and max_score is not None:
rand_user = random.randint(0, len(max_score_users) - 1)
max_score_user = [user for user in max_score_users][rand_user]
time = round(max_score / 60, 1)
else:
max_score = 0
max_score_user = 'admin'
time = 0
longest_session_dict = {'max_score': max_score, 'user': max_score_user, 'time': time}
return render(request, 'Data/dashboard.html', context={
'bffs_dict': bffs,
'data': json.dumps(data),
'score_sum': score_sum,
'total_time_played': total_time_played,
'user_data': user_data,
'help_percent': help_percent,
'n_users': n_users,
'avg_user_score': avg_user_score,
'avg_session_score': avg_session_score,
'avg_session_time': avg_session_time,
'top_3_score_percent': top_3_score_percent,
'longest_session': longest_session_dict,
'token': token
}) | Data/views.py | import json
import random
from datetime import datetime, timedelta
from django.db.models import Sum, Avg, Max
from django.shortcuts import render
from rest_framework.authtoken.models import Token
from .models import UserData, Profile
def home_page(request):
return render(request, 'Data/home_page.html', context={})
def ranking(request):
# User token:
# ----------------------------------------------------
token = Token.objects.get(user=request.user) if request.user.is_authenticated else None
# Best Contributors table:
# ----------------------------------------------------
# Get best first 25 contributors from db
best_friends = Profile.objects.order_by('-score')[:25]
# Format data to json for frontend
bffs = [{'user': profile.user, 'score': profile.score, 'position': i + 1} for i, profile in enumerate(best_friends)]
# Graph data:
# ----------------------------------------------------
# Creating list of days of this week
days_this_week = []
today = datetime.today().date()
for i in range(8):
date = (today + timedelta(days=-i))
days_this_week.append(str(date))
# Creating list of scores from this week
score_this_week = []
for i in range(8):
score = sum([obj.score for obj in
UserData.objects.filter(uploaded_at__date=datetime.today().date() - timedelta(days=i))])
score_this_week.append(score)
# Zipping scores and dates into one dict
data = dict(zip(days_this_week, score_this_week))
# Progress Bar data:
# ----------------------------------------------------
score_sum = Profile.objects.aggregate(Sum('score'))['score__sum']
score_sum = score_sum if score_sum is not None else 0
# Percent of individual help
total_time_played = round(score_sum / 3600, 2)
if request.user.is_authenticated and score_sum > 0:
help_percent = round(100 * (Profile.objects.get(user=request.user).score) / score_sum, 1)
else:
help_percent = 0
# Data Submitted:
# ----------------------------------------------------
if request.user.is_authenticated:
uploads = UserData.objects.filter(user=request.user).order_by('-uploaded_at')
user_data = []
for upload in uploads:
date = upload.uploaded_at.strftime('%Y-%m-%d %H:%M:%S')
user_data.append({"score": upload.score, "id": upload.id, "uploaded_at": date})
else:
user_data = {}
# Number of users:
# ----------------------------------------------------
n_users = Profile.objects.all().count()
# Average number of frames per user
# ----------------------------------------------------
avg_user_score = Profile.objects.aggregate(Avg('score'))['score__avg']
avg_user_score = round(avg_user_score) if avg_user_score is not None else 0
# Average number of sessions per user
# ----------------------------------------------------
avg_session_score = UserData.objects.aggregate(Avg('score'))['score__avg']
avg_session_score = round(avg_session_score) if avg_session_score is not None else 0
avg_session_time = round(avg_session_score / 60, 2) if avg_session_score is not None else 0
# Top 3 users
# ----------------------------------------------------
top_3_score_sum = Profile.objects.order_by('-score')[:3].aggregate(Sum('score'))['score__sum']
if top_3_score_sum is not None and score_sum > 0:
top_3_score_percent = round(100 * top_3_score_sum / score_sum, 2)
else:
top_3_score_percent = 0
# Longest fishing session
# ----------------------------------------------------
max_score = UserData.objects.aggregate(Max('score'))['score__max']
max_score_users = UserData.objects.filter(score=max_score)
if max_score_users is not None and max_score is not None:
rand_user = random.randint(0, len(max_score_users) - 1)
max_score_user = [user for user in max_score_users][rand_user]
time = round(max_score / 60, 1)
else:
max_score = 0
max_score_user = 'admin'
time = 0
longest_session_dict = {'max_score': max_score, 'user': max_score_user, 'time': time}
return render(request, 'Data/dashboard.html', context={
'bffs_dict': bffs,
'data': json.dumps(data),
'score_sum': score_sum,
'total_time_played': total_time_played,
'user_data': user_data,
'help_percent': help_percent,
'n_users': n_users,
'avg_user_score': avg_user_score,
'avg_session_score': avg_session_score,
'avg_session_time': avg_session_time,
'top_3_score_percent': top_3_score_percent,
'longest_session': longest_session_dict,
'token': token
}) | 0.423696 | 0.196498 |
import pytest
from assertpy import assert_that
from pytest_bdd import scenario, given, when, then, parsers
from roguebot.state.entity import Entities, Entity
from roguebot.navigation.path import PathFinder
from roguebot.navigation.path_printer import PathPrinter
from roguebot.state.state import State
from tests.state.dungeon_draw import *
def path_from_picture(picture) -> [Point]:
path = []
@scenario('features/path_finder.feature', 'finds path between rooms on same level')
def test_finds_path_between_rooms_on_same_level():
pass
@scenario('features/path_finder.feature', 'finds path between rooms on same level in a maze')
def test_path_between_two_rooms_in_a_maze():
pass
@scenario('features/path_finder.feature', 'finds path between through funnel')
def test_path_through_funnel():
pass
@given(parsers.parse("a dungeon map of:{picture}"), target_fixture="dungeon_map")
def dungeon_map(picture: str):
dungeon = get_dungeon_from_picture(picture)
return dungeon
@given(parsers.parse("the start-point is {x:d},{y:d},{z:d}"), target_fixture="start_point")
def start_point(x: int, y: int, z: int) -> Point:
return Point(x, y, z)
@given(parsers.parse("the end-point is {x:d},{y:d},{z:d}"), target_fixture="end_point")
def end_point(x: int, y: int, z: int) -> Point:
return Point(x, y, z)
@pytest.fixture
def path_finder() -> PathFinder:
return PathFinder()
@given("there are no entities", target_fixture="entities")
def no_entities() -> Entities:
return Entities()
@pytest.fixture
def state(entities, dungeon_map) -> State:
state = State()
state.entities = entities
state.dungeon_map = dungeon_map
return state
@when("we plot a path", target_fixture="path")
def plot_a_path(state, start_point, end_point, path_finder: PathFinder):
path = path_finder.find_path(start_point, end_point, state)
return path
@pytest.fixture
def path_printer() -> PathPrinter:
return PathPrinter()
@then(parsers.parse("the route is:{picture}"))
def route_check(picture, path, path_printer, start_point, state):
expected_points = get_path_points_from_picture(picture)
printed_actual = path_printer.render_path(state,
path,
only_show_floor=False,
me_point=start_point)
print("actual:")
print(printed_actual)
print(path)
print()
print("expected:")
print(picture)
print(expected_points)
expected_points_not_in_actual = []
for point in expected_points:
if point in path:
path.remove(point)
else:
expected_points_not_in_actual.append(point)
assert_that(path,
"points in actual but not expected").is_empty()
assert_that(expected_points_not_in_actual,
"points expected but not in returned path").is_empty() | tests/bdd/test_path_finder.py | import pytest
from assertpy import assert_that
from pytest_bdd import scenario, given, when, then, parsers
from roguebot.state.entity import Entities, Entity
from roguebot.navigation.path import PathFinder
from roguebot.navigation.path_printer import PathPrinter
from roguebot.state.state import State
from tests.state.dungeon_draw import *
def path_from_picture(picture) -> [Point]:
path = []
@scenario('features/path_finder.feature', 'finds path between rooms on same level')
def test_finds_path_between_rooms_on_same_level():
pass
@scenario('features/path_finder.feature', 'finds path between rooms on same level in a maze')
def test_path_between_two_rooms_in_a_maze():
pass
@scenario('features/path_finder.feature', 'finds path between through funnel')
def test_path_through_funnel():
pass
@given(parsers.parse("a dungeon map of:{picture}"), target_fixture="dungeon_map")
def dungeon_map(picture: str):
dungeon = get_dungeon_from_picture(picture)
return dungeon
@given(parsers.parse("the start-point is {x:d},{y:d},{z:d}"), target_fixture="start_point")
def start_point(x: int, y: int, z: int) -> Point:
return Point(x, y, z)
@given(parsers.parse("the end-point is {x:d},{y:d},{z:d}"), target_fixture="end_point")
def end_point(x: int, y: int, z: int) -> Point:
return Point(x, y, z)
@pytest.fixture
def path_finder() -> PathFinder:
return PathFinder()
@given("there are no entities", target_fixture="entities")
def no_entities() -> Entities:
return Entities()
@pytest.fixture
def state(entities, dungeon_map) -> State:
state = State()
state.entities = entities
state.dungeon_map = dungeon_map
return state
@when("we plot a path", target_fixture="path")
def plot_a_path(state, start_point, end_point, path_finder: PathFinder):
path = path_finder.find_path(start_point, end_point, state)
return path
@pytest.fixture
def path_printer() -> PathPrinter:
return PathPrinter()
@then(parsers.parse("the route is:{picture}"))
def route_check(picture, path, path_printer, start_point, state):
expected_points = get_path_points_from_picture(picture)
printed_actual = path_printer.render_path(state,
path,
only_show_floor=False,
me_point=start_point)
print("actual:")
print(printed_actual)
print(path)
print()
print("expected:")
print(picture)
print(expected_points)
expected_points_not_in_actual = []
for point in expected_points:
if point in path:
path.remove(point)
else:
expected_points_not_in_actual.append(point)
assert_that(path,
"points in actual but not expected").is_empty()
assert_that(expected_points_not_in_actual,
"points expected but not in returned path").is_empty() | 0.648578 | 0.572006 |
import pandas as pd
from .utils import pipeable
@pipeable
def exact_merge(
left: pd.DataFrame,
right: pd.DataFrame,
on: str = None,
left_on: str = None,
right_on: str = None,
how: str = "exact",
suffixes=("_x", "_y"),
):
"""
Merge two dataframes based on two string columns and the specified matching
technique. Techniques currently available:
1. "exact" : strings must match exactly
2. "contains" : the right string must contain the left string
3. "startswith" : the right string must start with the left string
Notes
-----
- This performs a "left" merge — all rows in the left data frame will be
present in the returned data frame
- Data in the left data frame can match multiple values in the right column.
Parameters
----------
left : pandas.DataFrame
the left data to merge
right : pandas.DataFrame
the right DataFrame to merge
on : str, optional
the column to merge on
left_on : str, optional
the name of the string column in the left data frame to merge on
right_on : str, optional
the name of the string column in the right data frame to merge on
exact : str, optional
the merging method, one of 'exact', 'contains', or 'startswith'
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
Returns
-------
merged : pandas.DataFrame
the merged dataframe containg all rows in `left` and any matched data
from the `right` data frame
"""
if on is not None:
left_on = right_on = on
# Verify input parameters
if left_on is None or right_on is None:
raise ValueError("Please specify `on` or `left_on/right_on`")
if left_on not in left.columns:
raise ValueError(f"'{left_on}' is not a column in `left`")
if right_on not in right.columns:
raise ValueError(f"'{right_on}' is not a column in `right`")
def contains(row, right):
return right.loc[
right[right_on].str.contains(row[left_on], na=False, regex=False)
]
def exact(row, right):
return right.loc[right[right_on] == row[left_on]]
def startswith(row, right):
return right.loc[right[right_on].str.startswith(row[left_on], na=False)]
if how == "exact":
comparison = exact
elif how == "contains":
comparison = contains
elif how == "startswith":
comparison = startswith
else:
raise ValueError("how should be one of: 'exact', 'contains', 'startswith'")
# rename the index
right = right.rename_axis("right_index").reset_index()
merged = pd.concat(
left.apply(
lambda row: comparison(row, right).assign(index_left=row.name), axis=1
).tolist()
)
return left.merge(
merged.set_index("index_left"),
left_index=True,
right_index=True,
how="left",
suffixes=suffixes,
) | schuylkill/exact.py | import pandas as pd
from .utils import pipeable
@pipeable
def exact_merge(
left: pd.DataFrame,
right: pd.DataFrame,
on: str = None,
left_on: str = None,
right_on: str = None,
how: str = "exact",
suffixes=("_x", "_y"),
):
"""
Merge two dataframes based on two string columns and the specified matching
technique. Techniques currently available:
1. "exact" : strings must match exactly
2. "contains" : the right string must contain the left string
3. "startswith" : the right string must start with the left string
Notes
-----
- This performs a "left" merge — all rows in the left data frame will be
present in the returned data frame
- Data in the left data frame can match multiple values in the right column.
Parameters
----------
left : pandas.DataFrame
the left data to merge
right : pandas.DataFrame
the right DataFrame to merge
on : str, optional
the column to merge on
left_on : str, optional
the name of the string column in the left data frame to merge on
right_on : str, optional
the name of the string column in the right data frame to merge on
exact : str, optional
the merging method, one of 'exact', 'contains', or 'startswith'
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
Returns
-------
merged : pandas.DataFrame
the merged dataframe containg all rows in `left` and any matched data
from the `right` data frame
"""
if on is not None:
left_on = right_on = on
# Verify input parameters
if left_on is None or right_on is None:
raise ValueError("Please specify `on` or `left_on/right_on`")
if left_on not in left.columns:
raise ValueError(f"'{left_on}' is not a column in `left`")
if right_on not in right.columns:
raise ValueError(f"'{right_on}' is not a column in `right`")
def contains(row, right):
return right.loc[
right[right_on].str.contains(row[left_on], na=False, regex=False)
]
def exact(row, right):
return right.loc[right[right_on] == row[left_on]]
def startswith(row, right):
return right.loc[right[right_on].str.startswith(row[left_on], na=False)]
if how == "exact":
comparison = exact
elif how == "contains":
comparison = contains
elif how == "startswith":
comparison = startswith
else:
raise ValueError("how should be one of: 'exact', 'contains', 'startswith'")
# rename the index
right = right.rename_axis("right_index").reset_index()
merged = pd.concat(
left.apply(
lambda row: comparison(row, right).assign(index_left=row.name), axis=1
).tolist()
)
return left.merge(
merged.set_index("index_left"),
left_index=True,
right_index=True,
how="left",
suffixes=suffixes,
) | 0.803097 | 0.688396 |
import os
import shutil
import pytest
from autopylot.cameras import Camera
from autopylot.datasets import preparedata
from autopylot.models import architectures, utils
from autopylot.utils import memory, settings
dirpath = os.path.join(settings.settings.MODELS_PATH, "test", "test")
@pytest.mark.models
def test_create_model_save():
"""Test the creation and the saving of a model."""
model = architectures.Models.test_model(
[
# testing with "list" shape
["steering", [1, 1]],
# testing with "tuple" shape
("test_output", (1, 20)),
]
)
model.summary()
utils.save_model(model, "test")
assert (
os.path.exists(dirpath + ".h5")
and os.path.exists(dirpath + ".tflite")
and os.path.exists(dirpath + ".info")
)
@pytest.mark.models
def test_input_shapes():
"""Test the expected input and output shape."""
model, model_info = utils.load_model("test/test.tflite")
for input_detail, (_, shape) in zip(model.input_details, model_info["inputs"]):
assert tuple(input_detail["shape"][1:]) == tuple(shape)
for output_detail, (_, shape) in zip(model.output_details, model_info["outputs"]):
assert tuple(output_detail["shape"][1:]) == tuple(shape)
@pytest.mark.models
def test_missing_data():
"""If the memory doens't have the right data, it should raise an Exception."""
model, model_info = utils.load_model("test/test.tflite")
prepare_data = preparedata.PrepareData(model_info)
with pytest.raises(ValueError):
prepare_data(memory.mem)
@pytest.mark.models
def test_tflite_predict():
"""Test the prediction on the .tflite model."""
model, model_info = utils.load_model("test/test.tflite")
prepare_data = preparedata.PrepareData(model_info)
camera = Camera(camera_type="dummy")
camera.update()
memory.mem["speed"] = 0.123
input_data = prepare_data(memory.mem)
predictions = model.predict(input_data)
assert predictions != {}
@pytest.mark.models
def test_tf_predict():
"""Test the prediction on the .h5 model."""
model, model_info = utils.load_model("test/test.h5")
prepare_data = preparedata.PrepareData(model_info)
camera = Camera(camera_type="dummy")
camera.update()
memory.mem["speed"] = 2.3
input_data = prepare_data(memory.mem)
predictions = model.predict(input_data)
assert predictions != {}
@pytest.mark.models
def test_delete_directory():
"""Deletes the created models."""
shutil.rmtree(os.path.join(settings.settings.MODELS_PATH, "test"))
assert os.path.exists(dirpath) is False | autopylot/tests/test_models.py | import os
import shutil
import pytest
from autopylot.cameras import Camera
from autopylot.datasets import preparedata
from autopylot.models import architectures, utils
from autopylot.utils import memory, settings
dirpath = os.path.join(settings.settings.MODELS_PATH, "test", "test")
@pytest.mark.models
def test_create_model_save():
"""Test the creation and the saving of a model."""
model = architectures.Models.test_model(
[
# testing with "list" shape
["steering", [1, 1]],
# testing with "tuple" shape
("test_output", (1, 20)),
]
)
model.summary()
utils.save_model(model, "test")
assert (
os.path.exists(dirpath + ".h5")
and os.path.exists(dirpath + ".tflite")
and os.path.exists(dirpath + ".info")
)
@pytest.mark.models
def test_input_shapes():
"""Test the expected input and output shape."""
model, model_info = utils.load_model("test/test.tflite")
for input_detail, (_, shape) in zip(model.input_details, model_info["inputs"]):
assert tuple(input_detail["shape"][1:]) == tuple(shape)
for output_detail, (_, shape) in zip(model.output_details, model_info["outputs"]):
assert tuple(output_detail["shape"][1:]) == tuple(shape)
@pytest.mark.models
def test_missing_data():
"""If the memory doens't have the right data, it should raise an Exception."""
model, model_info = utils.load_model("test/test.tflite")
prepare_data = preparedata.PrepareData(model_info)
with pytest.raises(ValueError):
prepare_data(memory.mem)
@pytest.mark.models
def test_tflite_predict():
"""Test the prediction on the .tflite model."""
model, model_info = utils.load_model("test/test.tflite")
prepare_data = preparedata.PrepareData(model_info)
camera = Camera(camera_type="dummy")
camera.update()
memory.mem["speed"] = 0.123
input_data = prepare_data(memory.mem)
predictions = model.predict(input_data)
assert predictions != {}
@pytest.mark.models
def test_tf_predict():
"""Test the prediction on the .h5 model."""
model, model_info = utils.load_model("test/test.h5")
prepare_data = preparedata.PrepareData(model_info)
camera = Camera(camera_type="dummy")
camera.update()
memory.mem["speed"] = 2.3
input_data = prepare_data(memory.mem)
predictions = model.predict(input_data)
assert predictions != {}
@pytest.mark.models
def test_delete_directory():
"""Deletes the created models."""
shutil.rmtree(os.path.join(settings.settings.MODELS_PATH, "test"))
assert os.path.exists(dirpath) is False | 0.790934 | 0.652186 |
import numpy as np
import scipy as sp
import pandas as pd
from datetime import datetime
import xgboost as xgb
from sklearn.metrics import log_loss
from utility_common import feature_extraction, data_path
# r087
# 2015/12/16 14h20m
# Ensemble
# XGB
# params: nt (=num_round)
# ncol: 138610
X1, target, v_train, v_test = feature_extraction(useUpc=True)
y = pd.get_dummies(target).values.argmax(1)
X1 = X1[v_train-1]
nModels = 10
sh = .2
cs = .4
bf = .8
xgb_params = {'eta':sh, 'silent':1, 'objective':'multi:softprob', 'num_class':38,
'colsample_bytree':cs, 'subsample':bf,
'eval_metric':'mlogloss', 'nthread':8}
nt_dict = {4:range(500, 951, 50), 5:range(300, 701, 50)}
pr_xgb_dict = {key:[np.zeros((v_train.size, 38)) for _ in range(len(nt_lst))] \
for key, nt_lst in nt_dict.iteritems()}
scores = []
t0 = datetime.now()
for fold, idx in enumerate(kf):
train_idx, valid_idx = idx
dtrain = xgb.DMatrix(X1[train_idx], label = y[train_idx])
dvalid = xgb.DMatrix(X1[valid_idx])
for tc in [4, 5]:
nt_lst = nt_dict[tc]
nt = np.max(nt_lst)
xgb_params['max_depth'] = tc
params = {'tc':tc, 'fold':fold}
for i in range(1, nModels+1):
params.update({'nModels':i})
xgb_params['seed'] = 13913*i+32018
bst = xgb.train(xgb_params, dtrain, nt)
for j, ntree in enumerate(nt_lst):
pr = bst.predict(dvalid, ntree_limit = ntree)
pr_xgb_dict[tc][j][valid_idx] += pr
sc = params.copy()
sc.update({'ntree':ntree,
'each':log_loss(y[valid_idx], pr),
'avg':log_loss(y[valid_idx], pr_xgb_dict[tc][j][valid_idx]/i)})
scores.append(sc)
print scores[-1], datetime.now() - t0
pr_xgb_dict = {key:[pr/nModels for pr in pr_lst] for key, pr_lst in pr_xgb_dict.iteritems()}
output = open(data_path + 'pr_xgb087.pkl', 'wb')
pickle.dump(pr_xgb_dict, output)
output.close()
r087 = pd.DataFrame(scores)
r087.to_csv('logs/r087.csv')
r087_summary = pd.DataFrame(index=range(300, 951, 50))
params = ['ntree']
for tc in [4, 5]:
grouped_avg = r087[(r087.nModels==nModels) & (r087.tc==tc)].groupby(params).avg
grouped_each = r087[r087.tc==tc].groupby(params).each
r087_summary = r087_summary.join(pd.DataFrame({'XGB_avg':grouped_avg.mean(),
'XGB':grouped_each.mean()}), lsuffix='any')
r087_summary.columns = pd.MultiIndex(levels=[[4, 5], ['XGB', 'XGB_avg']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'max_depth', u'model'])
r087_summary.to_csv('logs/r087_summary.csv')
print pd.DataFrame({'loss':r087_summary.min(0), 'ntree':r087_summary.idxmin(0)})
# loss ntree
# max_depth model
# 4 XGB 0.662632 700
# XGB_avg 0.644430 750
# 5 XGB 0.664067 550
# XGB_avg 0.643234 550 | params_tune_xgb.py | import numpy as np
import scipy as sp
import pandas as pd
from datetime import datetime
import xgboost as xgb
from sklearn.metrics import log_loss
from utility_common import feature_extraction, data_path
# r087
# 2015/12/16 14h20m
# Ensemble
# XGB
# params: nt (=num_round)
# ncol: 138610
X1, target, v_train, v_test = feature_extraction(useUpc=True)
y = pd.get_dummies(target).values.argmax(1)
X1 = X1[v_train-1]
nModels = 10
sh = .2
cs = .4
bf = .8
xgb_params = {'eta':sh, 'silent':1, 'objective':'multi:softprob', 'num_class':38,
'colsample_bytree':cs, 'subsample':bf,
'eval_metric':'mlogloss', 'nthread':8}
nt_dict = {4:range(500, 951, 50), 5:range(300, 701, 50)}
pr_xgb_dict = {key:[np.zeros((v_train.size, 38)) for _ in range(len(nt_lst))] \
for key, nt_lst in nt_dict.iteritems()}
scores = []
t0 = datetime.now()
for fold, idx in enumerate(kf):
train_idx, valid_idx = idx
dtrain = xgb.DMatrix(X1[train_idx], label = y[train_idx])
dvalid = xgb.DMatrix(X1[valid_idx])
for tc in [4, 5]:
nt_lst = nt_dict[tc]
nt = np.max(nt_lst)
xgb_params['max_depth'] = tc
params = {'tc':tc, 'fold':fold}
for i in range(1, nModels+1):
params.update({'nModels':i})
xgb_params['seed'] = 13913*i+32018
bst = xgb.train(xgb_params, dtrain, nt)
for j, ntree in enumerate(nt_lst):
pr = bst.predict(dvalid, ntree_limit = ntree)
pr_xgb_dict[tc][j][valid_idx] += pr
sc = params.copy()
sc.update({'ntree':ntree,
'each':log_loss(y[valid_idx], pr),
'avg':log_loss(y[valid_idx], pr_xgb_dict[tc][j][valid_idx]/i)})
scores.append(sc)
print scores[-1], datetime.now() - t0
pr_xgb_dict = {key:[pr/nModels for pr in pr_lst] for key, pr_lst in pr_xgb_dict.iteritems()}
output = open(data_path + 'pr_xgb087.pkl', 'wb')
pickle.dump(pr_xgb_dict, output)
output.close()
r087 = pd.DataFrame(scores)
r087.to_csv('logs/r087.csv')
r087_summary = pd.DataFrame(index=range(300, 951, 50))
params = ['ntree']
for tc in [4, 5]:
grouped_avg = r087[(r087.nModels==nModels) & (r087.tc==tc)].groupby(params).avg
grouped_each = r087[r087.tc==tc].groupby(params).each
r087_summary = r087_summary.join(pd.DataFrame({'XGB_avg':grouped_avg.mean(),
'XGB':grouped_each.mean()}), lsuffix='any')
r087_summary.columns = pd.MultiIndex(levels=[[4, 5], ['XGB', 'XGB_avg']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'max_depth', u'model'])
r087_summary.to_csv('logs/r087_summary.csv')
print pd.DataFrame({'loss':r087_summary.min(0), 'ntree':r087_summary.idxmin(0)})
# loss ntree
# max_depth model
# 4 XGB 0.662632 700
# XGB_avg 0.644430 750
# 5 XGB 0.664067 550
# XGB_avg 0.643234 550 | 0.321353 | 0.180431 |
import tensorflow as tf
import numpy as np
from imgaug import augmenters as iaa
import matplotlib.pyplot as plt
def plot_sample_images(images, labels, num_classes, samples_per_class, mean=None, std=None, dtype='uint8'):
print(images[0].shape)
if mean is not None and std is not None:
images = images * std + mean
for y in range(num_classes):
idxs = np.flatnonzero(labels == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(images[idx, :, :, :].astype(dtype))
plt.axis('off')
def load_cifar10(num_training=49000, num_validation=1000, num_test=10000, augment=False,
rot=0, pad_px=4, h_flip_prop=0.5, size=(32, 32), normalize=True, plot=True):
"""
Fetch the CIFAR-10 dataset from the web and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 dataset and use appropriate data types and shapes
cifar10 = tf.keras.datasets.cifar10.load_data()
(X_train, y_train), (X_test, y_test) = cifar10
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32).flatten()
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32).flatten()
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
if augment:
# Augment the train data
augmenter = iaa.Sequential([iaa.Fliplr(h_flip_prop),
iaa.Rotate((-rot, rot)),
# iaa.Flipud(0.5),
# iaa.Crop(px=(4, 10), keep_size=False),
# iaa.PadToFixedSize(32, 32),
iaa.Pad(px=pad_px, keep_size=False),
iaa.CropToFixedSize(width=32, height=32),
iaa.Resize(size)
# iaa.RandAugment()
])
X_train_list = augmenter.augment_images(X_train.astype('uint8'))
X_train_augmented = np.array(X_train_list, dtype='float32').reshape((X_train.shape[0], *size, X_train.shape[3]))
X_train = np.vstack([X_train_augmented, X_train])
y_train = np.hstack([y_train, y_train])
# X_train = X_train_augmented
# np.random.seed(1)
idxs = list(range(0, X_train.shape[0]))
np.random.shuffle(idxs)
X_train[:, :, :, :] = X_train[idxs, :, :, :]
y_train[:] = y_train[idxs]
if plot:
plot_sample_images(X_train, y_train, 10, 7)
if normalize:
# Normalize the data: subtract the mean pixel and divide by std
mean_pixel = X_train.mean(axis=(0, 1, 2), keepdims=True)
std_pixel = X_train.std(axis=(0, 1, 2), keepdims=True)
X_train = (X_train - mean_pixel) / std_pixel
X_val = (X_val - mean_pixel) / std_pixel
X_test = (X_test - mean_pixel) / std_pixel
return X_train, y_train, X_val, y_val, X_test, y_test
def get_generator_for(train_data=None, augment=False,
rot=0, w_shift=0.125, h_shift=0.125, h_flip=False,
normalize=True, plot=True):
if train_data:
X_train, y_train = train_data
processes = {'featurewise_center': normalize, 'featurewise_std_normalization': normalize}
val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**processes)
if augment:
processes.update({'rotation_range': rot,
'width_shift_range': w_shift,
'height_shift_range': h_shift,
'horizontal_flip': h_flip,
'fill_mode': 'constant'})
datagen = tf.keras.preprocessing.image.ImageDataGenerator(**processes)
if plot and train_data:
images, labels = next(datagen.flow(X_train, y_train, batch_size=len(X_train) // 2))
plot_sample_images(images, labels, 10, 7)
if normalize and train_data:
datagen.fit(X_train)
val_datagen.fit(X_train)
return datagen, val_datagen
class Dataset(object):
def __init__(self, X, y, batch_size, shuffle=False):
"""
Construct a Dataset object to iterate over data X and labels y
Inputs:
- X: Numpy array of data, of any shape
- y: Numpy array of labels, of any shape but with y.shape[0] == X.shape[0]
- batch_size: Integer giving number of elements per minibatch
- shuffle: (optional) Boolean, whether to shuffle the data on each epoch
"""
assert X.shape[0] == y.shape[0], 'Got different numbers of data and labels'
self.X, self.y = X, y
self.batch_size, self.shuffle = batch_size, shuffle
self.augment = iaa.Sequential([iaa.Fliplr(0.5),
# iaa.Flipud(0.5),
# iaa.Crop(px=(4, 10), keep_size=False),
# iaa.PadToFixedSize(32, 32),
iaa.Pad(px=4, keep_size=False),
iaa.CropToFixedSize(width=32, height=32),
# iaa.RandAugment()
])
def __iter__(self):
N, B = self.X.shape[0], self.batch_size
idxs = np.arange(N)
if self.shuffle:
np.random.shuffle(idxs)
return iter((self.augment(images=self.X[i:i + B]), self.y[i:i + B]) for i in range(0, N, B)) | tensorflow_utils/utils/keras_load_data.py | import tensorflow as tf
import numpy as np
from imgaug import augmenters as iaa
import matplotlib.pyplot as plt
def plot_sample_images(images, labels, num_classes, samples_per_class, mean=None, std=None, dtype='uint8'):
print(images[0].shape)
if mean is not None and std is not None:
images = images * std + mean
for y in range(num_classes):
idxs = np.flatnonzero(labels == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(images[idx, :, :, :].astype(dtype))
plt.axis('off')
def load_cifar10(num_training=49000, num_validation=1000, num_test=10000, augment=False,
rot=0, pad_px=4, h_flip_prop=0.5, size=(32, 32), normalize=True, plot=True):
"""
Fetch the CIFAR-10 dataset from the web and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 dataset and use appropriate data types and shapes
cifar10 = tf.keras.datasets.cifar10.load_data()
(X_train, y_train), (X_test, y_test) = cifar10
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32).flatten()
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32).flatten()
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
if augment:
# Augment the train data
augmenter = iaa.Sequential([iaa.Fliplr(h_flip_prop),
iaa.Rotate((-rot, rot)),
# iaa.Flipud(0.5),
# iaa.Crop(px=(4, 10), keep_size=False),
# iaa.PadToFixedSize(32, 32),
iaa.Pad(px=pad_px, keep_size=False),
iaa.CropToFixedSize(width=32, height=32),
iaa.Resize(size)
# iaa.RandAugment()
])
X_train_list = augmenter.augment_images(X_train.astype('uint8'))
X_train_augmented = np.array(X_train_list, dtype='float32').reshape((X_train.shape[0], *size, X_train.shape[3]))
X_train = np.vstack([X_train_augmented, X_train])
y_train = np.hstack([y_train, y_train])
# X_train = X_train_augmented
# np.random.seed(1)
idxs = list(range(0, X_train.shape[0]))
np.random.shuffle(idxs)
X_train[:, :, :, :] = X_train[idxs, :, :, :]
y_train[:] = y_train[idxs]
if plot:
plot_sample_images(X_train, y_train, 10, 7)
if normalize:
# Normalize the data: subtract the mean pixel and divide by std
mean_pixel = X_train.mean(axis=(0, 1, 2), keepdims=True)
std_pixel = X_train.std(axis=(0, 1, 2), keepdims=True)
X_train = (X_train - mean_pixel) / std_pixel
X_val = (X_val - mean_pixel) / std_pixel
X_test = (X_test - mean_pixel) / std_pixel
return X_train, y_train, X_val, y_val, X_test, y_test
def get_generator_for(train_data=None, augment=False,
rot=0, w_shift=0.125, h_shift=0.125, h_flip=False,
normalize=True, plot=True):
if train_data:
X_train, y_train = train_data
processes = {'featurewise_center': normalize, 'featurewise_std_normalization': normalize}
val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**processes)
if augment:
processes.update({'rotation_range': rot,
'width_shift_range': w_shift,
'height_shift_range': h_shift,
'horizontal_flip': h_flip,
'fill_mode': 'constant'})
datagen = tf.keras.preprocessing.image.ImageDataGenerator(**processes)
if plot and train_data:
images, labels = next(datagen.flow(X_train, y_train, batch_size=len(X_train) // 2))
plot_sample_images(images, labels, 10, 7)
if normalize and train_data:
datagen.fit(X_train)
val_datagen.fit(X_train)
return datagen, val_datagen
class Dataset(object):
def __init__(self, X, y, batch_size, shuffle=False):
"""
Construct a Dataset object to iterate over data X and labels y
Inputs:
- X: Numpy array of data, of any shape
- y: Numpy array of labels, of any shape but with y.shape[0] == X.shape[0]
- batch_size: Integer giving number of elements per minibatch
- shuffle: (optional) Boolean, whether to shuffle the data on each epoch
"""
assert X.shape[0] == y.shape[0], 'Got different numbers of data and labels'
self.X, self.y = X, y
self.batch_size, self.shuffle = batch_size, shuffle
self.augment = iaa.Sequential([iaa.Fliplr(0.5),
# iaa.Flipud(0.5),
# iaa.Crop(px=(4, 10), keep_size=False),
# iaa.PadToFixedSize(32, 32),
iaa.Pad(px=4, keep_size=False),
iaa.CropToFixedSize(width=32, height=32),
# iaa.RandAugment()
])
def __iter__(self):
N, B = self.X.shape[0], self.batch_size
idxs = np.arange(N)
if self.shuffle:
np.random.shuffle(idxs)
return iter((self.augment(images=self.X[i:i + B]), self.y[i:i + B]) for i in range(0, N, B)) | 0.776538 | 0.613729 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
country_code = {'US': '082', # United States
'AU': '127', # Australia
'CA': '101', # Canada
'GE': '910', # Germany
'UK': '110', # United Kingdom
'JP': '105', # Japan
'FR': '915', # France
'IT': '905'} # Italy
tenors_month = [3, 6, 12, 2*12, 3*12, 4*12, 5*12, 6*12, 7*12, 8*12, 9*12, 10*12, 15*12, 20*12, 30*12]
def gen_ticker(tenor, country):
"""
This function generates the ticker for a given country and tenor.
"""
if tenor < 12:
t_code = '0' + str(tenor) + 'M Index'
else:
t_code = str(int(tenor/12)).zfill(2) + 'Y Index'
ticker = 'F' + country_code[country] + t_code
return ticker
def calc_duration(ytm, t=10, dy=0.0001):
ytm_minus = ytm - dy
ytm_plus = ytm + dy
price0 = 100/((1+ytm) ** t)
price_minus = 100/((1+ytm_minus) ** t)
price_plus = 100 / ((1 + ytm_plus) ** t)
dur = (price_minus - price_plus)/(2*price0*dy)
return dur
# ===== READ THE ZERO COUPON CURVES =====
df_zcc = pd.read_excel(r'data\zero coupon curves.xlsx',
sheet_name='values',
index_col='Dates').multiply(1/100)
# ===== READ THE TRACKERS =====
df_trackers = pd.DataFrame()
for ctry in country_code.keys():
print('Reading tracker for', ctry)
df = pd.read_excel(r'data\df_' + str(ctry) + '.xlsx')
tracker = df['er_index']
tracker.name = ctry
df_trackers = pd.concat([df_trackers, tracker], axis=1)
df_trackers.plot()
plt.show()
# ===== BUILD CARRY SIGNAL ====
df_carry = pd.DataFrame()
for ctry in country_code.keys():
print('Building Carry for', ctry)
ticker_list = [gen_ticker(t, ctry) for t in tenors_month] # gets the tickers for thar country
df_ctry = df_zcc[ticker_list] # gets the verticies for the country
df_curve = pd.DataFrame(index=df_ctry.index, columns=list(range(3, 30 * 12 + 1)), dtype=float)
for t, tick in zip(tenors_month, ticker_list):
if t in tenors_month:
df_curve[t] = df_ctry[tick]
df_curve = df_curve.dropna(how='all').interpolate(axis=1, method='pchip')
dur = calc_duration(ytm=df_curve[12 * 10])
ctry_carry = df_curve[10*12] - df_curve[3] - dur*(df_curve[10*12] - df_curve[9*12 + 9])
ctry_carry.name = ctry
df_carry = pd.concat([df_carry, ctry_carry], axis=1)
df_carry.plot()
plt.show()
# ===== BUILD WEIGHTS =====
N = df_carry.shape[1]
avg_rank = ((1 + N)*N)/(2*N)
c = (df_carry.rank(axis=1).iloc[-1] - avg_rank).abs().sum()/2
df_weights = (df_carry.rank(axis=1) - avg_rank)/c
df_weights.plot()
plt.show()
# ===== BUILD STRATEGY INDEX =====
df_returns = df_trackers.pct_change(1)
df_returns[['US', 'JP']].plot()
plt.show()
strat_index = pd.DataFrame(data={'Return': (df_weights * df_returns).dropna().sum(axis=1),
'Level': np.nan})
strat_index['Level'].iloc[0] = 100
for d, dm1 in zip(strat_index.index[1:], strat_index.index[:-1]):
strat_index['Level'].loc[d] = strat_index['Level'].loc[dm1] * (1 + strat_index['Return'].loc[d])
strat_index['Level'].plot()
plt.show() | fhnotebooks/Example Strategies/rates_carry_rank.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
country_code = {'US': '082', # United States
'AU': '127', # Australia
'CA': '101', # Canada
'GE': '910', # Germany
'UK': '110', # United Kingdom
'JP': '105', # Japan
'FR': '915', # France
'IT': '905'} # Italy
tenors_month = [3, 6, 12, 2*12, 3*12, 4*12, 5*12, 6*12, 7*12, 8*12, 9*12, 10*12, 15*12, 20*12, 30*12]
def gen_ticker(tenor, country):
"""
This function generates the ticker for a given country and tenor.
"""
if tenor < 12:
t_code = '0' + str(tenor) + 'M Index'
else:
t_code = str(int(tenor/12)).zfill(2) + 'Y Index'
ticker = 'F' + country_code[country] + t_code
return ticker
def calc_duration(ytm, t=10, dy=0.0001):
ytm_minus = ytm - dy
ytm_plus = ytm + dy
price0 = 100/((1+ytm) ** t)
price_minus = 100/((1+ytm_minus) ** t)
price_plus = 100 / ((1 + ytm_plus) ** t)
dur = (price_minus - price_plus)/(2*price0*dy)
return dur
# ===== READ THE ZERO COUPON CURVES =====
df_zcc = pd.read_excel(r'data\zero coupon curves.xlsx',
sheet_name='values',
index_col='Dates').multiply(1/100)
# ===== READ THE TRACKERS =====
df_trackers = pd.DataFrame()
for ctry in country_code.keys():
print('Reading tracker for', ctry)
df = pd.read_excel(r'data\df_' + str(ctry) + '.xlsx')
tracker = df['er_index']
tracker.name = ctry
df_trackers = pd.concat([df_trackers, tracker], axis=1)
df_trackers.plot()
plt.show()
# ===== BUILD CARRY SIGNAL ====
df_carry = pd.DataFrame()
for ctry in country_code.keys():
print('Building Carry for', ctry)
ticker_list = [gen_ticker(t, ctry) for t in tenors_month] # gets the tickers for thar country
df_ctry = df_zcc[ticker_list] # gets the verticies for the country
df_curve = pd.DataFrame(index=df_ctry.index, columns=list(range(3, 30 * 12 + 1)), dtype=float)
for t, tick in zip(tenors_month, ticker_list):
if t in tenors_month:
df_curve[t] = df_ctry[tick]
df_curve = df_curve.dropna(how='all').interpolate(axis=1, method='pchip')
dur = calc_duration(ytm=df_curve[12 * 10])
ctry_carry = df_curve[10*12] - df_curve[3] - dur*(df_curve[10*12] - df_curve[9*12 + 9])
ctry_carry.name = ctry
df_carry = pd.concat([df_carry, ctry_carry], axis=1)
df_carry.plot()
plt.show()
# ===== BUILD WEIGHTS =====
N = df_carry.shape[1]
avg_rank = ((1 + N)*N)/(2*N)
c = (df_carry.rank(axis=1).iloc[-1] - avg_rank).abs().sum()/2
df_weights = (df_carry.rank(axis=1) - avg_rank)/c
df_weights.plot()
plt.show()
# ===== BUILD STRATEGY INDEX =====
df_returns = df_trackers.pct_change(1)
df_returns[['US', 'JP']].plot()
plt.show()
strat_index = pd.DataFrame(data={'Return': (df_weights * df_returns).dropna().sum(axis=1),
'Level': np.nan})
strat_index['Level'].iloc[0] = 100
for d, dm1 in zip(strat_index.index[1:], strat_index.index[:-1]):
strat_index['Level'].loc[d] = strat_index['Level'].loc[dm1] * (1 + strat_index['Return'].loc[d])
strat_index['Level'].plot()
plt.show() | 0.393851 | 0.39065 |
from flask import Flask, render_template, request, redirect, url_for, jsonify,g
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flask_sqlalchemy import SQLAlchemy
from database_setup import Base, User, Month, Transactions
from flask_bcrypt import Bcrypt
from sqlalchemy.sql import exists
from forms import SignupForm, LoginForm
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
app = Flask(__name__)
bcrypt = Bcrypt(app)
login_manager = LoginManager()
login_manager.init_app(app)
engine = create_engine('sqlite:///mywallet.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
return session.query(User).filter(User.id == int(user_id)).first()
@app.before_request
def before_request():
g.user = current_user
# JSON return function for APIs
@app.route('/month/<int:month_id>/data/JSON')
def monthTransctionJSON(month_id):
month = session.query(Month).filter_by(id=month_id).one()
items = session.query(Transactions).filter_by(month_id=month_id).all()
return jsonify(Transactions=[i.serialize for i in items])
# Home page of Site
@app.route('/', methods = ['POST', 'GET'])
@app.route('/wallet', methods = ['POST', 'GET'])
def wallet():
error = None
if current_user.is_authenticated:
return redirect(url_for('home'))
return render_template('index.html' ,error = error)
#Login_Page
@app.route('/login', methods = ['POST', 'GET'])
def login():
error = None
form = LoginForm(request.form)
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
registered_user = session.query(User).filter_by(username = username).first()
if registered_user is not None and bcrypt.check_password_hash(registered_user.password, password):
login_user(registered_user)
return redirect(url_for('home'))
else:
error = 'Invalid Credentials ! Try Again'
return render_template('login.html',form = form ,error = error)
#Logout user
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('wallet'))
#Home page of User
@app.route('/home')
@login_required
def home():
error = None
months = session.query(Month).filter_by(user_id = current_user.id).all()
if not months:
error = 'No Months Available'
return render_template('Home.html', months = months, error = error)
#Signup page of User
@app.route('/signup', methods = ['POST', 'GET'])
def signup():
error = None
form = SignupForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
username = request.form['username']
user = session.query(exists().where(User.username == username)).scalar()
if (user == False):
newUser = User(username = request.form['username'], password = <PASSWORD>_<PASSWORD>(request.form['password']),
email = request.form['email'])
session.add(newUser)
session.commit()
error = 'User created successfully.'
else:
error = 'Username already taken'
return render_template('signup.html',form = form, error = error)
else:
return render_template('signup.html', form = form, error = error)
return render_template('signup.html', form = form, error = error)
@app.route('/update-email', methods = ['POST', 'GET'])
@login_required
def updateemail():
error = None
return render_template('cons.html',error = error)
@app.route('/update-pass', methods = ['POST', 'GET'])
@login_required
def updatepass():
error = None
return render_template('cons.html',error = error)
# Add a new month to database
@app.route('/month/new', methods = ['POST', 'GET'])
@login_required
def monthNew():
if request.method == 'POST':
newData = Month(name = request.form['name'],year = request.form['year'], open_bal = request.form['balance'],
curr_bal = request.form['balance'],credits = 0, debits = 0, transactions = 0)
newData.user = g.user
session.add(newData)
session.commit()
return redirect(url_for('home'))
else:
return render_template('newMonth.html')
#Delete a month from database
@app.route('/month/<int:month_id>/delete', methods = ['POST', 'GET'])
@login_required
def monthDelete(month_id):
deleteMonth = session.query(Month).filter_by(id = month_id).first()
deleteTransaction = session.query(Transactions).filter_by(month_id = month_id).all()
if request.method == 'POST':
session.delete(deleteMonth)
for i in deleteTransaction:
session.delete(i)
session.commit()
return redirect(url_for('wallet'))
else:
if (deleteMonth == None):
return render_template('unexist.html')
if (deleteMonth.user_id != current_user.id):
return render_template('unauthorize.html')
return render_template('deleteMonth.html', month_id = month_id, i = deleteMonth)
#Edit Month
@app.route('/month/<int:month_id>/edit', methods = ['POST', 'GET'])
@login_required
def monthEdit(month_id):
editMonth = session.query(Month).filter_by(id = month_id).first()
getTransaction = session.query(Transactions).filter_by(month_id = month_id).all()
if request.method == 'POST':
newbal = request.form['balance']
editMonth.open_bal = int(newbal)
editMonth.curr_bal = int(newbal)
for i in getTransaction:
if i.name == 'Debit':
editMonth.curr_bal -= int(i.cost)
else:
editMonth.curr_bal += int(i.cost)
session.add(editMonth)
session.commit()
return redirect(url_for('home'))
else:
if (editMonth == None):
return render_template('unexist.html')
if (editMonth.user_id != current_user.id):
return render_template('unauthorize.html')
return render_template('editMonth.html', month_id = month_id, i= editMonth)
#see all transactions of a month
@app.route('/month/<int:month_id>/')
@login_required
def transactions(month_id):
month = session.query(Month).filter_by(id = month_id).first()
if (month != None):
items = session.query(Transactions).filter_by(month_id = month.id, name = "Debit")
items1 = session.query(Transactions).filter_by(month_id = month.id, name="Credit")
if (month == None):
return render_template('unexist.html')
if (month.user_id != current_user.id):
return render_template('unauthorize.html')
return render_template('transaction.html', month = month, items = items, items1 = items1)
#Add a new Transactions
@app.route('/month/<int:month_id>/new/', methods = ['GET','POST'])
@login_required
def newTransaction(month_id):
if request.method == 'POST':
newItem = Transactions(name = request.form['option'], description = request.form['description'],cost = request.form['price'],month_id = month_id)
newItem.user = g.user
if request.form['option'] == 'Debit':
month = session.query(Month).filter_by(id = month_id).one()
month.curr_bal = month.curr_bal - int(request.form['price'])
month.debits = month.debits + int(request.form['price'])
month.transactions = month.transactions+1;
if request.form['option'] == 'Credit':
month = session.query(Month).filter_by(id = month_id).one()
month.curr_bal = month.curr_bal + int(request.form['price'])
month.credits = month.credits + int(request.form['price'])
month.transactions = month.transactions+1;
session.add(newItem)
session.commit()
return redirect(url_for('transactions', month_id = month_id))
else:
month = session.query(Month).filter_by(id = month_id).first()
if (month == None):
return render_template('unexist.html')
if (month.user_id != current_user.id):
return render_template('unauthorize.html')
return render_template('newTransaction.html', month_id = month_id)
# Delete a Transaction
@app.route('/month/<int:month_id>/<int:transactions_id>/delete/', methods = ['GET', 'POST'])
@login_required
def deleteTransaction(month_id, transactions_id):
deleteTransaction = session.query(Transactions).filter_by(id = transactions_id).first()
if request.method == 'POST':
if deleteTransaction.name == 'Debit':
month = session.query(Month).filter_by(id = month_id).one()
month.curr_bal = month.curr_bal + int(deleteTransaction.cost)
month.debits = month.debits - int(deleteTransaction.cost)
month.transactions = month.transactions-1;
if deleteTransaction.name == 'Credit':
month = session.query(Month).filter_by(id = month_id).one()
month.curr_bal = month.curr_bal - int(deleteTransaction.cost)
month.credits = month.credits - int(deleteTransaction.cost)
month.transactions = month.transactions-1;
session.delete(deleteTransaction)
session.commit()
return redirect(url_for('transactions', month_id = month_id))
else:
month = session.query(Month).filter_by(id = month_id).first()
if (deleteTransaction == None):
return render_template('unexist.html')
if (month == None):
return render_template('unexist.html')
if (deleteTransaction.month_id != month_id):
return render_template('unauthorize.html')
if (month.user_id != current_user.id or deleteTransaction.user_id != current_user.id):
return render_template('unauthorize.html')
return render_template('deleteTransaction.html', month_id = month_id, transactions_id = transactions_id, i = deleteTransaction)
#Edit Transaction
@app.route('/month/<int:month_id>/<int:transactions_id>/edit/', methods = ['POST', 'GET'])
@login_required
def transactionEdit(month_id, transactions_id):
editTransaction = session.query(Transactions).filter_by(id = transactions_id).first()
if request.method == 'POST':
if editTransaction.name == 'Debit':
if request.form['option'] == 'Debit':
month = session.query(Month).filter_by(id = month_id).one()
editTransaction.name = request.form['option']
month.curr_bal += int(editTransaction.cost)
month.curr_bal -= int(request.form['price'])
month.debits -= int(editTransaction.cost)
month.debits += int(request.form['price'])
editTransaction.cost = int(request.form['price'])
editTransaction.description = request.form['description']
if request.form['option'] == 'Credit':
month = session.query(Month).filter_by(id = month_id).one()
editTransaction.name = request.form['option']
month.curr_bal += int(editTransaction.cost)
month.curr_bal += int(request.form['price'])
month.debits -= int(editTransaction.cost)
month.credits += int(request.form['price'])
editTransaction.cost = request.form['price']
editTransaction.description = request.form['description']
if editTransaction.name == 'Credit':
if request.form['option'] == 'Debit':
month = session.query(Month).filter_by(id = month_id).one()
editTransaction.name = request.form['option']
month.curr_bal -= int(editTransaction.cost)
month.curr_bal -= int(request.form['price'])
month.credits -= int(editTransaction.cost)
month.debits += int(request.form['price'])
editTransaction.cost = int(request.form['price'])
editTransaction.description = request.form['description']
if request.form['option'] == 'Credit':
month = session.query(Month).filter_by(id = month_id).one()
editTransaction.name = request.form['option']
month.curr_bal -= int(editTransaction.cost)
month.curr_bal += int(request.form['price'])
month.credits -= int(editTransaction.cost)
month.credits += int(request.form['price'])
editTransaction.cost = request.form['price']
editTransaction.description = request.form['description']
session.add(editTransaction)
session.commit()
return redirect(url_for('transactions', month_id = month_id))
else:
month = session.query(Month).filter_by(id = month_id).first()
if (editTransaction == None):
return render_template('unexist.html')
if (month == None):
return render_template('unexist.html')
if (editTransaction.month_id != month_id):
return render_template('unauthorize.html')
if (month.user_id != current_user.id or editTransaction.user_id != current_user.id):
return render_template('unauthorize.html')
return render_template('transactionEdit.html', month_id = month_id,transactions_id = transactions_id, i= editTransaction)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000) | app.py | from flask import Flask, render_template, request, redirect, url_for, jsonify,g
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flask_sqlalchemy import SQLAlchemy
from database_setup import Base, User, Month, Transactions
from flask_bcrypt import Bcrypt
from sqlalchemy.sql import exists
from forms import SignupForm, LoginForm
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
app = Flask(__name__)
bcrypt = Bcrypt(app)
login_manager = LoginManager()
login_manager.init_app(app)
engine = create_engine('sqlite:///mywallet.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the id stored in the session cookie back
    # to a User row, or None when the id no longer exists.
    return session.query(User).filter(User.id == int(user_id)).first()
@app.before_request
def before_request():
    # Expose the current user on flask.g so views can read g.user.
    g.user = current_user
# JSON return function for APIs
@app.route('/month/<int:month_id>/data/JSON')
def monthTransctionJSON(month_id):
    """Return every transaction of a month as JSON.

    NOTE(review): `month` is queried only so .one() raises (-> HTTP 500)
    for an unknown month_id; the row itself is unused.  The endpoint has
    no login or ownership check — confirm that is intended.
    """
    month = session.query(Month).filter_by(id=month_id).one()
    items = session.query(Transactions).filter_by(month_id=month_id).all()
    return jsonify(Transactions=[i.serialize for i in items])
# Landing page: anonymous visitors see the public index page,
# authenticated users are bounced straight to their wallet home.
@app.route('/', methods = ['POST', 'GET'])
@app.route('/wallet', methods = ['POST', 'GET'])
def wallet():
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    return render_template('index.html', error=None)
#Login_Page
@app.route('/login', methods = ['POST', 'GET'])
def login():
    """Authenticate a user against the stored bcrypt hash.

    GET renders the login form; POST verifies the credentials and, on
    success, starts a Flask-Login session and redirects to /home.
    """
    error = None
    form = LoginForm(request.form)
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # .first() yields None for an unknown username.
        registered_user = session.query(User).filter_by(username = username).first()
        if registered_user is not None and bcrypt.check_password_hash(registered_user.password, password):
            login_user(registered_user)
            return redirect(url_for('home'))
        else:
            # Same message for unknown user and bad password, so the
            # form does not leak which usernames exist.
            error = 'Invalid Credentials ! Try Again'
    return render_template('login.html',form = form ,error = error)
#Logout user
@app.route('/logout')
@login_required
def logout():
    # End the Flask-Login session and return to the public landing page.
    logout_user()
    return redirect(url_for('wallet'))
# User dashboard: list this user's months, with a notice when empty.
@app.route('/home')
@login_required
def home():
    months = session.query(Month).filter_by(user_id = current_user.id).all()
    error = None if months else 'No Months Available'
    return render_template('Home.html', months = months, error = error)
#Signup page of User
@app.route('/signup', methods = ['POST', 'GET'])
def signup():
    """Register a new account.

    GET renders the signup form; POST validates it, rejects duplicate
    usernames, and stores the new user with a bcrypt-hashed password.

    Fix: the password-hash call had been corrupted to placeholder text
    (`<PASSWORD>_<PASSWORD>(...)`); restored to Flask-Bcrypt's
    generate_password_hash, matching the check_password_hash used in
    login().
    """
    error = None
    form = SignupForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            username = request.form['username']
            # EXISTS query: True when the username is already taken.
            user = session.query(exists().where(User.username == username)).scalar()
            if (user == False):
                # Never store the plaintext password — only its bcrypt hash.
                newUser = User(username = request.form['username'],
                               password = bcrypt.generate_password_hash(request.form['password']),
                               email = request.form['email'])
                session.add(newUser)
                session.commit()
                error = 'User created successfully.'
            else:
                error = 'Username already taken'
            return render_template('signup.html',form = form, error = error)
        else:
            return render_template('signup.html', form = form, error = error)
    return render_template('signup.html', form = form, error = error)
@app.route('/update-email', methods = ['POST', 'GET'])
@login_required
def updateemail():
    # Placeholder: email update is not implemented yet.
    return render_template('cons.html', error=None)
@app.route('/update-pass', methods = ['POST', 'GET'])
@login_required
def updatepass():
    # Placeholder: password update is not implemented yet.
    return render_template('cons.html', error=None)
# Add a new month to database
@app.route('/month/new', methods = ['POST', 'GET'])
@login_required
def monthNew():
    """Create a month record owned by the current user.

    POST reads name/year/balance from the form; the current balance
    starts equal to the opening balance and all counters at zero.
    """
    if request.method == 'POST':
        newData = Month(name = request.form['name'],year = request.form['year'], open_bal = request.form['balance'],
                        curr_bal = request.form['balance'],credits = 0, debits = 0, transactions = 0)
        newData.user = g.user
        session.add(newData)
        session.commit()
        return redirect(url_for('home'))
    else:
        return render_template('newMonth.html')
#Delete a month from database
@app.route('/month/<int:month_id>/delete', methods = ['POST', 'GET'])
@login_required
def monthDelete(month_id):
    """Delete a month together with all of its transactions.

    Fix: the existence and ownership checks previously ran only on GET,
    so a forged POST could delete (or crash with AttributeError on)
    another user's month.  The guards now run for both methods before
    anything is touched.
    """
    deleteMonth = session.query(Month).filter_by(id = month_id).first()
    deleteTransaction = session.query(Transactions).filter_by(month_id = month_id).all()
    if (deleteMonth == None):
        return render_template('unexist.html')
    if (deleteMonth.user_id != current_user.id):
        return render_template('unauthorize.html')
    if request.method == 'POST':
        # Remove the month and every transaction that belongs to it.
        session.delete(deleteMonth)
        for i in deleteTransaction:
            session.delete(i)
        session.commit()
        return redirect(url_for('wallet'))
    else:
        return render_template('deleteMonth.html', month_id = month_id, i = deleteMonth)
#Edit Month
@app.route('/month/<int:month_id>/edit', methods = ['POST', 'GET'])
@login_required
def monthEdit(month_id):
    """Change a month's opening balance and recompute its current balance.

    The current balance is rebuilt from the new opening balance by
    replaying every stored transaction (debits subtract, credits add).

    NOTE(review): the existence/ownership checks run only on GET; a
    forged POST for a missing or foreign month crashes or succeeds —
    confirm and mirror the checks on POST if unintended.
    """
    editMonth = session.query(Month).filter_by(id = month_id).first()
    getTransaction = session.query(Transactions).filter_by(month_id = month_id).all()
    if request.method == 'POST':
        newbal = request.form['balance']
        editMonth.open_bal = int(newbal)
        editMonth.curr_bal = int(newbal)
        # Replay all transactions against the fresh opening balance.
        for i in getTransaction:
            if i.name == 'Debit':
                editMonth.curr_bal -= int(i.cost)
            else:
                editMonth.curr_bal += int(i.cost)
        session.add(editMonth)
        session.commit()
        return redirect(url_for('home'))
    else:
        if (editMonth == None):
            return render_template('unexist.html')
        if (editMonth.user_id != current_user.id):
            return render_template('unauthorize.html')
        return render_template('editMonth.html', month_id = month_id, i= editMonth)
#see all transactions of a month
@app.route('/month/<int:month_id>/')
@login_required
def transactions(month_id):
    """Show a month's transactions, split into debit and credit lists."""
    month = session.query(Month).filter_by(id = month_id).first()
    if (month != None):
        # Two lazy queries; the template iterates them directly.
        items = session.query(Transactions).filter_by(month_id = month.id, name = "Debit")
        items1 = session.query(Transactions).filter_by(month_id = month.id, name="Credit")
    if (month == None):
        return render_template('unexist.html')
    if (month.user_id != current_user.id):
        return render_template('unauthorize.html')
    return render_template('transaction.html', month = month, items = items, items1 = items1)
#Add a new Transactions
@app.route('/month/<int:month_id>/new/', methods = ['GET','POST'])
@login_required
def newTransaction(month_id):
    """Add a debit or credit transaction to a month and update its totals.

    NOTE(review): the ownership check runs only on GET; POST trusts the
    form, and .one() turns an unknown month_id into an HTTP 500 —
    confirm this is intended.
    """
    if request.method == 'POST':
        newItem = Transactions(name = request.form['option'], description = request.form['description'],cost = request.form['price'],month_id = month_id)
        newItem.user = g.user
        if request.form['option'] == 'Debit':
            month = session.query(Month).filter_by(id = month_id).one()
            # A debit lowers the balance and raises the debit total.
            month.curr_bal = month.curr_bal - int(request.form['price'])
            month.debits = month.debits + int(request.form['price'])
            month.transactions = month.transactions+1;
        if request.form['option'] == 'Credit':
            month = session.query(Month).filter_by(id = month_id).one()
            # A credit raises both the balance and the credit total.
            month.curr_bal = month.curr_bal + int(request.form['price'])
            month.credits = month.credits + int(request.form['price'])
            month.transactions = month.transactions+1;
        session.add(newItem)
        session.commit()
        return redirect(url_for('transactions', month_id = month_id))
    else:
        month = session.query(Month).filter_by(id = month_id).first()
        if (month == None):
            return render_template('unexist.html')
        if (month.user_id != current_user.id):
            return render_template('unauthorize.html')
        return render_template('newTransaction.html', month_id = month_id)
# Delete a Transaction
@app.route('/month/<int:month_id>/<int:transactions_id>/delete/', methods = ['GET', 'POST'])
@login_required
def deleteTransaction(month_id, transactions_id):
    """Delete a transaction and roll its effect out of the month totals.

    NOTE(review): the existence/ownership checks run only on GET, so a
    forged POST can delete another user's transaction (or crash on a
    missing one) — confirm and mirror the checks on POST if unintended.
    """
    deleteTransaction = session.query(Transactions).filter_by(id = transactions_id).first()
    if request.method == 'POST':
        if deleteTransaction.name == 'Debit':
            month = session.query(Month).filter_by(id = month_id).one()
            # Removing a debit gives the money back to the balance.
            month.curr_bal = month.curr_bal + int(deleteTransaction.cost)
            month.debits = month.debits - int(deleteTransaction.cost)
            month.transactions = month.transactions-1;
        if deleteTransaction.name == 'Credit':
            month = session.query(Month).filter_by(id = month_id).one()
            # Removing a credit takes the money away again.
            month.curr_bal = month.curr_bal - int(deleteTransaction.cost)
            month.credits = month.credits - int(deleteTransaction.cost)
            month.transactions = month.transactions-1;
        session.delete(deleteTransaction)
        session.commit()
        return redirect(url_for('transactions', month_id = month_id))
    else:
        month = session.query(Month).filter_by(id = month_id).first()
        if (deleteTransaction == None):
            return render_template('unexist.html')
        if (month == None):
            return render_template('unexist.html')
        if (deleteTransaction.month_id != month_id):
            return render_template('unauthorize.html')
        if (month.user_id != current_user.id or deleteTransaction.user_id != current_user.id):
            return render_template('unauthorize.html')
        return render_template('deleteTransaction.html', month_id = month_id, transactions_id = transactions_id, i = deleteTransaction)
#Edit Transaction
@app.route('/month/<int:month_id>/<int:transactions_id>/edit/', methods = ['POST', 'GET'])
@login_required
def transactionEdit(month_id, transactions_id):
    """Edit a transaction's type, amount and description.

    POST reverses the old transaction's effect on the month totals,
    then applies the new values and stores the updated row.

    Fixes over the previous version:
    * converting Debit -> Credit set name='Credit' inside the first
      outer `if`, so the second outer `if name == 'Credit'` branch then
      re-applied the edit a second time — the old effect is now undone
      and the new one applied exactly once, computed up front;
    * cost is always stored as an int (it was sometimes left a string).
    """
    editTransaction = session.query(Transactions).filter_by(id = transactions_id).first()
    if request.method == 'POST':
        month = session.query(Month).filter_by(id = month_id).one()
        old_name = editTransaction.name
        old_cost = int(editTransaction.cost)
        # The form's option select only offers 'Debit' and 'Credit'.
        new_name = request.form['option']
        new_cost = int(request.form['price'])
        # Undo the old transaction's contribution to the month totals.
        if old_name == 'Debit':
            month.curr_bal += old_cost
            month.debits -= old_cost
        elif old_name == 'Credit':
            month.curr_bal -= old_cost
            month.credits -= old_cost
        # Apply the new transaction's contribution.
        if new_name == 'Debit':
            month.curr_bal -= new_cost
            month.debits += new_cost
        elif new_name == 'Credit':
            month.curr_bal += new_cost
            month.credits += new_cost
        editTransaction.name = new_name
        editTransaction.cost = new_cost
        editTransaction.description = request.form['description']
        session.add(editTransaction)
        session.commit()
        return redirect(url_for('transactions', month_id = month_id))
    else:
        month = session.query(Month).filter_by(id = month_id).first()
        if (editTransaction == None):
            return render_template('unexist.html')
        if (month == None):
            return render_template('unexist.html')
        if (editTransaction.month_id != month_id):
            return render_template('unauthorize.html')
        if (month.user_id != current_user.id or editTransaction.user_id != current_user.id):
            return render_template('unauthorize.html')
        return render_template('transactionEdit.html', month_id = month_id,transactions_id = transactions_id, i= editTransaction)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000) | 0.290779 | 0.090013 |
import socket, json, os, re
from pathlib import Path
from http.server import HTTPServer, SimpleHTTPRequestHandler
class MyHandler(SimpleHTTPRequestHandler):
dir = "/etc/nsd/nsd.conf.d/"
print("Loading config")
with open('configs/config.json') as f:
config = json.load(f)
print("Ready")
def response(self,httpCode,key,msg):
self.send_response(httpCode)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps({key: msg}).encode()))
def loadZone(self,zone):
records = {}
if Path(self.dir+zone).is_file():
records[zone] = {}
with open(self.dir+zone) as f:
lines = f.readlines()
for line in lines:
#sub ttl IN type target/ttl
parts = re.split(r'\t+', line)
if len(parts) > 3 and "IN" in parts[2]:
if not parts[3] in records[zone]: records[zone][parts[3]] = {}
records[zone][parts[3]][parts[0]] = {}
records[zone][parts[3]][parts[0]]['ttl'] = parts[1]
records[zone][parts[3]][parts[0]]['target'] = parts[4]
return records
def loadFile(self,file):
with open(file, 'r') as file: return file.read()
def saveFile(self,file,data):
with open(file, "w") as file: file.write(data)
def do_GET(self):
if len(self.path) > 200:
self.response(414,"error","way to fucking long")
return
parts = re.split(r'/', self.path)
if len(parts) < 6 or len(parts) > 7:
self.response(400,"error","incomplete")
return
if len(parts) == 6:
empty, token, domain, subdomain, type, param = self.path.split('/')
elif len(parts) == 7:
empty, token, domain, subdomain, type, param, target = self.path.split('/')
if token not in self.config["tokens"]:
self.response(401,"error","token required")
return
results = re.findall("^[a-zA-Z0-9]{2,30}\.[a-zA-Z]{2,30}$",domain, re.MULTILINE)
if not results:
self.response(400,"error","invalid domain")
return
records = self.loadZone(domain)
if domain not in records or subdomain not in records[domain][type]:
if param == "add":
zone = self.loadFile(self.dir+domain)
if type == "TXT":
zone = zone + subdomain + "\t3600\tIN\t"+type+'\t"'+target+'"\n'
else:
zone = zone + subdomain + "\t3600\tIN\t"+type+"\t"+target+"\n"
self.saveFile(self.dir+domain,zone)
os.system("sudo /usr/bin/systemctl reload nsd")
self.response(200,"success","record added")
return
else:
self.response(404,"error","record not found")
return
if param == "update":
zone = self.loadFile(self.dir+domain)
zone = re.sub(subdomain+'\t*[0-9]+\t*IN\t*'+type+'\t*'+records[domain][type][subdomain]['target'], subdomain+'\t300\tIN\t'+type+'\t'+self.headers.get("X-Real-IP")+"\n", zone)
self.saveFile(self.dir+domain,zone)
os.system("sudo /usr/bin/systemctl reload nsd")
self.response(200,"success","record updated")
elif param == "delete":
zone = self.loadFile(self.dir+domain)
zone = re.sub(subdomain+'\t*[0-9]+\t*IN\t*'+type+'\t*'+records[domain][type][subdomain]['target'], "", zone)
self.saveFile(self.dir+domain,zone)
os.system("sudo /usr/bin/systemctl reload nsd")
self.response(200,"success","record updated")
server = HTTPServer(('127.0.0.1', 8080), MyHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
server.socket.close() | api.py | import socket, json, os, re
from pathlib import Path
from http.server import HTTPServer, SimpleHTTPRequestHandler
class MyHandler(SimpleHTTPRequestHandler):
    """Tiny HTTP API that edits NSD zone files.

    URL shape: /<token>/<domain>/<subdomain>/<type>/<param>[/<target>]
    with param one of add / update / delete.
    """
    # Directory containing one zone file per managed domain.
    dir = "/etc/nsd/nsd.conf.d/"
    # Executed once when the class body runs: load the API tokens.
    print("Loading config")
    with open('configs/config.json') as f:
        config = json.load(f)
    print("Ready")

    def response(self,httpCode,key,msg):
        # Emit a single-key JSON body with the given HTTP status code.
        self.send_response(httpCode)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(bytes(json.dumps({key: msg}).encode()))

    def loadZone(self,zone):
        """Parse the zone's file into {zone: {type: {sub: {ttl, target}}}}.

        Returns {} when no file exists for *zone*.
        """
        records = {}
        if Path(self.dir+zone).is_file():
            records[zone] = {}
            with open(self.dir+zone) as f:
                lines = f.readlines()
            for line in lines:
                # Expected tab-separated columns: sub  ttl  IN  type  target
                parts = re.split(r'\t+', line)
                if len(parts) > 3 and "IN" in parts[2]:
                    if not parts[3] in records[zone]: records[zone][parts[3]] = {}
                    records[zone][parts[3]][parts[0]] = {}
                    records[zone][parts[3]][parts[0]]['ttl'] = parts[1]
                    records[zone][parts[3]][parts[0]]['target'] = parts[4]
        return records

    def loadFile(self,file):
        # Read a whole file as text.
        with open(file, 'r') as file: return file.read()

    def saveFile(self,file,data):
        # Overwrite a file with the given text.
        with open(file, "w") as file: file.write(data)

    def do_GET(self):
        """Dispatch one API call encoded entirely in the request path.

        NOTE(review): subdomain/type/target are interpolated into the
        zone file and into re.sub() patterns without escaping, so a
        token holder can inject extra records or regex syntax.  An
        'add' with a 6-part URL (no target segment) raises NameError,
        and records[domain][type] raises KeyError when that type has no
        records yet — confirm and harden.
        """
        if len(self.path) > 200:
            self.response(414,"error","way to fucking long")
            return
        parts = re.split(r'/', self.path)
        if len(parts) < 6 or len(parts) > 7:
            self.response(400,"error","incomplete")
            return
        # The path's leading '/' produces an empty first element.
        if len(parts) == 6:
            empty, token, domain, subdomain, type, param = self.path.split('/')
        elif len(parts) == 7:
            empty, token, domain, subdomain, type, param, target = self.path.split('/')
        if token not in self.config["tokens"]:
            self.response(401,"error","token required")
            return
        # Only plain alphanumeric name.tld domains pass; this also keeps
        # separators out of the file paths built below.
        results = re.findall("^[a-zA-Z0-9]{2,30}\.[a-zA-Z]{2,30}$",domain, re.MULTILINE)
        if not results:
            self.response(400,"error","invalid domain")
            return
        records = self.loadZone(domain)
        if domain not in records or subdomain not in records[domain][type]:
            if param == "add":
                zone = self.loadFile(self.dir+domain)
                if type == "TXT":
                    # TXT record targets must be quoted in the zone file.
                    zone = zone + subdomain + "\t3600\tIN\t"+type+'\t"'+target+'"\n'
                else:
                    zone = zone + subdomain + "\t3600\tIN\t"+type+"\t"+target+"\n"
                self.saveFile(self.dir+domain,zone)
                os.system("sudo /usr/bin/systemctl reload nsd")
                self.response(200,"success","record added")
                return
            else:
                self.response(404,"error","record not found")
                return
        if param == "update":
            # Dyndns-style update: point the record at the caller's real
            # IP (X-Real-IP set by the reverse proxy) with a 300s TTL.
            zone = self.loadFile(self.dir+domain)
            zone = re.sub(subdomain+'\t*[0-9]+\t*IN\t*'+type+'\t*'+records[domain][type][subdomain]['target'], subdomain+'\t300\tIN\t'+type+'\t'+self.headers.get("X-Real-IP")+"\n", zone)
            self.saveFile(self.dir+domain,zone)
            os.system("sudo /usr/bin/systemctl reload nsd")
            self.response(200,"success","record updated")
        elif param == "delete":
            # Strip the matched record line from the zone file.
            zone = self.loadFile(self.dir+domain)
            zone = re.sub(subdomain+'\t*[0-9]+\t*IN\t*'+type+'\t*'+records[domain][type][subdomain]['target'], "", zone)
            self.saveFile(self.dir+domain,zone)
            os.system("sudo /usr/bin/systemctl reload nsd")
            self.response(200,"success","record updated")
server = HTTPServer(('127.0.0.1', 8080), MyHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
server.socket.close() | 0.081581 | 0.077762 |
r"""
Prototype for object model backend for the libNeuroML project
"""
import numpy as np
import neuroml
class ArrayMorphology(neuroml.Morphology):
"""Core of the array-based object model backend.
Provides the core arrays - vertices,connectivity etc.
node_types.
The connectivity array is a list of indices pointing to which
other element an element is attached. So for instance,
connectivity[3] is an integer with the index of the section
it refers to in the Backend
- EXAMPLE:
Vertices[3] and connectivity[3] refer to the vertex
and connectivity of the same node.
.. note::
The root section by convention has connectivity == -1.
"""
def __init__(
self,
vertices=[],
connectivity=[],
id=None,
node_types=None,
name=None,
physical_mask=None,
fractions_along=None,
):
super(ArrayMorphology, self).__init__()
self.connectivity = np.array(connectivity)
self.vertices = np.array(vertices)
self.id = id
if np.any(physical_mask):
self.physical_mask = np.array(physical_mask)
else:
self.physical_mask = np.zeros(len(connectivity), dtype="bool")
if np.any(node_types):
self.node_types = np.array(node_types)
else:
self.node_types = np.zeros(len(connectivity), dtype="int32")
if np.any(fractions_along):
self.fractions_along = np.array(fractions_along)
else:
self.fractions_along = np.zeros(len(connectivity), dtype="int32")
# it will need a reference to its parent?
self.segments = SegmentList(self)
assert self.valid_morphology, "invalid_morphology"
@property
def valid_morphology(self):
all_nodes = self.__all_nodes_satisfied
all_vertices = self.__all_vertices_present
return all_nodes and all_vertices
@property
def __all_vertices_present(self):
try:
all_vertices_present = self.vertices.shape[1] == 4
except:
all_vertices_present = False
num_vertices = len(self.vertices)
return all_vertices_present or num_vertices == 0
@property
def valid_ids(self):
valid_flag = True
for internal_id in self.segments.instantiated_segments.keys():
external_id = self.segments.instantiated_segments[internal_id].id
valid_flag = (internal_id == external_id) * valid_flag
return valid_flag
@property
def __all_nodes_satisfied(self):
m = self.vertices.shape[0]
n = self.connectivity.shape[0]
p = self.node_types.shape[0]
all_nodes_satisfied = m == n == p
return all_nodes_satisfied
@property
def root_index(self):
return np.where(self.connectivity == -1)[0][0]
@property
def root_vertex(self):
return self.vertices[self.root_index]
@property
def num_vertices(self):
return len(self.vertices)
@property
def physical_indices(self):
"""returns indices of vertices which are physical"""
physical_indices = np.where(self.physical_mask == 0)[0]
return physical_indices
def children(self, index):
"""Returns an array with indexes of children"""
return np.where(self.connectivity == index)
def to_root(self, index):
"""
Changes the connectivity matrix
so that the node at index becomes the root
"""
old_root_index = self.root_index
new_root_index = index
# do a tree traversal:
parent_index = self.connectivity[index]
grandparent_index = self.connectivity[parent_index]
while index != old_root_index:
self.connectivity[parent_index] = index
index = parent_index
parent_index = grandparent_index
grandparent_index = self.connectivity[parent_index]
self.connectivity[new_root_index] = -1
def parent_id(self, index):
"""Return the parent index for the given index"""
return self.connectivity[index]
def vertex(self, index):
"""Return vertex corresponding to index in morphology"""
return self.vertices[index]
def __len__(self):
return len(self.connectivity)
def pop(self, index):
"""
TODO:This is failing tests (understandably) - need to fix!
Deletes a node from the morphology, its children become
children of the deleted node's parent.
"""
self.vertices = np.delete(self.vertices, index)
self.node_types = np.delete(self.node_types, index)
self.connectivity = np.delete(self.connectivity, index)
k = 0
for i in self.connectivity:
if i >= index:
self.connectivity[k] = i - 1
k += 1
pass
def to_neuroml_morphology(self, id=""):
morphology = neuroml.Morphology()
morphology.id = id
# need to traverse the tree:
for index in range(self.num_vertices - 1):
seg = self.segment_from_vertex_index(index)
morphology.segments.append(seg)
return morphology
def segment_from_vertex_index(self, index):
parent_index = self.connectivity[index]
node_x = self.vertices[index][0]
node_y = self.vertices[index][1]
node_z = self.vertices[index][2]
node_d = self.vertices[index][3]
parent_x = self.vertices[parent_index][0]
parent_y = self.vertices[parent_index][1]
parent_z = self.vertices[parent_index][2]
parent_d = self.vertices[parent_index][3]
p = neuroml.Point3DWithDiam(x=node_x, y=node_y, z=node_z, diameter=node_d)
d = neuroml.Point3DWithDiam(
x=parent_x, y=parent_y, z=parent_z, diameter=parent_d
)
seg = neuroml.Segment(proximal=p, distal=d, id=index)
if index > 1:
parent = neuroml.SegmentParent(segments=parent_index)
seg.parent = parent
return seg
class SegmentList(object):
"""
This class is a proxy, it returns a segment either
from the arraymorph or if it has already been instantiated
it returns the relevant segment.
"""
def __init__(self, arraymorph):
self.arraymorph = arraymorph
self.instantiated_segments = {}
def __vertex_index_from_segment_index__(self, index):
"""
The existence of a physical mask means that segment and
and vertex indices fall out of sync. This function returns the
index of the proximal vertex in the vertices array of the arraymorph
which corresponds to the segment index.
"""
physical_mask = self.arraymorph.physical_mask
segment_distal_vertex_indexes = np.where(physical_mask == False)[0] + 1
return segment_distal_vertex_indexes[index]
def __len__(self):
"""
Override the __len__ magic method to give total numer of
segments which is number of vertices - 1 and minus all
floating segments.
"""
num_vertices = self.arraymorph.num_vertices
num_floating = np.sum(self.arraymorph.physical_mask)
num_segments = num_vertices - num_floating - 1
if num_segments < 0:
num_segments = 0
return int(num_segments)
def __iadd__(self, segment_list):
for segment in segment_list:
self.append(segment)
return self
def __getitem__(self, segment_index):
if segment_index in self.instantiated_segments:
neuroml_segment = self.instantiated_segments[segment_index]
else:
vertex_index = self.__vertex_index_from_segment_index__(segment_index)
neuroml_segment = self.arraymorph.segment_from_vertex_index(vertex_index)
self.instantiated_segments[segment_index] = neuroml_segment
return neuroml_segment
def __setitem__(self, index, user_set_segment):
self.instantiated_segments[index] = user_set_segment
def append(self, segment):
"""
Adds a new segment
TODO: Correct connectivity is currently being ignored -
The new segment is always connected to the root node.
"""
dist_vertex_index = len(self.arraymorph.vertices)
prox_vertex_index = dist_vertex_index + 1
prox_x = segment.proximal.x
prox_y = segment.proximal.y
prox_z = segment.proximal.z
prox_diam = segment.proximal.diameter
dist_x = segment.distal.x
dist_y = segment.distal.y
dist_z = segment.distal.z
distal_diam = segment.distal.diameter
prox_vertex = [prox_x, prox_y, prox_z, prox_diam]
dist_vertex = [dist_x, dist_y, dist_z, distal_diam]
if len(self.arraymorph.vertices) > 0:
self.arraymorph.vertices = np.append(
self.arraymorph.vertices, [dist_vertex, prox_vertex], axis=0
)
else:
self.arraymorph.vertices = np.array([dist_vertex, prox_vertex])
self.arraymorph.connectivity = np.append(
self.arraymorph.connectivity, [-1, dist_vertex_index]
)
if len(self.arraymorph.physical_mask) == 0:
self.arraymorph.physical_mask = np.array([0, 0])
else:
self.arraymorph.physical_mask = np.append(
self.arraymorph.physical_mask, [1, 0]
)
segment_index = len(self) - 1
self.instantiated_segments[segment_index] = segment | neuroml/arraymorph.py | r"""
Prototype for object model backend for the libNeuroML project
"""
import numpy as np
import neuroml
class ArrayMorphology(neuroml.Morphology):
    """Core of the array-based object model backend.

    Provides the core arrays - vertices, connectivity etc. and
    node_types.

    The connectivity array is a list of indices pointing to which
    other element an element is attached. So for instance,
    connectivity[3] is an integer with the index of the section
    it refers to in the Backend

    - EXAMPLE:

        Vertices[3] and connectivity[3] refer to the vertex
        and connectivity of the same node.

    .. note::

       The root section by convention has connectivity == -1.
    """

    def __init__(
        self,
        vertices=[],
        connectivity=[],
        id=None,
        node_types=None,
        name=None,
        physical_mask=None,
        fractions_along=None,
    ):
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; harmless here only because np.array() copies them.
        super(ArrayMorphology, self).__init__()
        self.connectivity = np.array(connectivity)
        self.vertices = np.array(vertices)
        self.id = id
        # NOTE(review): np.any(...) treats an explicitly passed all-zero
        # array the same as None, so such inputs are silently replaced
        # by freshly zeroed arrays — confirm this is intended.
        if np.any(physical_mask):
            self.physical_mask = np.array(physical_mask)
        else:
            self.physical_mask = np.zeros(len(connectivity), dtype="bool")
        if np.any(node_types):
            self.node_types = np.array(node_types)
        else:
            self.node_types = np.zeros(len(connectivity), dtype="int32")
        if np.any(fractions_along):
            self.fractions_along = np.array(fractions_along)
        else:
            self.fractions_along = np.zeros(len(connectivity), dtype="int32")
        # it will need a reference to its parent?
        self.segments = SegmentList(self)
        assert self.valid_morphology, "invalid_morphology"

    @property
    def valid_morphology(self):
        """True when the arrays agree in length and vertices are 4-wide."""
        all_nodes = self.__all_nodes_satisfied
        all_vertices = self.__all_vertices_present
        return all_nodes and all_vertices

    @property
    def __all_vertices_present(self):
        """True when vertices is an (n, 4) array — or empty."""
        try:
            all_vertices_present = self.vertices.shape[1] == 4
        except:
            # shape[1] fails for empty / 1-D arrays.
            all_vertices_present = False
        num_vertices = len(self.vertices)
        return all_vertices_present or num_vertices == 0

    @property
    def valid_ids(self):
        """True when every cached segment's id matches its cache key."""
        valid_flag = True
        for internal_id in self.segments.instantiated_segments.keys():
            external_id = self.segments.instantiated_segments[internal_id].id
            # Multiplying by the bool accumulates an AND over all ids.
            valid_flag = (internal_id == external_id) * valid_flag
        return valid_flag

    @property
    def __all_nodes_satisfied(self):
        # Each node needs one vertex, one connectivity entry and one type.
        m = self.vertices.shape[0]
        n = self.connectivity.shape[0]
        p = self.node_types.shape[0]
        all_nodes_satisfied = m == n == p
        return all_nodes_satisfied

    @property
    def root_index(self):
        """Index of the (first) node whose connectivity is -1."""
        return np.where(self.connectivity == -1)[0][0]

    @property
    def root_vertex(self):
        """The [x, y, z, d] row of the root node."""
        return self.vertices[self.root_index]

    @property
    def num_vertices(self):
        return len(self.vertices)

    @property
    def physical_indices(self):
        """returns indices of vertices which are physical"""
        physical_indices = np.where(self.physical_mask == 0)[0]
        return physical_indices

    def children(self, index):
        """Returns an array with indexes of children"""
        # np.where on a condition alone returns a 1-tuple of index arrays.
        return np.where(self.connectivity == index)

    def to_root(self, index):
        """
        Changes the connectivity matrix
        so that the node at index becomes the root
        """
        old_root_index = self.root_index
        new_root_index = index
        # do a tree traversal, flipping each parent link on the path
        # from the new root up to the old root:
        parent_index = self.connectivity[index]
        grandparent_index = self.connectivity[parent_index]
        while index != old_root_index:
            self.connectivity[parent_index] = index
            index = parent_index
            parent_index = grandparent_index
            grandparent_index = self.connectivity[parent_index]
        self.connectivity[new_root_index] = -1

    def parent_id(self, index):
        """Return the parent index for the given index"""
        return self.connectivity[index]

    def vertex(self, index):
        """Return vertex corresponding to index in morphology"""
        return self.vertices[index]

    def __len__(self):
        # Number of nodes == length of the connectivity array.
        return len(self.connectivity)

    def pop(self, index):
        """
        TODO:This is failing tests (understandably) - need to fix!
        Deletes a node from the morphology, its children become
        children of the deleted node's parent.
        """
        # NOTE(review): np.delete without axis=0 flattens the (n, 4)
        # vertices array instead of removing one row — likely the cause
        # of the failing tests mentioned above.
        self.vertices = np.delete(self.vertices, index)
        self.node_types = np.delete(self.node_types, index)
        self.connectivity = np.delete(self.connectivity, index)
        # Shift all parent indices past the removed node down by one.
        k = 0
        for i in self.connectivity:
            if i >= index:
                self.connectivity[k] = i - 1
            k += 1
        pass

    def to_neuroml_morphology(self, id=""):
        """Export to a plain neuroml.Morphology, one segment per vertex.

        NOTE(review): iterates num_vertices - 1 indices, so the last
        vertex never produces a segment — confirm this matches the
        vertex/segment off-by-one convention used elsewhere.
        """
        morphology = neuroml.Morphology()
        morphology.id = id
        # need to traverse the tree:
        for index in range(self.num_vertices - 1):
            seg = self.segment_from_vertex_index(index)
            morphology.segments.append(seg)
        return morphology

    def segment_from_vertex_index(self, index):
        """Build a neuroml.Segment joining vertex *index* to its parent."""
        parent_index = self.connectivity[index]
        node_x = self.vertices[index][0]
        node_y = self.vertices[index][1]
        node_z = self.vertices[index][2]
        node_d = self.vertices[index][3]
        parent_x = self.vertices[parent_index][0]
        parent_y = self.vertices[parent_index][1]
        parent_z = self.vertices[parent_index][2]
        parent_d = self.vertices[parent_index][3]
        p = neuroml.Point3DWithDiam(x=node_x, y=node_y, z=node_z, diameter=node_d)
        d = neuroml.Point3DWithDiam(
            x=parent_x, y=parent_y, z=parent_z, diameter=parent_d
        )
        seg = neuroml.Segment(proximal=p, distal=d, id=index)
        # NOTE(review): only indices > 1 get a SegmentParent, so index 1
        # (a non-root vertex) is left parentless — possibly should be
        # index > 0; confirm against the root conventions.
        if index > 1:
            parent = neuroml.SegmentParent(segments=parent_index)
            seg.parent = parent
        return seg
class SegmentList(object):
    """
    This class is a proxy, it returns a segment either
    from the arraymorph or if it has already been instantiated
    it returns the relevant segment.
    """

    def __init__(self, arraymorph):
        # Backing ArrayMorphology plus a cache of already-built
        # neuroml.Segment objects keyed by segment index.
        self.arraymorph = arraymorph
        self.instantiated_segments = {}

    def __vertex_index_from_segment_index__(self, index):
        """
        The existence of a physical mask means that segment and
        and vertex indices fall out of sync. This function returns the
        index of the proximal vertex in the vertices array of the arraymorph
        which corresponds to the segment index.
        """
        physical_mask = self.arraymorph.physical_mask
        # A segment's distal vertex is the one directly after each
        # physical (non-floating) vertex, hence the +1 shift.
        segment_distal_vertex_indexes = np.where(physical_mask == False)[0] + 1
        return segment_distal_vertex_indexes[index]

    def __len__(self):
        """
        Override the __len__ magic method to give total numer of
        segments which is number of vertices - 1 and minus all
        floating segments.
        """
        num_vertices = self.arraymorph.num_vertices
        # Each truthy mask entry marks a floating (non-physical) vertex.
        num_floating = np.sum(self.arraymorph.physical_mask)
        num_segments = num_vertices - num_floating - 1
        if num_segments < 0:
            num_segments = 0
        return int(num_segments)

    def __iadd__(self, segment_list):
        # Support `segments += [...]` by appending each element.
        for segment in segment_list:
            self.append(segment)
        return self

    def __getitem__(self, segment_index):
        # Serve from the cache when possible; otherwise build the
        # segment from the backing arrays and memoize it.
        if segment_index in self.instantiated_segments:
            neuroml_segment = self.instantiated_segments[segment_index]
        else:
            vertex_index = self.__vertex_index_from_segment_index__(segment_index)
            neuroml_segment = self.arraymorph.segment_from_vertex_index(vertex_index)
            self.instantiated_segments[segment_index] = neuroml_segment
        return neuroml_segment

    def __setitem__(self, index, user_set_segment):
        # A user-provided segment shadows the array-derived one.
        self.instantiated_segments[index] = user_set_segment

    def append(self, segment):
        """
        Adds a new segment
        TODO: Correct connectivity is currently being ignored -
        The new segment is always connected to the root node.
        """
        dist_vertex_index = len(self.arraymorph.vertices)
        prox_vertex_index = dist_vertex_index + 1
        prox_x = segment.proximal.x
        prox_y = segment.proximal.y
        prox_z = segment.proximal.z
        prox_diam = segment.proximal.diameter
        dist_x = segment.distal.x
        dist_y = segment.distal.y
        dist_z = segment.distal.z
        distal_diam = segment.distal.diameter
        prox_vertex = [prox_x, prox_y, prox_z, prox_diam]
        dist_vertex = [dist_x, dist_y, dist_z, distal_diam]
        # Append the two vertices (distal first, then proximal).
        if len(self.arraymorph.vertices) > 0:
            self.arraymorph.vertices = np.append(
                self.arraymorph.vertices, [dist_vertex, prox_vertex], axis=0
            )
        else:
            self.arraymorph.vertices = np.array([dist_vertex, prox_vertex])
        # The distal vertex attaches to the root (-1, see TODO above);
        # the proximal vertex attaches to the distal one.
        self.arraymorph.connectivity = np.append(
            self.arraymorph.connectivity, [-1, dist_vertex_index]
        )
        # Mark the proximal vertex as floating (1), the distal as
        # physical (0).
        if len(self.arraymorph.physical_mask) == 0:
            self.arraymorph.physical_mask = np.array([0, 0])
        else:
            self.arraymorph.physical_mask = np.append(
                self.arraymorph.physical_mask, [1, 0]
            )
        # Cache the user's segment object under the new segment index.
        segment_index = len(self) - 1
        self.instantiated_segments[segment_index] = segment
from typing import Optional
import pytest
from chains import tasks
from chains.models import Message
from users.models import User
pytestmark = [
pytest.mark.django_db(transaction=True),
pytest.mark.freeze_time('2032-12-01 15:30'),
]
@pytest.fixture
def owl(mocker):
return mocker.patch('app.tasks.mail.TemplOwl')
@pytest.fixture
def assert_message_is_sent(owl, study):
def _assert(message: Message, to: Optional[User] = None, reset: Optional[bool] = True):
student = to or study.student
owl.assert_any_call(
to=student.email,
subject='',
disable_antispam=False,
template_id=message.template_id,
ctx={
'firstname': student.first_name,
'lastname': student.last_name,
},
)
if reset:
owl.reset_mock()
return _assert
@pytest.fixture
def assert_nothing_is_sent(owl):
return owl.assert_not_called
@pytest.fixture
def another_order(factory, course, another_user):
return factory.order(user=another_user, item=course, is_paid=True)
@pytest.fixture
def another_study(another_order):
return another_order.study
def test(study, parent_message, message, assert_message_is_sent, freezer):
tasks.send_active_chains()
assert_message_is_sent(parent_message) # root message is sent for the first time
freezer.move_to('2032-12-01 15:40') # 10 minutes forward
tasks.send_active_chains()
assert_message_is_sent(message) # second message is sent
def test_two_users(parent_message, message, assert_message_is_sent, freezer, study, another_study):
tasks.send_active_chains()
assert_message_is_sent(parent_message, to=study.student, reset=False)
assert_message_is_sent(parent_message, to=another_study.student)
freezer.move_to('2032-12-01 15:40') # 10 minutes forward
tasks.send_active_chains()
assert_message_is_sent(message, to=study.student, reset=False)
assert_message_is_sent(message, to=another_study.student)
def test_second_message_is_not_sent_when_it_is_too_early(study, parent_message, message, assert_message_is_sent, assert_nothing_is_sent):
tasks.send_active_chains()
assert_message_is_sent(parent_message) # root message is sent for the first time
tasks.send_active_chains()
assert_nothing_is_sent() # nothing should be sent right after that, cuz time has not come
def test_message_is_not_sent_when_study_model_disappeares_during_learning(study, parent_message, assert_message_is_sent, assert_nothing_is_sent, freezer, order):
tasks.send_active_chains()
assert_message_is_sent(parent_message) # root message is sent for the first time
freezer.move_to('2032-12-01 15:40') # 10 minutes forward
order.unship()
tasks.send_active_chains()
assert_nothing_is_sent() # nothing should be sent cuz student has canceled learning
def test_message_is_not_sent_when_sending_is_disabled(study, parent_message, assert_nothing_is_sent, chain):
chain.sending_is_active = False
chain.save()
tasks.send_active_chains()
assert_nothing_is_sent() | src/chains/tests/chain_sender/test_chain_sender_integrational.py | from typing import Optional
import pytest
from chains import tasks
from chains.models import Message
from users.models import User
pytestmark = [
pytest.mark.django_db(transaction=True),
pytest.mark.freeze_time('2032-12-01 15:30'),
]
@pytest.fixture
def owl(mocker):
return mocker.patch('app.tasks.mail.TemplOwl')
@pytest.fixture
def assert_message_is_sent(owl, study):
def _assert(message: Message, to: Optional[User] = None, reset: Optional[bool] = True):
student = to or study.student
owl.assert_any_call(
to=student.email,
subject='',
disable_antispam=False,
template_id=message.template_id,
ctx={
'firstname': student.first_name,
'lastname': student.last_name,
},
)
if reset:
owl.reset_mock()
return _assert
@pytest.fixture
def assert_nothing_is_sent(owl):
return owl.assert_not_called
@pytest.fixture
def another_order(factory, course, another_user):
return factory.order(user=another_user, item=course, is_paid=True)
@pytest.fixture
def another_study(another_order):
return another_order.study
def test(study, parent_message, message, assert_message_is_sent, freezer):
tasks.send_active_chains()
assert_message_is_sent(parent_message) # root message is sent for the first time
freezer.move_to('2032-12-01 15:40') # 10 minutes forward
tasks.send_active_chains()
assert_message_is_sent(message) # second message is sent
def test_two_users(parent_message, message, assert_message_is_sent, freezer, study, another_study):
tasks.send_active_chains()
assert_message_is_sent(parent_message, to=study.student, reset=False)
assert_message_is_sent(parent_message, to=another_study.student)
freezer.move_to('2032-12-01 15:40') # 10 minutes forward
tasks.send_active_chains()
assert_message_is_sent(message, to=study.student, reset=False)
assert_message_is_sent(message, to=another_study.student)
def test_second_message_is_not_sent_when_it_is_too_early(study, parent_message, message, assert_message_is_sent, assert_nothing_is_sent):
tasks.send_active_chains()
assert_message_is_sent(parent_message) # root message is sent for the first time
tasks.send_active_chains()
assert_nothing_is_sent() # nothing should be sent right after that, cuz time has not come
def test_message_is_not_sent_when_study_model_disappeares_during_learning(study, parent_message, assert_message_is_sent, assert_nothing_is_sent, freezer, order):
tasks.send_active_chains()
assert_message_is_sent(parent_message) # root message is sent for the first time
freezer.move_to('2032-12-01 15:40') # 10 minutes forward
order.unship()
tasks.send_active_chains()
assert_nothing_is_sent() # nothing should be sent cuz student has canceled learning
def test_message_is_not_sent_when_sending_is_disabled(study, parent_message, assert_nothing_is_sent, chain):
chain.sending_is_active = False
chain.save()
tasks.send_active_chains()
assert_nothing_is_sent() | 0.876667 | 0.613208 |
class Collection:
"""
A class to abstract the common functionalities of Stack and Queue.
This class should not be initialized directly.
"""
def __init__(self):
""" Constructor. """
self.items = []
self.num_items = 0
def size(self):
""" Get the number of items stored. """
return self.num_items
def is_empty(self):
""" Check whether the collection is empty. """
if self.size() == 0:
return True
return False
def clear(self):
""" Remove all items in the collection. """
self.items = []
self.num_items = 0
# Question 1.2
class Stack(Collection):
"""
Stack class.
>>> stk = Stack()
>>> stk.size()
0
>>> stk.is_empty()
True
>>> str(stk)
'(bottom) (top)'
>>> stk.push(None)
Traceback (most recent call last):
...
ValueError: item cannot be None
>>> stk.push('LAB 10')
>>> stk.size()
1
>>> stk.is_empty()
False
>>> stk.push('DSC')
>>> stk.push(20)
>>> stk.size()
3
>>> str(stk)
'(bottom) LAB 10 -- DSC -- 20 (top)'
>>> stk.pop()
20
>>> stk.pop()
'DSC'
>>> stk.peek()
'LAB 10'
>>> stk.size()
1
>>> stk.clear()
>>> stk.pop()
>>> stk.peek()
"""
def push(self, item):
""" Push `item` to the stack. """
if item == None:
raise ValueError('item cannot be None')
self.items.append(item)
self.num_items += 1
def pop(self):
""" Pop the top item from the stack. """
if self.size() == 0:
return None
item = self.items[self.size() - 1]
self.items.pop(self.size() - 1)
self.num_items -= 1
return item
def peek(self):
""" Peek the top item. """
if self.size() == 0:
return None
return self.items[self.size() - 1]
def __str__(self):
""" Return the string representation of the stack. """
string = '(bottom) '
for val, item in enumerate(self.items):
if val == self.size() - 1:
string += str(item) + ' '
else:
string += str(item) + ' -- '
return string + '(top)'
# Question 1.3
class Queue(Collection):
"""
Queue class.
>>> que = Queue()
>>> que.size()
0
>>> que.is_empty()
True
>>> str(que)
'(front) (rear)'
>>> que.enqueue(None)
Traceback (most recent call last):
...
ValueError: item cannot be None
>>> que.enqueue('LAB 10')
>>> que.size()
1
>>> que.is_empty()
False
>>> que.enqueue('DSC')
>>> que.enqueue(20)
>>> que.size()
3
>>> str(que)
'(front) LAB 10 -- DSC -- 20 (rear)'
>>> que.dequeue()
'LAB 10'
>>> que.dequeue()
'DSC'
>>> que.peek()
20
>>> que.size()
1
>>> que.clear()
>>> que.dequeue()
>>> que.peek()
"""
def enqueue(self, item):
""" Enqueue `item` to the queue. """
if item == None:
raise ValueError('item cannot be None')
self.items.append(item)
self.num_items += 1
def dequeue(self):
""" Dequeue the front item from the queue. """
if self.size() == 0:
return None
item = self.items[0]
self.items.pop(0)
self.num_items -= 1
return item
def peek(self):
""" Peek the front item. """
if self.size() == 0:
return None
return self.items[0]
def __str__(self):
""" Return the string representation of the queue. """
string = '(front) '
for val, item in enumerate(self.items):
if val == self.size() - 1:
string += str(item) + ' '
else:
string += str(item) + ' -- '
return string + '(rear)' | lab/lab10.py | class Collection:
"""
A class to abstract the common functionalities of Stack and Queue.
This class should not be initialized directly.
"""
def __init__(self):
""" Constructor. """
self.items = []
self.num_items = 0
def size(self):
""" Get the number of items stored. """
return self.num_items
def is_empty(self):
""" Check whether the collection is empty. """
if self.size() == 0:
return True
return False
def clear(self):
""" Remove all items in the collection. """
self.items = []
self.num_items = 0
# Question 1.2
class Stack(Collection):
"""
Stack class.
>>> stk = Stack()
>>> stk.size()
0
>>> stk.is_empty()
True
>>> str(stk)
'(bottom) (top)'
>>> stk.push(None)
Traceback (most recent call last):
...
ValueError: item cannot be None
>>> stk.push('LAB 10')
>>> stk.size()
1
>>> stk.is_empty()
False
>>> stk.push('DSC')
>>> stk.push(20)
>>> stk.size()
3
>>> str(stk)
'(bottom) LAB 10 -- DSC -- 20 (top)'
>>> stk.pop()
20
>>> stk.pop()
'DSC'
>>> stk.peek()
'LAB 10'
>>> stk.size()
1
>>> stk.clear()
>>> stk.pop()
>>> stk.peek()
"""
def push(self, item):
""" Push `item` to the stack. """
if item == None:
raise ValueError('item cannot be None')
self.items.append(item)
self.num_items += 1
def pop(self):
""" Pop the top item from the stack. """
if self.size() == 0:
return None
item = self.items[self.size() - 1]
self.items.pop(self.size() - 1)
self.num_items -= 1
return item
def peek(self):
""" Peek the top item. """
if self.size() == 0:
return None
return self.items[self.size() - 1]
def __str__(self):
""" Return the string representation of the stack. """
string = '(bottom) '
for val, item in enumerate(self.items):
if val == self.size() - 1:
string += str(item) + ' '
else:
string += str(item) + ' -- '
return string + '(top)'
# Question 1.3
class Queue(Collection):
"""
Queue class.
>>> que = Queue()
>>> que.size()
0
>>> que.is_empty()
True
>>> str(que)
'(front) (rear)'
>>> que.enqueue(None)
Traceback (most recent call last):
...
ValueError: item cannot be None
>>> que.enqueue('LAB 10')
>>> que.size()
1
>>> que.is_empty()
False
>>> que.enqueue('DSC')
>>> que.enqueue(20)
>>> que.size()
3
>>> str(que)
'(front) LAB 10 -- DSC -- 20 (rear)'
>>> que.dequeue()
'LAB 10'
>>> que.dequeue()
'DSC'
>>> que.peek()
20
>>> que.size()
1
>>> que.clear()
>>> que.dequeue()
>>> que.peek()
"""
def enqueue(self, item):
""" Enqueue `item` to the queue. """
if item == None:
raise ValueError('item cannot be None')
self.items.append(item)
self.num_items += 1
def dequeue(self):
""" Dequeue the front item from the queue. """
if self.size() == 0:
return None
item = self.items[0]
self.items.pop(0)
self.num_items -= 1
return item
def peek(self):
""" Peek the front item. """
if self.size() == 0:
return None
return self.items[0]
def __str__(self):
""" Return the string representation of the queue. """
string = '(front) '
for val, item in enumerate(self.items):
if val == self.size() - 1:
string += str(item) + ' '
else:
string += str(item) + ' -- '
return string + '(rear)' | 0.748995 | 0.423041 |
from VyPy.data import HashedDict
import pickle
from copy import deepcopy
from time import time, sleep
import numpy as np
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# --------------------------------------------------------
# Initialize
# --------------------------------------------------------
cache = HashedDict()
# --------------------------------------------------------
# Load up data
# --------------------------------------------------------
cache['a'] = 1 # normal dictionary keys are strings
cache[[1,2,3]] = 2 # HashedDict accepts lists for example
cache[[1,2,5]] = 5
funny_key = object()
cache[[6,2,5]] = HashedDict() # sub-dictionary
cache[[6,2,5]][funny_key] = 77
# --------------------------------------------------------
# Printing
# --------------------------------------------------------
print '>>> print cache'
print cache
print '>>> print cache[[1,2,3]]'
print cache[[1,2,3]]
print ''
print '>>> print cache[(1,2,3)]'
print cache[(1,2,3)]
print ''
print 'should be True:' , cache.has_key([1,2,3])
assert cache.has_key([1,2,3])
print 'should be True:' , [1,2,3] in cache
assert [1,2,3] in cache
del cache[[1,2,3]]
print 'should be False:' , cache.has_key([1,2,3])
assert not cache.has_key([1,2,3])
print ''
# --------------------------------------------------------
# Pickling test
# --------------------------------------------------------
print '>>> pickle.dumps()'
d = pickle.dumps(cache)
print '>>> pickle.loads()'
p = pickle.loads(d)
print ''
print '>>> print p'
print p
print 'should be True:' , [1,2,5] in p
assert [1,2,5] in p
# beware after pickling some objects...
print 'should be False:' , funny_key in p[[6,2,5]]
assert not funny_key in p[[6,2,5]]
print ''
# --------------------------------------------------------
# Access Speed test
# --------------------------------------------------------
print 'Access speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e5)):
v = cache[[6,2,5]][funny_key]
t1 = time()-t0
# a test dictionary
z = dict()
z['t'] = dict()
z['t']['i'] = 0
# accessing a normal dictionary
t0 = time()
for i in range(int(1e5)):
v = z['t']['i']
t2 = time()-t0
# results
print 'HashedDict: %.6f s' % (t1)
print 'dict: %.6f s' % (t2)
assert (t1-t2)/t2 < 60.0
print ''
# --------------------------------------------------------
# Assignment Speed test
# --------------------------------------------------------
print 'Assignment speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e5)):
v = cache[[6,2,5]][funny_key] = 10
t1 = time()-t0
# accessing a normal dictionary
t0 = time()
for i in range(int(1e5)):
z['t']['i'] = 10
t2 = time()-t0
# results
print 'HashedDict: %.6f s' % (t1)
print 'dict: %.6f s' % (t2)
assert (t1-t2)/t2 < 60.0
print ''
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main() | tests/data/hashed_dict.py |
from VyPy.data import HashedDict
import pickle
from copy import deepcopy
from time import time, sleep
import numpy as np
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# --------------------------------------------------------
# Initialize
# --------------------------------------------------------
cache = HashedDict()
# --------------------------------------------------------
# Load up data
# --------------------------------------------------------
cache['a'] = 1 # normal dictionary keys are strings
cache[[1,2,3]] = 2 # HashedDict accepts lists for example
cache[[1,2,5]] = 5
funny_key = object()
cache[[6,2,5]] = HashedDict() # sub-dictionary
cache[[6,2,5]][funny_key] = 77
# --------------------------------------------------------
# Printing
# --------------------------------------------------------
print '>>> print cache'
print cache
print '>>> print cache[[1,2,3]]'
print cache[[1,2,3]]
print ''
print '>>> print cache[(1,2,3)]'
print cache[(1,2,3)]
print ''
print 'should be True:' , cache.has_key([1,2,3])
assert cache.has_key([1,2,3])
print 'should be True:' , [1,2,3] in cache
assert [1,2,3] in cache
del cache[[1,2,3]]
print 'should be False:' , cache.has_key([1,2,3])
assert not cache.has_key([1,2,3])
print ''
# --------------------------------------------------------
# Pickling test
# --------------------------------------------------------
print '>>> pickle.dumps()'
d = pickle.dumps(cache)
print '>>> pickle.loads()'
p = pickle.loads(d)
print ''
print '>>> print p'
print p
print 'should be True:' , [1,2,5] in p
assert [1,2,5] in p
# beware after pickling some objects...
print 'should be False:' , funny_key in p[[6,2,5]]
assert not funny_key in p[[6,2,5]]
print ''
# --------------------------------------------------------
# Access Speed test
# --------------------------------------------------------
print 'Access speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e5)):
v = cache[[6,2,5]][funny_key]
t1 = time()-t0
# a test dictionary
z = dict()
z['t'] = dict()
z['t']['i'] = 0
# accessing a normal dictionary
t0 = time()
for i in range(int(1e5)):
v = z['t']['i']
t2 = time()-t0
# results
print 'HashedDict: %.6f s' % (t1)
print 'dict: %.6f s' % (t2)
assert (t1-t2)/t2 < 60.0
print ''
# --------------------------------------------------------
# Assignment Speed test
# --------------------------------------------------------
print 'Assignment speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e5)):
v = cache[[6,2,5]][funny_key] = 10
t1 = time()-t0
# accessing a normal dictionary
t0 = time()
for i in range(int(1e5)):
z['t']['i'] = 10
t2 = time()-t0
# results
print 'HashedDict: %.6f s' % (t1)
print 'dict: %.6f s' % (t2)
assert (t1-t2)/t2 < 60.0
print ''
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main() | 0.123855 | 0.156362 |
import time
import tensorflow as tf
from model import CNN_Encoder, RNN_Decoder, FeatureExtraction
from tokenization import load_tokenizer, TOP_K
tf.get_logger().setLevel('INFO')
def load_latest_imgcap(checkpoint_path, ckpt_index=-1):
embedding_dim = 256
units = 512
vocab_size = TOP_K + 1
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.keras.optimizers.Adam()
ckpt = tf.train.Checkpoint(encoder=encoder, decoder=decoder, optimizer=optimizer)
ckpt_man = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=None)
ckpt.restore(ckpt_man.checkpoints[ckpt_index])
return encoder, decoder
def formatt_result(result: list):
if '<end>' in result:
result.remove('<end>')
result.append('.')
return ' '.join(result)
def inference(image, models, random_seed=None):
feature_extractor, tokenizer, max_length, encoder, decoder = models
hidden = decoder.reset_state(batch_size=1)
img_batch = tf.expand_dims(FeatureExtraction.load_image_InceptionV3(image), 0)
img_batch = feature_extractor(img_batch)
img_batch = tf.reshape(img_batch, (img_batch.shape[0], -1, img_batch.shape[3]))
features = encoder(img_batch)
dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
result = []
for i in range(max_length):
predictions, hidden, _ = decoder(dec_input, features, hidden)
predicted_id = None
if random_seed:
predicted_id = tf.random.categorical(predictions, 1, seed=random_seed)[0][0].numpy()
else:
predicted_id = tf.argmax(predictions, 1)[0].numpy()
result.append(tokenizer.index_word[predicted_id])
if tokenizer.index_word[predicted_id] == '<end>':
return formatt_result(result)
dec_input = tf.expand_dims([predicted_id], 0)
return formatt_result(result)
if '__main__' == __name__:
image_file = 'surf.jpg'
annotation_file='./annotations/captions_train2014.json'
checkpoint_path='./checkpoints/train/'
ts = time.time()
feature_extractor = FeatureExtraction.build_model_InceptionV3()
tokenizer, max_length = load_tokenizer(annotation_file)
encoder, decoder = load_latest_imgcap(checkpoint_path)
te = time.time()
load_model_time = te - ts
models = [feature_extractor,
tokenizer, max_length,
encoder, decoder]
ts = time.time()
print(inference(image_file, models))
te = time.time()
inference_time = te - ts
print(f'Loading models takes {load_model_time} seconds')
print(f'Inference takes {inference_time} seconds') | inf.py | import time
import tensorflow as tf
from model import CNN_Encoder, RNN_Decoder, FeatureExtraction
from tokenization import load_tokenizer, TOP_K
tf.get_logger().setLevel('INFO')
def load_latest_imgcap(checkpoint_path, ckpt_index=-1):
embedding_dim = 256
units = 512
vocab_size = TOP_K + 1
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.keras.optimizers.Adam()
ckpt = tf.train.Checkpoint(encoder=encoder, decoder=decoder, optimizer=optimizer)
ckpt_man = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=None)
ckpt.restore(ckpt_man.checkpoints[ckpt_index])
return encoder, decoder
def formatt_result(result: list):
if '<end>' in result:
result.remove('<end>')
result.append('.')
return ' '.join(result)
def inference(image, models, random_seed=None):
feature_extractor, tokenizer, max_length, encoder, decoder = models
hidden = decoder.reset_state(batch_size=1)
img_batch = tf.expand_dims(FeatureExtraction.load_image_InceptionV3(image), 0)
img_batch = feature_extractor(img_batch)
img_batch = tf.reshape(img_batch, (img_batch.shape[0], -1, img_batch.shape[3]))
features = encoder(img_batch)
dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
result = []
for i in range(max_length):
predictions, hidden, _ = decoder(dec_input, features, hidden)
predicted_id = None
if random_seed:
predicted_id = tf.random.categorical(predictions, 1, seed=random_seed)[0][0].numpy()
else:
predicted_id = tf.argmax(predictions, 1)[0].numpy()
result.append(tokenizer.index_word[predicted_id])
if tokenizer.index_word[predicted_id] == '<end>':
return formatt_result(result)
dec_input = tf.expand_dims([predicted_id], 0)
return formatt_result(result)
if '__main__' == __name__:
image_file = 'surf.jpg'
annotation_file='./annotations/captions_train2014.json'
checkpoint_path='./checkpoints/train/'
ts = time.time()
feature_extractor = FeatureExtraction.build_model_InceptionV3()
tokenizer, max_length = load_tokenizer(annotation_file)
encoder, decoder = load_latest_imgcap(checkpoint_path)
te = time.time()
load_model_time = te - ts
models = [feature_extractor,
tokenizer, max_length,
encoder, decoder]
ts = time.time()
print(inference(image_file, models))
te = time.time()
inference_time = te - ts
print(f'Loading models takes {load_model_time} seconds')
print(f'Inference takes {inference_time} seconds') | 0.743168 | 0.283521 |
import aiosqlite
import discord
from datetime import datetime
import sqlite3
import math
from init import sourceDb, guild_ids
from database import Utilisateur, Quiz, Instance, Reponse, Statistiques
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option
from discord.ext import commands
import asyncio
import time
from utils import createEmbed, quizEmbed, recapEmbed
class Commandes(commands.Cog):
def __init__(self, client):
self.client = client
@cog_ext.cog_slash(name="addquestion",
guild_ids=guild_ids,
description="Ajoute une question à un quiz existant si spécifié ou créé un nouveau quiz pour la question.",
options=[
create_option(
name="titre",
description="Titre de la question",
option_type=3,
required=True
),
create_option(
name="reponse1",
description="Première reponse possible",
option_type=3,
required=True
),
create_option(
name="reponse2",
description="Deuxième reponse possible",
option_type=3,
required=True
),
create_option(
name="reponse3",
description="Troisième reponse possible",
option_type=3,
required=False
),
create_option(
name="reponse4",
description="Quatrième reponse possible",
option_type=3,
required=False
),
create_option(
name="idquiz",
description="Identifiant du quiz auquel on rajoute la question",
option_type=4,
required=False
)
])
async def addquestion(self, ctx, titre: str, reponse1: str, reponse2: str, reponse3: str = None, reponse4: str = None, idquiz: int = None):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
reponses = [reponse for reponse in [reponse1, reponse2, reponse3, reponse4] if reponse is not None and type(reponse) == str]
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
embed = discord.Embed(title=":pencil: Récapitulatif de la question :pencil:", colour=discord.Colour(0x42a010), description="\u200b", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="En cours de création", icon_url=ctx.author.avatar_url)
embed.set_footer(text="Appuyer sur ❌ pour annuler la question", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
embed.add_field(name=":book: __La Question__:", value=f"**“ {titre} ”**", inline=False)
embed.add_field(name=":white_check_mark: __Les reponses possibles__:", value="\u200b", inline=False)
for i, reponse in enumerate(reponses):
embed.add_field(name=keycaps[i] + " - " + str(reponse), value="\u200b", inline=False)
message = await ctx.send(embed=embed)
for i, reponse in enumerate(reponses):
await message.add_reaction(keycaps[i])
await message.add_reaction('❌')
try:
reaction, user = await self.client.wait_for('reaction_add', timeout = 15.0, check = lambda reaction, user: user.id == ctx.author.id and reaction.message.id == message.id and (str(reaction.emoji) in keycaps or str(reaction.emoji) == '❌'))
await message.clear_reactions()
if str(reaction.emoji) == '❌':
await message.edit(embed=await createEmbed("annulé", ctx))
elif str(reaction.emoji) in keycaps:
estValide = [1 if keycaps[i] == reaction.emoji else 0 for i, reponse in enumerate(reponses)]
if idquiz is None:
quiz = await Quiz.create(titre, 10, ctx.author.id, db)
question = await quiz.addQuestion(titre)
for i, reponse in enumerate(reponses):
await question.addChoix(reponse, estValide[i])
bonneRéponse = await question.getBonneReponse()
await message.edit(embed=await createEmbed("success",ctx, quiz,question,bonneRéponse))
else:
quiz = await Quiz.get(idquiz, db)
if quiz:
creator = await quiz.getCreator(ctx.guild.id)
if await creator.getIdDiscord() != ctx.author.id:
await message.edit(embed=await createEmbed("creator", ctx))
else:
if await quiz.getNbQuestions() >= 4:
await message.edit(embed=await createEmbed("maxQuestions", ctx))
else:
question = await quiz.addQuestion(titre)
for i, reponse in enumerate(reponses):
await question.addChoix(reponse, estValide[i])
bonneRéponse = await question.getBonneReponse()
await message.edit(embed=await createEmbed("success", ctx, quiz,question,bonneRéponse))
else:
await message.edit(embed=await createEmbed("incorrecte", ctx))
except asyncio.TimeoutError:
await ctx.send("<a:error:804691277010567189> Tu n'as pas spécifié la bonne reponse, la question a été annulée")
await message.edit(embed=await createEmbed("annulé", ctx))
except Exception as e:
print(f"[ ERROR ] Sur /addquestion: {e}")
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une <NAME> survenue", icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="<NAME>", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="createquiz",
guild_ids=guild_ids,
description="Permet de créer un nouveau quiz. N'oubliez pas d'ajouter des questions avec /addQuestion",
options=[
create_option(
name="titre",
description="Titre du quiz",
option_type=3,
required=True
),
create_option(
name="points",
description="Nombre de points que vaut le quiz",
option_type=4,
required=False
)
])
async def createquiz(self, ctx, titre: str, points: int = 10):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
points = max(min(points, 100),1)
quiz = await Quiz.create(titre, points, ctx.author.id, db)
await ctx.send(embed= await createEmbed("createQuiz", ctx, quiz), hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="leaderboard",
guild_ids=guild_ids,
description="Permet d'afficher le classement des meilleurs joueurs en termes de points.")
async def leaderboard(self, ctx):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
user = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
stats = await user.getStatistiques()
position = await user.getCurrentPosition()
points = round(await stats.getScoreTotal(), 2)
embed = discord.Embed(title=":trophy: Voici le top 10 des meilleurs joueurs :trophy:", colour=discord.Colour(0x42a010), description="*Classé en termes de points totaux sur le serveur*", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Votre place: " + (str(position) + 'er' if position == 1 else str(position) +'ème'), icon_url=ctx.author.avatar_url)
embed.set_footer(text=f"Vous avez {points} points", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
leaderboard = await Statistiques.getLeaderboard(ctx.guild.id, db)
for i, ranker in enumerate(leaderboard):
embed.add_field(name=keycaps[i] + " - " + str(await ranker[0].getName()), value=str(round(await ranker[1].getScoreTotal(), 2)) + " points", inline=False)
await ctx.send(embed = embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="getquizs",
guild_ids=guild_ids,
description="Permet de récupérers tout les quizs disponibles sur la base de données.",
options=[
create_option(
name="personal",
description="Limiter la recherche des quizs à ceux que vous avez créés.",
option_type=5,
required=False
)])
async def getquizs(self, ctx, personal: bool = True):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
utilisateur = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
if personal:
quizCount = await Quiz.getCount(db, ctx.author.id)
else:
quizCount = await Quiz.getCount(db)
pages = math.ceil(quizCount/10)
page = 1
offset = 0
reaction = None
embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, 1, pages)
message = await ctx.send(embed=embed)
if page < pages:
await message.add_reaction('▶')
try:
while True:
if str(reaction) == '◀' and page > 1:
page -= 1
offset -= 10
if page == 1:
await message.remove_reaction('◀', self.client.user)
if page == pages-1:
await message.add_reaction('▶')
embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, page, pages, offset)
await message.edit(embed=embed)
elif str(reaction) == '▶' and page < pages:
page += 1
offset += 10
if page == pages:
await message.remove_reaction('▶', self.client.user)
if page == 2:
await message.remove_reaction('▶', self.client.user)
await message.add_reaction('◀')
await message.add_reaction('▶')
embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, page, pages, offset)
await message.edit(embed=embed)
try:
reaction, discordUser = await self.client.wait_for('reaction_add', timeout = 10.0, check = lambda reaction, discordUser: discordUser.id == ctx.author.id and reaction.message.id == message.id and str(reaction.emoji) in ['◀', '▶'])
await message.remove_reaction(reaction, discordUser)
except asyncio.TimeoutError:
await message.clear_reactions()
break
except Exception as e:
print(f"[ ERROR ] Sur /getquizs: {e}")
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="getresults",
guild_ids=guild_ids,
description="Permet de récuperer la moyenne et le classement d'une game.",
options=[
create_option(
name="id_game",
description="L'identifiant unique de la game.",
option_type=4,
required=True
)])
async def getresults(self, ctx, id_game: int):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
game = await Instance.get(id_game, db)
if game:
if await game.getDateFin():
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
moyenne, nbPoints = await game.getMoyenne(False, True)
quiz = await game.getQuiz()
nbQuestions = await quiz.getNbQuestions()
pointsParQ = await quiz.getPoints()*await game.getMultiplicateur()/nbQuestions
classement = await game.getClassement()
reponseTrie = await game.getReponsesTrie()
dateDébut = await game.getDateDeb(True)
DateFin = await game.getDateFin(True)
embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {id_game} du Quiz: " + await quiz.getTitre() , colour=discord.Colour(0x42a010), description=f"La moyenne pour cette instance de quiz est de: **{round(moyenne,2)}/{nbPoints}**", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Nombre de participants: " + str(await game.getNbParticipants()), icon_url=ctx.author.avatar_url)
embed.set_footer(text=f"Vous pouvez utilisez /viewResult {id_game} pour voir votre résultat", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
if len(reponseTrie) > 1:
mieuxReussi = reponseTrie[0]
moinsReussi = reponseTrie[-1]
embed.add_field(name=":white_check_mark: Question la mieux réussi:", value='**' + await mieuxReussi[0].getTitre() + "** avec " + str(mieuxReussi[1]) + " bonnes réponses", inline=False)
embed.add_field(name=":negative_squared_cross_mark: Question la moins réussi:", value='**' + await moinsReussi[0].getTitre() + "** avec " + str(moinsReussi[1]) + " bonnes réponses", inline=False)
embed.add_field(name=":calendar: Date de la game", value=f"Début : {dateDébut}\nFin: " + DateFin if DateFin else "Le quiz n'est pas terminé", inline=False)
embed.add_field(name=":trophy: Classement des 10 meilleurs participants", value="\u200b", inline=False)
for i, (ranker, nbBnReponse) in enumerate(classement):
points = nbBnReponse*pointsParQ
embed.add_field(name=keycaps[i] + " - " + str(await ranker.getName()), value=f"{nbBnReponse}/{nbQuestions} bonnes réponses. Soit {round(points,2)} points.", inline=False)
await ctx.send(embed = embed, hidden = True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Veuillez attendre la fin de la partie d'id {id_game}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden = True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun résultat n'a été trouvé pour une instance d'id {id_game}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="viewresult",
guild_ids=guild_ids,
description="Permet de récuperer votre résultat pour une game.",
options=[
create_option(
name="id_game",
description="L'identifiant unique de la game.",
option_type=4,
required=True
)])
async def viewresult(self, ctx, id_game: int):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
user = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
resultats = await user.getResultats(id_game)
instance = await Instance.get(id_game, db)
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
if resultats and instance:
if await instance.getDateFin():
quiz = await instance.getQuiz()
nbQuestions = await quiz.getNbQuestions()
pointsParQ = await quiz.getPoints()*await instance.getMultiplicateur()/nbQuestions
nbBnReponse = await instance.getNbCorrectes(ctx.author.id)
points = nbBnReponse*pointsParQ
moyenne, nbPoints = await instance.getMoyenne(False, True)
classement = await user.getCurrentPosition(id_game)
embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {id_game} du Quiz: " + await quiz.getTitre() , colour=discord.Colour(0x42a010), description=f"Vous avez eu **{nbBnReponse}/{nbQuestions}** bonnes réponses", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Nombre de participants: " + str(await instance.getNbParticipants()), icon_url=ctx.author.avatar_url)
embed.set_footer(text=f"La moyenne est de {round(moyenne,2)}/{nbPoints}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
embed.add_field(name=":trophy: Classement:", value=f"Vous êtes **{classement}" + ("er" if classement == 1 else "ème") + f"** du classement avec un total de **{round(points, 2)} points** *(sur {nbPoints})*\n", inline=False)
embed.add_field(name=":pencil: Récapitulatif des questions:", value="\u200b", inline=False)
for i, (question, estCorrecte, choix) in enumerate(resultats):
bonneReponse = await question.getBonneReponse()
titre = await bonneReponse.getTitre()
if choix:
titreChoix = await choix.getTitre()
else:
titreChoix = "Vous n'avez pas répondu à cette question"
embed.add_field(name=keycaps[i] + " - " + await question.getTitre(), value=f"⠀⠀⠀:ballot_box_with_check: **Réponse attendue:** {titre}\n⠀⠀⠀" + (":white_check_mark:" if estCorrecte else (":negative_squared_cross_mark:" if choix else ":x:")) + f" **Votre réponse: ** {titreChoix}", inline=False)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Veuillez attendre la fin de la partie d'id {id_game}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun résultat n'a été trouvé pour votre compte sur l'instance d'id {id_game}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="launchquiz",
description="Commande pour lancer une game d'un quiz !",
options=[
create_option(
name="idquiz",
description="L'identifiant du quiz a lancer. Utilisez la commande /getquizs pour retrouver les identifiants.",
option_type=4,
required=True
),
create_option(
name="durée_attente",
description="La durée (en secondes) que le bot attendera pour des réactions avant de lancer la game.",
option_type=4,
required=False
),
create_option(
name="durée_réponse",
description="La durée (en secondes) que possédera un participant pour chaque question.",
option_type=4,
required=False
),
create_option(
name="multiplicateur",
description="Tel un coefficient, vient multiplier le nombre de points d'un quiz par le multiplicateur.",
option_type=4,
required=False
)],
guild_ids=guild_ids)
async def launchquiz(self, ctx, idquiz: int, durée_attente: int = 30, durée_réponse: int = 30, multiplicateur: int = 1):
try:
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
durée_attente = max(min(durée_attente, 36000), 30)
durée_réponse = max(min(durée_réponse, 3600), 15)
multiplicateur = max(min(multiplicateur, 100), 1)
quiz = await Quiz.get(idquiz, db)
if quiz:
quizQuestions = await quiz.getNbQuestions()
if quizQuestions > 0:
createur = await quiz.getCreator(ctx.guild.id)
createurId = await createur.getIdDiscord()
creatorNom = await createur.getName()
creator = discord.utils.get(self.client.get_all_members(), id=createurId)
quizName = await quiz.getTitre()
quizPoints = await quiz.getPoints()*multiplicateur
embed = discord.Embed(title=f":books: Participation au quiz : {quizName}", description=f"Une game du quiz **{quizName}** va bientôt commencer.", color=0x50E3C2, timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name=f"Game lancée par {ctx.author.name}#{ctx.author.discriminator}", icon_url= ctx.author.avatar_url)
embed.add_field(name=":information_source: - Informations", value=f'Le quiz contient **{quizQuestions}** question(s) pour un total de **{quizPoints}** point(s).', inline=False)
embed.add_field(name=":ballot_box: - Comment participer", value="Appuyer sur la réaction :ballot_box: pour participer, une fois le temps d'attente écoulé un channel privé vous sera généré")
embed.add_field(name=":alarm_clock: - Temps", value=f'Vous avez **{time.strftime("%H heures %M minutes et %S secondes" if durée_attente >= 3600 else ("%M minutes et %S secondes" if durée_attente >= 120 else ("%M minute et %S secondes" if durée_attente >= 60 else "%S secondes")), time.gmtime(durée_attente))}** avant le lancement du test.', inline=False)
embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
message = await ctx.send(embed=embed)
await message.add_reaction(emoji="🗳️")
await asyncio.sleep(durée_attente)
message = await ctx.channel.fetch_message(message.id)
reaction = [reaction for reaction in message.reactions if reaction.emoji == "🗳️"][0]
users = await reaction.users().flatten()
await message.clear_reactions()
if len(users) > 1:
instance = await Instance.create(idquiz, db, ctx.guild.id, multiplicateur)
if not instance:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- La création de l'instance a échoué```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
else:
idInst = await instance.getIdInst()
embed = discord.Embed(title=f":books: Participation au quiz : {quizName}", description="", color=0xff4c5b, timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name=f"Game lancée par {ctx.author.name}#{ctx.author.discriminator}", icon_url= ctx.author.avatar_url)
embed.add_field(name=":lock: - Le quiz est maintenant fermé", value="Le temps d'attente est écoulé. Le quiz est maintenant lancé.\nCherchez un channel à votre nom dans les channels du serveur et répondez aux questions à l'aide des reactions à l'intérieur de celui-ci.")
embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
await message.edit(embed=embed)
newCat = await ctx.guild.create_category(name=quizName)
embed = discord.Embed(title="Le quiz va bientôt commencer!", colour=discord.Colour(0x4A90E2), description="Encore quelques instants. Le bot ouvre les channels aux participants...", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
tasks = []
for user in users:
if not user.bot:
overwrites = {ctx.guild.default_role: discord.PermissionOverwrite(read_messages = False),
user: discord.PermissionOverwrite(read_messages = True)}
channel = await newCat.create_text_channel(name=f"{user.name}-{user.discriminator}", overwrites=overwrites)
answerMessage = await channel.send(user.mention, embed=embed)
tasks.append(self.envoyerQuestion(channel, instance, quiz, creator, user, answerMessage, durée_réponse))
await asyncio.gather(*tasks)
await instance.setDateFin()
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
moyenne, nbPoints = await instance.getMoyenne(False, True)
pointsParQ = quizPoints/quizQuestions
classement = await instance.getClassement()
reponseTrie = await instance.getReponsesTrie()
dateDébut = await instance.getDateDeb(True)
dateFin = await instance.getDateFin(True)
embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {idInst} du Quiz: {quizName}", description=f"La moyenne pour cette instance de quiz est de: **{round(moyenne,2)}/{nbPoints}**", colour=discord.Colour(0x42a010), timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Nombre de participants: " + str(await instance.getNbParticipants()), icon_url=ctx.author.avatar_url)
embed.set_footer(text=f"Vous pouvez utilisez /viewResult {idInst} pour voir votre résultat", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
if len(reponseTrie) > 1:
mieuxReussi = reponseTrie[0]
moinsReussi = reponseTrie[-1]
embed.add_field(name=":white_check_mark: Question la mieux réussi:", value='**' + await mieuxReussi[0].getTitre() + "** avec " + str(mieuxReussi[1]) + " bonnes réponses", inline=False)
embed.add_field(name=":negative_squared_cross_mark: Question la moins réussi:", value='**' + await moinsReussi[0].getTitre() + "** avec " + str(moinsReussi[1]) + " bonnes réponses", inline=False)
embed.add_field(name=":calendar: Date de la game", value=f"Début : {dateDébut}\nFin: " + dateFin if dateFin else "Le quiz n'est pas terminé", inline=False)
embed.add_field(name=":trophy: Classement des 10 meilleurs participants", value="\u200b", inline=False)
for i, (ranker, nbBnReponse) in enumerate(classement):
points = nbBnReponse*pointsParQ
embed.add_field(name=keycaps[i] + " - " + str(await ranker.getName()), value=f"{nbBnReponse}/{quizQuestions} bonnes réponses. Soit {round(points,2)} points.", inline=False)
await message.edit(embed=embed)
await asyncio.sleep(3)
await newCat.delete()
else:
embed = discord.Embed(title=f":books: Participation au quiz : {quizName}", description="", color=0xc20010, timestamp=datetime.today())
embed.set_author(name=f"Game lancée par {ctx.author.name}#{ctx.author.discriminator}", icon_url= ctx.author.avatar_url)
embed.add_field(name=":x: Game annulé", value="La game n'a pas reçu de participations dans le temps impartie. La game a été annulée")
embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
await message.edit(embed=embed)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne pouvez pas lancer un quiz qui n'a pas de questions.\n\n- Faites /addQuestion pour ajouter au moins 1 question à ce quiz ou utiliser /getQuizs pour avoir une liste des quizs disponibles.```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun Quiz d'id {idquiz} n'a été trouvé```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
except Exception as e:
print(f"[ ERROR ] Sur /launchquiz: {e}")
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
async def envoyerQuestion(self, channel, instance, quiz, creator, user, message, durée_réponse):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
reactions = ["1️⃣","2️⃣","3️⃣","4️⃣"]
await Utilisateur.get(user.id, channel.guild.id, db)
for question in await quiz.getQuestions():
choix = await question.getChoix()
nbChoix = len(choix)
embed = discord.Embed(title=":pencil: "+ str(await quiz.getTitre()), description=f'''Vous avez **{time.strftime("%H heures %M minutes et %S secondes" if durée_réponse >= 3600 else ("%M minutes et %S secondes" if durée_réponse >= 120 else ("%M minute et %S secondes" if durée_réponse >= 60 else "%S secondes")), time.gmtime(durée_réponse))}** pour répondre à chaque question à l'aide des réactions sous ce message.''', color=0x0011ff, timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name=f"{user.name}#{user.discriminator}", icon_url=user.avatar_url)
embed.set_footer(text=f"Utilisez les réactions de 1️⃣ à {reactions[nbChoix-1]} pour choisir votre réponse", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
embed.add_field(name=":book: "+ await question.getTitre()+ " :book:", value = "\u200b")
for i,choix in enumerate(choix):
titreChoix = await choix.getTitre()
embed.add_field(name=f"{reactions[i]} - {titreChoix}", value="\u200b", inline=False)
await message.edit(embed=embed)
reactPossible = []
for i in range(nbChoix):
await message.add_reaction(emoji=reactions[i])
reactPossible.append(reactions[i])
try:
reaction, u = await self.client.wait_for('reaction_add', timeout=durée_réponse, check=lambda reaction, discordUser: discordUser.id == user.id and reaction.message.id == message.id and str(reaction.emoji) in reactPossible)
if reaction.emoji == "1️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 1, user.id, db)
if reaction.emoji == "2️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 2, user.id, db)
if reaction.emoji == "3️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 3, user.id, db)
if reaction.emoji == "4️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 4, user.id, db)
except asyncio.TimeoutError:
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 0, user.id, db)
await message.clear_reactions()
embed = discord.Embed(title="Quiz terminé!", colour=discord.Colour(0xF5A623), description="Le quiz est maintenant terminé. Ce channel sera supprimé dans quelques instants", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_footer(text=f"Quiz créé par {creator.name}#{creator.discriminator}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
await message.edit(embed=embed)
await asyncio.sleep(5)
await channel.delete()
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="reset",
guild_ids=guild_ids,
description="Permet de reinitialiser les scores et le leaderboard du serveur.")
async def reset(self, ctx):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
await Statistiques.clearLeaderboard(ctx.guild.id, db)
await ctx.send(":white_check_mark:", hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="recap",
guild_ids=guild_ids,
description="Permet de faire un récapitulatif d'un quiz.",
options=[
create_option(
name="idquiz",
description="Id du quiz dont on veut faire le récapitulatif.",
option_type=4,
required=True
)])
async def recap(self, ctx, idQuiz: int):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
quiz = await Quiz.get(idQuiz, db)
if quiz:
pages = await quiz.getNbQuestions()
if pages > 0:
page = 1
reaction = None
embed = await recapEmbed(ctx, quiz, page, pages, db)
message = await ctx.send(embed=embed)
if page < pages:
await message.add_reaction('▶')
try:
while True:
if str(reaction) == '◀' and page > 1:
page -= 1
if page == 1:
await message.remove_reaction('◀', self.client.user)
if page == pages-1:
await message.add_reaction('▶')
embed = await recapEmbed(ctx, quiz, page, pages, db)
await message.edit(embed=embed)
elif str(reaction) == '▶' and page < pages:
page += 1
if page == pages:
await message.remove_reaction('▶', self.client.user)
if page == 2:
await message.remove_reaction('▶', self.client.user)
await message.add_reaction('◀')
await message.add_reaction('▶')
embed = await recapEmbed(ctx, quiz, page, pages, db)
await message.edit(embed=embed)
try:
reaction, discordUser = await self.client.wait_for('reaction_add', timeout = 20.0, check = lambda reaction, discordUser: discordUser.id == ctx.author.id and reaction.message.id == message.id and str(reaction.emoji) in ['◀', '▶'])
await message.remove_reaction(reaction, discordUser)
except asyncio.TimeoutError:
await message.clear_reactions()
break
except Exception as e:
print(f"[ ERROR ] Sur /recap: {e}")
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Le quiz d'id {idQuiz} n'a aucune question```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun Quiz d'id {idQuiz} n'a été trouvé```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True) | src/commandsSlash.py | import aiosqlite
import asyncio
import math
import sqlite3
import time
from datetime import datetime

import aiosqlite
import discord
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option

from init import sourceDb, guild_ids
from database import Utilisateur, Quiz, Instance, Reponse, Statistiques
from utils import createEmbed, quizEmbed, recapEmbed
class Commandes(commands.Cog):
    """Slash-command cog for the quiz bot (quiz management, games, results, leaderboard)."""
    def __init__(self, client):
        # Bot client, used for wait_for(), reaction bookkeeping and member lookup.
        self.client = client
@cog_ext.cog_slash(name="addquestion",
guild_ids=guild_ids,
description="Ajoute une question à un quiz existant si spécifié ou créé un nouveau quiz pour la question.",
options=[
create_option(
name="titre",
description="Titre de la question",
option_type=3,
required=True
),
create_option(
name="reponse1",
description="Première reponse possible",
option_type=3,
required=True
),
create_option(
name="reponse2",
description="Deuxième reponse possible",
option_type=3,
required=True
),
create_option(
name="reponse3",
description="Troisième reponse possible",
option_type=3,
required=False
),
create_option(
name="reponse4",
description="Quatrième reponse possible",
option_type=3,
required=False
),
create_option(
name="idquiz",
description="Identifiant du quiz auquel on rajoute la question",
option_type=4,
required=False
)
])
async def addquestion(self, ctx, titre: str, reponse1: str, reponse2: str, reponse3: str = None, reponse4: str = None, idquiz: int = None):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
reponses = [reponse for reponse in [reponse1, reponse2, reponse3, reponse4] if reponse is not None and type(reponse) == str]
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
embed = discord.Embed(title=":pencil: Récapitulatif de la question :pencil:", colour=discord.Colour(0x42a010), description="\u200b", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="En cours de création", icon_url=ctx.author.avatar_url)
embed.set_footer(text="Appuyer sur ❌ pour annuler la question", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
embed.add_field(name=":book: __La Question__:", value=f"**“ {titre} ”**", inline=False)
embed.add_field(name=":white_check_mark: __Les reponses possibles__:", value="\u200b", inline=False)
for i, reponse in enumerate(reponses):
embed.add_field(name=keycaps[i] + " - " + str(reponse), value="\u200b", inline=False)
message = await ctx.send(embed=embed)
for i, reponse in enumerate(reponses):
await message.add_reaction(keycaps[i])
await message.add_reaction('❌')
try:
reaction, user = await self.client.wait_for('reaction_add', timeout = 15.0, check = lambda reaction, user: user.id == ctx.author.id and reaction.message.id == message.id and (str(reaction.emoji) in keycaps or str(reaction.emoji) == '❌'))
await message.clear_reactions()
if str(reaction.emoji) == '❌':
await message.edit(embed=await createEmbed("annulé", ctx))
elif str(reaction.emoji) in keycaps:
estValide = [1 if keycaps[i] == reaction.emoji else 0 for i, reponse in enumerate(reponses)]
if idquiz is None:
quiz = await Quiz.create(titre, 10, ctx.author.id, db)
question = await quiz.addQuestion(titre)
for i, reponse in enumerate(reponses):
await question.addChoix(reponse, estValide[i])
bonneRéponse = await question.getBonneReponse()
await message.edit(embed=await createEmbed("success",ctx, quiz,question,bonneRéponse))
else:
quiz = await Quiz.get(idquiz, db)
if quiz:
creator = await quiz.getCreator(ctx.guild.id)
if await creator.getIdDiscord() != ctx.author.id:
await message.edit(embed=await createEmbed("creator", ctx))
else:
if await quiz.getNbQuestions() >= 4:
await message.edit(embed=await createEmbed("maxQuestions", ctx))
else:
question = await quiz.addQuestion(titre)
for i, reponse in enumerate(reponses):
await question.addChoix(reponse, estValide[i])
bonneRéponse = await question.getBonneReponse()
await message.edit(embed=await createEmbed("success", ctx, quiz,question,bonneRéponse))
else:
await message.edit(embed=await createEmbed("incorrecte", ctx))
except asyncio.TimeoutError:
await ctx.send("<a:error:804691277010567189> Tu n'as pas spécifié la bonne reponse, la question a été annulée")
await message.edit(embed=await createEmbed("annulé", ctx))
except Exception as e:
print(f"[ ERROR ] Sur /addquestion: {e}")
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une <NAME> survenue", icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="<NAME>", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="createquiz",
guild_ids=guild_ids,
description="Permet de créer un nouveau quiz. N'oubliez pas d'ajouter des questions avec /addQuestion",
options=[
create_option(
name="titre",
description="Titre du quiz",
option_type=3,
required=True
),
create_option(
name="points",
description="Nombre de points que vaut le quiz",
option_type=4,
required=False
)
])
async def createquiz(self, ctx, titre: str, points: int = 10):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
points = max(min(points, 100),1)
quiz = await Quiz.create(titre, points, ctx.author.id, db)
await ctx.send(embed= await createEmbed("createQuiz", ctx, quiz), hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="leaderboard",
guild_ids=guild_ids,
description="Permet d'afficher le classement des meilleurs joueurs en termes de points.")
async def leaderboard(self, ctx):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
user = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
stats = await user.getStatistiques()
position = await user.getCurrentPosition()
points = round(await stats.getScoreTotal(), 2)
embed = discord.Embed(title=":trophy: Voici le top 10 des meilleurs joueurs :trophy:", colour=discord.Colour(0x42a010), description="*Classé en termes de points totaux sur le serveur*", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Votre place: " + (str(position) + 'er' if position == 1 else str(position) +'ème'), icon_url=ctx.author.avatar_url)
embed.set_footer(text=f"Vous avez {points} points", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
leaderboard = await Statistiques.getLeaderboard(ctx.guild.id, db)
for i, ranker in enumerate(leaderboard):
embed.add_field(name=keycaps[i] + " - " + str(await ranker[0].getName()), value=str(round(await ranker[1].getScoreTotal(), 2)) + " points", inline=False)
await ctx.send(embed = embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
    @cog_ext.cog_slash(name="getquizs",
                        guild_ids=guild_ids,
                        description="Permet de récupérers tout les quizs disponibles sur la base de données.",
                        options=[
                            create_option(
                                name="personal",
                                description="Limiter la recherche des quizs à ceux que vous avez créés.",
                                option_type=5,
                                required=False
                            )])
    async def getquizs(self, ctx, personal: bool = True):
        # List quizzes 10 per page; ◀/▶ reactions paginate until 10s of
        # inactivity. Requires the "Projet Quiz Master" role.
        if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
            async with aiosqlite.connect(sourceDb) as db:
                db.row_factory = sqlite3.Row
                utilisateur = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
                # "personal" restricts the count (and listing) to the caller's quizzes.
                if personal:
                    quizCount = await Quiz.getCount(db, ctx.author.id)
                else:
                    quizCount = await Quiz.getCount(db)
                pages = math.ceil(quizCount/10)
                page = 1
                # Row offset handed to quizEmbed for the current page.
                offset = 0
                reaction = None
                embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, 1, pages)
                message = await ctx.send(embed=embed)
                if page < pages:
                    await message.add_reaction('▶')
                try:
                    # State machine: each iteration applies the last reaction
                    # (if any) then waits for the next one.
                    while True:
                        if str(reaction) == '◀' and page > 1:
                            page -= 1
                            offset -= 10
                            # First page: no "previous" arrow.
                            if page == 1:
                                await message.remove_reaction('◀', self.client.user)
                            if page == pages-1:
                                await message.add_reaction('▶')
                            embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, page, pages, offset)
                            await message.edit(embed=embed)
                        elif str(reaction) == '▶' and page < pages:
                            page += 1
                            offset += 10
                            if page == pages:
                                await message.remove_reaction('▶', self.client.user)
                            # Leaving page 1: re-add both arrows so ◀ displays left of ▶.
                            # NOTE(review): on a 2-page list this re-adds ▶ right after
                            # removing it above — verify the last page shouldn't hide ▶.
                            if page == 2:
                                await message.remove_reaction('▶', self.client.user)
                                await message.add_reaction('◀')
                                await message.add_reaction('▶')
                            embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, page, pages, offset)
                            await message.edit(embed=embed)
                        try:
                            reaction, discordUser = await self.client.wait_for('reaction_add', timeout = 10.0, check = lambda reaction, discordUser: discordUser.id == ctx.author.id and reaction.message.id == message.id and str(reaction.emoji) in ['◀', '▶'])
                            await message.remove_reaction(reaction, discordUser)
                        except asyncio.TimeoutError:
                            # No interaction for 10s: stop paginating and clean up.
                            await message.clear_reactions()
                            break
                except Exception as e:
                    print(f"[ ERROR ] Sur /getquizs: {e}")
                    embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                    embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                    await message.edit(embed=embed)
        else:
            embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
            embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
            embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
            await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
    @cog_ext.cog_slash(name="getresults",
                        guild_ids=guild_ids,
                        description="Permet de récuperer la moyenne et le classement d'une game.",
                        options=[
                            create_option(
                                name="id_game",
                                description="L'identifiant unique de la game.",
                                option_type=4,
                                required=True
                            )])
    async def getresults(self, ctx, id_game: int):
        # Show the overall results of a finished game: average score,
        # best/worst question, dates and top-10 ranking. Private reply.
        async with aiosqlite.connect(sourceDb) as db:
            db.row_factory = sqlite3.Row
            game = await Instance.get(id_game, db)
            if game:
                # Only finished games have results worth showing.
                if await game.getDateFin():
                    keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
                    moyenne, nbPoints = await game.getMoyenne(False, True)
                    quiz = await game.getQuiz()
                    nbQuestions = await quiz.getNbQuestions()
                    # Points awarded per correct answer for this instance.
                    pointsParQ = await quiz.getPoints()*await game.getMultiplicateur()/nbQuestions
                    classement = await game.getClassement()
                    reponseTrie = await game.getReponsesTrie()
                    dateDébut = await game.getDateDeb(True)
                    DateFin = await game.getDateFin(True)
                    embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {id_game} du Quiz: " + await quiz.getTitre() , colour=discord.Colour(0x42a010), description=f"La moyenne pour cette instance de quiz est de: **{round(moyenne,2)}/{nbPoints}**", timestamp=datetime.today())
                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                    embed.set_author(name="Nombre de participants: " + str(await game.getNbParticipants()), icon_url=ctx.author.avatar_url)
                    embed.set_footer(text=f"Vous pouvez utilisez /viewResult {id_game} pour voir votre résultat", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                    # Needs at least two questions to contrast best vs worst.
                    # Presumably reponseTrie is sorted by correct-answer count — TODO confirm in Instance.getReponsesTrie.
                    if len(reponseTrie) > 1:
                        mieuxReussi = reponseTrie[0]
                        moinsReussi = reponseTrie[-1]
                        embed.add_field(name=":white_check_mark: Question la mieux réussi:", value='**' + await mieuxReussi[0].getTitre() + "** avec " + str(mieuxReussi[1]) + " bonnes réponses", inline=False)
                        embed.add_field(name=":negative_squared_cross_mark: Question la moins réussi:", value='**' + await moinsReussi[0].getTitre() + "** avec " + str(moinsReussi[1]) + " bonnes réponses", inline=False)
                    # NOTE(review): "A + B if C else D" parses as "(A + B) if C else D";
                    # DateFin is truthy here (checked above) so the full string is used.
                    embed.add_field(name=":calendar: Date de la game", value=f"Début : {dateDébut}\nFin: " + DateFin if DateFin else "Le quiz n'est pas terminé", inline=False)
                    embed.add_field(name=":trophy: Classement des 10 meilleurs participants", value="\u200b", inline=False)
                    for i, (ranker, nbBnReponse) in enumerate(classement):
                        points = nbBnReponse*pointsParQ
                        embed.add_field(name=keycaps[i] + " - " + str(await ranker.getName()), value=f"{nbBnReponse}/{nbQuestions} bonnes réponses. Soit {round(points,2)} points.", inline=False)
                    await ctx.send(embed = embed, hidden = True)
                else:
                    embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Veuillez attendre la fin de la partie d'id {id_game}```", timestamp=datetime.today())
                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                    embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                    await ctx.send(embed=embed, hidden = True)
            else:
                embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun résultat n'a été trouvé pour une instance d'id {id_game}```", timestamp=datetime.today())
                embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
    @cog_ext.cog_slash(name="viewresult",
                        guild_ids=guild_ids,
                        description="Permet de récuperer votre résultat pour une game.",
                        options=[
                            create_option(
                                name="id_game",
                                description="L'identifiant unique de la game.",
                                option_type=4,
                                required=True
                            )])
    async def viewresult(self, ctx, id_game: int):
        # Show the caller's personal result for a finished game: score, rank
        # and a per-question recap (expected vs given answer). Private reply.
        async with aiosqlite.connect(sourceDb) as db:
            db.row_factory = sqlite3.Row
            user = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
            resultats = await user.getResultats(id_game)
            instance = await Instance.get(id_game, db)
            keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
            # Need both a recorded participation and an existing instance.
            if resultats and instance:
                # Results are only revealed once the game has ended.
                if await instance.getDateFin():
                    quiz = await instance.getQuiz()
                    nbQuestions = await quiz.getNbQuestions()
                    # Points per correct answer for this instance.
                    pointsParQ = await quiz.getPoints()*await instance.getMultiplicateur()/nbQuestions
                    nbBnReponse = await instance.getNbCorrectes(ctx.author.id)
                    points = nbBnReponse*pointsParQ
                    moyenne, nbPoints = await instance.getMoyenne(False, True)
                    classement = await user.getCurrentPosition(id_game)
                    embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {id_game} du Quiz: " + await quiz.getTitre() , colour=discord.Colour(0x42a010), description=f"Vous avez eu **{nbBnReponse}/{nbQuestions}** bonnes réponses", timestamp=datetime.today())
                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                    embed.set_author(name="Nombre de participants: " + str(await instance.getNbParticipants()), icon_url=ctx.author.avatar_url)
                    embed.set_footer(text=f"La moyenne est de {round(moyenne,2)}/{nbPoints}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                    embed.add_field(name=":trophy: Classement:", value=f"Vous êtes **{classement}" + ("er" if classement == 1 else "ème") + f"** du classement avec un total de **{round(points, 2)} points** *(sur {nbPoints})*\n", inline=False)
                    embed.add_field(name=":pencil: Récapitulatif des questions:", value="\u200b", inline=False)
                    # Presumably resultats yields (question, estCorrecte, choix) per
                    # question, choix being None when unanswered — TODO confirm in Utilisateur.getResultats.
                    for i, (question, estCorrecte, choix) in enumerate(resultats):
                        bonneReponse = await question.getBonneReponse()
                        titre = await bonneReponse.getTitre()
                        if choix:
                            titreChoix = await choix.getTitre()
                        else:
                            titreChoix = "Vous n'avez pas répondu à cette question"
                        # ✅ correct, ❎ wrong answer, ❌ no answer at all.
                        embed.add_field(name=keycaps[i] + " - " + await question.getTitre(), value=f"⠀⠀⠀:ballot_box_with_check: **Réponse attendue:** {titre}\n⠀⠀⠀" + (":white_check_mark:" if estCorrecte else (":negative_squared_cross_mark:" if choix else ":x:")) + f" **Votre réponse: ** {titreChoix}", inline=False)
                    await ctx.send(embed=embed, hidden=True)
                else:
                    embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Veuillez attendre la fin de la partie d'id {id_game}```", timestamp=datetime.today())
                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                    embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                    await ctx.send(embed=embed, hidden=True)
            else:
                embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun résultat n'a été trouvé pour votre compte sur l'instance d'id {id_game}```", timestamp=datetime.today())
                embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
    @cog_ext.cog_slash(name="launchquiz",
                        description="Commande pour lancer une game d'un quiz !",
                        options=[
                            create_option(
                                name="idquiz",
                                description="L'identifiant du quiz a lancer. Utilisez la commande /getquizs pour retrouver les identifiants.",
                                option_type=4,
                                required=True
                            ),
                            create_option(
                                name="durée_attente",
                                description="La durée (en secondes) que le bot attendera pour des réactions avant de lancer la game.",
                                option_type=4,
                                required=False
                            ),
                            create_option(
                                name="durée_réponse",
                                description="La durée (en secondes) que possédera un participant pour chaque question.",
                                option_type=4,
                                required=False
                            ),
                            create_option(
                                name="multiplicateur",
                                description="Tel un coefficient, vient multiplier le nombre de points d'un quiz par le multiplicateur.",
                                option_type=4,
                                required=False
                            )],
                        guild_ids=guild_ids)
    async def launchquiz(self, ctx, idquiz: int, durée_attente: int = 30, durée_réponse: int = 30, multiplicateur: int = 1):
        # Run a full game of a quiz: advertise it, collect 🗳️ reactions for
        # durée_attente seconds, open one private channel per participant,
        # ask every question concurrently, then post the final results.
        # Requires the "Projet Quiz Master" role.
        try:
            if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
                async with aiosqlite.connect(sourceDb) as db:
                    db.row_factory = sqlite3.Row
                    # Clamp the user-supplied timings/coefficient to sane bounds.
                    durée_attente = max(min(durée_attente, 36000), 30)
                    durée_réponse = max(min(durée_réponse, 3600), 15)
                    multiplicateur = max(min(multiplicateur, 100), 1)
                    quiz = await Quiz.get(idquiz, db)
                    if quiz:
                        quizQuestions = await quiz.getNbQuestions()
                        # A quiz with no questions cannot be played.
                        if quizQuestions > 0:
                            createur = await quiz.getCreator(ctx.guild.id)
                            createurId = await createur.getIdDiscord()
                            creatorNom = await createur.getName()
                            creator = discord.utils.get(self.client.get_all_members(), id=createurId)
                            quizName = await quiz.getTitre()
                            quizPoints = await quiz.getPoints()*multiplicateur
                            # Announcement embed: how to join and how long is left.
                            embed = discord.Embed(title=f":books: Participation au quiz : {quizName}", description=f"Une game du quiz **{quizName}** va bientôt commencer.", color=0x50E3C2, timestamp=datetime.today())
                            embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                            embed.set_author(name=f"Game lancée par {ctx.author.name}#{ctx.author.discriminator}", icon_url= ctx.author.avatar_url)
                            embed.add_field(name=":information_source: - Informations", value=f'Le quiz contient **{quizQuestions}** question(s) pour un total de **{quizPoints}** point(s).', inline=False)
                            embed.add_field(name=":ballot_box: - Comment participer", value="Appuyer sur la réaction :ballot_box: pour participer, une fois le temps d'attente écoulé un channel privé vous sera généré")
                            # strftime format picked to match the magnitude of the wait.
                            embed.add_field(name=":alarm_clock: - Temps", value=f'Vous avez **{time.strftime("%H heures %M minutes et %S secondes" if durée_attente >= 3600 else ("%M minutes et %S secondes" if durée_attente >= 120 else ("%M minute et %S secondes" if durée_attente >= 60 else "%S secondes")), time.gmtime(durée_attente))}** avant le lancement du test.', inline=False)
                            embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                            message = await ctx.send(embed=embed)
                            await message.add_reaction(emoji="🗳️")
                            # Let people react, then re-fetch to read final reactions.
                            await asyncio.sleep(durée_attente)
                            message = await ctx.channel.fetch_message(message.id)
                            reaction = [reaction for reaction in message.reactions if reaction.emoji == "🗳️"][0]
                            users = await reaction.users().flatten()
                            await message.clear_reactions()
                            # > 1 because the bot's own reaction is in the list.
                            if len(users) > 1:
                                instance = await Instance.create(idquiz, db, ctx.guild.id, multiplicateur)
                                if not instance:
                                    embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- La création de l'instance a échoué```", timestamp=datetime.today())
                                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                                    embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                                    await ctx.send(embed=embed)
                                else:
                                    idInst = await instance.getIdInst()
                                    # Lock the announcement: entries are closed.
                                    embed = discord.Embed(title=f":books: Participation au quiz : {quizName}", description="", color=0xff4c5b, timestamp=datetime.today())
                                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                                    embed.set_author(name=f"Game lancée par {ctx.author.name}#{ctx.author.discriminator}", icon_url= ctx.author.avatar_url)
                                    embed.add_field(name=":lock: - Le quiz est maintenant fermé", value="Le temps d'attente est écoulé. Le quiz est maintenant lancé.\nCherchez un channel à votre nom dans les channels du serveur et répondez aux questions à l'aide des reactions à l'intérieur de celui-ci.")
                                    embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                                    await message.edit(embed=embed)
                                    # One category holding a private channel per participant.
                                    newCat = await ctx.guild.create_category(name=quizName)
                                    embed = discord.Embed(title="Le quiz va bientôt commencer!", colour=discord.Colour(0x4A90E2), description="Encore quelques instants. Le bot ouvre les channels aux participants...", timestamp=datetime.today())
                                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                                    embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                                    tasks = []
                                    for user in users:
                                        # Skip the bot's own reaction.
                                        if not user.bot:
                                            # Channel visible only to this participant.
                                            overwrites = {ctx.guild.default_role: discord.PermissionOverwrite(read_messages = False),
                                                        user: discord.PermissionOverwrite(read_messages = True)}
                                            channel = await newCat.create_text_channel(name=f"{user.name}-{user.discriminator}", overwrites=overwrites)
                                            answerMessage = await channel.send(user.mention, embed=embed)
                                            tasks.append(self.envoyerQuestion(channel, instance, quiz, creator, user, answerMessage, durée_réponse))
                                    # Run every participant's question flow concurrently.
                                    await asyncio.gather(*tasks)
                                    await instance.setDateFin()
                                    # Build the final results embed (same layout as /getresults).
                                    keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
                                    moyenne, nbPoints = await instance.getMoyenne(False, True)
                                    pointsParQ = quizPoints/quizQuestions
                                    classement = await instance.getClassement()
                                    reponseTrie = await instance.getReponsesTrie()
                                    dateDébut = await instance.getDateDeb(True)
                                    dateFin = await instance.getDateFin(True)
                                    embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {idInst} du Quiz: {quizName}", description=f"La moyenne pour cette instance de quiz est de: **{round(moyenne,2)}/{nbPoints}**", colour=discord.Colour(0x42a010), timestamp=datetime.today())
                                    embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                                    embed.set_author(name="Nombre de participants: " + str(await instance.getNbParticipants()), icon_url=ctx.author.avatar_url)
                                    embed.set_footer(text=f"Vous pouvez utilisez /viewResult {idInst} pour voir votre résultat", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                                    if len(reponseTrie) > 1:
                                        mieuxReussi = reponseTrie[0]
                                        moinsReussi = reponseTrie[-1]
                                        embed.add_field(name=":white_check_mark: Question la mieux réussi:", value='**' + await mieuxReussi[0].getTitre() + "** avec " + str(mieuxReussi[1]) + " bonnes réponses", inline=False)
                                        embed.add_field(name=":negative_squared_cross_mark: Question la moins réussi:", value='**' + await moinsReussi[0].getTitre() + "** avec " + str(moinsReussi[1]) + " bonnes réponses", inline=False)
                                    # NOTE(review): "(A + B) if C else D" — dateFin was just set, so the full string is used.
                                    embed.add_field(name=":calendar: Date de la game", value=f"Début : {dateDébut}\nFin: " + dateFin if dateFin else "Le quiz n'est pas terminé", inline=False)
                                    embed.add_field(name=":trophy: Classement des 10 meilleurs participants", value="\u200b", inline=False)
                                    for i, (ranker, nbBnReponse) in enumerate(classement):
                                        points = nbBnReponse*pointsParQ
                                        embed.add_field(name=keycaps[i] + " - " + str(await ranker.getName()), value=f"{nbBnReponse}/{quizQuestions} bonnes réponses. Soit {round(points,2)} points.", inline=False)
                                    await message.edit(embed=embed)
                                    await asyncio.sleep(3)
                                    # Remove the temporary category once the game is over.
                                    await newCat.delete()
                            else:
                                # Nobody joined: cancel the game.
                                embed = discord.Embed(title=f":books: Participation au quiz : {quizName}", description="", color=0xc20010, timestamp=datetime.today())
                                embed.set_author(name=f"Game lancée par {ctx.author.name}#{ctx.author.discriminator}", icon_url= ctx.author.avatar_url)
                                embed.add_field(name=":x: Game annulé", value="La game n'a pas reçu de participations dans le temps impartie. La game a été annulée")
                                embed.set_footer(text=f"Quiz créé par {creatorNom}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
                                await message.edit(embed=embed)
                        else:
                            embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne pouvez pas lancer un quiz qui n'a pas de questions.\n\n- Faites /addQuestion pour ajouter au moins 1 question à ce quiz ou utiliser /getQuizs pour avoir une liste des quizs disponibles.```", timestamp=datetime.today())
                            embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                            embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                            await ctx.send(embed=embed, hidden=True)
                    else:
                        embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun Quiz d'id {idquiz} n'a été trouvé```", timestamp=datetime.today())
                        embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                        embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                        await ctx.send(embed=embed, hidden=True)
            else:
                embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
                embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embed, hidden=True)
        except Exception as e:
            print(f"[ ERROR ] Sur /launchquiz: {e}")
            embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
            embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
            embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
            # NOTE(review): if the failure happened before `message = await ctx.send(...)`,
            # `message` is unbound here and this edit raises NameError — verify.
            await message.edit(embed=embed)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
async def envoyerQuestion(self, channel, instance, quiz, creator, user, message, durée_réponse):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
reactions = ["1️⃣","2️⃣","3️⃣","4️⃣"]
await Utilisateur.get(user.id, channel.guild.id, db)
for question in await quiz.getQuestions():
choix = await question.getChoix()
nbChoix = len(choix)
embed = discord.Embed(title=":pencil: "+ str(await quiz.getTitre()), description=f'''Vous avez **{time.strftime("%H heures %M minutes et %S secondes" if durée_réponse >= 3600 else ("%M minutes et %S secondes" if durée_réponse >= 120 else ("%M minute et %S secondes" if durée_réponse >= 60 else "%S secondes")), time.gmtime(durée_réponse))}** pour répondre à chaque question à l'aide des réactions sous ce message.''', color=0x0011ff, timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name=f"{user.name}#{user.discriminator}", icon_url=user.avatar_url)
embed.set_footer(text=f"Utilisez les réactions de 1️⃣ à {reactions[nbChoix-1]} pour choisir votre réponse", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
embed.add_field(name=":book: "+ await question.getTitre()+ " :book:", value = "\u200b")
for i,choix in enumerate(choix):
titreChoix = await choix.getTitre()
embed.add_field(name=f"{reactions[i]} - {titreChoix}", value="\u200b", inline=False)
await message.edit(embed=embed)
reactPossible = []
for i in range(nbChoix):
await message.add_reaction(emoji=reactions[i])
reactPossible.append(reactions[i])
try:
reaction, u = await self.client.wait_for('reaction_add', timeout=durée_réponse, check=lambda reaction, discordUser: discordUser.id == user.id and reaction.message.id == message.id and str(reaction.emoji) in reactPossible)
if reaction.emoji == "1️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 1, user.id, db)
if reaction.emoji == "2️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 2, user.id, db)
if reaction.emoji == "3️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 3, user.id, db)
if reaction.emoji == "4️⃣":
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 4, user.id, db)
except asyncio.TimeoutError:
await Reponse.create(await instance.getIdInst(), await question.getIdQuestion(), 0, user.id, db)
await message.clear_reactions()
embed = discord.Embed(title="Quiz terminé!", colour=discord.Colour(0xF5A623), description="Le quiz est maintenant terminé. Ce channel sera supprimé dans quelques instants", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_footer(text=f"Quiz créé par {creator.name}#{creator.discriminator}", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
await message.edit(embed=embed)
await asyncio.sleep(5)
await channel.delete()
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="reset",
guild_ids=guild_ids,
description="Permet de reinitialiser les scores et le leaderboard du serveur.")
async def reset(self, ctx):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
await Statistiques.clearLeaderboard(ctx.guild.id, db)
await ctx.send(":white_check_mark:", hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="recap",
guild_ids=guild_ids,
description="Permet de faire un récapitulatif d'un quiz.",
options=[
create_option(
name="idquiz",
description="Id du quiz dont on veut faire le récapitulatif.",
option_type=4,
required=True
)])
async def recap(self, ctx, idQuiz: int):
if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
quiz = await Quiz.get(idQuiz, db)
if quiz:
pages = await quiz.getNbQuestions()
if pages > 0:
page = 1
reaction = None
embed = await recapEmbed(ctx, quiz, page, pages, db)
message = await ctx.send(embed=embed)
if page < pages:
await message.add_reaction('▶')
try:
while True:
if str(reaction) == '◀' and page > 1:
page -= 1
if page == 1:
await message.remove_reaction('◀', self.client.user)
if page == pages-1:
await message.add_reaction('▶')
embed = await recapEmbed(ctx, quiz, page, pages, db)
await message.edit(embed=embed)
elif str(reaction) == '▶' and page < pages:
page += 1
if page == pages:
await message.remove_reaction('▶', self.client.user)
if page == 2:
await message.remove_reaction('▶', self.client.user)
await message.add_reaction('◀')
await message.add_reaction('▶')
embed = await recapEmbed(ctx, quiz, page, pages, db)
await message.edit(embed=embed)
try:
reaction, discordUser = await self.client.wait_for('reaction_add', timeout = 20.0, check = lambda reaction, discordUser: discordUser.id == ctx.author.id and reaction.message.id == message.id and str(reaction.emoji) in ['◀', '▶'])
await message.remove_reaction(reaction, discordUser)
except asyncio.TimeoutError:
await message.clear_reactions()
break
except Exception as e:
print(f"[ ERROR ] Sur /recap: {e}")
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Le quiz d'id {idQuiz} n'a aucune question```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- Aucun Quiz d'id {idQuiz} n'a été trouvé```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True)
else:
embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed, hidden=True) | 0.378804 | 0.258671 |
import os
import unittest as ut
import numpy as np
from mykit.core.utils import get_matched_files
from mykit.vasp.xml import Vasprunxml, VasprunxmlError
class test_vasprunxml_read(ut.TestCase):
def test_scf_xml(self):
'''Test reading XMLs for SCF calculations (LORBIT not set)
'''
dataDir = 'vasprun_scf'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
typeMapping = vxml.typeMapping
# get all index
self.assertListEqual(list(range(vxml.natoms)),
vxml.get_atom_index())
self.assertFalse(vxml.edos is None)
self.assertFalse(vxml.totalDos is None)
self.assertFalse(vxml.dos is None)
# static calculation, there is only 1 ion step.
self.assertEqual(1, vxml.nIonSteps)
self.assertTupleEqual(np.shape(vxml.forces),
(vxml.nIonSteps, vxml.natoms, 3))
self.assertTupleEqual(np.shape(vxml.stress),
(vxml.nIonSteps, 3, 3))
self.assertEqual(1, len(vxml.interPoscars))
vxml.ntypes
vxml.natomsPerType
vxml.get_atom_index(0)
vxml.get_atom_index(-1)
vxml.get_atom_index(typeMapping[0])
self.assertRaisesRegex(VasprunxmlError,
r"Atom type not found: *",
vxml.get_atom_index, "UNKNOWNSYMBOL")
# empty properties
self.assertTrue(vxml.projs is None)
self.assertEqual(0, vxml.nprojs)
self.assertTrue(vxml.pDos is None)
self.assertTrue(vxml.pWave is None)
def test_band_xml(self):
'''Test reading XMLs for band calculations (LORBIT set or not)
'''
dataDir = 'vasprun_band'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
self.assertEqual(vxml.kmode, "L")
self.assertTupleEqual(np.shape(vxml.weight), (vxml.nibzkpt,))
self.assertTupleEqual(np.shape(vxml.kpoints), (vxml.nibzkpt, 3))
self.assertTupleEqual(np.shape(vxml.kptsWeight), (vxml.nibzkpt, 4))
bs = vxml.load_band()
self.assertAlmostEqual(bs.nelect, vxml.nelect, places=4)
self.assertTrue(bs.hasKvec)
self.assertTrue(bs.isKpath)
bs.kvec
bsTrimed = vxml.load_band(1)
self.assertEqual(1, bs.nkpts - bsTrimed.nkpts)
def test_mixed_k_band_xml(self):
'''Test reading XMLs for band calculations with manual input kpoints
in case of SCAN and HF band calculations
'''
dataDir = 'vasprun_mixed_k_band'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
bsMix = vxml.load_band()
bsBand = vxml.load_band(kTrimBefore=20)
self.assertEqual(bsMix.nkpts - bsBand.nkpts, 20)
self.assertTrue(np.allclose(bsBand.weight, np.ones(bsBand.nkpts)))
self.assertTrue(bsBand.isKpath)
def test_opt_xml(self):
'''Test reading XMLs for geometry optimization
'''
dataDir = 'vasprun_opt'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
self.assertTupleEqual(np.shape(vxml.forces), \
(vxml.nIonSteps, vxml.natoms, 3))
def test_pdos_xml(self):
'''Test reading XMLs with LORBIT set
'''
dataDir = 'vasprun_partial'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
msg = "Wrong when processing {}".format(fn)
vxml = Vasprunxml(fn)
self.assertFalse(vxml.pDos is None, msg=msg)
bs = vxml.load_band()
self.assertAlmostEqual(bs.nelect, vxml.nelect, places=4, msg=msg)
self.assertTrue(bs.hasProjection, msg=msg)
# Dos related
dos = vxml.load_dos()
self.assertEqual(dos.nspins, bs.nspins, msg=msg)
self.assertTrue(dos.hasProjection, msg=msg)
if __name__ == '__main__':
ut.main() | test/vasp/xml_test.py |
import os
import unittest as ut
import numpy as np
from mykit.core.utils import get_matched_files
from mykit.vasp.xml import Vasprunxml, VasprunxmlError
class test_vasprunxml_read(ut.TestCase):
def test_scf_xml(self):
'''Test reading XMLs for SCF calculations (LORBIT not set)
'''
dataDir = 'vasprun_scf'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
typeMapping = vxml.typeMapping
# get all index
self.assertListEqual(list(range(vxml.natoms)),
vxml.get_atom_index())
self.assertFalse(vxml.edos is None)
self.assertFalse(vxml.totalDos is None)
self.assertFalse(vxml.dos is None)
# static calculation, there is only 1 ion step.
self.assertEqual(1, vxml.nIonSteps)
self.assertTupleEqual(np.shape(vxml.forces),
(vxml.nIonSteps, vxml.natoms, 3))
self.assertTupleEqual(np.shape(vxml.stress),
(vxml.nIonSteps, 3, 3))
self.assertEqual(1, len(vxml.interPoscars))
vxml.ntypes
vxml.natomsPerType
vxml.get_atom_index(0)
vxml.get_atom_index(-1)
vxml.get_atom_index(typeMapping[0])
self.assertRaisesRegex(VasprunxmlError,
r"Atom type not found: *",
vxml.get_atom_index, "UNKNOWNSYMBOL")
# empty properties
self.assertTrue(vxml.projs is None)
self.assertEqual(0, vxml.nprojs)
self.assertTrue(vxml.pDos is None)
self.assertTrue(vxml.pWave is None)
def test_band_xml(self):
'''Test reading XMLs for band calculations (LORBIT set or not)
'''
dataDir = 'vasprun_band'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
self.assertEqual(vxml.kmode, "L")
self.assertTupleEqual(np.shape(vxml.weight), (vxml.nibzkpt,))
self.assertTupleEqual(np.shape(vxml.kpoints), (vxml.nibzkpt, 3))
self.assertTupleEqual(np.shape(vxml.kptsWeight), (vxml.nibzkpt, 4))
bs = vxml.load_band()
self.assertAlmostEqual(bs.nelect, vxml.nelect, places=4)
self.assertTrue(bs.hasKvec)
self.assertTrue(bs.isKpath)
bs.kvec
bsTrimed = vxml.load_band(1)
self.assertEqual(1, bs.nkpts - bsTrimed.nkpts)
def test_mixed_k_band_xml(self):
'''Test reading XMLs for band calculations with manual input kpoints
in case of SCAN and HF band calculations
'''
dataDir = 'vasprun_mixed_k_band'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
bsMix = vxml.load_band()
bsBand = vxml.load_band(kTrimBefore=20)
self.assertEqual(bsMix.nkpts - bsBand.nkpts, 20)
self.assertTrue(np.allclose(bsBand.weight, np.ones(bsBand.nkpts)))
self.assertTrue(bsBand.isKpath)
def test_opt_xml(self):
'''Test reading XMLs for geometry optimization
'''
dataDir = 'vasprun_opt'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
self.assertTupleEqual(np.shape(vxml.forces), \
(vxml.nIonSteps, vxml.natoms, 3))
def test_pdos_xml(self):
'''Test reading XMLs with LORBIT set
'''
dataDir = 'vasprun_partial'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
msg = "Wrong when processing {}".format(fn)
vxml = Vasprunxml(fn)
self.assertFalse(vxml.pDos is None, msg=msg)
bs = vxml.load_band()
self.assertAlmostEqual(bs.nelect, vxml.nelect, places=4, msg=msg)
self.assertTrue(bs.hasProjection, msg=msg)
# Dos related
dos = vxml.load_dos()
self.assertEqual(dos.nspins, bs.nspins, msg=msg)
self.assertTrue(dos.hasProjection, msg=msg)
if __name__ == '__main__':
ut.main() | 0.444565 | 0.368377 |
import numpy as np
import numpy.linalg as linalg
import cv2 as cv
import scipy.optimize as opt
import functools
import json
import math
from .common.camera import Camera, Permutation
from .common.linear import solve_dlt
from .common.math import euclidean, homogeneous
from .common.matrix import matrix_intrinsic, matrix_permute_ned, \
matrix_decompose_projection, matrix_ypr, matrix_decompose_ypr
def obj_from_file(path):
return json.load(open(path))
def camera_from_parameters(param):
position = np.array((param["x"], param["y"], param["z"]))
orientation = np.radians((param["yaw"], param["pitch"], param["roll"]))
fov = np.radians((param["horizontal-fov"], param["vertical-fov"]))
return Camera(position, orientation, fov,
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
def intrinsic_and_permute(param):
intrinsic = matrix_intrinsic(np.radians((param["horizontal-fov"],
param["vertical-fov"])),
rect=np.array([-0.5, -0.5, 1.0, 1.0]))
return (intrinsic, matrix_permute_ned())
def reprojection_errors(points, camera):
err = []
for point in points:
uv0 = np.array((point["u"], point["v"]))
xyz = np.array((point["x"], point["y"], point["z"]))
uv1 = camera.project(xyz)
err.append(linalg.norm(uv0 - uv1))
return err
def squared_sum(xs):
return functools.reduce(lambda acc, x: acc + x**2, xs)
def obj_f(points, position=np.array((0, 0, 0)), orientation=np.array((0, 0, 0)),
fov=np.array((0, 0))):
camera = Camera(position, orientation, fov,
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
return reprojection_errors(points, camera)
def minimize_orientation(points, position, orientation, fov):
obj = functools.partial(obj_f, points, position=position, fov=fov)
res = opt.least_squares(lambda ypr: obj(
orientation=ypr), orientation, method='lm')
return res.x
def minimize_position(points, position, orientation, fov):
obj = functools.partial(obj_f, points, orientation=orientation, fov=fov)
res = opt.least_squares(lambda xyz: obj(
position=xyz), position, method='lm')
return res.x
def minimize_fov(points, position, orientation, fov):
obj = functools.partial(
obj_f, points, position=position, orientation=orientation)
res = opt.least_squares(lambda hv: obj(fov=hv), fov, method='lm')
return res.x
def minimize_all(points, position, orientation, fov):
obj = functools.partial(obj_f, points)
x0 = np.zeros(8)
np.put(x0, [0, 1, 2], position)
np.put(x0, [3, 4, 5], orientation)
np.put(x0, [6, 7], fov)
res = opt.least_squares(lambda x: obj(
position=x[0:3], orientation=x[3:6], fov=x[6:8]), x0, method='lm')
return (res.x[0:3], res.x[3:6], res.x[6:8])
def run2(path):
images = obj_from_file(path)["images"]
for image in images:
if image["confidence"] > 0.99:
print("Image id: %d" % image["image-id"])
print("confidence: %.5f" % image["confidence"])
params = image["camera-parameters"]
print("Camera params:\n%s" % params)
position0 = np.array((params["x"], params["y"], params["z"]))
orientation0 = np.array(
(params["yaw"], params["pitch"], params["roll"]))
fov0 = np.array((params["horizontal-fov"], params["vertical-fov"]))
cam0 = Camera(position0, np.radians(orientation0), np.radians(fov0),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
intrinsic, permute = intrinsic_and_permute(params)
cam0_ypr, cam0_t = matrix_decompose_projection(
cam0.projection_matrix, intrinsic, permute)
cam0_r = (matrix_ypr(np.array(cam0_ypr)) @ permute).T
print("Cam0 y: %f, p: %f, r: %f" % (math.degrees(
cam0_ypr[0]), math.degrees(cam0_ypr[1]), math.degrees(cam0_ypr[2])))
print("Cam0 t: %s" % cam0_t)
print("Cam0 r:\n%s" % cam0_r)
print("Cam0 camera matrix:\n%s" % cam0.camera_matrix)
print("===")
points = image["point-correspondences"]
obj_points = []
img_points = []
for point in points:
obj_points.append((point["x"], point["y"], point["z"]))
img_points.append((point["u"], point["v"]))
# Solve EPNP
ret, rvec, tvec = cv.solvePnP(np.array(obj_points), np.array(img_points),
intrinsic, np.array([]),
np.array([]), np.array([]),
useExtrinsicGuess=False,
flags=cv.SOLVEPNP_EPNP)
print("Solve EPNP")
print("tvec: %s" % tvec)
rr, j = cv.Rodrigues(rvec)
print("r:\n%s" % rr)
# Solve SQPNP
ret, rvec, tvec = cv.solvePnP(np.array(obj_points), np.array(img_points),
intrinsic, np.array([]),
np.array([]), np.array([]),
useExtrinsicGuess=False,
flags=cv.SOLVEPNP_SQPNP)
print("Solve SQPNP")
print("tvec: %s" % tvec)
rr, j = cv.Rodrigues(rvec)
print("r:\n%s" % rr)
input("Press ENTER to continue")
def run(path):
images = obj_from_file(path)["images"]
for image in images:
if image["confidence"] > 0.99:
intrinsic, permute = intrinsic_and_permute(
image["camera-parameters"])
params = image["camera-parameters"]
print("===")
points = image["point-correspondences"]
position0 = np.array((params["x"], params["y"], params["z"]))
orientation0 = np.array(
(params["yaw"], params["pitch"], params["roll"]))
fov0 = np.array((params["horizontal-fov"], params["vertical-fov"]))
print("Truth params.\n Position: %s\n Orientation: %s\n FOV: %s" %
(position0, orientation0, fov0))
# Truth camera.
cam0 = Camera(position0, np.radians(orientation0), np.radians(fov0),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
print("Truth err: %f" % squared_sum(
reprojection_errors(points, cam0)))
position1 = position0 + (32.0, 22.6, 4.5)
print("Modified position: %s" % position1)
orientation1 = orientation0 + (-3.31, 1.27, 1.0)
print("Modified orientation: %s" % orientation1)
fov1 = fov0 * 1.09
print("Modified fov: %s" % fov1)
print("===")
"""
cam1 = Camera(position, np.radians(orientation1), np.radians(fov),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
print("Cam1 err: %f" % squared_sum(
reprojection_errors(points, cam1)))
"""
"""
cam11 = Camera(position, np.radians(orientation11), np.radians(fov),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
print("Cam11 err: %f" % squared_sum(
reprojection_errors(points, cam11)))
"""
position11 = minimize_position(
points, position1, np.radians(orientation0), np.radians(fov0))
print("Solved position11: %s" % position11)
orientation11 = np.degrees(minimize_orientation(points, position0, np.radians(
orientation1), np.radians(fov0)))
print("Solved orientation11: %s" % orientation11)
fov11 = np.degrees(minimize_fov(points, position0, np.radians(
orientation0), np.radians(fov1)))
print("Solved fov11: %s" % fov11)
(position12, orientation12, fov12) = minimize_all(
points, position1, np.radians(orientation1), np.radians(fov1))
print("Solved position12: %s" % position12)
print("Solved orientation12: %s" % np.degrees(orientation12))
print("Solved fov12: %s" % np.degrees(fov12))
position13 = minimize_position(
points, position12, orientation12, fov12)
print("Solved position13: %s" % position13)
orientation13 = np.degrees(minimize_orientation(
points, position13, orientation12, fov12))
print("Solved orientation13: %s" % orientation13)
fov13 = np.degrees(minimize_fov(
points, position13, np.radians(orientation13), fov12))
print("Solved fov13: %s" % fov13)
break | trio/play_pose.py | import numpy as np
import numpy.linalg as linalg
import cv2 as cv
import scipy.optimize as opt
import functools
import json
import math
from .common.camera import Camera, Permutation
from .common.linear import solve_dlt
from .common.math import euclidean, homogeneous
from .common.matrix import matrix_intrinsic, matrix_permute_ned, \
matrix_decompose_projection, matrix_ypr, matrix_decompose_ypr
def obj_from_file(path):
return json.load(open(path))
def camera_from_parameters(param):
position = np.array((param["x"], param["y"], param["z"]))
orientation = np.radians((param["yaw"], param["pitch"], param["roll"]))
fov = np.radians((param["horizontal-fov"], param["vertical-fov"]))
return Camera(position, orientation, fov,
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
def intrinsic_and_permute(param):
intrinsic = matrix_intrinsic(np.radians((param["horizontal-fov"],
param["vertical-fov"])),
rect=np.array([-0.5, -0.5, 1.0, 1.0]))
return (intrinsic, matrix_permute_ned())
def reprojection_errors(points, camera):
err = []
for point in points:
uv0 = np.array((point["u"], point["v"]))
xyz = np.array((point["x"], point["y"], point["z"]))
uv1 = camera.project(xyz)
err.append(linalg.norm(uv0 - uv1))
return err
def squared_sum(xs):
return functools.reduce(lambda acc, x: acc + x**2, xs)
def obj_f(points, position=np.array((0, 0, 0)), orientation=np.array((0, 0, 0)),
fov=np.array((0, 0))):
camera = Camera(position, orientation, fov,
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
return reprojection_errors(points, camera)
def minimize_orientation(points, position, orientation, fov):
obj = functools.partial(obj_f, points, position=position, fov=fov)
res = opt.least_squares(lambda ypr: obj(
orientation=ypr), orientation, method='lm')
return res.x
def minimize_position(points, position, orientation, fov):
obj = functools.partial(obj_f, points, orientation=orientation, fov=fov)
res = opt.least_squares(lambda xyz: obj(
position=xyz), position, method='lm')
return res.x
def minimize_fov(points, position, orientation, fov):
obj = functools.partial(
obj_f, points, position=position, orientation=orientation)
res = opt.least_squares(lambda hv: obj(fov=hv), fov, method='lm')
return res.x
def minimize_all(points, position, orientation, fov):
obj = functools.partial(obj_f, points)
x0 = np.zeros(8)
np.put(x0, [0, 1, 2], position)
np.put(x0, [3, 4, 5], orientation)
np.put(x0, [6, 7], fov)
res = opt.least_squares(lambda x: obj(
position=x[0:3], orientation=x[3:6], fov=x[6:8]), x0, method='lm')
return (res.x[0:3], res.x[3:6], res.x[6:8])
def run2(path):
images = obj_from_file(path)["images"]
for image in images:
if image["confidence"] > 0.99:
print("Image id: %d" % image["image-id"])
print("confidence: %.5f" % image["confidence"])
params = image["camera-parameters"]
print("Camera params:\n%s" % params)
position0 = np.array((params["x"], params["y"], params["z"]))
orientation0 = np.array(
(params["yaw"], params["pitch"], params["roll"]))
fov0 = np.array((params["horizontal-fov"], params["vertical-fov"]))
cam0 = Camera(position0, np.radians(orientation0), np.radians(fov0),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
intrinsic, permute = intrinsic_and_permute(params)
cam0_ypr, cam0_t = matrix_decompose_projection(
cam0.projection_matrix, intrinsic, permute)
cam0_r = (matrix_ypr(np.array(cam0_ypr)) @ permute).T
print("Cam0 y: %f, p: %f, r: %f" % (math.degrees(
cam0_ypr[0]), math.degrees(cam0_ypr[1]), math.degrees(cam0_ypr[2])))
print("Cam0 t: %s" % cam0_t)
print("Cam0 r:\n%s" % cam0_r)
print("Cam0 camera matrix:\n%s" % cam0.camera_matrix)
print("===")
points = image["point-correspondences"]
obj_points = []
img_points = []
for point in points:
obj_points.append((point["x"], point["y"], point["z"]))
img_points.append((point["u"], point["v"]))
# Solve EPNP
ret, rvec, tvec = cv.solvePnP(np.array(obj_points), np.array(img_points),
intrinsic, np.array([]),
np.array([]), np.array([]),
useExtrinsicGuess=False,
flags=cv.SOLVEPNP_EPNP)
print("Solve EPNP")
print("tvec: %s" % tvec)
rr, j = cv.Rodrigues(rvec)
print("r:\n%s" % rr)
# Solve SQPNP
ret, rvec, tvec = cv.solvePnP(np.array(obj_points), np.array(img_points),
intrinsic, np.array([]),
np.array([]), np.array([]),
useExtrinsicGuess=False,
flags=cv.SOLVEPNP_SQPNP)
print("Solve SQPNP")
print("tvec: %s" % tvec)
rr, j = cv.Rodrigues(rvec)
print("r:\n%s" % rr)
input("Press ENTER to continue")
def run(path):
images = obj_from_file(path)["images"]
for image in images:
if image["confidence"] > 0.99:
intrinsic, permute = intrinsic_and_permute(
image["camera-parameters"])
params = image["camera-parameters"]
print("===")
points = image["point-correspondences"]
position0 = np.array((params["x"], params["y"], params["z"]))
orientation0 = np.array(
(params["yaw"], params["pitch"], params["roll"]))
fov0 = np.array((params["horizontal-fov"], params["vertical-fov"]))
print("Truth params.\n Position: %s\n Orientation: %s\n FOV: %s" %
(position0, orientation0, fov0))
# Truth camera.
cam0 = Camera(position0, np.radians(orientation0), np.radians(fov0),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
print("Truth err: %f" % squared_sum(
reprojection_errors(points, cam0)))
position1 = position0 + (32.0, 22.6, 4.5)
print("Modified position: %s" % position1)
orientation1 = orientation0 + (-3.31, 1.27, 1.0)
print("Modified orientation: %s" % orientation1)
fov1 = fov0 * 1.09
print("Modified fov: %s" % fov1)
print("===")
"""
cam1 = Camera(position, np.radians(orientation1), np.radians(fov),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
print("Cam1 err: %f" % squared_sum(
reprojection_errors(points, cam1)))
"""
"""
cam11 = Camera(position, np.radians(orientation11), np.radians(fov),
rect=np.array([-0.5, -0.5, 1.0, 1.0]),
perm=Permutation.NED)
print("Cam11 err: %f" % squared_sum(
reprojection_errors(points, cam11)))
"""
position11 = minimize_position(
points, position1, np.radians(orientation0), np.radians(fov0))
print("Solved position11: %s" % position11)
orientation11 = np.degrees(minimize_orientation(points, position0, np.radians(
orientation1), np.radians(fov0)))
print("Solved orientation11: %s" % orientation11)
fov11 = np.degrees(minimize_fov(points, position0, np.radians(
orientation0), np.radians(fov1)))
print("Solved fov11: %s" % fov11)
(position12, orientation12, fov12) = minimize_all(
points, position1, np.radians(orientation1), np.radians(fov1))
print("Solved position12: %s" % position12)
print("Solved orientation12: %s" % np.degrees(orientation12))
print("Solved fov12: %s" % np.degrees(fov12))
position13 = minimize_position(
points, position12, orientation12, fov12)
print("Solved position13: %s" % position13)
orientation13 = np.degrees(minimize_orientation(
points, position13, orientation12, fov12))
print("Solved orientation13: %s" % orientation13)
fov13 = np.degrees(minimize_fov(
points, position13, np.radians(orientation13), fov12))
print("Solved fov13: %s" % fov13)
break | 0.627152 | 0.42179 |
import os
import time
import numpy as np
import tensorflow as tf
import core.nn as nn
from config.constants import ACTIVATION, INTERVAL, LOG_PATH
from core.log import get_logger
class DeepHPM:
    """Deep hidden physics model, built as two coupled TF1 graphs.

    *Identifier*: a network u(t, x) is fitted to observed data while a
    second network N(.) is trained so that the residual
    f = u_t - N(u, u_x, u_xx) vanishes, i.e. the PDE right-hand side is
    learned from data.

    *Solver*: a physics-informed network re-solves the learned PDE from an
    initial condition, boundary samples and collocation points.

    All data tensors are float32 column vectors of shape [None, 1] unless
    noted otherwise.
    """

    def __init__(self, idn_lb, idn_ub, t, x, u, tb, x0, u0, X_f, layers,
                 sol_lb, sol_ub, u_layers, pde_layers):
        """Build both computation graphs and open the TF session.

        Args:
            idn_lb, idn_ub: (t, x) bounds normalising the identifier input.
            t, x, u: training data for identification.
            tb: boundary time samples for the solver.
            x0, u0: initial condition (space samples and values).
            X_f: collocation points with columns (t, x).
            layers: layer sizes of the solver network.
            sol_lb, sol_ub: (t, x) bounds normalising the solver input.
            u_layers: layer sizes of the identifier u-network.
            pde_layers: layer sizes of the PDE network; pde_layers[0] is the
                number of input terms (3 here: u, u_x, u_xx).
        """
        # Identifier boundary
        self.idn_lb = idn_lb
        self.idn_ub = idn_ub
        # Solver boundary
        self.sol_lb = sol_lb
        self.sol_ub = sol_ub
        # Build the identification graph first: the solver graph reuses
        # self.pde_net and therefore needs the PDE network variables.
        self.identifier_init(t, x, u, u_layers, pde_layers)
        self.solver_init(x0, u0, tb, X_f, layers)
        # Saver is created after both graphs so it covers every variable.
        self.saver = tf.train.Saver()
        # Logging tool
        self.logger = get_logger(LOG_PATH)
        # TF session
        self.sess = tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                log_device_placement=True,
                gpu_options=tf.GPUOptions(
                    per_process_gpu_memory_fraction=0.95,
                    visible_device_list="0")))
        self.sess.run(tf.global_variables_initializer())

    @staticmethod
    def _lbfgs_interface(loss, var_list):
        """Return a SciPy L-BFGS-B interface for ``loss`` over ``var_list``.

        Shared configuration for all three second-order optimizers: run
        essentially to machine precision (ftol = eps) with generous limits.
        """
        return tf.contrib.opt.ScipyOptimizerInterface(
            loss,
            var_list=var_list,
            method="L-BFGS-B",
            options={
                "maxiter": 50000,
                "maxfun": 50000,
                "maxcor": 50,
                "maxls": 50,
                "ftol": 1.0 * np.finfo(float).eps
            })

    def identifier_init(self, t, x, u, u_layers, pde_layers):
        """Create the identification graph: networks, losses, optimizers."""
        # Training data for identification
        self.t = t
        self.x = x
        self.u = u
        # Layers for identification
        self.u_layers = u_layers
        self.pde_layers = pde_layers
        # Initialize NNs for identification
        self.u_weights, self.u_biases = nn.initialize_nn(u_layers)
        self.pde_weights, self.pde_biases = nn.initialize_nn(pde_layers)
        # TF placeholders
        self.t_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.u_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.x_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.terms_placeholder = tf.placeholder(tf.float32,
                                                [None, pde_layers[0]])
        # TF graphs
        self.u_pred = self.identifier_net(self.t_placeholder,
                                          self.x_placeholder)
        self.pde_pred = self.pde_net(self.terms_placeholder)
        self.f_pred = self.identifier_f(self.t_placeholder, self.x_placeholder)
        # u_loss fits the data while also penalising the PDE residual;
        # f_loss trains the PDE network alone.
        self.u_loss = tf.reduce_sum(
            tf.square(self.u_pred - self.u_placeholder) +
            tf.square(self.f_pred))
        self.f_loss = tf.reduce_sum(tf.square(self.f_pred))
        # Variable groups shared by the SciPy and Adam optimizers below.
        idn_vars = self.u_weights + self.u_biases + \
            self.pde_weights + self.pde_biases
        pde_vars = self.pde_weights + self.pde_biases
        # SciPy optimizers
        self.scipy_u_optimizer = self._lbfgs_interface(self.u_loss, idn_vars)
        self.scipy_f_optimizer = self._lbfgs_interface(self.f_loss, pde_vars)
        # Adam optimizers
        self.adam_u_optimizer = tf.train.AdamOptimizer()
        self.adam_f_optimizer = tf.train.AdamOptimizer()
        self.adam_u_optimizer_train = self.adam_u_optimizer.minimize(
            self.u_loss, var_list=idn_vars)
        self.adam_f_optimizer_train = self.adam_f_optimizer.minimize(
            self.f_loss, var_list=pde_vars)

    def identifier_net(self, t, x):
        """Evaluate u(t, x); inputs are scaled to [-1, 1] before the net."""
        X = tf.concat([t, x], 1)
        H = 2. * (X - self.idn_lb) / (self.idn_ub - self.idn_lb) - 1.
        u = nn.neural_net(H, self.u_weights, self.u_biases, ACTIVATION)
        return u

    def pde_net(self, terms):
        """Evaluate the learned PDE right-hand side N(terms).

        ``terms`` has pde_layers[0] columns; here [u, u_x, u_xx].
        """
        pde = nn.neural_net(terms, self.pde_weights, self.pde_biases,
                            ACTIVATION)
        return pde

    def identifier_f(self, t, x):
        """PDE residual f = u_t - N(u, u_x, u_xx) on the identifier net."""
        u = self.identifier_net(t, x)
        u_t = tf.gradients(u, t)[0]
        u_x = tf.gradients(u, x)[0]
        u_xx = tf.gradients(u_x, x)[0]
        terms = tf.concat([u, u_x, u_xx], 1)
        f = u_t - self.pde_net(terms)
        return f

    def _train_adam(self, train_op, loss, tag, N_iter, tf_dict, model_path):
        """Run ``train_op`` for ``N_iter`` Adam steps, logging every INTERVAL.

        A checkpoint is saved at each logging point when ``model_path`` is
        truthy.  ``tf.train.Saver.save`` overwrites existing checkpoint
        files, so no prior deletion is needed (the former
        ``os.rmdir(model_path)`` here was a bug: it only removes *empty
        directories* and raises otherwise).
        """
        start_time = time.time()
        for i in range(N_iter):
            self.sess.run(train_op, tf_dict)
            if i % INTERVAL == 0:
                elapsed = time.time() - start_time
                loss_value = self.sess.run(loss, tf_dict)
                self.logger.info(
                    f"{tag}, It: {i}, Loss: {loss_value:.3e}, "
                    f"Time: {elapsed:.2f}")
                if model_path:
                    self.saver.save(self.sess, model_path)
                start_time = time.time()

    def train_u(self, N_iter, model_path, scipy_opt=False):
        """Train the u-network and PDE-network jointly on ``u_loss``.

        Args:
            N_iter: number of Adam iterations.
            model_path: checkpoint path saved at every logging point, or a
                falsy value to disable checkpointing.
            scipy_opt: if True, refine with L-BFGS-B after Adam.
        """
        tf_dict = {
            self.t_placeholder: self.t,
            self.x_placeholder: self.x,
            self.u_placeholder: self.u
        }
        self._train_adam(self.adam_u_optimizer_train, self.u_loss, "u",
                         N_iter, tf_dict, model_path)
        if scipy_opt:
            self.scipy_u_optimizer.minimize(
                self.sess,
                feed_dict=tf_dict,
                fetches=[self.u_loss],
                loss_callback=self.callback)

    def train_f(self, N_iter, model_path, scipy_opt=False):
        """Train the PDE network alone on the residual loss ``f_loss``.

        Same parameters as :meth:`train_u`.
        """
        # u_placeholder is fed for uniformity with train_u; f_loss itself
        # only depends on t and x.
        tf_dict = {
            self.t_placeholder: self.t,
            self.x_placeholder: self.x,
            self.u_placeholder: self.u
        }
        self._train_adam(self.adam_f_optimizer_train, self.f_loss, "f",
                         N_iter, tf_dict, model_path)
        if scipy_opt:
            self.scipy_f_optimizer.minimize(
                self.sess,
                feed_dict=tf_dict,
                fetches=[self.f_loss],
                loss_callback=self.callback)

    def identifier_predict(self, t_star, x_star):
        """Return (u, f) predictions of the identifier at (t_star, x_star)."""
        tf_dict = {self.t_placeholder: t_star, self.x_placeholder: x_star}
        u_star = self.sess.run(self.u_pred, tf_dict)
        f_star = self.sess.run(self.f_pred, tf_dict)
        return u_star, f_star

    def pde_predict(self, terms_star):
        """Evaluate the learned PDE network on precomputed input terms."""
        tf_dict = {self.terms_placeholder: terms_star}
        pde_star = self.sess.run(self.pde_pred, tf_dict)
        return pde_star

    def change_data(self, idn_lb, idn_ub, t, x, u, model_path):
        """Restore a checkpoint and swap in new identification data/bounds."""
        # Model restoration
        self.saver.restore(self.sess, model_path)
        # Assign new boundary
        self.idn_lb = idn_lb
        self.idn_ub = idn_ub
        # Assign new data
        self.t = t
        self.x = x
        self.u = u

    def solver_init(self, x0, u0, tb, X_f, layers):
        """Create the solver graph: a PINN constrained by the learned PDE."""
        # Assemble (t, x) pairs: initial slice at t = 0 and the two
        # boundaries at x = sol_lb[1] and x = sol_ub[1].
        X0 = np.concatenate((0 * x0, x0), 1)
        X_lb = np.concatenate((tb, 0 * tb + self.sol_lb[1]), 1)
        X_ub = np.concatenate((tb, 0 * tb + self.sol_ub[1]), 1)
        self.X_f = X_f
        self.t0 = X0[:, 0:1]  # Initial data (time)
        self.x0 = X0[:, 1:2]  # Initial data (space)
        self.t_lb = X_lb[:, 0:1]  # Lower boundary data (time)
        self.t_ub = X_ub[:, 0:1]  # Upper boundary data (time)
        self.x_lb = X_lb[:, 1:2]  # Lower boundary data (space)
        self.x_ub = X_ub[:, 1:2]  # Upper boundary data (space)
        self.t_f = X_f[:, 0:1]  # Collocation points (time)
        self.x_f = X_f[:, 1:2]  # Collocation points (space)
        self.u0 = u0  # Initial values
        # Layers for the solution network
        self.layers = layers
        # Initialize NN for the solution
        self.weights, self.biases = nn.initialize_nn(layers)
        # TF placeholders for the solution
        self.t0_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.x0_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.u0_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.t_lb_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.x_lb_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.t_ub_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.x_ub_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.t_f_placeholder = tf.placeholder(tf.float32, [None, 1])
        self.x_f_placeholder = tf.placeholder(tf.float32, [None, 1])
        # TF graphs for the solution
        self.u0_pred, _ = self.solver_net_u(self.t0_placeholder,
                                            self.x0_placeholder)
        self.u_lb_pred, self.u_x_lb_pred = self.solver_net_u(
            self.t_lb_placeholder, self.x_lb_placeholder)
        self.u_ub_pred, self.u_x_ub_pred = self.solver_net_u(
            self.t_ub_placeholder, self.x_ub_placeholder)
        self.solver_f_pred = self.solver_net_f(self.t_f_placeholder,
                                               self.x_f_placeholder)
        # Loss: initial-condition fit + boundary matching of u and u_x
        # (periodic-style coupling of the two boundaries) + PDE residual
        # at the collocation points.
        self.solver_loss = \
            tf.reduce_sum(tf.square(self.u0_placeholder - self.u0_pred)) + \
            tf.reduce_sum(tf.square(self.u_lb_pred - self.u_ub_pred)) + \
            tf.reduce_sum(tf.square(self.u_x_lb_pred - self.u_x_ub_pred)) + \
            tf.reduce_sum(tf.square(self.solver_f_pred))
        # SciPy optimizer for the solution
        self.scipy_solver_optimizer = self._lbfgs_interface(
            self.solver_loss, self.weights + self.biases)
        # Adam optimizer for the solution
        self.adam_solver_optimizer = tf.train.AdamOptimizer()
        self.sol_train_op_Adam = self.adam_solver_optimizer.minimize(
            self.solver_loss, var_list=self.weights + self.biases)

    def solver_net_u(self, t, x):
        """Return (u, u_x) of the solver network at (t, x)."""
        X = tf.concat([t, x], 1)
        H = 2.0 * (X - self.sol_lb) / (self.sol_ub - self.sol_lb) - 1.0
        u = nn.neural_net(H, self.weights, self.biases, ACTIVATION)
        u_x = tf.gradients(u, x)[0]
        return u, u_x

    def solver_net_f(self, t, x):
        """PDE residual of the solver network against the learned PDE-net."""
        u, _ = self.solver_net_u(t, x)
        u_t = tf.gradients(u, t)[0]
        u_x = tf.gradients(u, x)[0]
        u_xx = tf.gradients(u_x, x)[0]
        terms = tf.concat([u, u_x, u_xx], 1)
        f = u_t - self.pde_net(terms)
        return f

    def callback(self, loss):
        """Per-step progress callback passed to the SciPy optimizers."""
        self.logger.info(f"'L-BFGS-B' Optimizer Loss: {loss:.3e}")

    def train_solver(self, N_iter, scipy_opt=False):
        """Train the solver network; optionally refine with L-BFGS-B.

        Args:
            N_iter: number of Adam iterations.
            scipy_opt: if True, refine with L-BFGS-B after Adam.
        """
        tf_dict = {
            self.t0_placeholder: self.t0,
            self.x0_placeholder: self.x0,
            self.u0_placeholder: self.u0,
            self.t_lb_placeholder: self.t_lb,
            self.x_lb_placeholder: self.x_lb,
            self.t_ub_placeholder: self.t_ub,
            self.x_ub_placeholder: self.x_ub,
            self.t_f_placeholder: self.t_f,
            self.x_f_placeholder: self.x_f
        }
        start_time = time.time()
        for i in range(N_iter):
            self.sess.run(self.sol_train_op_Adam, tf_dict)
            # Log on the same schedule as the other training loops.  The
            # previous comparison ``i % INTERVAL == 10`` never fired when
            # INTERVAL <= 10 and skipped the first iterations otherwise.
            if i % INTERVAL == 0:
                elapsed = time.time() - start_time
                loss_value = self.sess.run(self.solver_loss, tf_dict)
                self.logger.info(f"""
                    solver, It: {i},
                    Loss: {loss_value:.3e},
                    Time: {elapsed:.2f}""")
                start_time = time.time()
        if scipy_opt:
            self.scipy_solver_optimizer.minimize(
                self.sess,
                feed_dict=tf_dict,
                fetches=[self.solver_loss],
                loss_callback=self.callback)

    def solver_predict(self, t_star, x_star):
        """Return (u, f) predictions of the solver at (t_star, x_star)."""
        u_star = self.sess.run(self.u0_pred, {
            self.t0_placeholder: t_star,
            self.x0_placeholder: x_star
        })
        f_star = self.sess.run(self.solver_f_pred, {
            self.t_f_placeholder: t_star,
            self.x_f_placeholder: x_star
        })
        return u_star, f_star
import time
import numpy as np
import tensorflow as tf
import core.nn as nn
from config.constants import ACTIVATION, INTERVAL, LOG_PATH
from core.log import get_logger
class DeepHPM:
def __init__(self, idn_lb, idn_ub, t, x, u, tb, x0, u0, X_f, layers,
sol_lb, sol_ub, u_layers, pde_layers):
# Identifier Boundary
self.idn_lb = idn_lb
self.idn_ub = idn_ub
# Solver Boundary
self.sol_lb = sol_lb
self.sol_ub = sol_ub
# Initialization for Identification
self.identifier_init(t, x, u, u_layers, pde_layers)
# Initialization for Solver
self.solver_init(x0, u0, tb, X_f, layers)
# Model saver
self.saver = tf.train.Saver()
# Logging Tool
self.logger = get_logger(LOG_PATH)
# TF session
self.sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
gpu_options=tf.GPUOptions(
per_process_gpu_memory_fraction=0.95,
visible_device_list="0")))
init = tf.global_variables_initializer()
self.sess.run(init)
def identifier_init(self, t, x, u, u_layers, pde_layers):
# Training Data for Identification
self.t = t
self.x = x
self.u = u
# Layers for Identification
self.u_layers = u_layers
self.pde_layers = pde_layers
# Initialize NNs for Identification
self.u_weights, self.u_biases = nn.initialize_nn(u_layers)
self.pde_weights, self.pde_biases = nn.initialize_nn(pde_layers)
# TF placeholders
self.t_placeholder = tf.placeholder(tf.float32, [None, 1])
self.u_placeholder = tf.placeholder(tf.float32, [None, 1])
self.x_placeholder = tf.placeholder(tf.float32, [None, 1])
self.terms_placeholder = tf.placeholder(tf.float32,
[None, pde_layers[0]])
# TF graphs
self.u_pred = self.identifier_net(self.t_placeholder,
self.x_placeholder)
self.pde_pred = self.pde_net(self.terms_placeholder)
self.f_pred = self.identifier_f(self.t_placeholder, self.x_placeholder)
# Loss
self.u_loss = tf.reduce_sum(
tf.square(self.u_pred - self.u_placeholder) +
tf.square(self.f_pred))
self.f_loss = tf.reduce_sum(tf.square(self.f_pred))
# Scipy Optimizer
self.scipy_u_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
self.u_loss,
var_list=self.u_weights + self.u_biases + self.pde_weights +
self.pde_biases,
method="L-BFGS-B",
options={
"maxiter": 50000,
"maxfun": 50000,
"maxcor": 50,
"maxls": 50,
"ftol": 1.0 * np.finfo(float).eps
})
self.scipy_f_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
self.f_loss,
var_list=self.pde_weights + self.pde_biases,
method="L-BFGS-B",
options={
"maxiter": 50000,
"maxfun": 50000,
"maxcor": 50,
"maxls": 50,
"ftol": 1.0 * np.finfo(float).eps
})
# Adam Optimizer
self.adam_u_optimizer = tf.train.AdamOptimizer()
self.adam_f_optimizer = tf.train.AdamOptimizer()
self.adam_u_optimizer_train = self.adam_u_optimizer.minimize(
self.u_loss,
var_list=self.u_weights + self.u_biases + self.pde_weights +
self.pde_biases)
self.adam_f_optimizer_train = self.adam_f_optimizer.minimize(
self.f_loss, var_list=self.pde_weights + self.pde_biases)
def identifier_net(self, t, x):
X = tf.concat([t, x], 1)
H = 2. * (X - self.idn_lb) / (self.idn_ub - self.idn_lb) - 1.
u = nn.neural_net(H, self.u_weights, self.u_biases, ACTIVATION)
return u
def pde_net(self, terms):
pde = nn.neural_net(terms, self.pde_weights, self.pde_biases,
ACTIVATION)
return pde
def identifier_f(self, t, x):
u = self.identifier_net(t, x)
u_t = tf.gradients(u, t)[0]
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
terms = tf.concat([u, u_x, u_xx], 1)
f = u_t - self.pde_net(terms)
return f
def train_u(self, N_iter, model_path, scipy_opt=False):
tf_dict = {
self.t_placeholder: self.t,
self.x_placeholder: self.x,
self.u_placeholder: self.u
}
start_time = time.time()
for i in range(N_iter):
self.sess.run(self.adam_u_optimizer_train, tf_dict)
if i % INTERVAL == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.u_loss, tf_dict)
self.logger.info(
f"u, It: {i}, Loss: {loss_value:.3e}, Time: {elapsed:.2f}")
if model_path:
if os.path.exists(model_path):
os.rmdir(model_path)
self.saver.save(self.sess, model_path)
start_time = time.time()
if scipy_opt:
self.scipy_u_optimizer.minimize(
self.sess,
feed_dict=tf_dict,
fetches=[self.u_loss],
loss_callback=self.callback)
def train_f(self, N_iter, model_path, scipy_opt=False):
tf_dict = {
self.t_placeholder: self.t,
self.x_placeholder: self.x,
self.u_placeholder: self.u
}
start_time = time.time()
for i in range(N_iter):
self.sess.run(self.adam_f_optimizer_train, tf_dict)
if i % INTERVAL == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.f_loss, tf_dict)
self.logger.info(
f"f, It: {i}, Loss: {loss_value:.3e}, Time: {elapsed:.2f}")
if model_path:
if os.path.exists(model_path):
os.rmdir(model_path)
self.saver.save(self.sess, model_path)
start_time = time.time()
if scipy_opt:
self.scipy_f_optimizer.minimize(
self.sess,
feed_dict=tf_dict,
fetches=[self.f_loss],
loss_callback=self.callback)
def identifier_predict(self, t_star, x_star):
tf_dict = {self.t_placeholder: t_star, self.x_placeholder: x_star}
u_star = self.sess.run(self.u_pred, tf_dict)
f_star = self.sess.run(self.f_pred, tf_dict)
return u_star, f_star
def pde_predict(self, terms_star):
tf_dict = {self.terms_placeholder: terms_star}
pde_star = self.sess.run(self.pde_pred, tf_dict)
return pde_star
def change_data(self, idn_lb, idn_ub, t, x, u, model_path):
# Model Restortion
self.saver.restore(self.sess, model_path)
# Assign New Boundary
self.idn_lb = idn_lb
self.idn_ub = idn_ub
# Assign New Data
self.t = t
self.x = x
self.u = u
def solver_init(self, x0, u0, tb, X_f, layers):
# Initialize the Vector
X0 = np.concatenate((0 * x0, x0), 1)
X_lb = np.concatenate((tb, 0 * tb + self.sol_lb[1]), 1)
X_ub = np.concatenate((tb, 0 * tb + self.sol_ub[1]), 1)
self.X_f = X_f
self.t0 = X0[:, 0:1] # Initial Data (time)
self.x0 = X0[:, 1:2] # Initial Data (space)
self.t_lb = X_lb[:, 0:1] # Lower Boundary Data (time)
self.t_ub = X_ub[:, 0:1] # Upper Boundary Data (time)
self.x_lb = X_lb[:, 1:2] # Lower Boundary Data (space)
self.x_ub = X_ub[:, 1:2] # Upper Boundary Data (space)
self.t_f = X_f[:, 0:1] # Collocation Points (time)
self.x_f = X_f[:, 1:2] # Collocation Points (space)
self.u0 = u0 # Boundary Data
# Layers for Solution
self.layers = layers
# Initialize NNs for SSolution
self.weights, self.biases = nn.initialize_nn(layers)
# TF placeholders for Solution
self.t0_placeholder = tf.placeholder(tf.float32, [None, 1])
self.x0_placeholder = tf.placeholder(tf.float32, [None, 1])
self.u0_placeholder = tf.placeholder(tf.float32, [None, 1])
self.t_lb_placeholder = tf.placeholder(tf.float32, [None, 1])
self.x_lb_placeholder = tf.placeholder(tf.float32, [None, 1])
self.t_ub_placeholder = tf.placeholder(tf.float32, [None, 1])
self.x_ub_placeholder = tf.placeholder(tf.float32, [None, 1])
self.t_f_placeholder = tf.placeholder(tf.float32, [None, 1])
self.x_f_placeholder = tf.placeholder(tf.float32, [None, 1])
# TF graphs for Solution
self.u0_pred, _ = self.solver_net_u(self.t0_placeholder,
self.x0_placeholder)
self.u_lb_pred, self.u_x_lb_pred = self.solver_net_u(
self.t_lb_placeholder, self.x_lb_placeholder)
self.u_ub_pred, self.u_x_ub_pred = self.solver_net_u(
self.t_ub_placeholder, self.x_ub_placeholder)
self.solver_f_pred = self.solver_net_f(self.t_f_placeholder,
self.x_f_placeholder)
# Loss for Solution
self.solver_loss = \
tf.reduce_sum(tf.square(self.u0_placeholder - self.u0_pred)) + \
tf.reduce_sum(tf.square(self.u_lb_pred - self.u_ub_pred)) + \
tf.reduce_sum(tf.square(self.u_x_lb_pred - self.u_x_ub_pred)) + \
tf.reduce_sum(tf.square(self.solver_f_pred))
# Scipy Optimizer for Solution
self.scipy_solver_optimizer = tf.contrib.opt.ScipyOptimizerInterface(
self.solver_loss,
var_list=self.weights + self.biases,
method="L-BFGS-B",
options={
"maxiter": 50000,
"maxfun": 50000,
"maxcor": 50,
"maxls": 50,
"ftol": 1.0 * np.finfo(float).eps
})
# Adam Optimizer for Solution
self.adam_solver_optimizer = tf.train.AdamOptimizer()
self.sol_train_op_Adam = self.adam_solver_optimizer.minimize(
self.solver_loss, var_list=self.weights + self.biases)
def solver_net_u(self, t, x):
X = tf.concat([t, x], 1)
H = 2.0 * (X - self.sol_lb) / (self.sol_ub - self.sol_lb) - 1.0
u = nn.neural_net(H, self.weights, self.biases, ACTIVATION)
u_x = tf.gradients(u, x)[0]
return u, u_x
def solver_net_f(self, t, x):
u, _ = self.solver_net_u(t, x)
u_t = tf.gradients(u, t)[0]
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
terms = tf.concat([u, u_x, u_xx], 1)
f = u_t - self.pde_net(terms)
return f
def callback(self, loss):
self.logger.info(f"'L-BFGS-B' Optimizer Loss: {loss:.3e}")
def train_solver(self, N_iter, scipy_opt=False):
tf_dict = {
self.t0_placeholder: self.t0,
self.x0_placeholder: self.x0,
self.u0_placeholder: self.u0,
self.t_lb_placeholder: self.t_lb,
self.x_lb_placeholder: self.x_lb,
self.t_ub_placeholder: self.t_ub,
self.x_ub_placeholder: self.x_ub,
self.t_f_placeholder: self.t_f,
self.x_f_placeholder: self.x_f
}
start_time = time.time()
for i in range(N_iter):
self.sess.run(self.sol_train_op_Adam, tf_dict)
if i % INTERVAL == 10:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.solver_loss, tf_dict)
self.logger.info(f"""
solver, It: {i},
Loss: {loss_value:.3e},
Time: {elapsed:.2f}""")
start_time = time.time()
if scipy_opt:
self.scipy_solver_optimizer.minimize(
self.sess,
feed_dict=tf_dict,
fetches=[self.solver_loss],
loss_callback=self.callback)
def solver_predict(self, t_star, x_star):
u_star = self.sess.run(self.u0_pred, {
self.t0_placeholder: t_star,
self.x0_placeholder: x_star
})
f_star = self.sess.run(self.solver_f_pred, {
self.t_f_placeholder: t_star,
self.x_f_placeholder: x_star
})
return u_star, f_star | 0.694613 | 0.149252 |
import backtrader.indicators as btind
from . import compare_price as compare
from .base_indicator import iBaseIndicator
class iZlindCompare(iBaseIndicator):
'''
因子:平均移动线比较数值
传入参数:
rule = {"args": [5,10], # 周期, 增益
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 周期结果比较
}
'''
lines = ('zlind',)
params = dict(rule=list())
def __init__(self):
super(iZlindCompare, self).__init__()
self.zlind = btind.ZLIndicator(self.data.close, period=self.args[0],gainlimit=self.args[1])
def next(self):
self.lines.zlind[0] = compare(self.zlind[0], self.logic)
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iZlindCrossGolden(iBaseIndicator):
'''
因子:金叉
传入参数:
rule = {"args": [5,10, 10,50], # 短周期, 短增益,长周期, 长增益
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 金叉情况比较大小, 短比长高多少
}
'''
lines = ('goldencross', )
params = dict(rule=list())
def __init__(self):
super(iZlindCrossGolden, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0],gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2],gainlimit=self.args[3])
self.cross = btind.CrossOver(self.zlind_short, self.zlind_long)
def next(self):
if self.cross[0] == 1:
self.lines.goldencross[0] = compare(self.zlind_short[0]-self.zlind_long[0], self.logic)
else:
self.lines.goldencross[0] = False
# print(self.zlind_short[0], self.zlind_long[0], "===", self.data.datetime.datetime())
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iZlindCrossDie(iBaseIndicator):
'''
因子:死叉
传入参数:
rule = {"args":[5,10, 10,50], # 短周期, 短增益,长周期, 长增益
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 金叉情况比较大小, 短比长高多少
}
'''
lines = ('goldencross', )
params = dict(rule=list())
def __init__(self):
super(iZlindCrossDie, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2], gainlimit=self.args[3])
self.cross = btind.CrossOver(self.zlind_short, self.zlind_long)
def next(self):
if self.cross[0] == -1:
self.lines.goldencross[0] = compare(self.zlind_long[0]-self.zlind_short[0], self.logic)
else:
self.lines.goldencross[0] = False
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iZlindLong(iBaseIndicator):
'''
因子:zlind多头
传入参数:
rule = {"args": [5,10, 10,50,3], # 短周期, 短增益,长周期, 长增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindlong',)
params = dict(rule=list())
def __init__(self):
super(iZlindLong, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2], gainlimit=self.args[3])
def next(self):
zlindlong = set([self.data.close[i] > self.zlind_short[i] > self.zlind_long[i] for i in range(1-self.args[4],1)])
if len(zlindlong) == 1 and True in zlindlong:
self.lines.zlindlong[0] = True
else:
self.lines.zlindlong[0] = False
@classmethod
def judge(cls, cond):
return int(cond['args'][1]) + int(cond['args'][2])
class iZlindShort(iBaseIndicator):
'''
因子:zlind空头
传入参数:
rule = {"args": [5,10, 10,50,3], # 短周期, 短增益,长周期, 长增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindshort',)
params = dict(rule=list())
def __init__(self):
super(iZlindShort, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2], gainlimit=self.args[3])
def next(self):
zlindshort = set([self.data.close[i] < self.zlind_short[i] < self.zlind_long[i] for i in range(1-self.args[4],1)])
if len(zlindshort) == 1 and True in zlindshort:
self.lines.zlindshort[0] = True
else:
self.lines.zlindshort[0] = False
@classmethod
def judge(cls, cond):
return int(cond['args'][1]) + int(cond['args'][2])
class iZlindTop(iBaseIndicator):
'''
因子:最近 n 天 最高点
传入参数:
rule = {"args": [5,10, 5], # 周期, 增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindtop',)
params = dict(rule=list())
def __init__(self):
super(iZlindTop, self).__init__()
self.zlind = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
def next(self):
_list = list(self.zlind.get(size=self.args[2]))
if len(_list) == self.args[1] and self.zlind[0] == max(_list):
self.lines.zlindtop[0] = True
else:
self.lines.zlindtop[0] = False
class iZlindBottom(iBaseIndicator):
'''
因子:最近 n 天 最低点
传入参数:
rule = {"args": [5,10, 5], # 周期, 增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindbottom',)
params = dict(rule=list())
def __init__(self):
super(iZlindBottom, self).__init__()
self.zlind = btind.ZLIndicator(self.data.close, period=self.args[0],gainlimit=self.args[1])
def next(self):
_list = list(self.zlind.get(size=self.args[2]))
if len(_list) == self.args[1] and self.zlind[0] == min(_list):
self.lines.zlindbottom[0] = True
else:
self.lines.zlindbottom[0] = False | ENIAC/api/loop_stack/loop_indicators/zlind_indicator.py | import backtrader.indicators as btind
from . import compare_price as compare
from .base_indicator import iBaseIndicator
class iZlindCompare(iBaseIndicator):
'''
因子:平均移动线比较数值
传入参数:
rule = {"args": [5,10], # 周期, 增益
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 周期结果比较
}
'''
lines = ('zlind',)
params = dict(rule=list())
def __init__(self):
super(iZlindCompare, self).__init__()
self.zlind = btind.ZLIndicator(self.data.close, period=self.args[0],gainlimit=self.args[1])
def next(self):
self.lines.zlind[0] = compare(self.zlind[0], self.logic)
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iZlindCrossGolden(iBaseIndicator):
'''
因子:金叉
传入参数:
rule = {"args": [5,10, 10,50], # 短周期, 短增益,长周期, 长增益
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 金叉情况比较大小, 短比长高多少
}
'''
lines = ('goldencross', )
params = dict(rule=list())
def __init__(self):
super(iZlindCrossGolden, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0],gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2],gainlimit=self.args[3])
self.cross = btind.CrossOver(self.zlind_short, self.zlind_long)
def next(self):
if self.cross[0] == 1:
self.lines.goldencross[0] = compare(self.zlind_short[0]-self.zlind_long[0], self.logic)
else:
self.lines.goldencross[0] = False
# print(self.zlind_short[0], self.zlind_long[0], "===", self.data.datetime.datetime())
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iZlindCrossDie(iBaseIndicator):
'''
因子:死叉
传入参数:
rule = {"args":[5,10, 10,50], # 短周期, 短增益,长周期, 长增益
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 金叉情况比较大小, 短比长高多少
}
'''
lines = ('goldencross', )
params = dict(rule=list())
def __init__(self):
super(iZlindCrossDie, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2], gainlimit=self.args[3])
self.cross = btind.CrossOver(self.zlind_short, self.zlind_long)
def next(self):
if self.cross[0] == -1:
self.lines.goldencross[0] = compare(self.zlind_long[0]-self.zlind_short[0], self.logic)
else:
self.lines.goldencross[0] = False
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iZlindLong(iBaseIndicator):
'''
因子:zlind多头
传入参数:
rule = {"args": [5,10, 10,50,3], # 短周期, 短增益,长周期, 长增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindlong',)
params = dict(rule=list())
def __init__(self):
super(iZlindLong, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2], gainlimit=self.args[3])
def next(self):
zlindlong = set([self.data.close[i] > self.zlind_short[i] > self.zlind_long[i] for i in range(1-self.args[4],1)])
if len(zlindlong) == 1 and True in zlindlong:
self.lines.zlindlong[0] = True
else:
self.lines.zlindlong[0] = False
@classmethod
def judge(cls, cond):
return int(cond['args'][1]) + int(cond['args'][2])
class iZlindShort(iBaseIndicator):
'''
因子:zlind空头
传入参数:
rule = {"args": [5,10, 10,50,3], # 短周期, 短增益,长周期, 长增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindshort',)
params = dict(rule=list())
def __init__(self):
super(iZlindShort, self).__init__()
self.zlind_short = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
self.zlind_long = btind.ZLIndicator(self.data.close, period=self.args[2], gainlimit=self.args[3])
def next(self):
zlindshort = set([self.data.close[i] < self.zlind_short[i] < self.zlind_long[i] for i in range(1-self.args[4],1)])
if len(zlindshort) == 1 and True in zlindshort:
self.lines.zlindshort[0] = True
else:
self.lines.zlindshort[0] = False
@classmethod
def judge(cls, cond):
return int(cond['args'][1]) + int(cond['args'][2])
class iZlindTop(iBaseIndicator):
'''
因子:最近 n 天 最高点
传入参数:
rule = {"args": [5,10, 5], # 周期, 增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindtop',)
params = dict(rule=list())
def __init__(self):
super(iZlindTop, self).__init__()
self.zlind = btind.ZLIndicator(self.data.close, period=self.args[0], gainlimit=self.args[1])
def next(self):
_list = list(self.zlind.get(size=self.args[2]))
if len(_list) == self.args[1] and self.zlind[0] == max(_list):
self.lines.zlindtop[0] = True
else:
self.lines.zlindtop[0] = False
class iZlindBottom(iBaseIndicator):
'''
因子:最近 n 天 最低点
传入参数:
rule = {"args": [5,10, 5], # 周期, 增益, 连续N天
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, #无意义
}
'''
lines = ('zlindbottom',)
params = dict(rule=list())
def __init__(self):
super(iZlindBottom, self).__init__()
self.zlind = btind.ZLIndicator(self.data.close, period=self.args[0],gainlimit=self.args[1])
def next(self):
_list = list(self.zlind.get(size=self.args[2]))
if len(_list) == self.args[1] and self.zlind[0] == min(_list):
self.lines.zlindbottom[0] = True
else:
self.lines.zlindbottom[0] = False | 0.180431 | 0.267024 |
from collections import OrderedDict
_glades = [
("Unusually Sharp Spike", "Twice as deadly as the other spikes."),
("Withered Fruit", "Gazing at it evokes memories of happier times."),
("Fil's Bracelet", "A simple band made of tightly-woven plant fibers."),
("Redcap Mushroom", "Eating these is said to help you grow taller."),
("Fronkeysbane", "Fronkeys are mildly allergic to this small flower.")
]
_grove = [
("Unhatched Spider Egg", "Hopefully it stays unhatched."),
("Fallen Branch", "A small, faintly glowing branch of the Spirit Tree."),
("Seed Stash", "An innocent squirrel was saving these. What kind of devil are you?"),
("Sunpetal", "A beautiful flower native to the Grove area."),
("Tiny Spirit Tree", "This replica is incredibly detailed, with a piece of energy crystal in the center.")
]
_grotto = [
("Slick Stone", "So smooth and slippery, you can barely hang on to it."),
("Broken Keystones", "A pair of keystones, snapped in half"),
("Strange Carving", "A creature with snakes on its head, staring at a large plant"),
("#*Water Vein* #(Fake)", "It looks just like the real thing! Did Gumo make this?"),
("Cloth Mask", "A simple face covering. Belonged to the spirit Leru.")
]
_blackroot = [
("Sol's Defused Grenade", "Safe enough to use as a ball! ...right?"),
("Torn Friendship Bracelet", "A bond that was made would soon be dissolved."),
("Ike's Boots of Fleetness", "He moved swifter than the wind."),
("Naru's Chisel", "A skilled artisan could sculpt great works with this tool."),
("Glowing Mushroom", "Doubles as a light source and a tasty snack."),
]
_swamp = [
("Polluted Water Canteen", "Who would want to drink this?"),
("Gold-eyed Frog", "Insects stand no chance against its deft tongue."),
("Ilo's Training Weights", "Solid rock, nearly too heavy to carry."),
("Spooky Drawing", "Some kind of ghost frog, spitting through walls."),
("Rhino Fossil", "Both smaller and cuter than the modern specimen.")
]
_ginso = [
("Lightning-Scarred Branch", "As a mother to her child, the Ginso Tree protects the rest of the forest."),
("Reem's Lucky Coin", "Said to help you escape the notice of predators."),
("Gheemur Shell", "This tough carapace is covered in spikes and seems impervious to harm."),
("Hardy Tuber", "Seems to thrive in the moisture here."),
("Spirit Lamp", "Glows with a soft, warm light. The string it used to hang from is snapped off.")
]
_valley = [
("Treasure Map", "A map depicting a treasure found after a long swim."),
("White Raven Feather", "A bit too small to be used as a parachute."),
("Comfy Earmuffs", "Softens the sounds of screaming birds and frogs."),
("Strange Drawing", "A figure in blue walking through golden fields."),
("Abandoned Nest", "Looks like a small family of birds used to live here.")
]
_misty = [
("Atsu's Candle", "Does little good in these heavy mists."),
("Tatsu's Glasses", "Strange spiral patterns cover both eyes"),
("Mushroom Sample", "Still glowing: probably not safe to eat."),
("Angry Scribbles", "Left behind by a frustrated cartographer"),
("Sister's Lament", "A poem written by Tatsu, mourning her brother Atsu")
]
_forlorn = [
("Furtive Fritter", "A favorite snack of the Gumon."),
("Mathematical Reference", "Only used by the most cerebral forest denizens."),
("Crystal Lens", "Focuses energy into deadly beams of light."),
("Magnetic Alloy", "Used by the Gumon to construct floating platforms."),
("Complex Tool", "Looks like it might have had several different uses")
]
_sorrow = [
("Drained Light Vessel", "The light of the Spirit Tree once filled this orb."),
("Tattered Leaf", "Riddled with puncture marks."),
("Nir's Sketchbook", "Contains a beautiful drawing of Nibel from the top of Sorrow Pass."),
("Tumble Seed", "A small pod dropped by an unusual airborne plant."),
("Rock Sharpener", "Extremely worn down. Whoever owned this must have used it a lot.")
]
_horu = [
("Obsidian Fragment", "Chipped off of an ancient lava flow."),
("Ancient Sketch", "A drawing of what appears to be the Water Vein."),
("\"The Fish Stratagem\"", "A record of many tasty recipes involving fish."),
("Flask of Fire", "Full of lava! Maybe the locals drink this stuff?"),
("Ancient Stone", "Primordial rock from deep beneath the forest's surface, brought upwards by the shifting rocks.")
]
relics = OrderedDict([
("Glades", _glades),
("Grove", _grove),
("Grotto", _grotto),
("Blackroot", _blackroot),
("Swamp", _swamp),
("Ginso", _ginso),
("Valley", _valley),
("Misty", _misty),
("Forlorn", _forlorn),
("Sorrow", _sorrow),
("Horu", _horu)
]) | seedbuilder/relics.py | from collections import OrderedDict
_glades = [
("Unusually Sharp Spike", "Twice as deadly as the other spikes."),
("Withered Fruit", "Gazing at it evokes memories of happier times."),
("Fil's Bracelet", "A simple band made of tightly-woven plant fibers."),
("Redcap Mushroom", "Eating these is said to help you grow taller."),
("Fronkeysbane", "Fronkeys are mildly allergic to this small flower.")
]
_grove = [
("Unhatched Spider Egg", "Hopefully it stays unhatched."),
("Fallen Branch", "A small, faintly glowing branch of the Spirit Tree."),
("Seed Stash", "An innocent squirrel was saving these. What kind of devil are you?"),
("Sunpetal", "A beautiful flower native to the Grove area."),
("Tiny Spirit Tree", "This replica is incredibly detailed, with a piece of energy crystal in the center.")
]
_grotto = [
("Slick Stone", "So smooth and slippery, you can barely hang on to it."),
("Broken Keystones", "A pair of keystones, snapped in half"),
("Strange Carving", "A creature with snakes on its head, staring at a large plant"),
("#*Water Vein* #(Fake)", "It looks just like the real thing! Did Gumo make this?"),
("Cloth Mask", "A simple face covering. Belonged to the spirit Leru.")
]
_blackroot = [
("Sol's Defused Grenade", "Safe enough to use as a ball! ...right?"),
("Torn Friendship Bracelet", "A bond that was made would soon be dissolved."),
("Ike's Boots of Fleetness", "He moved swifter than the wind."),
("Naru's Chisel", "A skilled artisan could sculpt great works with this tool."),
("Glowing Mushroom", "Doubles as a light source and a tasty snack."),
]
_swamp = [
("Polluted Water Canteen", "Who would want to drink this?"),
("Gold-eyed Frog", "Insects stand no chance against its deft tongue."),
("Ilo's Training Weights", "Solid rock, nearly too heavy to carry."),
("Spooky Drawing", "Some kind of ghost frog, spitting through walls."),
("Rhino Fossil", "Both smaller and cuter than the modern specimen.")
]
_ginso = [
("Lightning-Scarred Branch", "As a mother to her child, the Ginso Tree protects the rest of the forest."),
("Reem's Lucky Coin", "Said to help you escape the notice of predators."),
("Gheemur Shell", "This tough carapace is covered in spikes and seems impervious to harm."),
("Hardy Tuber", "Seems to thrive in the moisture here."),
("Spirit Lamp", "Glows with a soft, warm light. The string it used to hang from is snapped off.")
]
_valley = [
("Treasure Map", "A map depicting a treasure found after a long swim."),
("White Raven Feather", "A bit too small to be used as a parachute."),
("Comfy Earmuffs", "Softens the sounds of screaming birds and frogs."),
("Strange Drawing", "A figure in blue walking through golden fields."),
("Abandoned Nest", "Looks like a small family of birds used to live here.")
]
_misty = [
("Atsu's Candle", "Does little good in these heavy mists."),
("Tatsu's Glasses", "Strange spiral patterns cover both eyes"),
("Mushroom Sample", "Still glowing: probably not safe to eat."),
("Angry Scribbles", "Left behind by a frustrated cartographer"),
("Sister's Lament", "A poem written by Tatsu, mourning her brother Atsu")
]
_forlorn = [
("Furtive Fritter", "A favorite snack of the Gumon."),
("Mathematical Reference", "Only used by the most cerebral forest denizens."),
("Crystal Lens", "Focuses energy into deadly beams of light."),
("Magnetic Alloy", "Used by the Gumon to construct floating platforms."),
("Complex Tool", "Looks like it might have had several different uses")
]
_sorrow = [
("Drained Light Vessel", "The light of the Spirit Tree once filled this orb."),
("Tattered Leaf", "Riddled with puncture marks."),
("Nir's Sketchbook", "Contains a beautiful drawing of Nibel from the top of Sorrow Pass."),
("Tumble Seed", "A small pod dropped by an unusual airborne plant."),
("Rock Sharpener", "Extremely worn down. Whoever owned this must have used it a lot.")
]
_horu = [
("Obsidian Fragment", "Chipped off of an ancient lava flow."),
("Ancient Sketch", "A drawing of what appears to be the Water Vein."),
("\"The Fish Stratagem\"", "A record of many tasty recipes involving fish."),
("Flask of Fire", "Full of lava! Maybe the locals drink this stuff?"),
("Ancient Stone", "Primordial rock from deep beneath the forest's surface, brought upwards by the shifting rocks.")
]
relics = OrderedDict([
("Glades", _glades),
("Grove", _grove),
("Grotto", _grotto),
("Blackroot", _blackroot),
("Swamp", _swamp),
("Ginso", _ginso),
("Valley", _valley),
("Misty", _misty),
("Forlorn", _forlorn),
("Sorrow", _sorrow),
("Horu", _horu)
]) | 0.465387 | 0.572753 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    """Create the Address, Payment, and Order models for the checkout flow.

    Auto-generated Django migration. Do not edit the field tuples by hand:
    they must match the migration state Django recorded when this file was
    generated. Operation order matters — Address and Payment are created
    before Order because Order holds foreign keys to both.
    """

    dependencies = [
        # All three models reference the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # Builds on the migration that added OrderItem.total_price.
        ('core', '0014_orderitem_total_price'),
    ]
    operations = [
        # Shipping/billing address attached to a user account.
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street_address', models.CharField(max_length=100)),
                ('apartment_address', models.CharField(max_length=100)),
                # ISO 3166-1 alpha-2 country code (django-countries).
                ('country', django_countries.fields.CountryField(max_length=2)),
                # NOTE(review): field name shadows the builtin `zip`; renaming now
                # would require a schema migration, so it is left as generated.
                ('zip', models.CharField(max_length=100)),
                # Presumably flags the user's default address — confirm in views.
                ('default', models.BooleanField(default=False)),
                # Deleting the user removes their saved addresses.
                ('congo_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Record of a completed Stripe charge.
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Stripe's charge identifier, kept for refunds/reconciliation.
                ('stripe_charge_id', models.CharField(max_length=50)),
                ('amount', models.FloatField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                # Nullable so the payment record survives user deletion (SET_NULL).
                ('congo_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Order ties a buyer to items, a payment, and a shipping address,
        # plus boolean flags tracking its fulfilment/refund lifecycle.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ordered_date', models.DateTimeField()),
                ('ordered', models.BooleanField(default=False)),
                ('being_delivered', models.BooleanField(default=False)),
                ('received', models.BooleanField(default=False)),
                ('refund_requested', models.BooleanField(default=False)),
                ('refund_granted', models.BooleanField(default=False)),
                # Deleting the buyer deletes their orders.
                ('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('items', models.ManyToManyField(to='core.OrderItem')),
                # Payment/address links are nullable so the order record outlives them.
                ('payment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Payment')),
                ('shipping_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Address')),
            ],
        ),
    ]
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0014_orderitem_total_price'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street_address', models.CharField(max_length=100)),
('apartment_address', models.CharField(max_length=100)),
('country', django_countries.fields.CountryField(max_length=2)),
('zip', models.CharField(max_length=100)),
('default', models.BooleanField(default=False)),
('congo_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stripe_charge_id', models.CharField(max_length=50)),
('amount', models.FloatField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('congo_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ordered_date', models.DateTimeField()),
('ordered', models.BooleanField(default=False)),
('being_delivered', models.BooleanField(default=False)),
('received', models.BooleanField(default=False)),
('refund_requested', models.BooleanField(default=False)),
('refund_granted', models.BooleanField(default=False)),
('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('items', models.ManyToManyField(to='core.OrderItem')),
('payment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Payment')),
('shipping_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Address')),
],
),
] | 0.522446 | 0.12544 |
from .models import Subscription, SubscriptionPlan, SubscriptionPlanDescription, DiscountCode
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db import transaction
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from gopay.enums import PaymentStatus
from proso.django.request import get_language
from proso.django.response import render_json
def plans(request):
    """Return all active subscription plans serialized as JSON.

    An optional ``discount_code`` query parameter is resolved and validated
    for the requesting user; when present it is passed into each plan's
    serialization so discounted prices can be reported.
    """
    language = get_language(request)
    code = get_discount_code(request)
    if code is not None:
        code.is_valid(request.user, throw_exception=True)
    active_plans = SubscriptionPlan.objects.prepare_related().filter(active=True)
    serialized = [plan.to_json(lang=language, discount_code=code) for plan in active_plans]
    return render_json(request, serialized, template='subscription_json.html')
@staff_member_required
def revenue_per_month(request, currency):
    """Render a PNG chart of monthly revenue and subscription counts.

    Query parameter ``ago`` (int, default 0) shifts the reporting window
    that many months into the past.  Only paid subscriptions in the given
    ``currency`` are counted.  Responds with HTTP 503 when the analysis
    packages cannot be imported and 404 when no subscriptions match.
    """
    try:
        # Heavy analysis dependencies are imported lazily so the rest of the
        # module works without them; a missing package yields the 503 below.
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        import matplotlib.pyplot as plt
        import pandas
        import seaborn as sns
    except ImportError:
        return HttpResponse('Can not import python packages for analysis.', status=503)
    now = datetime.now()
    ago = int(request.GET.get('ago', 0))
    # First day of the current month, shifted `ago` months into the past.
    today_month = now.replace(hour=0, minute=0, second=0, microsecond=0, day=1) - relativedelta(months=ago)
    subscriptions = Subscription.objects.prepare_related().filter(
        payment__state=PaymentStatus.PAID,
        expiration__gte=today_month
    )
    data = []
    for sub in subscriptions:
        # Expiration is recomputed from payment time + plan validity rather
        # than read from the model field.
        expiration = sub.payment.updated + relativedelta(months=sub.plan_description.plan.months_validity)
        record = {
            'paid': sub.payment.updated,
            'expiration': expiration,
            'revenue': sub.payment.status['amount'] / 100,  # amount presumably in minor units (cents) -- TODO confirm
            'currency': sub.payment.status['currency'],
            'months': sub.plan_description.plan.months_validity,
        }
        data.append(record)
    data = pandas.DataFrame(data)
    if len(data) == 0:
        raise Http404("There are no active subscriptions.")
    print(data)  # NOTE(review): debug output left in; consider logging instead
    data = data[data['currency'] == currency]
    data['year_month'] = data['paid'].apply(lambda x: pandas.to_datetime(str(x)).strftime('%Y-%m'))
    def _apply(group):
        # Collapse each month's rows into total revenue and a sale count.
        return pandas.DataFrame([{
            'revenue': group['revenue'].sum(),
            'count': len(group),
        }])
    result = data.groupby('year_month').apply(_apply).reset_index()
    counts = []
    # For each month in the window, count subscriptions that were active:
    # paid on or before the month and expiring on or after it.
    for year_month in [today_month + relativedelta(months=i) for i in range(12 + ago)]:
        year = year_month.year
        month = year_month.month
        year_month_data = data[
            data['paid'].apply(lambda p: p.year < year or (p.month <= month and p.year == year)) &
            data['expiration'].apply(lambda e: e.year > year or (e.month >= month and e.year == year))
        ]
        counts.append({
            'year_month': year_month.strftime('%Y-%m'),
            'count_dist': len(year_month_data),
        })
    # Left-join so months with no sales still appear, zero-filled.
    result = pandas.merge(pandas.DataFrame(counts), result, on='year_month', how='left').fillna(0)
    print(result)  # NOTE(review): debug output left in; consider logging instead
    sns.set(style='white')
    fig = plt.figure()
    sns.barplot(x='year_month', y='revenue', data=result, color=sns.color_palette()[0], label='Revenue')
    plt.legend()
    plt.xticks(rotation=90)
    plt.xlabel('Year-Month')
    plt.ylabel('Revenue ({})'.format(currency))
    # Second y-axis: subscription counts drawn over the revenue bars.
    plt.twinx()
    sns.pointplot(result['year_month'], result['count'], linestyles='--', color='black', label='Number of subscriptions')
    sns.pointplot(result['year_month'], result['count_dist'], linestyles=':', color='red', label='Number of subscriptions (dist)')
    plt.ylim(0, 1.2 * max(result['count'].max(), result['count_dist'].max()))
    plt.ylabel('Number of subscriptions')
    plt.tight_layout()
    plt.title('Total revenue: {}'.format(result['revenue'].sum()))
    # Stream the rendered figure back to the client as a PNG image.
    response = HttpResponse(content_type='image/png')
    canvas = FigureCanvas(fig)
    canvas.print_png(response)
    return response
def discount_code_view(request, code):
    """Look up an active discount code by its public code and return it as JSON.

    Raises 404 when the (normalized) code does not exist or is inactive.
    """
    normalized = DiscountCode.objects.prepare_code(code)
    discount = get_object_or_404(DiscountCode, code=normalized, active=True)
    return render_json(request, discount.to_json(), template='subscription_json.html')
@login_required()
def my_referrals(request):
    """Return paid subscriptions referred by the current user, newest first.

    Serialized with ``confidential=True`` since the data belongs to the
    requesting user.
    """
    referred = request.user.referred_subscriptions.order_by('-created').filter(payment__state=PaymentStatus.PAID)
    payload = [subscription.to_json(confidential=True) for subscription in referred]
    return render_json(request, payload, template='subscription_json.html')
@login_required()
@transaction.atomic
def subscribe(request, description_id):
    """Create a subscription for the current user from a plan description.

    Optional query parameters: ``return_url`` (defaults to the request's
    HTTP_HOST), ``discount_code`` and one of the ``referral_*`` parameters.
    Responds with HTTP 202 and the new subscription serialized as JSON.
    """
    return_url = request.GET.get('return_url', request.META['HTTP_HOST'])
    description = get_object_or_404(SubscriptionPlanDescription, id=description_id)
    code = get_discount_code(request)
    referral = get_referral_user(request)
    new_subscription = Subscription.objects.subscribe(request.user, description, code, referral, return_url)
    return render_json(request, new_subscription.to_json(), template='subscription_json.html', status=202)
@login_required()
def my_subscriptions(request):
    """Return the current user's subscriptions, newest first, as JSON."""
    own = Subscription.objects.prepare_related().filter(user_id=request.user.id).order_by('-created')
    serialized = [subscription.to_json() for subscription in own]
    return render_json(request, serialized, template='subscription_json.html')
def get_referral_user(request):
    """Resolve an optional referring user from request query parameters.

    Checked in order: ``referral_user`` (primary key), ``referral_username``,
    ``referral_email``.  Returns ``None`` when no referral parameter is
    present; raises 404 when a lookup fails.
    """
    params = request.GET
    if 'referral_user' in params:
        return get_object_or_404(User, pk=int(params['referral_user']))
    if 'referral_username' in params:
        return get_object_or_404(User, username=params['referral_username'])
    if 'referral_email' in params:
        return get_object_or_404(User, email=params['referral_email'])
    return None
def get_discount_code(request):
return get_object_or_404(DiscountCode, code=DiscountCode.objects.prepare_code(request.GET.get('discount_code')), active=True) if 'discount_code' in request.GET else None | proso_subscription/views.py | from .models import Subscription, SubscriptionPlan, SubscriptionPlanDescription, DiscountCode
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db import transaction
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from gopay.enums import PaymentStatus
from proso.django.request import get_language
from proso.django.response import render_json
def plans(request):
lang = get_language(request)
discount_code = get_discount_code(request)
if discount_code is not None:
discount_code.is_valid(request.user, throw_exception=True)
return render_json(
request,
[p.to_json(lang=lang, discount_code=discount_code) for p in SubscriptionPlan.objects.prepare_related().filter(active=True)],
template='subscription_json.html'
)
@staff_member_required
def revenue_per_month(request, currency):
try:
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.pyplot as plt
import pandas
import seaborn as sns
except ImportError:
return HttpResponse('Can not import python packages for analysis.', status=503)
now = datetime.now()
ago = int(request.GET.get('ago', 0))
today_month = now.replace(hour=0, minute=0, second=0, microsecond=0, day=1) - relativedelta(months=ago)
subscriptions = Subscription.objects.prepare_related().filter(
payment__state=PaymentStatus.PAID,
expiration__gte=today_month
)
data = []
for sub in subscriptions:
expiration = sub.payment.updated + relativedelta(months=sub.plan_description.plan.months_validity)
record = {
'paid': sub.payment.updated,
'expiration': expiration,
'revenue': sub.payment.status['amount'] / 100,
'currency': sub.payment.status['currency'],
'months': sub.plan_description.plan.months_validity,
}
data.append(record)
data = pandas.DataFrame(data)
if len(data) == 0:
raise Http404("There are no active subscriptions.")
print(data)
data = data[data['currency'] == currency]
data['year_month'] = data['paid'].apply(lambda x: pandas.to_datetime(str(x)).strftime('%Y-%m'))
def _apply(group):
return pandas.DataFrame([{
'revenue': group['revenue'].sum(),
'count': len(group),
}])
result = data.groupby('year_month').apply(_apply).reset_index()
counts = []
for year_month in [today_month + relativedelta(months=i) for i in range(12 + ago)]:
year = year_month.year
month = year_month.month
year_month_data = data[
data['paid'].apply(lambda p: p.year < year or (p.month <= month and p.year == year)) &
data['expiration'].apply(lambda e: e.year > year or (e.month >= month and e.year == year))
]
counts.append({
'year_month': year_month.strftime('%Y-%m'),
'count_dist': len(year_month_data),
})
result = pandas.merge(pandas.DataFrame(counts), result, on='year_month', how='left').fillna(0)
print(result)
sns.set(style='white')
fig = plt.figure()
sns.barplot(x='year_month', y='revenue', data=result, color=sns.color_palette()[0], label='Revenue')
plt.legend()
plt.xticks(rotation=90)
plt.xlabel('Year-Month')
plt.ylabel('Revenue ({})'.format(currency))
plt.twinx()
sns.pointplot(result['year_month'], result['count'], linestyles='--', color='black', label='Number of subscriptions')
sns.pointplot(result['year_month'], result['count_dist'], linestyles=':', color='red', label='Number of subscriptions (dist)')
plt.ylim(0, 1.2 * max(result['count'].max(), result['count_dist'].max()))
plt.ylabel('Number of subscriptions')
plt.tight_layout()
plt.title('Total revenue: {}'.format(result['revenue'].sum()))
response = HttpResponse(content_type='image/png')
canvas = FigureCanvas(fig)
canvas.print_png(response)
return response
def discount_code_view(request, code):
return render_json(
request,
get_object_or_404(DiscountCode, code=DiscountCode.objects.prepare_code(code), active=True).to_json(),
template='subscription_json.html'
)
@login_required()
def my_referrals(request):
return render_json(
request,
[s.to_json(confidential=True) for s in request.user.referred_subscriptions.order_by('-created').filter(payment__state=PaymentStatus.PAID)],
template='subscription_json.html'
)
@login_required()
@transaction.atomic
def subscribe(request, description_id):
return_url = request.GET.get('return_url', request.META['HTTP_HOST'])
description = get_object_or_404(SubscriptionPlanDescription, id=description_id)
discount_code = get_discount_code(request)
subscription = Subscription.objects.subscribe(
request.user, description, discount_code,
get_referral_user(request), return_url
)
return render_json(request, subscription.to_json(), template='subscription_json.html', status=202)
@login_required()
def my_subscriptions(request):
return render_json(
request,
[s.to_json() for s in Subscription.objects.prepare_related().filter(user_id=request.user.id).order_by('-created')],
template='subscription_json.html'
)
def get_referral_user(request):
if 'referral_user' in request.GET:
return get_object_or_404(User, pk=int(request.GET['referral_user']))
if 'referral_username' in request.GET:
return get_object_or_404(User, username=request.GET['referral_username'])
if 'referral_email' in request.GET:
return get_object_or_404(User, email=request.GET['referral_email'])
return None
def get_discount_code(request):
return get_object_or_404(DiscountCode, code=DiscountCode.objects.prepare_code(request.GET.get('discount_code')), active=True) if 'discount_code' in request.GET else None | 0.532668 | 0.172834 |
import boto3
import botocore
import json
import os
running_locally = True
if os.getenv("RUN_LOCALLY") == "false":
running_locally = False
if running_locally:
lambda_client = boto3.client('lambda',
region_name="us-east-1",
endpoint_url="http://127.0.0.1:3001",
use_ssl=False,
verify=False,
config=botocore.client.Config(
signature_version=botocore.UNSIGNED,
read_timeout=300,
retries={'max_attempts': 0},
)
)
else:
lambda_client = boto3.client('lambda')
# Mutable ids shared across the ordered test functions in this module:
# earlier tests create resources and stash their ids here for later tests.
ou_id = ''
child_id = ''
account_id = ''
# Account name used by the tests; overridable via TEST_ACCOUNT_NAME.
test_account_name = 'TestAccount'
if 'TEST_ACCOUNT_NAME' in os.environ:
    test_account_name = os.environ['TEST_ACCOUNT_NAME']
# Required configuration -- fail fast at import time when missing.
test_account_email = ''
if 'TEST_ACCOUNT_EMAIL' in os.environ:
    test_account_email = os.environ['TEST_ACCOUNT_EMAIL']
else:
    raise Exception('TEST_ACCOUNT_EMAIL not set')
test_account_original_ou_id = ''
if 'TEST_ACCOUNT_ORIGINAL_OU' in os.environ:
    test_account_original_ou_id = os.environ['TEST_ACCOUNT_ORIGINAL_OU']
else:
    raise Exception('TEST_ACCOUNT_ORIGINAL_OU not set')
# Lambda function names are only echoed here; individual tests read them
# again via os.getenv when invoking.
if 'OU_LAMBDA_FUNCTION_NAME' in os.environ:
    print('OU_LAMBDA_FUNCTION_NAME: ' + os.environ['OU_LAMBDA_FUNCTION_NAME'])
else:
    raise Exception('OU_LAMBDA_FUNCTION_NAME not set')
if 'ACCOUNT_LAMBDA_FUNCTION_NAME' in os.environ:
    print('ACCOUNT_LAMBDA_FUNCTION_NAME: ' + os.environ['ACCOUNT_LAMBDA_FUNCTION_NAME'])
else:
    raise Exception('ACCOUNT_LAMBDA_FUNCTION_NAME not set')
def get_root():
    """Return the id of the organization's root (first entry of list_roots)."""
    org_client = boto3.client('organizations')
    roots = org_client.list_roots()['Roots']
    return roots[0]['Id']
parent_id = get_root()
# Updates the payload loaded from the events folder with the ou id created during the tests and the root from the current account
def update_ou_payload(payload, ou, parent_id, update_all_parents=True):
    """Load an OU event payload and rewrite its ids for the current test run.

    ``payload`` is an open file-like object containing the JSON event.
    ``ou`` replaces ``PhysicalResourceId`` when that key is present, and
    ``parent_id`` replaces ``ResourceProperties['Parent']`` (and
    ``OldResourceProperties['Parent']`` too, unless ``update_all_parents``
    is False).  Returns the rewritten event as UTF-8 bytes, ready to pass
    as the ``Payload`` of ``lambda_client.invoke``.
    """
    event = json.load(payload)
    if 'PhysicalResourceId' in event:
        event['PhysicalResourceId'] = ou
    event['ResourceProperties']['Parent'] = parent_id
    if update_all_parents and 'OldResourceProperties' in event:
        event['OldResourceProperties']['Parent'] = parent_id
    serialized = json.dumps(event)
    print('PAYLOAD: ' + serialized)
    return bytes(serialized, encoding="utf8")
def update_account_payload(payload, account_id, account_name, account_email, ou_id, old_ou_id, update_all_props=False):
    """Load an account event payload and rewrite ids/names/emails for this run.

    ``payload`` is an open file-like object with the JSON event.
    ``account_id`` replaces ``PhysicalResourceId`` when present; the account
    name/email always overwrite ``ResourceProperties`` (and also
    ``OldResourceProperties`` when ``update_all_props`` is True -- note that
    case assumes the key exists).  ``ou_id`` / ``old_ou_id`` set the new and
    old ``Parent`` values.  Returns the event as UTF-8 bytes for
    ``lambda_client.invoke``.
    """
    event = json.load(payload)
    if 'PhysicalResourceId' in event:
        event['PhysicalResourceId'] = account_id
    properties = event['ResourceProperties']
    properties['Email'] = account_email
    properties['Name'] = account_name
    if update_all_props:
        event['OldResourceProperties']['Name'] = account_name
        event['OldResourceProperties']['Email'] = account_email
    if 'OldResourceProperties' in event:
        event['OldResourceProperties']['Parent'] = old_ou_id
    properties['Parent'] = ou_id
    serialized = json.dumps(event)
    print('PAYLOAD: ' + serialized)
    return bytes(serialized, encoding="utf8")
def test_create_with_import_should_create_or_import_ou():
    """Creating an OU with import enabled either creates it or imports an
    existing one with the same properties.

    Stores the resulting OU id in the module-level ``ou_id`` for later tests.
    """
    global ou_id
    global parent_id
    # Fix: the original leaked the event file handle; a context manager
    # closes it deterministically.  The payload is fully read before invoke.
    with open('events/ou/create-with-import.json', 'r') as f:
        payload = update_ou_payload(f, '', parent_id)
    response = lambda_client.invoke(
        FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
        Payload=payload
    )
    response_json = json.loads(response["Payload"].read())
    ou_id = response_json['PhysicalResourceId']
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
    assert response_json['Data']['Message'] == 'Created new OU: TestOULib' or 'Imported existing OU with same properties: ou-' in response_json['Data']['Message']
def test_create_without_import_should_fail_to_create_ou_with_exception():
    """Creating an already-existing OU with import disabled must report an error."""
    global parent_id
    # Fix: close the event file deterministically instead of leaking the handle.
    with open('events/ou/create-no-import.json', 'r') as f:
        payload = update_ou_payload(f, '', parent_id)
    response = lambda_client.invoke(
        FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
        Payload=payload
    )
    response_json = json.loads(response["Payload"].read())
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
    assert response_json['errorMessage'] == 'OU already exists: TestOULib'
def test_delete_should_delete_ou():
    """Deleting the OU created earlier (tracked in ``ou_id``) must succeed."""
    global parent_id
    global ou_id
    # Fix: close the event file deterministically instead of leaking the handle.
    with open('events/ou/delete.json', 'r') as f:
        payload = update_ou_payload(f, ou_id, parent_id)
    response = lambda_client.invoke(
        FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
        Payload=payload
    )
    response_json = json.loads(response["Payload"].read())
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
    assert response_json['Data']['Message'] == 'Deleted OU: TestOULib'
def test_delete_again_should_notify_ou_already_deleted():
f = open('events/ou/delete.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'OU has already been deleted: TestOULib'
def test_update_ou_when_deleted_should_fail_with_exception():
f = open('events/ou/update.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'The OU you are trying to update, TestOULib, does not exist.'
def test_update_ou_with_recreate_should_create_when_old_does_not_exist():
f = open('events/ou/update-with-recreate.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
ou_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Created new OU: TestOULib'
def test_creating_a_child_ou_should_create_ou():
f = open('events/ou/create-child.json', 'r')
global child_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, '', ou_id)
)
response_json = json.loads(response["Payload"].read())
child_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Created new OU: TestOULibChild'
def test_deleting_a_parent_ou_with_child_ou_should_fail_with_exception():
f = open('events/ou/delete.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'OU has children and cannot be deleted: TestOULib'
def test_changing_an_ou_parent_should_fail_with_exception():
f = open('events/ou/update-parent.json', 'r')
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=f
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'OU parent changed. Organizations does not support moving an OU'
def test_creating_or_importing_account_should_fail_if_existing_is_in_another_ou_and_move_disabled():
f = open('events/account/create-with-import.json', 'r')
global child_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, '', test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert 'Account already exists, but in a different OU, will NOT import' in response_json['errorMessage']
def test_creating_or_importing_account_should_move_existing_during_import_with_move_enabled_or_create_new():
f = open('events/account/create-with-import-and-move.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, '', test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
account_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert 'Account created with id' in response_json['Data']['Message'] or 'Account moved from' in response_json['Data']['Message']
def test_create_account_with_no_import_should_fail_with_exception():
f = open('events/account/create-no-import.json', 'r')
global child_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, '', test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert 'Account already exists, will NOT import' in response_json['errorMessage']
def test_changing_account_email_should_fail_with_exception():
f = open('events/account/change-email.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, 'TestAccount', '<EMAIL>', child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'Cannot update account email. You must update the account email manually.'
def test_changing_account_name_should_fail_with_exception():
f = open('events/account/change-name.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, 'TestAccount', '<EMAIL>', child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'Cannot update account name. You must update the account name manually.'
def test_changing_account_email_should_succeed_when_its_already_changed():
f = open('events/account/change-email.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['EmailChange'] == 'Email has already been updated. No action required.'
def test_changing_account_name_should_succeed_when_its_already_changed():
f = open('events/account/change-name.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['NameChange'] == 'Account name has already been updated. No action required.'
def test_moving_account_when_already_moved_should_succeed_with_message():
f = open('events/account/move-with-disable.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, 'r-a1b2', update_all_props=True)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['MoveInfo'] == 'Account is already in the expected OU. No action required.'
def test_moving_account_with_disable_should_fail_with_exception():
f = open('events/account/move-with-disable.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, 'r-a1b2', 'r-a1b3', update_all_props=True)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'Account needs to move OUs, but moving OUs is not allowed for this account.'
def test_moving_account_with_allow_should_succeed_with_message():
f = open('events/account/move-with-allow.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, test_account_original_ou_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['MoveInfo'] == 'Account moved from {} to {}'.format(child_id, test_account_original_ou_id)
def test_delete_account_with_disable_should_raise_exception():
f = open('events/account/delete-with-disable.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'AWS does not allow deleting of accounts programmatically and removing this account as a resource is disabled by DisableDelete.'
def test_delete_account_should_return_response_about_cant_delete_but_will_remove_resource():
f = open('events/account/delete.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'AWS does not allow deleting of accounts programmatically, but this account will be removed as a resource: {}'.format(account_id)
def test_cleanup_child():
f = open('events/ou/delete-child.json', 'r')
global ou_id
global child_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, child_id, ou_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Deleted OU: TestOULibChild'
def test_cleanup_ou():
f = open('events/ou/delete.json', 'r')
global ou_id
global parent_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Deleted OU: TestOULib' | handler_tests/ou_and_account_tests.py | import boto3
import botocore
import json
import os
running_locally = True
if os.getenv("RUN_LOCALLY") == "false":
running_locally = False
if running_locally:
lambda_client = boto3.client('lambda',
region_name="us-east-1",
endpoint_url="http://127.0.0.1:3001",
use_ssl=False,
verify=False,
config=botocore.client.Config(
signature_version=botocore.UNSIGNED,
read_timeout=300,
retries={'max_attempts': 0},
)
)
else:
lambda_client = boto3.client('lambda')
ou_id = ''
child_id = ''
account_id = ''
test_account_name = 'TestAccount'
if 'TEST_ACCOUNT_NAME' in os.environ:
test_account_name = os.environ['TEST_ACCOUNT_NAME']
test_account_email = ''
if 'TEST_ACCOUNT_EMAIL' in os.environ:
test_account_email = os.environ['TEST_ACCOUNT_EMAIL']
else:
raise Exception('TEST_ACCOUNT_EMAIL not set')
test_account_original_ou_id = ''
if 'TEST_ACCOUNT_ORIGINAL_OU' in os.environ:
test_account_original_ou_id = os.environ['TEST_ACCOUNT_ORIGINAL_OU']
else:
raise Exception('TEST_ACCOUNT_ORIGINAL_OU not set')
if 'OU_LAMBDA_FUNCTION_NAME' in os.environ:
print('OU_LAMBDA_FUNCTION_NAME: ' + os.environ['OU_LAMBDA_FUNCTION_NAME'])
else:
raise Exception('OU_LAMBDA_FUNCTION_NAME not set')
if 'ACCOUNT_LAMBDA_FUNCTION_NAME' in os.environ:
print('ACCOUNT_LAMBDA_FUNCTION_NAME: ' + os.environ['ACCOUNT_LAMBDA_FUNCTION_NAME'])
else:
raise Exception('ACCOUNT_LAMBDA_FUNCTION_NAME not set')
def get_root():
organizations = boto3.client('organizations')
response = organizations.list_roots()
return response['Roots'][0]['Id']
parent_id = get_root()
# Updates the payload loaded from the events folder with the ou id created during the tests and the root from the current account
def update_ou_payload(payload, ou, parent_id, update_all_parents=True):
payload_str = json.load(payload)
if 'PhysicalResourceId' in payload_str:
payload_str['PhysicalResourceId'] = ou
payload_str['ResourceProperties']['Parent'] = parent_id
if 'OldResourceProperties' in payload_str and update_all_parents:
payload_str['OldResourceProperties']['Parent'] = parent_id
payload_bytes_arr = bytes(json.dumps(payload_str), encoding="utf8")
print('PAYLOAD: ' + json.dumps(payload_str))
return payload_bytes_arr
def update_account_payload(payload, account_id, account_name, account_email, ou_id, old_ou_id, update_all_props=False):
payload_str = json.load(payload)
if 'PhysicalResourceId' in payload_str:
payload_str['PhysicalResourceId'] = account_id
payload_str['ResourceProperties']['Email'] = account_email
payload_str['ResourceProperties']['Name'] = account_name
if update_all_props:
payload_str['OldResourceProperties']['Name'] = account_name
payload_str['OldResourceProperties']['Email'] = account_email
if 'OldResourceProperties' in payload_str:
payload_str['OldResourceProperties']['Parent'] = old_ou_id
payload_str['ResourceProperties']['Parent'] = ou_id
payload_bytes_arr = bytes(json.dumps(payload_str), encoding="utf8")
print('PAYLOAD: ' + json.dumps(payload_str))
return payload_bytes_arr
def test_create_with_import_should_create_or_import_ou():
global ou_id
f = open('events/ou/create-with-import.json', 'r')
global parent_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, '', parent_id)
)
response_json = json.loads(response["Payload"].read())
ou_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Created new OU: TestOULib' or 'Imported existing OU with same properties: ou-' in response_json['Data']['Message']
def test_create_without_import_should_fail_to_create_ou_with_exception():
f = open('events/ou/create-no-import.json', 'r')
global parent_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, '', parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'OU already exists: TestOULib'
def test_delete_should_delete_ou():
f = open('events/ou/delete.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Deleted OU: TestOULib'
def test_delete_again_should_notify_ou_already_deleted():
f = open('events/ou/delete.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'OU has already been deleted: TestOULib'
def test_update_ou_when_deleted_should_fail_with_exception():
f = open('events/ou/update.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'The OU you are trying to update, TestOULib, does not exist.'
def test_update_ou_with_recreate_should_create_when_old_does_not_exist():
f = open('events/ou/update-with-recreate.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
ou_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Created new OU: TestOULib'
def test_creating_a_child_ou_should_create_ou():
f = open('events/ou/create-child.json', 'r')
global child_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, '', ou_id)
)
response_json = json.loads(response["Payload"].read())
child_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Created new OU: TestOULibChild'
def test_deleting_a_parent_ou_with_child_ou_should_fail_with_exception():
f = open('events/ou/delete.json', 'r')
global parent_id
global ou_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'OU has children and cannot be deleted: TestOULib'
def test_changing_an_ou_parent_should_fail_with_exception():
f = open('events/ou/update-parent.json', 'r')
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=f
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'OU parent changed. Organizations does not support moving an OU'
def test_creating_or_importing_account_should_fail_if_existing_is_in_another_ou_and_move_disabled():
f = open('events/account/create-with-import.json', 'r')
global child_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, '', test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert 'Account already exists, but in a different OU, will NOT import' in response_json['errorMessage']
def test_creating_or_importing_account_should_move_existing_during_import_with_move_enabled_or_create_new():
f = open('events/account/create-with-import-and-move.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, '', test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
account_id = response_json['PhysicalResourceId']
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert 'Account created with id' in response_json['Data']['Message'] or 'Account moved from' in response_json['Data']['Message']
def test_create_account_with_no_import_should_fail_with_exception():
f = open('events/account/create-no-import.json', 'r')
global child_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, '', test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert 'Account already exists, will NOT import' in response_json['errorMessage']
def test_changing_account_email_should_fail_with_exception():
f = open('events/account/change-email.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, 'TestAccount', '<EMAIL>', child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'Cannot update account email. You must update the account email manually.'
def test_changing_account_name_should_fail_with_exception():
f = open('events/account/change-name.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, 'TestAccount', '<EMAIL>', child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'Cannot update account name. You must update the account name manually.'
def test_changing_account_email_should_succeed_when_its_already_changed():
f = open('events/account/change-email.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['EmailChange'] == 'Email has already been updated. No action required.'
def test_changing_account_name_should_succeed_when_its_already_changed():
f = open('events/account/change-name.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['NameChange'] == 'Account name has already been updated. No action required.'
def test_moving_account_when_already_moved_should_succeed_with_message():
f = open('events/account/move-with-disable.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, 'r-a1b2', update_all_props=True)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['MoveInfo'] == 'Account is already in the expected OU. No action required.'
def test_moving_account_with_disable_should_fail_with_exception():
f = open('events/account/move-with-disable.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, 'r-a1b2', 'r-a1b3', update_all_props=True)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'Account needs to move OUs, but moving OUs is not allowed for this account.'
def test_moving_account_with_allow_should_succeed_with_message():
f = open('events/account/move-with-allow.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, test_account_original_ou_id, child_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['MoveInfo'] == 'Account moved from {} to {}'.format(child_id, test_account_original_ou_id)
def test_delete_account_with_disable_should_raise_exception():
f = open('events/account/delete-with-disable.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['errorMessage'] == 'AWS does not allow deleting of accounts programmatically and removing this account as a resource is disabled by DisableDelete.'
def test_delete_account_should_return_response_about_cant_delete_but_will_remove_resource():
f = open('events/account/delete.json', 'r')
global child_id
global account_id
response = lambda_client.invoke(
FunctionName=os.getenv("ACCOUNT_LAMBDA_FUNCTION_NAME"),
Payload=update_account_payload(f, account_id, test_account_name, test_account_email, child_id, test_account_original_ou_id)
)
response_json = json.loads(response["Payload"].read())
print(response_json)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'AWS does not allow deleting of accounts programmatically, but this account will be removed as a resource: {}'.format(account_id)
def test_cleanup_child():
f = open('events/ou/delete-child.json', 'r')
global ou_id
global child_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, child_id, ou_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Deleted OU: TestOULibChild'
def test_cleanup_ou():
f = open('events/ou/delete.json', 'r')
global ou_id
global parent_id
response = lambda_client.invoke(
FunctionName=os.getenv("OU_LAMBDA_FUNCTION_NAME"),
Payload=update_ou_payload(f, ou_id, parent_id)
)
response_json = json.loads(response["Payload"].read())
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert response_json['Data']['Message'] == 'Deleted OU: TestOULib' | 0.171373 | 0.087252 |
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from ._chartobject import ChartObject
from ..objects import ColumnDataSource, Range1d
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class Scatter(ChartObject):
"""This is the Scatter class and it is in charge of plotting
Scatter charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (markers) taking the
references from the source.
Examples:
from collections import OrderedDict
from bokeh.charts import Scatter
from bokeh.sampledata.iris import flowers
setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]
xyvalues = OrderedDict([("setosa", setosa.values),
("versicolor", versicolor.values),
("virginica", virginica.values)])
scatter = Scatter(xyvalues)
scatter.title("iris dataset, dict_input").xlabel("petal_length").ylabel("petal_width")\
.legend("top_left").width(600).height(400).notebook().show()
"""
def __init__(self, pairs,
title=None, xlabel=None, ylabel=None, legend=False,
xscale="linear", yscale="linear", width=800, height=600,
tools=True, filename=False, server=False, notebook=False):
"""
Args:
pairs (dict): a dict containing the data with names as a key
and the data as a value.
title (str, optional): the title of your plot. Defaults to None.
xlabel (str, optional): the x-axis label of your plot.
Defaults to None.
ylabel (str, optional): the y-axis label of your plot.
Defaults to None.
legend (str, optional): the legend of your plot. The legend content is
inferred from incoming input.It can be ``top_left``,
``top_right``, ``bottom_left``, ``bottom_right``.
It is ``top_right`` is you set it as True.
Defaults to None.
xscale (str, optional): the x-axis type scale of your plot. It can be
``linear``, ``datetime`` or ``categorical``.
Defaults to ``linear``.
yscale (str, optional): the y-axis type scale of your plot. It can be
``linear``, ``datetime`` or ``categorical``.
Defaults to ``linear``.
width (int, optional): the width of your plot in pixels.
Defaults to 800.
height (int, optional): the height of you plot in pixels.
Defaults to 600.
tools (bool, optional): to enable or disable the tools in your plot.
Defaults to True
filename (str or bool, optional): the name of the file where your plot.
will be written. If you pass True to this argument, it will use
``untitled`` as a filename.
Defaults to False.
server (str or bool, optional): the name of your plot in the server.
If you pass True to this argument, it will use ``untitled``
as the name in the server.
Defaults to False.
notebook (bool, optional):if you want to output (or not) your plot into the
IPython notebook.
Defaults to False.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
xdr (obj): x-associated datarange object for you plot,
initialized as a dummy None.
ydr (obj): y-associated datarange object for you plot,
initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ColumnDataSource in each chart inherited class.
Needed for _set_And_get method.
attr (list): to be filled with the new attributes created after
loading the data dict.
Needed for _set_And_get method.
"""
self.pairs = pairs
self.source = None
self.xdr = None
self.ydr = None
self.groups = []
self.data = dict()
self.attr = []
super(Scatter, self).__init__(title, xlabel, ylabel, legend,
xscale, yscale, width, height,
tools, filename, server, notebook)
def check_attr(self):
"""Check if any of the chained method were used.
If they were not used, it assign the init parameters content by default.
"""
super(Scatter, self).check_attr()
def get_data(self, **pairs):
"""Take the x/y data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the marker glyph inside the ``draw`` method.
Args:
pairs (dict): a dict containing the data with names as a key
and the data as a value.
"""
self.data = dict()
# assuming value is an ordered dict
self.pairs = pairs
# list to save all the attributes we are going to create
self.attr = []
# list to save all the groups available in the incomming input
self.groups.extend(self.pairs.keys())
# Grouping
for i, val in enumerate(self.pairs.keys()):
xy = self.pairs[val]
self._set_and_get("x_", val, xy[:, 0])
self._set_and_get("y_", val, xy[:, 1])
def get_source(self):
"Push the Scatter data into the ColumnDataSource and calculate the proper ranges."
self.source = ColumnDataSource(self.data)
x_names, y_names = self.attr[::2], self.attr[1::2]
endx = max(max(self.data[i]) for i in x_names)
startx = min(min(self.data[i]) for i in x_names)
self.xdr = Range1d(start=startx - 0.1 * (endx - startx), end=endx + 0.1 * (endx - startx))
endy = max(max(self.data[i]) for i in y_names)
starty = min(min(self.data[i]) for i in y_names)
self.ydr = Range1d(start=starty - 0.1 * (endy - starty), end=endy + 0.1 * (endy - starty))
def draw(self):
"""Use the marker glyphs to display the points.
Takes reference points from data loaded at the ColumnDataSurce.
"""
self.duplet = list(self._chunker(self.attr, 2))
colors = self._set_colors(self.duplet)
for i, duplet in enumerate(self.duplet, start=1):
self.chart.make_scatter(self.source, duplet[0], duplet[1], i, colors[i - 1])
def show(self):
"""Main Scatter show method.
It essentially checks for chained methods, creates the chart,
pass data into the plot object, draws the glyphs according
to the data and shows the chart in the selected output.
.. note:: the show method can not be chained. It has to be called
at the end of the chain.
"""
# asumming we get an hierchiral pandas object
if isinstance(self.pairs, pd.DataFrame):
self.labels = self.pairs.columns.levels[1].values
from collections import OrderedDict
pdict = OrderedDict()
for i in self.pairs.columns.levels[0].values:
pdict[i] = self.pairs[i].dropna().values
self.pairs = pdict
# asumming we get an groupby object
if isinstance(self.pairs, pd.core.groupby.DataFrameGroupBy):
from collections import OrderedDict
pdict = OrderedDict()
for i in self.pairs.groups.keys():
self.labels = self.pairs.get_group(i).columns
xname = self.pairs.get_group(i).columns[0]
yname = self.pairs.get_group(i).columns[1]
x = getattr(self.pairs.get_group(i), xname)
y = getattr(self.pairs.get_group(i), yname)
pdict[i] = np.array([x.values, y.values]).T
self.pairs = pdict
# we need to check the chained method attr
self.check_attr()
if self._xlabel is None:
self._xlabel = self.labels[0]
if self._ylabel is None:
self._ylabel = self.labels[1]
# we create the chart object
self.create_chart()
# we start the plot (adds axis, grids and tools)
self.start_plot()
# we get the data from the incoming input
self.get_data(**self.pairs)
# we filled the source and ranges with the calculated data
self.get_source()
# we dynamically inject the source and ranges into the plot
self.add_data_plot(self.xdr, self.ydr)
# we add the glyphs into the plot
self.draw()
# we pass info to build the legend
self.end_plot(self.groups)
# and finally we show it
self.show_chart()
# Some helper methods
def _set_and_get(self, prefix, val, content):
"""Set a new attr and then get it to fill the self.data dict.
Keep track of the attributes created.
Args:
prefix (str): prefix of the new attribute
val (string): name of the new attribute
content (obj): content of the new attribute
"""
setattr(self, prefix + val, content)
self.data[prefix + val] = getattr(self, prefix + val)
self.attr.append(prefix + val) | bokeh/charts/scatter.py |
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from ._chartobject import ChartObject
from ..objects import ColumnDataSource, Range1d
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class Scatter(ChartObject):
"""This is the Scatter class and it is in charge of plotting
Scatter charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (markers) taking the
references from the source.
Examples:
from collections import OrderedDict
from bokeh.charts import Scatter
from bokeh.sampledata.iris import flowers
setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]
xyvalues = OrderedDict([("setosa", setosa.values),
("versicolor", versicolor.values),
("virginica", virginica.values)])
scatter = Scatter(xyvalues)
scatter.title("iris dataset, dict_input").xlabel("petal_length").ylabel("petal_width")\
.legend("top_left").width(600).height(400).notebook().show()
"""
def __init__(self, pairs,
title=None, xlabel=None, ylabel=None, legend=False,
xscale="linear", yscale="linear", width=800, height=600,
tools=True, filename=False, server=False, notebook=False):
"""
Args:
pairs (dict): a dict containing the data with names as a key
and the data as a value.
title (str, optional): the title of your plot. Defaults to None.
xlabel (str, optional): the x-axis label of your plot.
Defaults to None.
ylabel (str, optional): the y-axis label of your plot.
Defaults to None.
legend (str, optional): the legend of your plot. The legend content is
inferred from incoming input.It can be ``top_left``,
``top_right``, ``bottom_left``, ``bottom_right``.
It is ``top_right`` is you set it as True.
Defaults to None.
xscale (str, optional): the x-axis type scale of your plot. It can be
``linear``, ``datetime`` or ``categorical``.
Defaults to ``linear``.
yscale (str, optional): the y-axis type scale of your plot. It can be
``linear``, ``datetime`` or ``categorical``.
Defaults to ``linear``.
width (int, optional): the width of your plot in pixels.
Defaults to 800.
height (int, optional): the height of you plot in pixels.
Defaults to 600.
tools (bool, optional): to enable or disable the tools in your plot.
Defaults to True
filename (str or bool, optional): the name of the file where your plot.
will be written. If you pass True to this argument, it will use
``untitled`` as a filename.
Defaults to False.
server (str or bool, optional): the name of your plot in the server.
If you pass True to this argument, it will use ``untitled``
as the name in the server.
Defaults to False.
notebook (bool, optional):if you want to output (or not) your plot into the
IPython notebook.
Defaults to False.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
xdr (obj): x-associated datarange object for you plot,
initialized as a dummy None.
ydr (obj): y-associated datarange object for you plot,
initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ColumnDataSource in each chart inherited class.
Needed for _set_And_get method.
attr (list): to be filled with the new attributes created after
loading the data dict.
Needed for _set_And_get method.
"""
self.pairs = pairs
self.source = None
self.xdr = None
self.ydr = None
self.groups = []
self.data = dict()
self.attr = []
super(Scatter, self).__init__(title, xlabel, ylabel, legend,
xscale, yscale, width, height,
tools, filename, server, notebook)
def check_attr(self):
"""Check if any of the chained method were used.
If they were not used, it assign the init parameters content by default.
"""
super(Scatter, self).check_attr()
def get_data(self, **pairs):
"""Take the x/y data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the marker glyph inside the ``draw`` method.
Args:
pairs (dict): a dict containing the data with names as a key
and the data as a value.
"""
self.data = dict()
# assuming value is an ordered dict
self.pairs = pairs
# list to save all the attributes we are going to create
self.attr = []
# list to save all the groups available in the incomming input
self.groups.extend(self.pairs.keys())
# Grouping
for i, val in enumerate(self.pairs.keys()):
xy = self.pairs[val]
self._set_and_get("x_", val, xy[:, 0])
self._set_and_get("y_", val, xy[:, 1])
def get_source(self):
"Push the Scatter data into the ColumnDataSource and calculate the proper ranges."
self.source = ColumnDataSource(self.data)
x_names, y_names = self.attr[::2], self.attr[1::2]
endx = max(max(self.data[i]) for i in x_names)
startx = min(min(self.data[i]) for i in x_names)
self.xdr = Range1d(start=startx - 0.1 * (endx - startx), end=endx + 0.1 * (endx - startx))
endy = max(max(self.data[i]) for i in y_names)
starty = min(min(self.data[i]) for i in y_names)
self.ydr = Range1d(start=starty - 0.1 * (endy - starty), end=endy + 0.1 * (endy - starty))
def draw(self):
"""Use the marker glyphs to display the points.
Takes reference points from data loaded at the ColumnDataSurce.
"""
self.duplet = list(self._chunker(self.attr, 2))
colors = self._set_colors(self.duplet)
for i, duplet in enumerate(self.duplet, start=1):
self.chart.make_scatter(self.source, duplet[0], duplet[1], i, colors[i - 1])
def show(self):
"""Main Scatter show method.
It essentially checks for chained methods, creates the chart,
pass data into the plot object, draws the glyphs according
to the data and shows the chart in the selected output.
.. note:: the show method can not be chained. It has to be called
at the end of the chain.
"""
# asumming we get an hierchiral pandas object
if isinstance(self.pairs, pd.DataFrame):
self.labels = self.pairs.columns.levels[1].values
from collections import OrderedDict
pdict = OrderedDict()
for i in self.pairs.columns.levels[0].values:
pdict[i] = self.pairs[i].dropna().values
self.pairs = pdict
# asumming we get an groupby object
if isinstance(self.pairs, pd.core.groupby.DataFrameGroupBy):
from collections import OrderedDict
pdict = OrderedDict()
for i in self.pairs.groups.keys():
self.labels = self.pairs.get_group(i).columns
xname = self.pairs.get_group(i).columns[0]
yname = self.pairs.get_group(i).columns[1]
x = getattr(self.pairs.get_group(i), xname)
y = getattr(self.pairs.get_group(i), yname)
pdict[i] = np.array([x.values, y.values]).T
self.pairs = pdict
# we need to check the chained method attr
self.check_attr()
if self._xlabel is None:
self._xlabel = self.labels[0]
if self._ylabel is None:
self._ylabel = self.labels[1]
# we create the chart object
self.create_chart()
# we start the plot (adds axis, grids and tools)
self.start_plot()
# we get the data from the incoming input
self.get_data(**self.pairs)
# we filled the source and ranges with the calculated data
self.get_source()
# we dynamically inject the source and ranges into the plot
self.add_data_plot(self.xdr, self.ydr)
# we add the glyphs into the plot
self.draw()
# we pass info to build the legend
self.end_plot(self.groups)
# and finally we show it
self.show_chart()
# Some helper methods
def _set_and_get(self, prefix, val, content):
"""Set a new attr and then get it to fill the self.data dict.
Keep track of the attributes created.
Args:
prefix (str): prefix of the new attribute
val (string): name of the new attribute
content (obj): content of the new attribute
"""
setattr(self, prefix + val, content)
self.data[prefix + val] = getattr(self, prefix + val)
self.attr.append(prefix + val) | 0.83957 | 0.51501 |
import os
import numpy as np
import pandas as pd
from sklearn.externals import joblib
import AraVib
from AraVib_modules.AraVibS_def import growth_trait_selection, freq_file_selection
from AraVib_modules.AraVibS_def import model_selection, LR_difference
def main():
print("************ Step 1: Please select growth-trait data ************")
local_path = os.getcwd()
summery_path = local_path + "/mov/summary/"
growth_trait_path = growth_trait_selection()
df_gt = pd.read_csv(growth_trait_path)
print("************ Step 2: Please select your model ************")
model_fname,local_param,scale_param = model_selection()
LR_model = joblib.load(model_fname)
print("******** Step 3: Do you want to analyze ωd by AraVib? ********")
loop = True
while loop:
print("If you want, enter y.")
print("Or if you use the existing ωd-data file, enter n.")
aravib_raw = input("(y/n):")
aravib_ = str(aravib_raw)
if aravib_.lower() == "y":
aravib = True
loop = False
elif aravib_.lower() == "n":
aravib = False
loop = False
else:
pass
loop = True
while loop:
if aravib:
freq_path, freq_fname = AraVib.main()
else:
freq_path, freq_fname = freq_file_selection()
try:
df_freq = pd.read_csv(freq_path)
freq_data = df_freq["Freq_Hz"]
loop = False
except:
pass
if len(freq_data) == len(df_gt):
dif, p_value = LR_difference(df_gt,freq_data,LR_model,model_fname,local_param,scale_param)
clf = np.array(p_value) < 0.01
df_freq2 = df_freq.copy()
df_freq2["H"] = df_gt["H"]
df_freq2["FW"] = df_gt["FW"]
df_freq2["Dif"] = dif
df_freq2["p_value"] = p_value
df_freq2["Mutant?(p_value<0.01)"] = clf
result_path = "{}/mov/AraVibS_result/{}+Identify.csv".format(local_path,freq_fname)
print("*"*50)
print("Mutant?(p_value<0.01)")
print(df_freq2["Mutant?(p_value<0.01)"])
print("*"*50)
print("Details")
print(df_freq2)
print("")
print("File_path:{}".format(result_path))
df_freq2.to_csv(result_path)
else:
print("Error: Growth-trait data is not corresponding to freq_data")
if __name__ == '__main__':
main() | AraVibS.py | import os
import numpy as np
import pandas as pd
from sklearn.externals import joblib
import AraVib
from AraVib_modules.AraVibS_def import growth_trait_selection, freq_file_selection
from AraVib_modules.AraVibS_def import model_selection, LR_difference
def main():
print("************ Step 1: Please select growth-trait data ************")
local_path = os.getcwd()
summery_path = local_path + "/mov/summary/"
growth_trait_path = growth_trait_selection()
df_gt = pd.read_csv(growth_trait_path)
print("************ Step 2: Please select your model ************")
model_fname,local_param,scale_param = model_selection()
LR_model = joblib.load(model_fname)
print("******** Step 3: Do you want to analyze ωd by AraVib? ********")
loop = True
while loop:
print("If you want, enter y.")
print("Or if you use the existing ωd-data file, enter n.")
aravib_raw = input("(y/n):")
aravib_ = str(aravib_raw)
if aravib_.lower() == "y":
aravib = True
loop = False
elif aravib_.lower() == "n":
aravib = False
loop = False
else:
pass
loop = True
while loop:
if aravib:
freq_path, freq_fname = AraVib.main()
else:
freq_path, freq_fname = freq_file_selection()
try:
df_freq = pd.read_csv(freq_path)
freq_data = df_freq["Freq_Hz"]
loop = False
except:
pass
if len(freq_data) == len(df_gt):
dif, p_value = LR_difference(df_gt,freq_data,LR_model,model_fname,local_param,scale_param)
clf = np.array(p_value) < 0.01
df_freq2 = df_freq.copy()
df_freq2["H"] = df_gt["H"]
df_freq2["FW"] = df_gt["FW"]
df_freq2["Dif"] = dif
df_freq2["p_value"] = p_value
df_freq2["Mutant?(p_value<0.01)"] = clf
result_path = "{}/mov/AraVibS_result/{}+Identify.csv".format(local_path,freq_fname)
print("*"*50)
print("Mutant?(p_value<0.01)")
print(df_freq2["Mutant?(p_value<0.01)"])
print("*"*50)
print("Details")
print(df_freq2)
print("")
print("File_path:{}".format(result_path))
df_freq2.to_csv(result_path)
else:
print("Error: Growth-trait data is not corresponding to freq_data")
if __name__ == '__main__':
main() | 0.149438 | 0.108142 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def focal_loss(labels, logits, alpha, gamma):
    """Compute the focal loss between `logits` and the ground truth `labels`.

    Focal loss = -alpha_t * (1-pt)^gamma * log(pt)
    where pt is the probability of being classified to the true class.
    pt = p (if true class), otherwise pt = 1 - p. p = sigmoid(logit).

    Args:
        labels: A float tensor of size [batch, num_classes].
        logits: A float tensor of size [batch, num_classes].
        alpha: A float tensor of size [batch_size]
            specifying per-example weight for balanced cross entropy.
        gamma: A float scalar modulating loss from hard and easy examples.

    Returns:
        focal_loss: A float32 scalar representing normalized total loss.
    """
    bce = F.binary_cross_entropy_with_logits(input=logits, target=labels,
                                             reduction="none")
    if gamma == 0.0:
        modulator = 1.0
    else:
        # (1 - pt)^gamma rewritten in log space:
        #     exp(-gamma*y*x - gamma*log(1 + exp(-x)))
        # F.softplus(-logits) == log(1 + exp(-logits)) but is numerically
        # stable: the previous explicit exp overflowed to inf for large
        # negative logits.
        modulator = torch.exp(-gamma * labels * logits
                              - gamma * F.softplus(-logits))

    weighted_loss = alpha * modulator * bce
    total_loss = torch.sum(weighted_loss)

    # Normalize by the number of positive labels, as before, but avoid a
    # NaN/inf result when the batch contains no positives at all.
    num_pos = torch.sum(labels)
    if num_pos > 0:
        total_loss = total_loss / num_pos
    return total_loss
def CB_loss(labels, logits, samples_per_cls, no_of_classes, beta, gamma, loss_type='focal'):
    """Compute the Class Balanced Loss between `logits` and the ground truth `labels`.

    Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
    where Loss is one of the standard losses used for Neural Networks.

    Args:
        labels: A int tensor of size [batch].
        logits: A float tensor of size [batch, no_of_classes].
        samples_per_cls: A python list of size [no_of_classes].
        no_of_classes: total number of classes. int
        beta: float. Hyperparameter for Class balanced loss.
        gamma: float. Hyperparameter for Focal loss.
        loss_type: string. One of "sigmoid", "focal", "softmax".

    Returns:
        cb_loss: A float tensor representing class balanced loss

    Raises:
        ValueError: if `loss_type` is not one of the supported values
            (previously an unknown value caused an UnboundLocalError).
    """
    # Per-class re-weighting factor (1-beta)/(1-beta^n_y), normalized so
    # the weights sum to no_of_classes.
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / np.array(effective_num)
    weights = weights / np.sum(weights) * no_of_classes

    labels_one_hot = F.one_hot(labels, no_of_classes).float()

    # Follow the device/dtype of the logits instead of hard-coding
    # .cuda(), so the loss also works on CPU-only setups.
    weights = torch.as_tensor(weights, dtype=logits.dtype, device=logits.device)
    # Select each sample's class weight, then broadcast over all classes.
    weights = (weights.unsqueeze(0) * labels_one_hot).sum(1, keepdim=True)
    weights = weights.repeat(1, no_of_classes)

    if loss_type == "focal":
        cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)
    elif loss_type == "sigmoid":
        cb_loss = F.binary_cross_entropy_with_logits(input=logits, target=labels_one_hot, weight=weights)
    elif loss_type == "softmax":
        pred = logits.softmax(dim=1)
        cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot, weight=weights)
    else:
        raise ValueError("Unknown loss_type: %s" % loss_type)
    return cb_loss
class CBLoss(nn.Module):
    """nn.Module wrapper computing the Class Balanced Loss.

    Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
    where Loss is one of the standard losses used for Neural Networks.
    """

    def __init__(self, samples_per_cls, no_of_classes, beta=0.9999, gamma=2.0, loss_type='focal'):
        """
        Args:
            samples_per_cls: A python list of size [no_of_classes].
            no_of_classes: total number of classes. int
            beta: float. Hyperparameter for Class balanced loss.
            gamma: float. Hyperparameter for Focal loss.
            loss_type: string. One of "sigmoid", "focal", "softmax".
        """
        super(CBLoss, self).__init__()
        self.samples_per_cls = samples_per_cls
        self.no_of_classes = no_of_classes
        self.beta = beta
        self.gamma = gamma
        self.loss_type = loss_type

    def forward(self, logits, labels):
        """Return the class-balanced loss for a batch.

        Args:
            logits: A float tensor of size [batch, no_of_classes].
            labels: A int tensor of size [batch].

        Raises:
            ValueError: if `loss_type` is not a supported value.
        """
        # Per-class re-weighting factor (1-beta)/(1-beta^n_y), normalized
        # so the weights sum to no_of_classes.
        effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)
        weights = (1.0 - self.beta) / np.array(effective_num)
        weights = weights / np.sum(weights) * self.no_of_classes

        labels_one_hot = F.one_hot(labels, self.no_of_classes).float()

        # Follow the device/dtype of the logits instead of hard-coding
        # .cuda(), so the loss also works on CPU-only setups.
        weights = torch.as_tensor(weights, dtype=logits.dtype, device=logits.device)
        weights = (weights.unsqueeze(0) * labels_one_hot).sum(1, keepdim=True)
        weights = weights.repeat(1, self.no_of_classes)

        if self.loss_type == "focal":
            cb_loss = focal_loss(labels_one_hot, logits, weights, self.gamma)
        elif self.loss_type == "sigmoid":
            cb_loss = F.binary_cross_entropy_with_logits(input=logits, target=labels_one_hot, weight=weights)
        elif self.loss_type == "softmax":
            # BCE expects probabilities in [0, 1]; the previous
            # F.log_softmax produced non-positive values that
            # F.binary_cross_entropy rejects at runtime.
            pred = logits.softmax(dim=1)
            cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot, weight=weights)
        else:
            raise ValueError("Unknown loss_type: %s" % self.loss_type)
        return cb_loss
class ClassBalancedLabelSmoothingCrossEntropy(nn.Module):
"""
Class balanced loss with label smoothing.
"""
def __init__(self, samples_per_cls, no_of_classes, beta=0.9999, gamma=2.0, loss_type='softmax', smoothing=0.1):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(ClassBalancedLabelSmoothingCrossEntropy, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1. - smoothing
self.samples_per_cls = samples_per_cls
self.no_of_classes = no_of_classes
self.beta = beta
self.gamma = gamma
self.loss_type = loss_type
def forward(self, x, target):
logprobs = F.log_softmax(x, dim=-1)
cb_loss = CB_loss(target, x, self.samples_per_cls, self.no_of_classes, self.beta, self.gamma, loss_type=self.loss_type)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * cb_loss + self.smoothing * smooth_loss
return loss.mean() | src/loss/cb_loss.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def focal_loss(labels, logits, alpha, gamma):
"""Compute the focal loss between `logits` and the ground truth `labels`.
Focal loss = -alpha_t * (1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
pt = p (if true class), otherwise pt = 1 - p. p = sigmoid(logit).
Args:
labels: A float tensor of size [batch, num_classes].
logits: A float tensor of size [batch, num_classes].
alpha: A float tensor of size [batch_size]
specifying per-example weight for balanced cross entropy.
gamma: A float scalar modulating loss from hard and easy examples.
Returns:
focal_loss: A float32 scalar representing normalized total loss.
"""
BCLoss = F.binary_cross_entropy_with_logits(input=logits, target=labels,reduction = "none")
if gamma == 0.0:
modulator = 1.0
else:
modulator = torch.exp(-gamma * labels * logits - gamma * torch.log(1 +
torch.exp(-1.0 * logits)))
loss = modulator * BCLoss
weighted_loss = alpha * loss
focal_loss = torch.sum(weighted_loss)
focal_loss /= torch.sum(labels)
return focal_loss
def CB_loss(labels, logits, samples_per_cls, no_of_classes, beta, gamma, loss_type='focal'):
"""Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
where Loss is one of the standard losses used for Neural Networks.
Args:
labels: A int tensor of size [batch].
logits: A float tensor of size [batch, no_of_classes].
samples_per_cls: A python list of size [no_of_classes].
no_of_classes: total number of classes. int
beta: float. Hyperparameter for Class balanced loss.
gamma: float. Hyperparameter for Focal loss.
loss_type: string. One of "sigmoid", "focal", "softmax".
Returns:
cb_loss: A float tensor representing class balanced loss
"""
effective_num = 1.0 - np.power(beta, samples_per_cls)
weights = (1.0 - beta) / np.array(effective_num)
weights = weights / np.sum(weights) * no_of_classes
labels_one_hot = F.one_hot(labels, no_of_classes).float()
weights = torch.tensor(weights).float().cuda()
weights = weights.unsqueeze(0)
weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
weights = weights.sum(1)
weights = weights.unsqueeze(1)
weights = weights.repeat(1,no_of_classes)
# print(weights)
if loss_type == "focal":
cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)
elif loss_type == "sigmoid":
cb_loss = F.binary_cross_entropy_with_logits(input=logits,target=labels_one_hot, weight=weights)
elif loss_type == "softmax":
pred = logits.softmax(dim = 1)
cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot, weight=weights)
return cb_loss
class CBLoss(nn.Module):
def __init__(self,samples_per_cls, no_of_classes, beta=0.9999, gamma=2.0, loss_type='focal'):
"""Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
where Loss is one of the standard losses used for Neural Networks.
Args:
samples_per_cls: A python list of size [no_of_classes].
no_of_classes: total number of classes. int
beta: float. Hyperparameter for Class balanced loss.
gamma: float. Hyperparameter for Focal loss.
loss_type: string. One of "sigmoid", "focal", "softmax".
Returns:
cb_loss: A float tensor representing class balanced loss
"""
super(CBLoss, self).__init__()
self.samples_per_cls = samples_per_cls
self.no_of_classes = no_of_classes
self.beta = beta
self.gamma = gamma
self.loss_type = loss_type
def forward(self,logits,labels):
effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)
weights = (1.0 - self.beta) / np.array(effective_num)
weights = weights / np.sum(weights) * self.no_of_classes
labels_one_hot = F.one_hot(labels, self.no_of_classes).float()
weights = torch.tensor(weights).float().cuda()
weights = weights.unsqueeze(0)
weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
weights = weights.sum(1)
weights = weights.unsqueeze(1)
weights = weights.repeat(1,self.no_of_classes)
if self.loss_type == "focal":
cb_loss = focal_loss(labels_one_hot, logits, weights, self.gamma)
elif self.loss_type == "sigmoid":
cb_loss = F.binary_cross_entropy_with_logits(input=logits,target=labels_one_hot, weight=weights)
elif self.loss_type == "softmax":
# pred = logits.softmax(dim = 1)
pred = F.log_softmax(logits,dim=-1)
cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot, weight=weights)
return cb_loss
class ClassBalancedLabelSmoothingCrossEntropy(nn.Module):
"""
Class balanced loss with label smoothing.
"""
def __init__(self, samples_per_cls, no_of_classes, beta=0.9999, gamma=2.0, loss_type='softmax', smoothing=0.1):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(ClassBalancedLabelSmoothingCrossEntropy, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1. - smoothing
self.samples_per_cls = samples_per_cls
self.no_of_classes = no_of_classes
self.beta = beta
self.gamma = gamma
self.loss_type = loss_type
def forward(self, x, target):
logprobs = F.log_softmax(x, dim=-1)
cb_loss = CB_loss(target, x, self.samples_per_cls, self.no_of_classes, self.beta, self.gamma, loss_type=self.loss_type)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * cb_loss + self.smoothing * smooth_loss
return loss.mean() | 0.940647 | 0.771219 |
from math import atan, atan2, cos, sin, sqrt
"""Python port of MapBox's cheap-ruler module."""
# Literal constants kept from the JS original (not math.pi/math.e, so the
# numeric results stay bit-compatible with the upstream port).
MATH_PI = 3.14159265359
MATH_E = 2.71828182846

# Multipliers converting the kilometre-based coefficients below into the
# requested output unit.
FACTORS = {
    "kilometers": 1,
    "miles": 1000 / 1609.344,
    "nauticalmiles": 1000 / 1852,
    "meters": 1000,
    "metres": 1000,
    "yards": 1000 / 0.9144,
    "feet": 1000 / 0.3048,
    "inches": 1000 / 0.0254
}


def from_tile(y, z, units):
    """Create a CheapRuler for the latitude of web-mercator tile row y at zoom z."""
    n = MATH_PI * (1 - 2 * (y + 0.5) / pow(2, z))
    lat = atan(0.5 * (pow(MATH_E, n) - pow(MATH_E, -n))) * 180 / MATH_PI
    return CheapRuler(lat, units)


class CheapRuler():
    """Fast approximate geodesic measurements around a given latitude.

    Python port of MapBox's cheap-ruler: distances are computed on a flat
    plane using per-degree scale factors (kx for longitude, ky for
    latitude) derived from an FCC-style series expansion at the ruler's
    latitude. All points are (longitude, latitude) pairs.
    """

    def __init__(self, lat, units="kilometers"):
        if units not in FACTORS:
            raise ValueError("Unknown unit %s. Use one of: %s" % (units, ", ".join(FACTORS.keys())))
        m = FACTORS[units]

        # Chebyshev-style recurrences derive cos(2x)..cos(5x) from cos(x).
        c = cos(lat * MATH_PI / 180)
        c2 = 2 * c * c - 1
        c3 = 2 * c * c2 - c
        c4 = 2 * c * c3 - c2
        c5 = 2 * c * c4 - c3

        self.kx = m * (111.41513 * c - 0.09455 * c3 + 0.00012 * c5)  # units per degree longitude
        self.ky = m * (111.13209 - 0.56605 * c2 + 0.0012 * c4)  # units per degree latitude

    @staticmethod
    def _interpolate(a, b, t):
        """Point at fraction t along the segment a -> b."""
        return (a[0] + (b[0] - a[0]) * t, a[1] + (b[1] - a[1]) * t)

    def distance(self, a, b):
        """Distance between two points, in the ruler's units."""
        dx = (a[0] - b[0]) * self.kx
        dy = (a[1] - b[1]) * self.ky
        return sqrt(dx * dx + dy * dy)

    def bearing(self, a, b):
        """Bearing from a to b in degrees, in (-180, 180]; 0 = north."""
        dx = (b[0] - a[0]) * self.kx
        dy = (b[1] - a[1]) * self.ky
        if dx == 0 and dy == 0:
            return 0
        bearing = atan2(-dy, dx) * 180 / MATH_PI + 90
        if bearing > 180:
            bearing -= 360
        return bearing

    def destination(self, p, dist, bearing):
        """Point reached from p after travelling dist at the given bearing."""
        a = (90 - bearing) * MATH_PI / 180
        return (p[0] + cos(a) * dist / self.kx,
                p[1] + sin(a) * dist / self.ky)

    def line_distance(self, points):
        """Total length of a polyline."""
        total = 0
        for i in range(len(points) - 1):
            total += self.distance(points[i], points[i + 1])
        return total

    def area(self, polygon):
        """Area of a polygon given as a list of rings (holes after the first).

        Bug fix: as in upstream cheap-ruler, the absolute shoelace sum is
        halved and scaled by kx*ky so the result is in squared ruler
        units; previously the raw signed degree-space sum was returned.
        """
        total = 0
        for i in range(len(polygon)):
            ring = polygon[i]
            k = len(ring) - 1
            for j in range(len(ring)):
                total += ((ring[j][0] - ring[k][0]) *
                          (ring[j][1] + ring[k][1]) *
                          (-1 if i > 0 else 1))
                k = j
        return (abs(total) / 2) * self.kx * self.ky

    def along(self, line, dist):
        """Point located dist along the polyline (clamped to its ends)."""
        total = 0
        if dist <= 0:
            return line[0]
        for i in range(len(line) - 1):
            # Bug fix: the segment start is line[i]; it was hard-coded to
            # line[0], which returned wrong points past the first vertex.
            p0 = line[i]
            p1 = line[i + 1]
            d = self.distance(p0, p1)
            total += d
            if total > dist:
                return self._interpolate(p0, p1, (dist - (total - d)) / d)
        return line[-1]

    def point_on_line(self, line, p):
        """Closest point on the polyline to p.

        Returns a dict with the snapped "point", the starting "index" of
        the segment it lies on, and the fraction "t" along that segment.
        """
        minDist = float("inf")
        for i in range(len(line) - 1):
            x = line[i][0]
            y = line[i][1]
            dx = (line[i + 1][0] - x) * self.kx
            dy = (line[i + 1][1] - y) * self.ky
            # Bug fix: t must default to 0, otherwise a zero-length first
            # segment left it unbound when stored as minT below.
            t = 0
            if dx != 0 or dy != 0:
                t = ((p[0] - x) * self.kx * dx + (p[1] - y) * self.ky * dy) / (dx * dx + dy * dy)
                if t > 1:
                    x = line[i + 1][0]
                    y = line[i + 1][1]
                elif t > 0:
                    x += (dx / self.kx) * t
                    y += (dy / self.ky) * t
            dx = (p[0] - x) * self.kx
            dy = (p[1] - y) * self.ky
            sqDist = dx * dx + dy * dy
            if sqDist < minDist:
                minDist = sqDist
                minX = x
                minY = y
                minI = i
                minT = t
        return {
            "point": (minX, minY),
            "index": minI,
            "t": minT
        }

    def line_slice(self, start, stop, line):
        """Part of the polyline between the projections of start and stop."""
        p1 = self.point_on_line(line, start)
        p2 = self.point_on_line(line, stop)

        if p1['index'] > p2['index'] or (p1['index'] == p2['index'] and p1['t'] > p2['t']):
            p1, p2 = p2, p1

        _slice = [p1['point']]
        l = p1['index'] + 1
        r = p2['index']

        # Bug fix: upstream appends a vertex/endpoint when it is NOT equal
        # to its neighbour in the slice; the comparisons here were
        # inverted (and a list vertex never compared equal to a tuple
        # point anyway, hence the tuple() normalization).
        if l <= r and tuple(line[l]) != tuple(_slice[0]):
            _slice.append(line[l])
        for i in range(l + 1, r + 1):
            _slice.append(line[i])
        if tuple(line[r]) != tuple(p2['point']):
            _slice.append(p2['point'])
        return _slice

    def line_slice_along(self, start, stop, line):
        """Part of the polyline between distances start and stop along it."""
        total = 0
        _slice = []
        for i in range(len(line) - 1):
            p0 = line[i]
            p1 = line[i + 1]
            d = self.distance(p0, p1)
            total += d
            if total > start and not _slice:
                _slice.append(self._interpolate(p0, p1, (start - (total - d)) / d))
            if total >= stop:
                _slice.append(self._interpolate(p0, p1, (stop - (total - d)) / d))
                return _slice
            if total > start:
                _slice.append(p1)
        return _slice

    def buffer_point(self, p, buff):
        """Bounding box (w, s, e, n) around p padded by buff ruler units."""
        v = buff / self.ky
        h = buff / self.kx
        return (
            p[0] - h,
            p[1] - v,
            p[0] + h,
            p[1] + v
        )

    def buffer_bbox(self, bbox, buff):
        """bbox grown by buff ruler units on every side."""
        v = buff / self.ky
        h = buff / self.kx
        return (
            bbox[0] - h,
            bbox[1] - v,
            bbox[2] + h,
            bbox[3] + v
        )

    def inside_bbox(self, p, bbox):
        """True if p lies within bbox = (w, s, e, n)."""
        return (p[0] >= bbox[0] and
                p[0] <= bbox[2] and
                p[1] >= bbox[1] and
                p[1] <= bbox[3])
def interpolate(a, b, t):
dx = b[0] - a[0]
dy = b[1] - a[1]
return (a[0] + dx * t, a[1] + dy * t) | cheapruler.py | from math import atan, atan2, cos, sin, sqrt
"""Python port of MapBox's cheap-ruler module."""
MATH_PI = 3.14159265359
MATH_E = 2.71828182846
FACTORS = {
"kilometers": 1,
"miles": 1000 / 1609.344,
"nauticalmiles": 1000 / 1852,
"meters": 1000,
"metres": 1000,
"yards": 1000 / 0.9144,
"feet": 1000 / 0.3048,
"inches": 1000 / 0.0254
}
def from_tile(y, z, units):
n = MATH_PI * (1 - 2 * (y + 0.5) / pow(2, z))
lat = atan(0.5 * (pow(MATH_E, n) - pow(MATH_E, -n))) * 180 / MATH_PI
return CheapRuler(lat, units)
class CheapRuler():
# cdef double kx
# cdef double ky
def __init__(self, lat, units="kilometers"):
if units not in FACTORS:
raise ValueError("Unknown unit %s. Use one of: %s" % (units, ", ".join(FACTORS.keys())))
# cdef double m
m = FACTORS[units]
# # cdef double c, c2, c3, c4, c5
c = cos(lat * MATH_PI / 180)
c2 = 2 * c * c - 1
c3 = 2 * c * c2 - c
c4 = 2 * c * c3 - c2
c5 = 2 * c * c4 - c3
self.kx = m * (111.41513 * c - 0.09455 * c3 + 0.00012 * c5) # longitude correction
self.ky = m * (111.13209 - 0.56605 * c2 + 0.0012 * c4) # latitude correction
def distance(self, a, b):
# cdef double dx, dy
dx = (a[0] - b[0]) * self.kx
dy = (a[1] - b[1]) * self.ky
return sqrt(dx * dx + dy * dy)
def bearing(self, a, b):
# cdef double dx, dy, bearing
dx = (b[0] - a[0]) * self.kx
dy = (b[1] - a[1]) * self.ky
if dx == 0 and dy == 0:
return 0
bearing = atan2(-dy, dx) * 180 / MATH_PI + 90
if bearing > 180:
bearing -= 360
return bearing
def destination(self, p, dist, bearing):
a = (90 - bearing) * MATH_PI / 180
return (p[0] + cos(a) * dist / self.kx,
p[1] + sin(a) * dist / self.ky)
def line_distance(self, points):
total = 0
for i in range(len(points) - 1):
total += self.distance(points[i], points[i+1])
return total
def area(self, polygon):
total = 0
for i in range(len(polygon)):
ring = polygon[i]
k = len(ring) - 1
for j in range(len(ring)):
total += ((ring[j][0] - ring[k][0]) *
(ring[j][1] + ring[k][1]) *
(-1 if i > 0 else 1))
k = j
return total
def along(self, line, dist):
total = 0
if dist <= 0:
return line[0]
for i in range(len(line) - 1):
p0 = line[0]
p1 = line[i + 1]
d = self.distance(p0, p1)
total += d
if total > dist:
return interpolate(p0, p1, (dist - (total - d)) / d)
return line[-1]
def point_on_line(self, line, p):
minDist = float("inf")
for i in range(len(line) - 1):
x = line[i][0]
y = line[i][1]
dx = (line[i + 1][0] - x) * self.kx
dy = (line[i + 1][1] - y) * self.ky
if dx != 0 or dy != 0:
t = ((p[0] - x) * self.kx * dx + (p[1] - y) * self.ky * dy) / (dx * dx + dy * dy)
if t > 1:
x = line[i + 1][0]
y = line[i + 1][1]
elif t > 0:
x += (dx / self.kx) * t
y += (dy / self.ky) * t
dx = (p[0] - x) * self.kx
dy = (p[1] - y) * self.ky
sqDist = dx * dx + dy * dy
if sqDist < minDist:
minDist = sqDist
minX = x
minY = y
minI = i
minT = t
return {
"point": (minX, minY),
"index": minI,
"t": minT
}
def line_slice(self, start, stop, line):
p1 = self.point_on_line(line, start)
p2 = self.point_on_line(line, stop)
if p1['index'] > p2['index'] or (p1['index'] == p2['index'] and p1['t'] > p2['t']):
tmp = p1
p1 = p2
p2 = tmp
_slice = [p1['point']]
l = p1['index'] + 1
r = p2['index']
if not line[l] != _slice[0] and l <= r:
_slice.append(line[l])
for i in range(l+1, r+1):
_slice.append(line[i])
if not line[r] != p2['point']:
_slice.append(p2['point'])
return _slice
def line_slice_along(self, start, stop, line):
total = 0
_slice = []
for i in range(len(line) - 1):
p0 = line[i]
p1 = line[i + 1]
d = self.distance(p0, p1)
total += d
if total > start and not _slice:
_slice.append(interpolate(p0, p1, (start - (total - d)) / d))
if total >= stop:
_slice.append(interpolate(p0, p1, (stop - (total - d)) / d))
return _slice
if total > start:
_slice.append(p1)
return _slice
def buffer_point(self, p, buff):
v = buff / self.ky
h = buff / self.kx
return (
p[0] - h,
p[1] - v,
p[0] + h,
p[1] + v
)
def buffer_bbox(self, bbox, buff):
v = buff / self.ky
h = buff / self.kx
return (
bbox[0] - h,
bbox[1] - v,
bbox[2] + h,
bbox[3] + v
)
def inside_bbox(self, p, bbox):
return (p[0] >= bbox[0] and
p[0] <= bbox[2] and
p[1] >= bbox[1] and
p[1] <= bbox[3])
def interpolate(a, b, t):
dx = b[0] - a[0]
dy = b[1] - a[1]
return (a[0] + dx * t, a[1] + dy * t) | 0.743075 | 0.432123 |
from django import forms
from django.contrib import auth
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Blog
class RegistrationForm(UserCreationForm):
    """Sign-up form: username, unique email, and password pair, rendered
    with Bootstrap "form-control" widgets."""

    email = forms.EmailField(required=True)

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def __init__(self, *args, **kwargs):
        super(RegistrationForm, self).__init__(*args, **kwargs)
        # Re-declare every widget so the fields render with Bootstrap
        # styling and a placeholder.
        self.fields['username'].widget = forms.TextInput(
            attrs={'placeholder': 'Username', 'class': 'form-control'})
        self.fields['username'].label = "Username"
        self.fields['email'].widget = forms.TextInput(
            attrs={'placeholder': 'Email', 'class': 'form-control'})
        self.fields['email'].label = "Email"
        self.fields['password1'].widget = forms.PasswordInput(
            attrs={'placeholder': 'Password', 'class': 'form-control'})
        self.fields['password1'].label = "Password"
        self.fields['password2'].widget = forms.PasswordInput(
            attrs={'placeholder': 'Re-enter your password', 'class': 'form-control'})
        self.fields['password2'].label = "Confirm Password"

    def save(self, commit=True):
        """Persist the user, copying the validated email onto the model."""
        user = super(RegistrationForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        if commit:
            user.save()
        return user

    def clean_email(self):
        """Reject emails already used by another account.

        NOTE: unlike User.username, User.email is not unique at the
        database level, so this form-level check is what enforces
        uniqueness (the previous comment was copied from Django's
        username check and was misleading). It is still subject to a
        race between validation and save.
        """
        email = self.cleaned_data["email"]
        try:
            User._default_manager.get(email=email)
        except User.DoesNotExist:
            return email
        raise forms.ValidationError('An account with this email already exists.')
class LoginForm(forms.Form):
    """Username/password login form with Bootstrap "form-control"
    widgets.

    The current request is injected via the required `request` keyword
    argument so the view can authenticate against it later.
    """

    username = forms.CharField(required=True)
    password = forms.CharField(required=True)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request')
        super(LoginForm, self).__init__(*args, **kwargs)

        username_field = self.fields['username']
        username_field.widget = forms.TextInput(
            attrs={'placeholder': 'Username', 'class': 'form-control'})
        username_field.label = "Username"

        password_field = self.fields['password']
        password_field.widget = forms.PasswordInput(
            attrs={'placeholder': 'Password', 'class': 'form-control'})
        password_field.label = "Password"
class BlogForm(forms.ModelForm):
class Meta:
model = Blog
exclude = ('user',)
def __init__(self, *args, **kwargs):
super(BlogForm, self).__init__(*args, **kwargs)
self.fields['title'].widget = forms.TextInput(
attrs={'placeholder': 'Title', 'class':'form-control'})
self.fields['title'].label = "Title"
self.fields['description'].widget = forms.Textarea(
attrs={'class': 'form-control', 'rows': 30, 'cols': 90 })
self.fields['description'].label = "Enter Description" | blog/blogging/forms.py | from django import forms
from django.contrib import auth
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Blog
class RegistrationForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
self.fields['username'].widget = forms.TextInput(
attrs={'placeholder': 'Username', 'class':'form-control'})
self.fields['username'].label = "Username"
self.fields['email'].widget = forms.TextInput(
attrs={'placeholder': 'Email', 'class':'form-control'})
self.fields['email'].label = "Email"
self.fields['password1'].widget = forms.PasswordInput(
attrs={'placeholder': 'Password', 'class': 'form-control'})
self.fields['password1'].label = "Password"
self.fields['password2'].widget = forms.PasswordInput(
attrs={'placeholder': 'Re-enter your password', 'class': 'form-control'})
self.fields['password2'].label = "Confirm Password"
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.email=self.cleaned_data['email']
if commit:
user.save()
return user
def clean_email(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
email = self.cleaned_data["email"]
try:
User._default_manager.get(email=email)
except User.DoesNotExist:
return email
raise forms.ValidationError('An account with this email already exists.')
class LoginForm(forms.Form):
username = forms.CharField(required=True)
password = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
super(LoginForm, self).__init__(*args, **kwargs)
self.fields['username'].widget = forms.TextInput(
attrs={'placeholder': 'Username', 'class':'form-control'})
self.fields['username'].label = "Username"
self.fields['password'].widget = forms.PasswordInput(
attrs={'placeholder': 'Password', 'class': 'form-control'})
self.fields['password'].label = "Password"
class BlogForm(forms.ModelForm):
class Meta:
model = Blog
exclude = ('user',)
def __init__(self, *args, **kwargs):
super(BlogForm, self).__init__(*args, **kwargs)
self.fields['title'].widget = forms.TextInput(
attrs={'placeholder': 'Title', 'class':'form-control'})
self.fields['title'].label = "Title"
self.fields['description'].widget = forms.Textarea(
attrs={'class': 'form-control', 'rows': 30, 'cols': 90 })
self.fields['description'].label = "Enter Description" | 0.446857 | 0.068506 |
import os
import posixpath
import StringIO
import sys
import textwrap
import mozdevice
from optparse import OptionParser
class DMCli(object):
def __init__(self, args=sys.argv[1:]):
self.commands = { 'install': { 'function': self.install,
'min_args': 1,
'max_args': 1,
'help_args': '<file>',
'help': 'push this package file to the device and install it' },
'killapp': { 'function': self.killapp,
'min_args': 1,
'max_args': 1,
'help_args': '<process name>',
'help': 'kills any processes with a particular name on device' },
'launchapp': { 'function': self.launchapp,
'min_args': 4,
'max_args': 4,
'help_args': '<appname> <activity name> <intent> <URL>',
'help': 'launches application on device' },
'push': { 'function': self.push,
'min_args': 2,
'max_args': 2,
'help_args': '<local> <remote>',
'help': 'copy file/dir to device' },
'pull': { 'function': self.pull,
'min_args': 1,
'max_args': 2,
'help_args': '<local> [remote]',
'help': 'copy file/dir from device' },
'shell': { 'function': self.shell,
'min_args': 1,
'max_args': None,
'help_args': '<command>',
'help': 'run shell command on device' },
'info': { 'function': self.getinfo,
'min_args': None,
'max_args': 1,
'help_args': '[os|id|uptime|systime|screen|memory|processes]',
'help': 'get information on a specified '
'aspect of the device (if no argument '
'given, print all available information)'
},
'ps': { 'function': self.processlist,
'min_args': None,
'max_args': 0,
'help_args': '',
'help': 'get information on running processes on device'
},
'ls': { 'function': self.listfiles,
'min_args': 1,
'max_args': 1,
'help_args': '<remote>',
'help': 'list files on device'
},
'rm': { 'function': lambda f: self.dm.removeFile(f),
'min_args': 1,
'max_args': 1,
'help_args': '<remote>',
'help': 'remove file from device'
},
'rmdir': { 'function': lambda d: self.dm.removeDir(d),
'min_args': 1,
'max_args': 1,
'help_args': '<remote>',
'help': 'recursively remove directory from device'
},
'screencap': { 'function': lambda f: self.dm.saveScreenshot(f),
'min_args': 1,
'max_args': 1,
'help_args': '<png file>',
'help': 'capture screenshot of device in action'
}
}
usage = "usage: %prog [options] <command> [<args>]\n\ndevice commands:\n"
usage += "\n".join([textwrap.fill("%s %s - %s" %
(cmdname, cmd['help_args'],
cmd['help']),
initial_indent=" ",
subsequent_indent=" ")
for (cmdname, cmd) in
sorted(self.commands.iteritems())])
self.parser = OptionParser(usage)
self.add_options(self.parser)
(self.options, self.args) = self.parser.parse_args(args)
if len(self.args) < 1:
self.parser.error("must specify command")
if self.options.dmtype == "sut" and not self.options.host and \
not self.options.hwid:
self.parser.error("Must specify device ip in TEST_DEVICE or "
"with --host option with SUT")
(command_name, command_args) = (self.args[0], self.args[1:])
if command_name not in self.commands:
self.parser.error("Invalid command. Valid commands: %s" %
" ".join(self.commands.keys()))
command = self.commands[command_name]
if command['min_args'] and len(command_args) < command['min_args'] or \
command['max_args'] and len(command_args) > command['max_args']:
self.parser.error("Wrong number of arguments")
self.dm = self.getDevice(dmtype=self.options.dmtype,
hwid=self.options.hwid,
host=self.options.host,
port=self.options.port)
command['function'](*command_args)
def add_options(self, parser):
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose",
help="Verbose output from DeviceManager",
default=False)
parser.add_option("--host", action="store",
type="string", dest="host",
help="Device hostname (only if using TCP/IP)",
default=os.environ.get('TEST_DEVICE'))
parser.add_option("-p", "--port", action="store",
type="int", dest="port",
help="Custom device port (if using SUTAgent or "
"adb-over-tcp)", default=None)
parser.add_option("-m", "--dmtype", action="store",
type="string", dest="dmtype",
help="DeviceManager type (adb or sut, defaults " \
"to adb)", default=os.environ.get('DM_TRANS',
'adb'))
parser.add_option("-d", "--hwid", action="store",
type="string", dest="hwid",
help="HWID", default=None)
parser.add_option("--package-name", action="store",
type="string", dest="packagename",
help="Packagename (if using DeviceManagerADB)",
default=None)
def getDevice(self, dmtype="adb", hwid=None, host=None, port=None):
'''
Returns a device with the specified parameters
'''
if self.options.verbose:
mozdevice.DroidSUT.debug = 4
if hwid:
return mozdevice.DroidConnectByHWID(hwid)
if dmtype == "adb":
if host and not port:
port = 5555
return mozdevice.DroidADB(packageName=self.options.packagename,
host=host, port=port)
elif dmtype == "sut":
if not host:
self.parser.error("Must specify host with SUT!")
if not port:
port = 20701
return mozdevice.DroidSUT(host=host, port=port)
else:
self.parser.error("Unknown device manager type: %s" % type)
def push(self, src, dest):
if os.path.isdir(src):
self.dm.pushDir(src, dest)
else:
dest_is_dir = dest[-1] == '/' or self.dm.dirExists(dest)
dest = posixpath.normpath(dest)
if dest_is_dir:
dest = posixpath.join(dest, os.path.basename(src))
self.dm.pushFile(src, dest)
def pull(self, src, dest=None):
if not self.dm.fileExists(src):
print 'No such file or directory'
return
if not dest:
dest = posixpath.basename(src)
if self.dm.dirExists(src):
self.dm.getDirectory(src, dest)
else:
self.dm.getFile(src, dest)
def install(self, apkfile):
basename = os.path.basename(apkfile)
app_path_on_device = posixpath.join(self.dm.getDeviceRoot(),
basename)
self.dm.pushFile(apkfile, app_path_on_device)
self.dm.installApp(app_path_on_device)
def launchapp(self, appname, activity, intent, url):
self.dm.launchApplication(appname, activity, intent, url)
def killapp(self, *args):
for appname in args:
self.dm.killProcess(appname)
def shell(self, *args):
buf = StringIO.StringIO()
self.dm.shell(args, buf)
print str(buf.getvalue()[0:-1]).rstrip()
    def getinfo(self, *args):
        """Print device information; an optional first argument narrows the
        output to one directive (os, id, uptime, systime, screen, memory,
        processes)."""
        directive=None
        if args:
            directive=args[0]
        info = self.dm.getInfo(directive=directive)
        for (infokey, infoitem) in sorted(info.iteritems()):
            if infokey == "process":
                pass # skip process list: get that through ps
            elif not directive and not infoitem:
                # section with no items: print only the header
                print "%s:" % infokey.upper()
            elif not directive:
                for line in infoitem:
                    print "%s: %s" % (infokey.upper(), line)
            else:
                # a specific directive was requested: print raw values only
                print "%s" % "\n".join(infoitem)
    def processlist(self):
        """Print one space-separated line per running device process, with
        whatever fields getProcessList() returns."""
        pslist = self.dm.getProcessList()
        for ps in pslist:
            print " ".join(str(i) for i in ps)
    def listfiles(self, dir):
        """Print the names of the files in remote directory *dir*."""
        filelist = self.dm.listFiles(dir)
        for file in filelist:
            print file
def cli(args=sys.argv[1:]):
    """Command-line entry point: parse *args* and run the requested device
    command.  NOTE(review): the default is captured once at import time, not
    per call -- fine for script use, surprising for library use.
    """
    # process the command line
    # DMCli.__init__ parses the args and dispatches the command itself;
    # the bound instance is not used afterwards.
    cli = DMCli(args)
if __name__ == '__main__':
cli() | B2G/gecko/testing/mozbase/mozdevice/mozdevice/dmcli.py | import os
import posixpath
import StringIO
import sys
import textwrap
import mozdevice
from optparse import OptionParser
class DMCli(object):
def __init__(self, args=sys.argv[1:]):
self.commands = { 'install': { 'function': self.install,
'min_args': 1,
'max_args': 1,
'help_args': '<file>',
'help': 'push this package file to the device and install it' },
'killapp': { 'function': self.killapp,
'min_args': 1,
'max_args': 1,
'help_args': '<process name>',
'help': 'kills any processes with a particular name on device' },
'launchapp': { 'function': self.launchapp,
'min_args': 4,
'max_args': 4,
'help_args': '<appname> <activity name> <intent> <URL>',
'help': 'launches application on device' },
'push': { 'function': self.push,
'min_args': 2,
'max_args': 2,
'help_args': '<local> <remote>',
'help': 'copy file/dir to device' },
'pull': { 'function': self.pull,
'min_args': 1,
'max_args': 2,
'help_args': '<local> [remote]',
'help': 'copy file/dir from device' },
'shell': { 'function': self.shell,
'min_args': 1,
'max_args': None,
'help_args': '<command>',
'help': 'run shell command on device' },
'info': { 'function': self.getinfo,
'min_args': None,
'max_args': 1,
'help_args': '[os|id|uptime|systime|screen|memory|processes]',
'help': 'get information on a specified '
'aspect of the device (if no argument '
'given, print all available information)'
},
'ps': { 'function': self.processlist,
'min_args': None,
'max_args': 0,
'help_args': '',
'help': 'get information on running processes on device'
},
'ls': { 'function': self.listfiles,
'min_args': 1,
'max_args': 1,
'help_args': '<remote>',
'help': 'list files on device'
},
'rm': { 'function': lambda f: self.dm.removeFile(f),
'min_args': 1,
'max_args': 1,
'help_args': '<remote>',
'help': 'remove file from device'
},
'rmdir': { 'function': lambda d: self.dm.removeDir(d),
'min_args': 1,
'max_args': 1,
'help_args': '<remote>',
'help': 'recursively remove directory from device'
},
'screencap': { 'function': lambda f: self.dm.saveScreenshot(f),
'min_args': 1,
'max_args': 1,
'help_args': '<png file>',
'help': 'capture screenshot of device in action'
}
}
usage = "usage: %prog [options] <command> [<args>]\n\ndevice commands:\n"
usage += "\n".join([textwrap.fill("%s %s - %s" %
(cmdname, cmd['help_args'],
cmd['help']),
initial_indent=" ",
subsequent_indent=" ")
for (cmdname, cmd) in
sorted(self.commands.iteritems())])
self.parser = OptionParser(usage)
self.add_options(self.parser)
(self.options, self.args) = self.parser.parse_args(args)
if len(self.args) < 1:
self.parser.error("must specify command")
if self.options.dmtype == "sut" and not self.options.host and \
not self.options.hwid:
self.parser.error("Must specify device ip in TEST_DEVICE or "
"with --host option with SUT")
(command_name, command_args) = (self.args[0], self.args[1:])
if command_name not in self.commands:
self.parser.error("Invalid command. Valid commands: %s" %
" ".join(self.commands.keys()))
command = self.commands[command_name]
if command['min_args'] and len(command_args) < command['min_args'] or \
command['max_args'] and len(command_args) > command['max_args']:
self.parser.error("Wrong number of arguments")
self.dm = self.getDevice(dmtype=self.options.dmtype,
hwid=self.options.hwid,
host=self.options.host,
port=self.options.port)
command['function'](*command_args)
def add_options(self, parser):
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose",
help="Verbose output from DeviceManager",
default=False)
parser.add_option("--host", action="store",
type="string", dest="host",
help="Device hostname (only if using TCP/IP)",
default=os.environ.get('TEST_DEVICE'))
parser.add_option("-p", "--port", action="store",
type="int", dest="port",
help="Custom device port (if using SUTAgent or "
"adb-over-tcp)", default=None)
parser.add_option("-m", "--dmtype", action="store",
type="string", dest="dmtype",
help="DeviceManager type (adb or sut, defaults " \
"to adb)", default=os.environ.get('DM_TRANS',
'adb'))
parser.add_option("-d", "--hwid", action="store",
type="string", dest="hwid",
help="HWID", default=None)
parser.add_option("--package-name", action="store",
type="string", dest="packagename",
help="Packagename (if using DeviceManagerADB)",
default=None)
def getDevice(self, dmtype="adb", hwid=None, host=None, port=None):
'''
Returns a device with the specified parameters
'''
if self.options.verbose:
mozdevice.DroidSUT.debug = 4
if hwid:
return mozdevice.DroidConnectByHWID(hwid)
if dmtype == "adb":
if host and not port:
port = 5555
return mozdevice.DroidADB(packageName=self.options.packagename,
host=host, port=port)
elif dmtype == "sut":
if not host:
self.parser.error("Must specify host with SUT!")
if not port:
port = 20701
return mozdevice.DroidSUT(host=host, port=port)
else:
self.parser.error("Unknown device manager type: %s" % type)
def push(self, src, dest):
if os.path.isdir(src):
self.dm.pushDir(src, dest)
else:
dest_is_dir = dest[-1] == '/' or self.dm.dirExists(dest)
dest = posixpath.normpath(dest)
if dest_is_dir:
dest = posixpath.join(dest, os.path.basename(src))
self.dm.pushFile(src, dest)
def pull(self, src, dest=None):
if not self.dm.fileExists(src):
print 'No such file or directory'
return
if not dest:
dest = posixpath.basename(src)
if self.dm.dirExists(src):
self.dm.getDirectory(src, dest)
else:
self.dm.getFile(src, dest)
def install(self, apkfile):
basename = os.path.basename(apkfile)
app_path_on_device = posixpath.join(self.dm.getDeviceRoot(),
basename)
self.dm.pushFile(apkfile, app_path_on_device)
self.dm.installApp(app_path_on_device)
def launchapp(self, appname, activity, intent, url):
self.dm.launchApplication(appname, activity, intent, url)
def killapp(self, *args):
for appname in args:
self.dm.killProcess(appname)
def shell(self, *args):
buf = StringIO.StringIO()
self.dm.shell(args, buf)
print str(buf.getvalue()[0:-1]).rstrip()
def getinfo(self, *args):
directive=None
if args:
directive=args[0]
info = self.dm.getInfo(directive=directive)
for (infokey, infoitem) in sorted(info.iteritems()):
if infokey == "process":
pass # skip process list: get that through ps
elif not directive and not infoitem:
print "%s:" % infokey.upper()
elif not directive:
for line in infoitem:
print "%s: %s" % (infokey.upper(), line)
else:
print "%s" % "\n".join(infoitem)
def processlist(self):
pslist = self.dm.getProcessList()
for ps in pslist:
print " ".join(str(i) for i in ps)
def listfiles(self, dir):
filelist = self.dm.listFiles(dir)
for file in filelist:
print file
def cli(args=sys.argv[1:]):
# process the command line
cli = DMCli(args)
if __name__ == '__main__':
cli() | 0.269806 | 0.071364 |
import mock
class SharedMock(mock.MagicMock):
"""
A MagicMock whose children are all itself.
>>> m = SharedMock()
>>> m is m.foo is m.bar is m.foo.bar.baz.qux
True
>>> m.foo.side_effect = ['hello from foo']
>>> m.bar()
'hello from foo'
'Magic' methods are not shared.
>>> m.__getitem__ is m.__len__
False
Neither are attributes you assign.
>>> m.explicitly_assigned_attribute = 1
>>> m.explicitly_assigned_attribute is m.foo
False
"""
def __init__(self, *args, **kwargs):
reserved = kwargs.pop('reserved', [])
# XXX: we cannot bind to self until after the mock is initialized
super(SharedMock, self).__init__(*args, **kwargs)
parent = mock.MagicMock()
parent.child = self
self.__parent = parent
self.__reserved = reserved
def _get_child_mock(self, **kwargs):
name = kwargs.get('name', '')
if (name[:2] == name[-2:] == '__') or name in self.__reserved:
return super(SharedMock, self)._get_child_mock(**kwargs)
return self
def __getattr__(self, name):
result = super(SharedMock, self).__getattr__(name)
if result is self:
result._mock_name = result._mock_new_name = name
return result
def assert_chain_calls(self, *calls):
"""
Asserts that a chained method was called (parents in the chain do not
matter, nor are they tracked). Use with `mock.call`.
>>> obj.filter(foo='bar').select_related('baz')
>>> obj.assert_chain_calls(mock.call.filter(foo='bar'))
>>> obj.assert_chain_calls(mock.call.select_related('baz'))
>>> obj.assert_chain_calls(mock.call.reverse())
*** AssertionError: [call.reverse()] not all found in call list, ...
"""
all_calls = self.__parent.mock_calls[:]
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
if self.__parent.mock_calls:
message = '%r not all found in call list, %d other(s) were:\n%r' % (not_found, len(self.__parent.mock_calls), self.__parent.mock_calls)
else:
message = 'no calls were found'
raise AssertionError(message) | mock_django/shared.py | import mock
class SharedMock(mock.MagicMock):
    """
    A MagicMock whose children are all itself.
    >>> m = SharedMock()
    >>> m is m.foo is m.bar is m.foo.bar.baz.qux
    True
    >>> m.foo.side_effect = ['hello from foo']
    >>> m.bar()
    'hello from foo'
    'Magic' methods are not shared.
    >>> m.__getitem__ is m.__len__
    False
    Neither are attributes you assign.
    >>> m.explicitly_assigned_attribute = 1
    >>> m.explicitly_assigned_attribute is m.foo
    False
    """
    def __init__(self, *args, **kwargs):
        # `reserved` names get ordinary (non-shared) child mocks in
        # _get_child_mock below.
        reserved = kwargs.pop('reserved', [])
        # XXX: we cannot bind to self until after the mock is initialized
        super(SharedMock, self).__init__(*args, **kwargs)
        # A hidden parent mock records every call made through this shared
        # mock; assert_chain_calls inspects its mock_calls list.
        parent = mock.MagicMock()
        parent.child = self
        self.__parent = parent
        self.__reserved = reserved
    def _get_child_mock(self, **kwargs):
        # Dunder ("magic") children and reserved names behave like normal
        # MagicMock children; every other attribute access returns self,
        # which is what makes the mock "shared".
        name = kwargs.get('name', '')
        if (name[:2] == name[-2:] == '__') or name in self.__reserved:
            return super(SharedMock, self)._get_child_mock(**kwargs)
        return self
    def __getattr__(self, name):
        # When the shared child (self) is returned, relabel it with the
        # attribute name just accessed so recorded calls carry that name.
        result = super(SharedMock, self).__getattr__(name)
        if result is self:
            result._mock_name = result._mock_new_name = name
        return result
    def assert_chain_calls(self, *calls):
        """
        Asserts that a chained method was called (parents in the chain do not
        matter, nor are they tracked). Use with `mock.call`.
        >>> obj.filter(foo='bar').select_related('baz')
        >>> obj.assert_chain_calls(mock.call.filter(foo='bar'))
        >>> obj.assert_chain_calls(mock.call.select_related('baz'))
        >>> obj.assert_chain_calls(mock.call.reverse())
        *** AssertionError: [call.reverse()] not all found in call list, ...
        """
        # Work on a copy and *remove* each match, so a call expected twice
        # must also have been recorded twice.
        all_calls = self.__parent.mock_calls[:]
        not_found = []
        for kall in calls:
            try:
                all_calls.remove(kall)
            except ValueError:
                not_found.append(kall)
        if not_found:
            if self.__parent.mock_calls:
                message = '%r not all found in call list, %d other(s) were:\n%r' % (not_found, len(self.__parent.mock_calls), self.__parent.mock_calls)
            else:
                message = 'no calls were found'
            raise AssertionError(message)
#LOAD THE DATASET IMAGES (fetched via svn export from the course repository)
import sys
import subprocess
subprocess.call("apt-get install subversion".split())
subprocess.call("svn export https://github.com/YoniChechik/AI_is_Math/trunk/c_07_camera_calibration/images".split())
#IMPORT ALL THE REQUIRED PACKAGES
import numpy as np
import cv2
from glob import glob
import matplotlib.pyplot as plt
#GET IMAGES FROM THE SESSION STORAGE AND DEFINE THEIR SIZE
# square_size is the chessboard square edge length (presumably in cm --
# units only scale the extrinsics, not the intrinsics).
square_size = 2.88
img_mask = "./images/*.jpeg"
# pattern_size counts *inner* corners per row/column of the chessboard.
pattern_size = (9, 6)
figsize = (20, 20)
# DEFINE THE DIMENSIONS OF THE CHESSBOARD AND-
# 1. Create vector to store vectors of 3D points for each chessboard image.
# 2. Create vector to store vectors of 2D points for each chessboard image.
img_names = glob(img_mask)
num_images = len(img_names)
# Planar board: all object points have z == 0, scaled to physical units.
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_points = []
# All images must share the resolution of the first one (asserted below).
h, w = cv2.imread(img_names[0]).shape[:2]
#LOAD THE IMAGES AND APPLY LOOP ON THE SET OF IMAGES
#IF A DESIRED NUMBER OF CORNERS ARE FOUND IN THE IMAGE, DISPLAY THEM ON THE CHESSBOARD
# NOTE(review): no cv2.cornerSubPix refinement is applied, so corner
# locations are pixel-accurate only.
plt.figure(figsize=figsize)
for i, fn in enumerate(img_names):
    print("loading images %s" % fn)
    imgBGR = cv2.imread(fn)
    if imgBGR is None:
        print("Failed to load", fn)
        continue
    imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
    img = cv2.cvtColor(imgRGB, cv2.COLOR_RGB2GRAY)
    assert w == img.shape[1] and h == img.shape[0], f"size: {img.shape[1]} x {img.shape[0]}"
    found, corners = cv2.findChessboardCorners(img, pattern_size)
    if not found:
        print("chessboard not found")
        continue
    # Only the first 12 boards fit the 4x3 subplot grid.
    if i < 12:
        img_w_corners = cv2.drawChessboardCorners(imgRGB, pattern_size, corners, found)
        plt.subplot(4, 3, i + 1)
        plt.imshow(img_w_corners)
    print(f"{fn}... OK")
    img_points.append(corners.reshape(-1, 2))
    obj_points.append(pattern_points)
plt.show()
#CALCULATE THE CAMERA DISTORTION
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
print("distortion coefficients: ", dist_coefs.ravel())
#UNDISTORT THE IMAGE FROM THE CALCULATED CALIBRATION
plt.figure(figsize=figsize)
for i, fn in enumerate(img_names):
    imgBGR = cv2.imread(fn)
    imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
    dst = cv2.undistort(imgRGB, camera_matrix, dist_coefs)
    if i < 12:
        plt.subplot(4, 3, i + 1)
        plt.imshow(dst)
plt.show()
print("ABOVE ARE THE UNDISTORED IMAGES") | ImageProcessingScripts/Image Distortion Correction Using OpenCV/image_distortion_correction.py |
#LOAD THE DATSET IMAGES
import sys
import subprocess
subprocess.call("apt-get install subversion".split())
subprocess.call("svn export https://github.com/YoniChechik/AI_is_Math/trunk/c_07_camera_calibration/images".split())
#IMPORT ALL THE EQUIRED PACKAGES
import numpy as np
import cv2
from glob import glob
import matplotlib.pyplot as plt
#GET IMAGES FROM THE SESSION STORAGE AND DEFINE THEIR SIZE
square_size = 2.88
img_mask = "./images/*.jpeg"
pattern_size = (9, 6)
figsize = (20, 20)
# DEFINE THE DIMENSIONS OF THE CHESSBOARD AND-
# 1. Create vector to store vectors of 3D points for each chessboard image.
# 2. Create vector to store vectors of 2D points for each chessboard image.
img_names = glob(img_mask)
num_images = len(img_names)
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_points = []
h, w = cv2.imread(img_names[0]).shape[:2]
#LOAD THE IMAGES AND APPLY LOOP ON THE SET OF IMAGES
#IF A DESIRED NUMBER OF CORNERS ARE FOUND IN THE IMAGE, REFINE THE PIXEL COORDINATES FOR THOSE IMAGES AND DISPLAY THEM ON THE CHESSBOARD
plt.figure(figsize=figsize)
for i, fn in enumerate(img_names):
print("loading images %s" % fn)
imgBGR = cv2.imread(fn)
if imgBGR is None:
print("Failed to load", fn)
continue
imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(imgRGB, cv2.COLOR_RGB2GRAY)
assert w == img.shape[1] and h == img.shape[0], f"size: {img.shape[1]} x {img.shape[0]}"
found, corners = cv2.findChessboardCorners(img, pattern_size)
if not found:
print("chessboard not found")
continue
if i < 12:
img_w_corners = cv2.drawChessboardCorners(imgRGB, pattern_size, corners, found)
plt.subplot(4, 3, i + 1)
plt.imshow(img_w_corners)
print(f"{fn}... OK")
img_points.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
plt.show()
#CALCULATE THE CAMERA DISTORTION
rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
print("distortion coefficients: ", dist_coefs.ravel())
#UNDISTORT THE IMAGE FROM THE CALCULATED CALIBERATION
plt.figure(figsize=figsize)
for i, fn in enumerate(img_names):
imgBGR = cv2.imread(fn)
imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)
dst = cv2.undistort(imgRGB, camera_matrix, dist_coefs)
if i < 12:
plt.subplot(4, 3, i + 1)
plt.imshow(dst)
plt.show()
print("ABOVE ARE THE UNDISTORED IMAGES") | 0.277767 | 0.284489 |
from utilities import utils
config = utils.config()
# Required top-level keys from ./config.json, exposed as module globals.
# A missing key is reported as a warning instead of crashing -- but note
# that the first KeyError aborts the remaining assignments in this block.
try:
    admins = config["admins"]
    botlog = config["botlog"]
    embed = config["embed"]
    github = config["github"]
    home = config["home"]
    owners = config["owners"]
    postgres = config["postgres"]
    prefix = config["prefix"]
    support = config["support"]
    tester = config["tester"]
    token = config["token"]
except KeyError as e:
    print(
        f"""
        Warning! The key {e} is missing from your ./config.json file.
        Add this key or the bot might not function properly.
        """
    )
emotes = {
"loading": "<a:loading:819280509007560756>",
"success": "<:checkmark:816534984676081705>",
"failed": "<:failed:816521503554273320>",
"warn": "<:warn:816456396735905844>",
"error": "<:error:836325837871382638>",
"announce": "<:announce:834495346058067998>",
"1234button": "<:1234:816460247777411092>",
"info": "<:info:827428282001260544>",
"exclamation": "<:exclamation:827753511395000351>",
"trash": "<:trash:816463111958560819>",
"forward": "<:forward:816458167835820093>",
"forward2": "<:forward2:816457685905440850>",
"backward": "<:backward:816458218145579049>",
"backward2": "<:backward2:816457785167314987>",
"desktop": "<:desktop:817160032391135262>",
"mobile": "<:mobile:817160232248672256>",
"search": "<:web:817163202877194301>",
"online": "<:online:810650040838258711>",
"offline": "<:offline:810650959859810384>",
"dnd": "<:dnd:810650845007708200>",
"idle": "<:idle:810650560146833429>",
"owner": "<:owner:810678076497068032>",
"emoji": "<:emoji:810678717482532874>",
"members": "<:members:810677596453863444>",
"categories": "<:categories:810671569440473119>",
"textchannel": "<:textchannel:810659118045331517>",
"voicechannel": "<:voicechannel:810659257296879684>",
"messages": "<:messages:816696500314701874>",
"commands": "<:command:816693906951372870>",
"role": "<:role:816699853685522442>",
"invite": "<:invite:816700067632513054>",
"bot": "<:bot:816692223566544946>",
"question": "<:question:817545998506393601>",
"lock": "<:lock:817168229712527360>",
"unlock": "<:unlock:817168258825846815>",
"letter": "<:letter:816520981396193280>",
"num0": "<:num0:827219939583721513>",
"num1": "<:num1:827219939961602098>",
"num2": "<:num2:827219940045226075>",
"num3": "<:num3:827219940541071360>",
"num4": "<:num4:827219940556931093>",
"num5": "<:num5:827219941253709835>",
"num6": "<:num6:827219941790580766>",
"num7": "<:num7:827219942343442502>",
"num8": "<:num8:827219942444236810>",
"num9": "<:num9:827219942758809610>",
"stop": "<:stop:827257105420910652>",
"stopsign": "<:stopsign:841848010690658335>",
"clock": "<:clock:839640961755643915>",
"alarm": "<:alarm:839640804246683648>",
"stopwatch": "<:stopwatch:827075158967189544>",
"log": "<:log:835203679388303400>",
"db": "<:database:839574200506646608>",
"privacy": "<:privacy:839574405541134346>",
"delete": "<:deletedata:839587782091735040>",
"heart": "<:heart:839354647546298399>",
"graph": "<:graph:840046538340040765>",
"upload": "<:upload:840086768497983498>",
"download": "<:download:840086726209961984>",
"right": "<:right:840289355057725520>",
"kick": "<:kick:840490315893702667>", # So its a she 💞
"ban": "<:ban:840474680547606548>",
"robot": "<:robot:840482243218767892>",
"plus": "<:plus:840485455333294080>",
"minus": "<:minus:840485608555020308>",
"undo": "<:undo:840486528110166056>",
"redo": "<:redo:840486303354322962>",
"audioadd": "<:audioadd:840491464928002048>",
"audioremove": "<:audioremove:840491410720948235>",
"pin": "<:pin:840492943226961941>",
"pass": "<:pass:840817730277867541>",
"fail": "<:fail:840817815148953600>",
"snowflake": "<:snowflake:841848061412376596>",
"candy": "<:purplecandy:844724185842712576>",
"cupcake": "<:purplecupcake:844688309195374602>"
} | settings/constants.py | from utilities import utils
config = utils.config()
try:
admins = config["admins"]
botlog = config["botlog"]
embed = config["embed"]
github = config["github"]
home = config["home"]
owners = config["owners"]
postgres = config["postgres"]
prefix = config["prefix"]
support = config["support"]
tester = config["tester"]
token = config["token"]
except KeyError as e:
print(
f"""
Warning! The key {e} is missing from your ./config.json file.
Add this key or the bot might not function properly.
"""
)
emotes = {
"loading": "<a:loading:819280509007560756>",
"success": "<:checkmark:816534984676081705>",
"failed": "<:failed:816521503554273320>",
"warn": "<:warn:816456396735905844>",
"error": "<:error:836325837871382638>",
"announce": "<:announce:834495346058067998>",
"1234button": "<:1234:816460247777411092>",
"info": "<:info:827428282001260544>",
"exclamation": "<:exclamation:827753511395000351>",
"trash": "<:trash:816463111958560819>",
"forward": "<:forward:816458167835820093>",
"forward2": "<:forward2:816457685905440850>",
"backward": "<:backward:816458218145579049>",
"backward2": "<:backward2:816457785167314987>",
"desktop": "<:desktop:817160032391135262>",
"mobile": "<:mobile:817160232248672256>",
"search": "<:web:817163202877194301>",
"online": "<:online:810650040838258711>",
"offline": "<:offline:810650959859810384>",
"dnd": "<:dnd:810650845007708200>",
"idle": "<:idle:810650560146833429>",
"owner": "<:owner:810678076497068032>",
"emoji": "<:emoji:810678717482532874>",
"members": "<:members:810677596453863444>",
"categories": "<:categories:810671569440473119>",
"textchannel": "<:textchannel:810659118045331517>",
"voicechannel": "<:voicechannel:810659257296879684>",
"messages": "<:messages:816696500314701874>",
"commands": "<:command:816693906951372870>",
"role": "<:role:816699853685522442>",
"invite": "<:invite:816700067632513054>",
"bot": "<:bot:816692223566544946>",
"question": "<:question:817545998506393601>",
"lock": "<:lock:817168229712527360>",
"unlock": "<:unlock:817168258825846815>",
"letter": "<:letter:816520981396193280>",
"num0": "<:num0:827219939583721513>",
"num1": "<:num1:827219939961602098>",
"num2": "<:num2:827219940045226075>",
"num3": "<:num3:827219940541071360>",
"num4": "<:num4:827219940556931093>",
"num5": "<:num5:827219941253709835>",
"num6": "<:num6:827219941790580766>",
"num7": "<:num7:827219942343442502>",
"num8": "<:num8:827219942444236810>",
"num9": "<:num9:827219942758809610>",
"stop": "<:stop:827257105420910652>",
"stopsign": "<:stopsign:841848010690658335>",
"clock": "<:clock:839640961755643915>",
"alarm": "<:alarm:839640804246683648>",
"stopwatch": "<:stopwatch:827075158967189544>",
"log": "<:log:835203679388303400>",
"db": "<:database:839574200506646608>",
"privacy": "<:privacy:839574405541134346>",
"delete": "<:deletedata:839587782091735040>",
"heart": "<:heart:839354647546298399>",
"graph": "<:graph:840046538340040765>",
"upload": "<:upload:840086768497983498>",
"download": "<:download:840086726209961984>",
"right": "<:right:840289355057725520>",
"kick": "<:kick:840490315893702667>", # So its a she 💞
"ban": "<:ban:840474680547606548>",
"robot": "<:robot:840482243218767892>",
"plus": "<:plus:840485455333294080>",
"minus": "<:minus:840485608555020308>",
"undo": "<:undo:840486528110166056>",
"redo": "<:redo:840486303354322962>",
"audioadd": "<:audioadd:840491464928002048>",
"audioremove": "<:audioremove:840491410720948235>",
"pin": "<:pin:840492943226961941>",
"pass": "<:pass:840817730277867541>",
"fail": "<:fail:840817815148953600>",
"snowflake": "<:snowflake:841848061412376596>",
"candy": "<:purplecandy:844724185842712576>",
"cupcake": "<:purplecupcake:844688309195374602>"
} | 0.213869 | 0.162015 |
#Below function is inspired from the REVC-Complementing-a-Strand-of-DNA.py
def take_reverse_complement(string):
    """Return the reverse complement of a DNA string made of A/C/G/T."""
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    # Complementing is per-character, so complementing the reversed string
    # equals reversing the complemented string -- one pass does both.
    return ''.join(pairs[base] for base in reversed(string))
stop_codons = ['UAG','UGA','UAA']
rna_to_aminoacid_dictionary = {'UUU' : 'F', "UUC": 'F', 'UUA' : 'L', 'UUG' : 'L', 'UCU' : 'S' , 'UCA' :'S', 'UCC' : 'S', 'UCG' : 'S', 'UAU' : 'Y', 'UAC': 'Y', 'UAA': 'STOP' , 'UAG': 'STOP', 'UGU' : 'C', 'UGC': 'C', 'UGA' : 'STOP', 'UGG': 'W', 'CUU' : 'L', 'CUC' : 'L', 'CUA': 'L', 'CUG': 'L', 'CCU': 'P', 'CCC' : 'P', 'CCA' : 'P', 'CCG' : 'P', 'CAU' : 'H', 'CAC':'H', 'CAA':'Q','CAG':'Q','CGU': 'R','CGC':'R','CGA':'R','CGG':'R','AUU':'I','AUC':'I','AUA':'I','AUG':'M','ACU':'T','ACC':'T','ACA':'T','ACG':'T','AAU':'N','AAC':'N','AAA':'K','AAG':'K','AGU':'S','AGC':'S','AGA':'R','AGG':'R','GUU':'V','GUC':'V','GUA':'V','GUG':'V','GCU':'A','GCC':'A','GCA':'A','GCG':'A','GAU':'D','GAC':'D','GAA':'E','GAG':'E','GGU':'G','GGC':'G','GGA':'G','GGG':'G'}
fasta_formated_input = ">Rosalind_8160\nTGTTGATCTCACCCGCTAGGCACGCTAGGTATATAACCCGCAATATGCCGCGCCGAACTCCTTTTGTGGATTCTAAGGAAGAGTGCACGCACCGACTCCCAATCGGGTGCGGGATTCGTGTCGTCCTTGCGTAAGCCTGGGGTTAGTTATCATGACGGATCCCGGGTCTAATCTCCCTTGCTACATGAAGCTCCCCTACCTCAGGGTCCAGCCATAACGGCAAGACGCGTGCTAAGCGTACAGAAGATCTATGTCCATAGTAGACACTCGCACCATAGCCTGGATGACCGCTCTTGGAACCGGTCCGTGCCCCTATGCGCATGATGTCCCGAGCGGGTGATTTGCGTAACCCACTTACGTGGATGAGACAAATTGTAAATGCCTGTGATCGGCCACAAAAGAGTTTCGGAATGAATGAATTCGTCAACGGTGCGAGTGCGGCGTAGCTACGCCGCACTCGCACCGTTGACGAATTCATTCATAAGAGTACATAACCTACTGTATACAAACGCCGCATGAACGTAGGCTTTATGACAAAGTGTCTTTGGCGTGTAACGTTAACTGTAAACTGATTATCCTGCGAGGTTCATTTCTTCTTAGGGCAAGGGGATTACCCTCTCCCGAACCGCATGATAGTCAATGCATGGTTATTGTGTATGAGTGTTCCTTGCGATGCTGTCCACGTTCCAACCTCAAATGTATTAGGTTCATAAAGTTGTTTTGGCCTTTGCGTCGGGAACACAAAGGCGTCGTGGACGCATTGAAGTTTAAGCCTTTGGAGACGAAGATACTTGCGGCCGGCATAGACGGCATGTACGGGTCGCGAGAATGGGATTAGCCAGTGGTTAGACCTCCAGAGTTAAGAAGGGCTTTTACTCCAAGGTTTTTTGTG"
#Coding DNA Sequence is provided (second line of the FASTA record)
dna_sequence = fasta_formated_input.split("\n")[1]
open_frame_array = []
#Two passes: one for the direct strand, one for the reverse complement
for reverse_or_direct_index in range(2):
    #Second pass is dedicated to the reverse-complement strand
    if(reverse_or_direct_index == 1):
        dna_sequence = take_reverse_complement(dna_sequence)
    #Coding DNA is transcribed into mRNA (T -> U)
    mrna_sequence = dna_sequence.replace("T","U")
    length = len(mrna_sequence)
    #Three possible reading frames per strand are checked
    for j in range(3):
        last_codon_end = length
        #The first frame is the standard one.
        #In the second and third frames, the end of the last full codon differs:
        #refer to http://www.cs.wustl.edu/~cytron/101Pages/f13/Modules/3/Extensions/frame.jpg
        if(j != 0):
            last_codon_end -= (3 - j)
        result = ""
        start_reached = False
        #Reading frame j covers codons from offset j up to last_codon_end
        for i in range(j, last_codon_end, 3):
            #Extract the next codon
            codon = mrna_sequence[i: i + 3]
            #If we already encountered a start codon
            if start_reached:
                #A stop codon closes an open reading frame
                if codon in stop_codons:
                    #Stop reached: record the ORF body (start..before stop)
                    start_reached = False
                    open_frame_array.append(result)
                    result = ""
                #Otherwise keep extending the current sequence
                else:
                    result += codon
            #If no start codon has been seen yet, look for one
            else:
                if codon == "AUG":
                    start_reached = True
                    result += codon
#Get rid of duplicate ORFs found in overlapping frames/strands
open_frame_array = list(set(open_frame_array))
#Amino acid sequences are collected in this list
open_frame_protein_array = []
#Each mRNA ORF is translated into an amino-acid sequence, codon by codon
for seq in open_frame_array:
    result = ""
    for i in range(0, len(seq), 3):
        result += rna_to_aminoacid_dictionary[seq[i: i + 3]]
    open_frame_protein_array.append(result)
#Multiple start codon situation is handled: every internal M (start codon)
#opens an additional reading frame sharing the same stop, so record the full
#suffix starting at each internal M.  Bug fix: the original appended only the
#single character seq[i] ("M"), losing every internal-start protein.  Iterate
#over a snapshot so the suffixes we append are not re-scanned (suffixes of
#suffixes are already covered by scanning the original sequences).
for seq in list(open_frame_protein_array):
    for i in range(1, len(seq)):
        if seq[i] == "M":
            open_frame_protein_array.append(seq[i:])
#Get rid of duplicate proteins before printing
open_frame_protein_array = list(set(open_frame_protein_array))
#Print every candidate protein, one per line (set order is unspecified)
for protein in open_frame_protein_array:
    print(protein)
def take_reverse_complement(string):
#Take the reverse
reversed_string = string[::-1]
#Create a dictionary
complement_dict = {'A':'T','T':'A','G':'C','C':'G'}
#Take the compelement of the reverse
complement_reversed_string = ""
for base in reversed_string:
complement_reversed_string += complement_dict[base]
return complement_reversed_string
stop_codons = ['UAG','UGA','UAA']
rna_to_aminoacid_dictionary = {'UUU' : 'F', "UUC": 'F', 'UUA' : 'L', 'UUG' : 'L', 'UCU' : 'S' , 'UCA' :'S', 'UCC' : 'S', 'UCG' : 'S', 'UAU' : 'Y', 'UAC': 'Y', 'UAA': 'STOP' , 'UAG': 'STOP', 'UGU' : 'C', 'UGC': 'C', 'UGA' : 'STOP', 'UGG': 'W', 'CUU' : 'L', 'CUC' : 'L', 'CUA': 'L', 'CUG': 'L', 'CCU': 'P', 'CCC' : 'P', 'CCA' : 'P', 'CCG' : 'P', 'CAU' : 'H', 'CAC':'H', 'CAA':'Q','CAG':'Q','CGU': 'R','CGC':'R','CGA':'R','CGG':'R','AUU':'I','AUC':'I','AUA':'I','AUG':'M','ACU':'T','ACC':'T','ACA':'T','ACG':'T','AAU':'N','AAC':'N','AAA':'K','AAG':'K','AGU':'S','AGC':'S','AGA':'R','AGG':'R','GUU':'V','GUC':'V','GUA':'V','GUG':'V','GCU':'A','GCC':'A','GCA':'A','GCG':'A','GAU':'D','GAC':'D','GAA':'E','GAG':'E','GGU':'G','GGC':'G','GGA':'G','GGG':'G'}
fasta_formated_input = ">Rosalind_8160\nTGTTGATCTCACCCGCTAGGCACGCTAGGTATATAACCCGCAATATGCCGCGCCGAACTCCTTTTGTGGATTCTAAGGAAGAGTGCACGCACCGACTCCCAATCGGGTGCGGGATTCGTGTCGTCCTTGCGTAAGCCTGGGGTTAGTTATCATGACGGATCCCGGGTCTAATCTCCCTTGCTACATGAAGCTCCCCTACCTCAGGGTCCAGCCATAACGGCAAGACGCGTGCTAAGCGTACAGAAGATCTATGTCCATAGTAGACACTCGCACCATAGCCTGGATGACCGCTCTTGGAACCGGTCCGTGCCCCTATGCGCATGATGTCCCGAGCGGGTGATTTGCGTAACCCACTTACGTGGATGAGACAAATTGTAAATGCCTGTGATCGGCCACAAAAGAGTTTCGGAATGAATGAATTCGTCAACGGTGCGAGTGCGGCGTAGCTACGCCGCACTCGCACCGTTGACGAATTCATTCATAAGAGTACATAACCTACTGTATACAAACGCCGCATGAACGTAGGCTTTATGACAAAGTGTCTTTGGCGTGTAACGTTAACTGTAAACTGATTATCCTGCGAGGTTCATTTCTTCTTAGGGCAAGGGGATTACCCTCTCCCGAACCGCATGATAGTCAATGCATGGTTATTGTGTATGAGTGTTCCTTGCGATGCTGTCCACGTTCCAACCTCAAATGTATTAGGTTCATAAAGTTGTTTTGGCCTTTGCGTCGGGAACACAAAGGCGTCGTGGACGCATTGAAGTTTAAGCCTTTGGAGACGAAGATACTTGCGGCCGGCATAGACGGCATGTACGGGTCGCGAGAATGGGATTAGCCAGTGGTTAGACCTCCAGAGTTAAGAAGGGCTTTTACTCCAAGGTTTTTTGTG"
#Coding DNA Sequence is provided
dna_sequence = fasta_formated_input.split("\n")[1]
open_frame_array = []
#Two loops: one for direct one for inverse direction
for reverse_or_direct_index in range(2):
#Second loop will be dedicated for reverse direction
if(reverse_or_direct_index == 1):
dna_sequence = take_reverse_complement(dna_sequence)
#Coding DNA is turned into mRNA
mrna_sequence = dna_sequence.replace("T","U")
length = len(mrna_sequence)
#Three possible reading frame will be checked
for j in range(3):
last_codon_end = length
#Very first loop is the standart one
#In second and third loops, end of the last codon will differ
#Refer this http://www.cs.wustl.edu/~cytron/101Pages/f13/Modules/3/Extensions/frame.jpg
if(j != 0):
last_codon_end -= (3 - j)
result = ""
start_reached = False
#Reading frames start with 'j' end with last_codon_end
for i in range(j, last_codon_end, 3):
#Codon is extracted
codon = mrna_sequence[i: i + 3]
#If we already encountered with a start codon
if start_reached:
#If we see a stop codon than we created a open reading frame
if codon in stop_codons:
#Stop reached
start_reached = False
open_frame_array.append(result)
result = ""
#Otherwise keep on creating a sequence
else:
result += codon
#If start codon not yet seen, look for it
else:
if codon == "AUG":
start_reached = True
result += codon
#Get rid of duplicates
open_frame_array = list(set(open_frame_array))
#Amino acid sequences will be hold in this list
open_frame_protein_array = []
#mRNA sequence is translated into aminoacid sequence
for seq in open_frame_array:
result = ""
for i in range(0, len(seq), 3):
result += rna_to_aminoacid_dictionary[seq[i: i + 3]]
open_frame_protein_array.append(result)
#Multiple start codon situation is handled: every internal M (start codon)
#opens an additional reading frame sharing the same stop, so record the full
#suffix starting at each internal M.  Bug fix: the original appended only the
#single character seq[i] ("M"), losing every internal-start protein.  Iterate
#over a snapshot so the suffixes we append are not re-scanned (suffixes of
#suffixes are already covered by scanning the original sequences).
for seq in list(open_frame_protein_array):
    for i in range(1, len(seq)):
        if seq[i] == "M":
            open_frame_protein_array.append(seq[i:])
#Get rid of duplicates
open_frame_protein_array = list(set(open_frame_protein_array))
#Let's print those out
for protein in open_frame_protein_array:
print(protein) | 0.437944 | 0.54819 |
import datetime
from typing import List
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import LoginUser, Base, Content, Client
# Module-level database wiring: one shared SQLite engine and one shared session
# used by every helper function in this module.
engine = create_engine('sqlite:///pinet_screens.db?check_same_thread=False')
Base.metadata.bind = engine
DBParent = sessionmaker(bind=engine)
# NOTE(review): a single module-global Session shared by all callers is not
# safe under concurrent access -- confirm the app only uses it from one thread
# (check_same_thread=False merely disables SQLite's own guard).
db_session = DBParent()
def create_user(username, hash, salt):
    """Persist a new login user with a pre-computed password hash and salt."""
    db_session.add(LoginUser(username=username, password_hash=hash, password_salt=salt))
    db_session.commit()
def get_login_user_from_username(username):
    """Return the LoginUser with the given username, or None if absent."""
    return db_session.query(LoginUser).filter(LoginUser.username == username).first()
def get_all_content():
    """Return every Content row."""
    return db_session.query(Content).all()
def get_all_browser_content():
    """Return all Content rows whose browser flag is set."""
    return db_session.query(Content).filter(Content.browser).all()
def get_all_script_content():
    """Return all Content rows whose script flag is set."""
    return db_session.query(Content).filter(Content.script).all()
def create_content(content_name, browser=False, script=False, url=None, script_body=None):
    """Create a new Content row.

    Returns True on success, False when a row with this name already exists.
    """
    existing = db_session.query(Content).filter(Content.content_name == content_name).first()
    if existing:
        return False  # Content already exists with this name
    # TODO : Fix scripts being saved without newlines
    db_session.add(Content(content_name=content_name, browser=browser, url=url,
                           script=script, script_body=script_body))
    db_session.commit()
    return True
def get_all_clients():
    """Return every Client row."""
    return db_session.query(Client).all()
def create_client(mac_address, hostname, location, client_id=None):
    """Create a new client, or update an existing one when client_id is given.

    Returns the client's id on success. Returns False when creating and the
    MAC address or hostname is already taken, or when client_id does not
    match any stored client.
    """
    if not client_id and (db_session.query(Client).filter(Client.mac_address == mac_address).first()
                          or db_session.query(Client).filter(Client.hostname == hostname).first()):
        return False
    if client_id:
        client = db_session.query(Client).filter(Client.client_id == client_id).first()
        # BUG FIX: an unknown client_id previously left client as None and the
        # attribute assignments below raised AttributeError; fail cleanly.
        if client is None:
            return False
    else:
        client = Client()
        client.ldm_autologin = False
    client.mac_address = mac_address
    client.hostname = hostname
    client.location = location
    db_session.add(client)
    db_session.commit()
    return client.client_id
def update_client_content(client_id, content_id):
    """Point the client identified by client_id at the given content item."""
    target = db_session.query(Client).filter(Client.client_id == int(client_id)).first()
    target.content_id = content_id
    db_session.commit()
def get_content_from_id(content_id):
    """Return the Content row with the given id, or None if absent."""
    return db_session.query(Content).filter(Content.content_id == int(content_id)).first()
def remove_content_from_id(content_id):
    """Delete the Content row with the given id."""
    db_session.delete(get_content_from_id(content_id))
    db_session.commit()
def get_client_from_id(client_id):
    """Return the Client row with the given id, or None if absent."""
    return db_session.query(Client).filter(Client.client_id == int(client_id)).first()
def remove_client_from_id(client_id):
    """Delete the Client row with the given id."""
    db_session.delete(get_client_from_id(client_id))
    db_session.commit()
def update_ldm_autologin(client_id, ldm_autologin):
    """Set the LDM autologin flag for the given client."""
    target = get_client_from_id(client_id)
    target.ldm_autologin = ldm_autologin
    db_session.commit()
def get_login_user_from_id(user_id):
    """Return the LoginUser with the given id, or None if absent."""
    return db_session.query(LoginUser).filter(LoginUser.user_id == int(user_id)).first()
def get_all_users() -> List[LoginUser]:
    """Return every LoginUser row."""
    return db_session.query(LoginUser).all()
def update_client_check_in(client_id):
    """Record the current wall-clock time as the client's last check-in."""
    target = get_client_from_id(client_id)
    target.last_checked_in = datetime.datetime.now()
    db_session.commit()
def remove_user(user_id):
    """Delete the LoginUser with the given id (no-op when absent)."""
    db_session.query(LoginUser).filter(LoginUser.user_id == user_id).delete()
    db_session.commit()
def change_password(user_id, password_hash, password_salt):
    """Replace the stored password hash and salt for the given user.

    :param user_id: id of the user to update.
    :param password_hash: new pre-computed password hash to store.
    :param password_salt: salt used to compute the hash.
    """
    user = db_session.query(LoginUser).filter(LoginUser.user_id == user_id).first()
    # BUG FIX: the original assigned the literal placeholder `<PASSWORD>`
    # (a syntax error) instead of the supplied parameters.
    user.password_hash = password_hash
    user.password_salt = password_salt
    db_session.commit()
from typing import List
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import LoginUser, Base, Content, Client
engine = create_engine('sqlite:///pinet_screens.db?check_same_thread=False')
Base.metadata.bind = engine
DBParent = sessionmaker(bind=engine)
db_session = DBParent()
def create_user(username, hash, salt):
user = LoginUser(username=username, password_hash=hash, password_salt=salt)
db_session.add(user)
db_session.commit()
def get_login_user_from_username(username):
user = db_session.query(LoginUser).filter(LoginUser.username == username).first()
return user
def get_all_content():
content = db_session.query(Content).all()
return content
def get_all_browser_content():
content = db_session.query(Content).filter(Content.browser).all()
return content
def get_all_script_content():
content = db_session.query(Content).filter(Content.script).all()
return content
def create_content(content_name, browser=False, script=False, url=None, script_body=None):
if db_session.query(Content).filter(Content.content_name == content_name).first():
return False # Content already exists with this name
# TODO : Fix scripts being saved without newlines
new_content = Content(content_name=content_name, browser=browser, url=url, script=script, script_body=script_body)
db_session.add(new_content)
db_session.commit()
return True
def get_all_clients():
clients = db_session.query(Client).all()
return clients
def create_client(mac_address, hostname, location, client_id=None):
if not client_id and (db_session.query(Client).filter(Client.mac_address == mac_address).first() or db_session.query(Client).filter(Client.hostname == hostname).first()):
return False
if client_id:
client = db_session.query(Client).filter(Client.client_id == client_id).first()
else:
client = Client()
client.ldm_autologin = False
client.mac_address = mac_address
client.hostname = hostname
client.location = location
db_session.add(client)
db_session.commit()
return client.client_id
def update_client_content(client_id, content_id):
client = db_session.query(Client).filter(Client.client_id == int(client_id)).first()
client.content_id = content_id
db_session.commit()
def get_content_from_id(content_id):
content = db_session.query(Content).filter(Content.content_id == int(content_id)).first()
return content
def remove_content_from_id(content_id):
content = get_content_from_id(content_id)
db_session.delete(content)
db_session.commit()
def get_client_from_id(client_id):
client = db_session.query(Client).filter(Client.client_id == int(client_id)).first()
return client
def remove_client_from_id(client_id):
client = get_client_from_id(client_id)
db_session.delete(client)
db_session.commit()
def update_ldm_autologin(client_id, ldm_autologin):
client = get_client_from_id(client_id)
client.ldm_autologin = ldm_autologin
db_session.commit()
def get_login_user_from_id(user_id):
login_user = db_session.query(LoginUser).filter(LoginUser.user_id == int(user_id)).first()
return login_user
def get_all_users() -> List[LoginUser]:
users = db_session.query(LoginUser).all()
return users
def update_client_check_in(client_id):
client = get_client_from_id(client_id)
client.last_checked_in = datetime.datetime.now()
db_session.commit()
def remove_user(user_id):
db_session.query(LoginUser).filter(LoginUser.user_id == user_id).delete()
db_session.commit()
def change_password(user_id, password_hash, password_salt):
    """Replace the stored password hash and salt for the given user.

    :param user_id: id of the user to update.
    :param password_hash: new pre-computed password hash to store.
    :param password_salt: salt used to compute the hash.
    """
    user = db_session.query(LoginUser).filter(LoginUser.user_id == user_id).first()
    # BUG FIX: the original assigned the literal placeholder `<PASSWORD>`
    # (a syntax error) instead of the supplied parameters.
    user.password_hash = password_hash
    user.password_salt = password_salt
    db_session.commit()
from __future__ import annotations # allows using a class as typing inside the same class
from typing import List
def sort_by_name(name: str):
    """Sort key for signal-group names: order by length first, then lexicographically."""
    return (len(name), name)
class GreenYellowPhase:
    """The (interval_index + 1)-th greenyellow interval of one signal group."""

    def __init__(self, signalgroup_id: str, interval_index: int) -> None:
        """
        :param signalgroup_id: id of the signal group this interval belongs to.
        :param interval_index: zero-based index of the greenyellow interval.
        """
        # Coerce explicitly so downstream serialization always sees str/int.
        self.signalgroup_id = str(signalgroup_id)
        self.interval_index = int(interval_index)

    def to_json(self) -> List:
        """Return a json.dumps()-serializable representation."""
        return [self.signalgroup_id, self.interval_index]

    @staticmethod
    def from_json(json_list: List) -> GreenYellowPhase:
        """Inverse of to_json(): rebuild the object from its list form."""
        return GreenYellowPhase(signalgroup_id=json_list[0],
                                interval_index=json_list[1])

    def __str__(self):
        """Compact human-readable representation."""
        return f"(id={self.signalgroup_id}, index={self.interval_index})"
class Phase:
    """A set of greenyellow intervals that (may) occur at the same time."""

    def __init__(self, greenyellow_phases: List[GreenYellowPhase]) -> None:
        self.greenyellow_phases = greenyellow_phases
        self._validate()

    def to_json(self) -> List[List]:
        """Return a json.dumps()-serializable representation."""
        return [gyp.to_json() for gyp in self.greenyellow_phases]

    @staticmethod
    def from_json(phase_list: List[List]) -> Phase:
        """Inverse of to_json(): rebuild the Phase from its nested-list form."""
        return Phase(greenyellow_phases=[GreenYellowPhase.from_json(item)
                                         for item in phase_list])

    def _validate(self):
        """Raise ValueError unless greenyellow_phases is a list of GreenYellowPhase."""
        error_message = "greenyellow_phases should be a list of GreenYellowPhase-objects"
        if not isinstance(self.greenyellow_phases, list):
            raise ValueError(error_message)
        for gyp in self.greenyellow_phases:
            if not isinstance(gyp, GreenYellowPhase):
                raise ValueError(error_message)

    def __str__(self) -> str:
        """String representation; entries in name-sorted order for determinism."""
        ordered = sorted(self.greenyellow_phases,
                         key=lambda gyp: sort_by_name(gyp.signalgroup_id))
        return "[" + ", ".join(str(gyp) for gyp in ordered) + "]"
class PhaseDiagram:
    def __init__(self, phases: List[Phase]) -> None:
        """A phasediagram is a sequence of periodically repeating Phases; a phase diagram specifies the sequence in
        which the signal groups receive a greenyellow interval. """
        self.phases = phases
        # Fail fast on wrong input types so errors surface at construction time.
        self._validate()

    def to_json(self) -> List[List[List]]:
        """get json_serializable structure that can be stored as json with json.dumps()"""
        return [phase.to_json() for phase in self.phases]

    @staticmethod
    def from_json(phase_lists: List[List[List]]) -> PhaseDiagram:
        """Loading phase diagram from json (expected same json structure as generated with to_json)"""
        return PhaseDiagram(phases=[Phase.from_json(phase_list=phase_list) for phase_list in phase_lists])

    def _validate(self):
        """ Validate arguments of PhaseDiagram object"""
        error_message = "phases should be a list of Phase-objects"
        if not isinstance(self.phases, list):
            raise ValueError(error_message)
        for phase in self.phases:
            if not isinstance(phase, Phase):
                raise ValueError(error_message)

    def __str__(self) -> str:
        """string representation of object"""
        # Header line followed by one tab-indented line per phase.
        string = f"phase diagram:"
        for phase in self.phases:
            string += "\n"
            string += f"\t{str(phase)}"
return string | swift_cloud_py/entities/control_output/phase_diagram.py | from __future__ import annotations # allows using a class as typing inside the same class
from typing import List
def sort_by_name(name: str):
""" function needed to sort signal groups by name """
return len(name), name
class GreenYellowPhase:
def __init__(self, signalgroup_id: str, interval_index: int) -> None:
"""
Refers to the (interval_index + 1)th greenyellow interval of the signal group with id signalgroup_id
:param signalgroup_id:
:param interval_index:
"""
# explicit type conversion ensures correct types are used
self.signalgroup_id = str(signalgroup_id)
self.interval_index = int(interval_index)
def to_json(self) -> List:
"""get json-serializable structure that can be stored as json with json.dumps()"""
return [self.signalgroup_id, self.interval_index]
@staticmethod
def from_json(json_list: List) -> GreenYellowPhase:
"""Loading greenyellow phase from json (expected same json structure as generated with to_json)"""
return GreenYellowPhase(signalgroup_id=json_list[0], interval_index=json_list[1])
def __str__(self):
"""string representation of object"""
return f"(id={self.signalgroup_id}, index={self.interval_index})"
class Phase:
def __init__(self, greenyellow_phases: List[GreenYellowPhase]) -> None:
"""A phase represents a number of greenyellow intervals that (may) occur at the same time"""
self.greenyellow_phases = greenyellow_phases
self._validate()
def to_json(self) -> List[List]:
"""get json-serializable structure that can be stored as json with json.dumps()"""
return [greenyellow_phase.to_json() for greenyellow_phase in self.greenyellow_phases]
@staticmethod
def from_json(phase_list: List[List]) -> Phase:
"""Loading phase from json (expected same json structure as generated with to_json)"""
return Phase(greenyellow_phases=[GreenYellowPhase.from_json(greenyellow_phase)
for greenyellow_phase in phase_list])
def _validate(self):
""" Validate arguments of Phase object"""
error_message = "greenyellow_phases should be a list of GreenYellowPhase-objects"
if not isinstance(self.greenyellow_phases, list):
raise ValueError(error_message)
for greenyellow_phase in self.greenyellow_phases:
if not isinstance(greenyellow_phase, GreenYellowPhase):
raise ValueError(error_message)
def __str__(self) -> str:
"""string representation of object"""
string = "["
# visualize in sorted (by name) order
greenyellow_phases = sorted(self.greenyellow_phases,
key=lambda _greenyellow_phase: sort_by_name(_greenyellow_phase.signalgroup_id))
for index, greenyellow_phase in enumerate(greenyellow_phases):
if index > 0:
string += ", "
string += str(greenyellow_phase)
string += "]"
return string
class PhaseDiagram:
def __init__(self, phases: List[Phase]) -> None:
"""A phasediagram is a sequence of periodically repeating Phases; a phase diagram specifies the sequence in
which the signal groups receive a greenyellow interval. """
self.phases = phases
self._validate()
def to_json(self) -> List[List[List]]:
"""get json_serializable structure that can be stored as json with json.dumps()"""
return [phase.to_json() for phase in self.phases]
@staticmethod
def from_json(phase_lists: List[List[List]]) -> PhaseDiagram:
"""Loading phase diagram from json (expected same json structure as generated with to_json)"""
return PhaseDiagram(phases=[Phase.from_json(phase_list=phase_list) for phase_list in phase_lists])
def _validate(self):
""" Validate arguments of PhaseDiagram object"""
error_message = "phases should be a list of Phase-objects"
if not isinstance(self.phases, list):
raise ValueError(error_message)
for phase in self.phases:
if not isinstance(phase, Phase):
raise ValueError(error_message)
def __str__(self) -> str:
"""string representation of object"""
string = f"phase diagram:"
for phase in self.phases:
string += "\n"
string += f"\t{str(phase)}"
return string | 0.948799 | 0.518729 |
# Test whether the broker reduces the message expiry interval when republishing
# a retained message, and eventually removes it.
# MQTT v5
# Helper publishes a message, with a medium length expiry with retained set. It
# publishes a second message with retained set but no expiry.
# Client connects, subscribes, gets messages, disconnects.
# We wait until the expiry will have expired.
# Client connects, subscribes, doesn't get expired message, does get
# non-expired message.
from mosq_test_helper import *
def do_test():
    """Check that the broker reduces an MQTT v5 retained message's expiry
    interval and eventually drops the expired retained message, while keeping
    a retained message published without an expiry interval."""
    rc = 1
    keepalive = 60
    connect_packet = mosq_test.gen_connect("subpub", keepalive=keepalive, proto_ver=5)
    connack_packet = mosq_test.gen_connack(rc=0, proto_ver=5)

    mid = 1
    subscribe1_packet = mosq_test.gen_subscribe(mid, "subpub/expired", 1, proto_ver=5)
    suback1_packet = mosq_test.gen_suback(mid, 1, proto_ver=5)
    mid = 2
    subscribe2_packet = mosq_test.gen_subscribe(mid, "subpub/kept", 1, proto_ver=5)
    suback2_packet = mosq_test.gen_suback(mid, 1, proto_ver=5)

    helper_connect = mosq_test.gen_connect("helper", proto_ver=5)
    helper_connack = mosq_test.gen_connack(rc=0, proto_ver=5)

    # Message 1 carries a 4 second expiry interval; message 2 never expires.
    mid = 1
    props = mqtt5_props.gen_uint32_prop(mqtt5_props.PROP_MESSAGE_EXPIRY_INTERVAL, 4)
    publish1_packet = mosq_test.gen_publish("subpub/expired", mid=mid, qos=1, retain=True, payload="message1", proto_ver=5, properties=props)
    puback1_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
    mid = 2
    publish2s_packet = mosq_test.gen_publish("subpub/kept", mid=mid, qos=1, retain=True, payload="message2", proto_ver=5)
    puback2s_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
    mid = 1
    publish2r_packet = mosq_test.gen_publish("subpub/kept", mid=mid, qos=1, retain=True, payload="message2", proto_ver=5)
    puback2r_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)

    port = mosq_test.get_port()
    broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
    try:
        # Helper publishes both retained messages.
        helper = mosq_test.do_client_connect(helper_connect, helper_connack, timeout=20, port=port)
        mosq_test.do_send_receive(helper, publish1_packet, puback1_packet, "puback 1")
        mosq_test.do_send_receive(helper, publish2s_packet, puback2s_packet, "puback 2")
        helper.close()

        # First subscriber session: both retained messages must be delivered.
        sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
        mosq_test.do_send_receive(sock, subscribe1_packet, suback1_packet, "suback 1-1")
        mosq_test.expect_packet(sock, "publish 1", publish1_packet)
        sock.send(puback1_packet)
        mosq_test.do_send_receive(sock, subscribe2_packet, suback2_packet, "suback 2-1")
        mosq_test.expect_packet(sock, "publish 2", publish2s_packet)
        sock.send(puback2s_packet)
        sock.close()

        # Wait past the 4 second expiry interval.
        time.sleep(5)

        # Second session: the expired message must be gone, the other kept.
        sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
        mosq_test.do_send_receive(sock, subscribe1_packet, suback1_packet, "suback 1-2")
        # We shouldn't receive a publish here.
        # This will fail if we do receive a publish.
        mosq_test.do_send_receive(sock, subscribe2_packet, suback2_packet, "suback 2-2")
        mosq_test.expect_packet(sock, "publish 2", publish2r_packet)
        sock.send(puback2r_packet)
        sock.close()
        rc = 0
    except mosq_test.TestError:
        pass
    finally:
        broker.terminate()
        broker.wait()
        (stdo, stde) = broker.communicate()
        if rc:
            print(stde.decode('utf-8'))
            # BUG FIX: the original referenced an undefined name `proto_ver`,
            # raising NameError and masking the broker's error output.
            # This test only runs against MQTT v5.
            print("proto_ver=5")
        exit(rc)
do_test()
exit(0) | eclipse-mosquitto/test/broker/02-subpub-qos1-message-expiry-retain.py |
# Test whether the broker reduces the message expiry interval when republishing
# a retained message, and eventually removes it.
# MQTT v5
# Helper publishes a message, with a medium length expiry with retained set. It
# publishes a second message with retained set but no expiry.
# Client connects, subscribes, gets messages, disconnects.
# We wait until the expiry will have expired.
# Client connects, subscribes, doesn't get expired message, does get
# non-expired message.
from mosq_test_helper import *
def do_test():
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("subpub", keepalive=keepalive, proto_ver=5)
connack_packet = mosq_test.gen_connack(rc=0, proto_ver=5)
mid = 1
subscribe1_packet = mosq_test.gen_subscribe(mid, "subpub/expired", 1, proto_ver=5)
suback1_packet = mosq_test.gen_suback(mid, 1, proto_ver=5)
mid = 2
subscribe2_packet = mosq_test.gen_subscribe(mid, "subpub/kept", 1, proto_ver=5)
suback2_packet = mosq_test.gen_suback(mid, 1, proto_ver=5)
helper_connect = mosq_test.gen_connect("helper", proto_ver=5)
helper_connack = mosq_test.gen_connack(rc=0, proto_ver=5)
mid=1
props = mqtt5_props.gen_uint32_prop(mqtt5_props.PROP_MESSAGE_EXPIRY_INTERVAL, 4)
publish1_packet = mosq_test.gen_publish("subpub/expired", mid=mid, qos=1, retain=True, payload="message1", proto_ver=5, properties=props)
puback1_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
mid=2
publish2s_packet = mosq_test.gen_publish("subpub/kept", mid=mid, qos=1, retain=True, payload="message2", proto_ver=5)
puback2s_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
mid=1
publish2r_packet = mosq_test.gen_publish("subpub/kept", mid=mid, qos=1, retain=True, payload="message2", proto_ver=5)
puback2r_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
helper = mosq_test.do_client_connect(helper_connect, helper_connack, timeout=20, port=port)
mosq_test.do_send_receive(helper, publish1_packet, puback1_packet, "puback 1")
mosq_test.do_send_receive(helper, publish2s_packet, puback2s_packet, "puback 2")
helper.close()
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, subscribe1_packet, suback1_packet, "suback 1-1")
mosq_test.expect_packet(sock, "publish 1", publish1_packet)
sock.send(puback1_packet)
mosq_test.do_send_receive(sock, subscribe2_packet, suback2_packet, "suback 2-1")
mosq_test.expect_packet(sock, "publish 2", publish2s_packet)
sock.send(puback2s_packet)
sock.close()
time.sleep(5)
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, subscribe1_packet, suback1_packet, "suback 1-2")
# We shouldn't receive a publish here
# This will fail if we do receive a publish
mosq_test.do_send_receive(sock, subscribe2_packet, suback2_packet, "suback 2-2")
mosq_test.expect_packet(sock, "publish 2", publish2r_packet)
sock.send(puback2r_packet)
sock.close()
rc = 0
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
print("proto_ver=%d" % (proto_ver))
exit(rc)
do_test()
exit(0) | 0.474875 | 0.364976 |
import blackbook.database
from flask import current_app
from flask.views import MethodView
__all__ = ['basecollection', 'errors']
__author__ = 'ievans3024'
# Base path for all API routes; falls back to '/api' when API_ROOT is unset.
# NOTE(review): reading current_app at import time requires an active Flask
# application context -- confirm this module is only imported inside one.
API_URI_PREFIX = current_app.config.get('API_ROOT') or '/api'
class APIType(object):
    """Descriptor for properties that must hold a class (or subclass) of `cls`."""

    def __init__(self, cls):
        # Reject anything that is not itself a class.
        if not isinstance(cls, type):
            raise TypeError("Parameter 'cls' must be a class.")
        self.cls = cls

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        name = self.get_own_name(owner)
        if name not in instance.__dict__.keys():
            raise AttributeError(
                "'{cls}' object has no attribute '{name}'".format(
                    cls=owner.__name__,
                    name=name
                )
            )
        return instance.__dict__.get(name)

    def __set__(self, instance, value):
        if instance:
            # Accept the configured class itself or any of its subclasses.
            if not ((value is self.cls) or (issubclass(value, self.cls))):
                raise ValueError(
                    "Value must be {cls} or a subclass of it.".format(
                        cls=".".join([self.cls.__module__, self.cls.__name__])
                    )
                )
            instance.__dict__[self.get_own_name(type(instance))] = value

    def __delete__(self, instance):
        if instance:
            del instance.__dict__[self.get_own_name(type(instance))]

    def get_own_name(self, owner):
        """Find the attribute name this descriptor is bound to on `owner`."""
        for attribute in dir(owner):
            # Class-level getattr on the descriptor returns `self` (see __get__).
            if getattr(owner, attribute) is self:
                return attribute
class APIField(APIType):
    """Descriptor for properties that must hold an *instance* of `cls` (or a subclass)."""

    def __set__(self, instance, value):
        # Unlike APIType, the stored value is an instance, not the class itself.
        if not isinstance(value, self.cls):
            qualified_name = ".".join([self.cls.__module__, self.cls.__name__])
            raise TypeError(
                "Value must be an instance of {cls} or one of its subclasses.".format(
                    cls=qualified_name
                )
            )
        instance.__dict__[self.get_own_name(type(instance))] = value
class API(MethodView):
    """Abstract Base Class for API Method Views"""
    # Descriptors enforce types: db must be an object instance, model a
    # subclass of blackbook.database.Model.
    db = APIField(object)
    model = APIType(blackbook.database.Model)

    def __init__(self, db, model):
        """
        Constructor
        :param db: The couch database to draw data from.
        :param model: The couch document class to represent data with.
        :return:
        """
        super(API, self).__init__()
        self.db = db
        self.model = model

    def _generate_document(self, *args, href='/', **kwargs):
        """
        Generate a document
        Implementations should return a collection+json document object.
        """
        raise NotImplementedError()

    def _get_authenticated_user(self, user_api, session_api):
        # Resolve the authenticated user; implemented by subclasses.
        raise NotImplementedError()

    # HTTP verb handlers -- each concrete subclass implements the verbs it
    # supports; unimplemented verbs deliberately raise NotImplementedError.
    def delete(self, *args, **kwargs):
        raise NotImplementedError()

    def get(self, *args, **kwargs):
        raise NotImplementedError()

    def head(self, *args, **kwargs):
        raise NotImplementedError()

    def options(self, *args, **kwargs):
        raise NotImplementedError()

    def patch(self, *args, **kwargs):
        raise NotImplementedError()

    def post(self, *args, **kwargs):
        raise NotImplementedError()

    def put(self, *args, **kwargs):
        raise NotImplementedError()

    def search(self, *args, **kwargs):
raise NotImplementedError() | blackbook/api/__init__.py | import blackbook.database
from flask import current_app
from flask.views import MethodView
__all__ = ['basecollection', 'errors']
__author__ = 'ievans3024'
API_URI_PREFIX = current_app.config.get('API_ROOT') or '/api'
class APIType(object):
"""Descriptor for properties that need to a class or a subclass of such."""
def __init__(self, cls):
if isinstance(cls, type):
self.cls = cls
else:
raise TypeError("Parameter 'cls' must be a class.")
def __get__(self, instance, owner):
if instance is None:
return self
else:
if self.get_own_name(owner) in instance.__dict__.keys():
return instance.__dict__.get(self.get_own_name(owner))
else:
raise AttributeError(
"'{cls}' object has no attribute '{name}'".format(
cls=owner.__name__,
name=self.get_own_name(owner)
)
)
def __set__(self, instance, value):
if instance:
if not ((value is self.cls) or (issubclass(value, self.cls))):
raise ValueError(
"Value must be {cls} or a subclass of it.".format(
cls=".".join([self.cls.__module__, self.cls.__name__])
)
)
instance.__dict__[self.get_own_name(type(instance))] = value
def __delete__(self, instance):
if instance:
del instance.__dict__[self.get_own_name(type(instance))]
def get_own_name(self, owner):
for attr in dir(owner):
if getattr(owner, attr) is self:
return attr
class APIField(APIType):
"""Descriptor for properties that need to be an instance of a class or subclass of such."""
def __set__(self, instance, value):
if not isinstance(value, self.cls):
raise TypeError(
"Value must be an instance of {cls} or one of its subclasses.".format(
cls=".".join([self.cls.__module__, self.cls.__name__])
)
)
instance.__dict__[self.get_own_name(type(instance))] = value
class API(MethodView):
"""Abstract Base Class for API Method Views"""
db = APIField(object)
model = APIType(blackbook.database.Model)
def __init__(self, db, model):
"""
Constructor
:param db: The couch database to draw data from.
:param model: The couch document class to represent data with.
:return:
"""
super(API, self).__init__()
self.db = db
self.model = model
def _generate_document(self, *args, href='/', **kwargs):
"""
Generate a document
Implementations should return a collection+json document object.
"""
raise NotImplementedError()
def _get_authenticated_user(self, user_api, session_api):
raise NotImplementedError()
def delete(self, *args, **kwargs):
raise NotImplementedError()
def get(self, *args, **kwargs):
raise NotImplementedError()
def head(self, *args, **kwargs):
raise NotImplementedError()
def options(self, *args, **kwargs):
raise NotImplementedError()
def patch(self, *args, **kwargs):
raise NotImplementedError()
def post(self, *args, **kwargs):
raise NotImplementedError()
def put(self, *args, **kwargs):
raise NotImplementedError()
def search(self, *args, **kwargs):
raise NotImplementedError() | 0.693369 | 0.067701 |
import datetime
import os
import random
import string
import tempfile
import typing as t
import unittest
from dataclasses import dataclass
from freezegun import freeze_time
from hmalib.common.models.pipeline import HashRecord
from hmalib.common.timebucketizer import CSViable, TimeBucketizer
@dataclass(eq=True)
class SampleCSViableClass(CSViable):
    """
    Example class used for testing purposes.
    """
    # NOTE(review): @dataclass(eq=True) with no declared fields generates an
    # __eq__ comparing empty field tuples, so all instances compare equal
    # regardless of self.a / self.b -- presumably intended for these tests.
    def __init__(self):
        self.a = "a"
        self.b = "b"

    def to_csv(self):
        # CSV row representation consumed by TimeBucketizer.
        return [self.a, self.b]

    @classmethod
    def from_csv(cls, value: t.List[str]):
        # Row content is ignored; every instance is interchangeable.
        return SampleCSViableClass()
class TestTimeBuckets(unittest.TestCase):
    """Behavioural tests for TimeBucketizer writing, flushing and squashing."""

    def get_file_count(self, directory_path):
        # Count all files (recursively) under directory_path.
        file_count = 0
        for _, _, files in os.walk(directory_path):
            file_count += len(files)
        return file_count

    def test_correct_file_content(self):
        # Records added within one bucket interval are written together and
        # readable back for that interval; the record added after the clock
        # crosses into the next minute is not part of the queried window.
        with tempfile.TemporaryDirectory() as td:
            initial_datetime = datetime.datetime(
                year=2012, month=8, day=13, hour=14, minute=4
            )
            other_datetime = datetime.datetime(
                year=2012, month=8, day=13, hour=14, minute=5
            )
            with freeze_time(initial_datetime) as frozen_datetime:
                sample = TimeBucketizer(
                    datetime.timedelta(minutes=1), td, "hasher", "2"
                )
                sample.add_record(SampleCSViableClass())
                sample.add_record(SampleCSViableClass())
                # Advancing the frozen clock closes the first bucket.
                frozen_datetime.move_to(other_datetime)
                sample.add_record(SampleCSViableClass())
                fileContent = sample.get_records(
                    initial_datetime,
                    other_datetime,
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    SampleCSViableClass,
                )
                to_compare = [SampleCSViableClass()] * 2
                self.assertEqual(fileContent, to_compare, "File content does not match")

    def test_multiple_files_and_content(self):
        # Records spread over five one-minute buckets should all come back.
        with tempfile.TemporaryDirectory() as td:
            initial_datetime = datetime.datetime(
                year=2012, month=8, day=13, hour=14, minute=4
            )
            with freeze_time(initial_datetime) as frozen_datetime:
                sample = TimeBucketizer(
                    datetime.timedelta(minutes=1), td, "hasher", "3"
                )
                for _ in range(5):
                    for _ in range(3):
                        sample.add_record(SampleCSViableClass())
                    frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                # One extra record after the last tick flushes the final bucket.
                sample.add_record(SampleCSViableClass())
                fileContent = sample.get_records(
                    initial_datetime,
                    datetime.datetime.now(),
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    SampleCSViableClass,
                )
                to_compare = [SampleCSViableClass()] * 5 * 3
                self.assertEqual(fileContent, to_compare, "Invalid data")

    @freeze_time("2012-08-13 14:04:00")
    def test_buffer_overload(self):
        # Exceeding the in-memory buffer (presumably 3200 records -- verify
        # against TimeBucketizer) forces a write even within a single bucket.
        with tempfile.TemporaryDirectory() as td:
            sample = TimeBucketizer(datetime.timedelta(minutes=1), td, "hasher", "4")
            for _ in range(3201):
                sample.add_record(SampleCSViableClass())
            fileContent = sample.get_records(
                datetime.datetime.now(),
                datetime.datetime.now(),
                "hasher",
                td,
                datetime.timedelta(minutes=1),
                SampleCSViableClass,
            )
            to_compare = [SampleCSViableClass()] * 3200
            self.assertEqual(
                fileContent,
                to_compare,
                "Buffer overload, did not write the file and reset the buffer.",
            )

    def test_force_flush(self):
        # force_flush() must persist records still sitting in the buffer.
        with tempfile.TemporaryDirectory() as td:
            sample = TimeBucketizer(datetime.timedelta(minutes=1), td, "hasher", "4")
            for _ in range(5):
                sample.add_record(SampleCSViableClass())
            sample.force_flush()
            fileContent = sample.get_records(
                datetime.datetime.now(),
                datetime.datetime.now(),
                "hasher",
                td,
                datetime.timedelta(minutes=1),
                SampleCSViableClass,
            )
            to_compare = [SampleCSViableClass()] * 5
            self.assertEqual(
                fileContent,
                to_compare,
                "Destroy method did not flush the remaining files stored in the buffer",
            )

    def test_destroy_empty_buffer(self):
        # Flushing an empty buffer must not create any file or record.
        with tempfile.TemporaryDirectory() as td:
            sample = TimeBucketizer(datetime.timedelta(minutes=1), td, "hasher", "4")
            sample.force_flush()
            fileContent = sample.get_records(
                datetime.datetime.now(),
                datetime.datetime.now(),
                "hasher",
                td,
                datetime.timedelta(minutes=1),
                SampleCSViableClass,
            )
            self.assertEqual(
                fileContent,
                [],
                "Destroy method should not have executed as the buffer is empty",
            )

    @freeze_time("2012-08-13 14:04:00")
    def test_squash_content(self):
        # squash_content should merge the many per-instance files into fewer
        # files without losing or duplicating any record.
        with tempfile.TemporaryDirectory() as td:
            with freeze_time(datetime.datetime.now()) as frozen_datetime:
                VALUE_1 = 5   # buckets (minutes)
                VALUE_2 = 10  # TimeBucketizer instances per bucket
                VALUE_3 = 3   # records per instance
                expected_records = []
                # NOTE(review): the loop variable `i` is reused by all three
                # nested loops; the inner loops shadow the outer ones.
                for i in range(VALUE_1):
                    for i in range(VALUE_2):
                        sample = TimeBucketizer(
                            datetime.timedelta(minutes=1), td, "hasher", str(i)
                        )
                        for i in range(VALUE_3):
                            content = "".join(
                                random.choice(string.ascii_lowercase) for _ in range(10)
                            )
                            new_record = HashRecord(content, str(i))
                            sample.add_record(new_record)
                            expected_records.append(new_record)
                        sample.force_flush()
                    frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                # Move the clock past the squash window's end bound.
                frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                file_count_prev = self.get_file_count(td)
                TimeBucketizer.squash_content(
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    datetime.datetime.now() - datetime.timedelta(days=1),
                    datetime.datetime.now() - datetime.timedelta(minutes=2),
                )
                records = TimeBucketizer.get_records(
                    datetime.datetime.now() - datetime.timedelta(minutes=10),
                    datetime.datetime.now(),
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    HashRecord,
                )
                now = datetime.datetime(2012, 8, 13, 14, 4, 0)  # NOTE(review): unused
                file_count = self.get_file_count(td)
                self.assertEqual(len(records), VALUE_1 * VALUE_2 * VALUE_3)
                self.assertCountEqual(records, expected_records)
                self.assertEqual(file_count_prev, VALUE_1 * VALUE_2)
self.assertEqual(file_count, VALUE_1) | hasher-matcher-actioner/hmalib/common/tests/test_timebucket.py | import datetime
import os
import random
import string
import tempfile
import typing as t
import unittest
from dataclasses import dataclass
from freezegun import freeze_time
from hmalib.common.models.pipeline import HashRecord
from hmalib.common.timebucketizer import CSViable, TimeBucketizer
@dataclass(eq=True)
class SampleCSViableClass(CSViable):
    """
    Example class used for testing purposes.
    """
    # NOTE(review): because __init__ is hand-written, no dataclass fields are
    # declared; the generated __eq__ therefore compares empty field tuples and
    # any two SampleCSViableClass instances compare equal.  The list-equality
    # assertions in the tests rely on exactly that behaviour.
    def __init__(self):
        self.a = "a"
        self.b = "b"
    def to_csv(self):
        # Serialize to one CSV row; always the constant ["a", "b"].
        return [self.a, self.b]
    @classmethod
    def from_csv(cls, value: t.List[str]):
        # Deserialization ignores the parsed values and returns a fresh
        # default instance — sufficient for the equality-based assertions.
        return SampleCSViableClass()
class TestTimeBuckets(unittest.TestCase):
def get_file_count(self, directory_path):
file_count = 0
for _, _, files in os.walk(directory_path):
file_count += len(files)
return file_count
    def test_correct_file_content(self):
        """Records added inside one bucket window are flushed and readable;
        the record added after the clock moves on is not in the queried result."""
        with tempfile.TemporaryDirectory() as td:
            initial_datetime = datetime.datetime(
                year=2012, month=8, day=13, hour=14, minute=4
            )
            other_datetime = datetime.datetime(
                year=2012, month=8, day=13, hour=14, minute=5
            )
            with freeze_time(initial_datetime) as frozen_datetime:
                sample = TimeBucketizer(
                    datetime.timedelta(minutes=1), td, "hasher", "2"
                )
                sample.add_record(SampleCSViableClass())
                sample.add_record(SampleCSViableClass())
                # Advancing the frozen clock by one minute closes the first
                # 1-minute bucket before the third record is added.
                frozen_datetime.move_to(other_datetime)
                sample.add_record(SampleCSViableClass())
                fileContent = sample.get_records(
                    initial_datetime,
                    other_datetime,
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    SampleCSViableClass,
                )
                # Only the two records from the first (closed) bucket are expected.
                to_compare = [SampleCSViableClass()] * 2
                self.assertEqual(fileContent, to_compare, "File content does not match")
    def test_multiple_files_and_content(self):
        """Five bucket windows of three records each (15 total) are all
        returned; the final record added after the last tick is not."""
        with tempfile.TemporaryDirectory() as td:
            initial_datetime = datetime.datetime(
                year=2012, month=8, day=13, hour=14, minute=4
            )
            with freeze_time(initial_datetime) as frozen_datetime:
                sample = TimeBucketizer(
                    datetime.timedelta(minutes=1), td, "hasher", "3"
                )
                for _ in range(5):
                    for _ in range(3):
                        sample.add_record(SampleCSViableClass())
                    # Close the current 1-minute bucket.
                    frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                sample.add_record(SampleCSViableClass())
                fileContent = sample.get_records(
                    initial_datetime,
                    datetime.datetime.now(),
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    SampleCSViableClass,
                )
                # 5 buckets x 3 records; the 16th record's bucket is still open.
                to_compare = [SampleCSViableClass()] * 5 * 3
                self.assertEqual(fileContent, to_compare, "Invalid data")
    @freeze_time("2012-08-13 14:04:00")
    def test_buffer_overload(self):
        """Adding 3201 records triggers a buffer flush; 3200 records end up
        on disk (the buffer cap is presumably 3200 — confirm in TimeBucketizer)."""
        with tempfile.TemporaryDirectory() as td:
            sample = TimeBucketizer(datetime.timedelta(minutes=1), td, "hasher", "4")
            for _ in range(3201):
                sample.add_record(SampleCSViableClass())
            fileContent = sample.get_records(
                datetime.datetime.now(),
                datetime.datetime.now(),
                "hasher",
                td,
                datetime.timedelta(minutes=1),
                SampleCSViableClass,
            )
            to_compare = [SampleCSViableClass()] * 3200
            self.assertEqual(
                fileContent,
                to_compare,
                "Buffer overload, did not write the file and reset the buffer.",
            )
    def test_force_flush(self):
        """force_flush() persists the 5 buffered records so get_records
        returns them all."""
        with tempfile.TemporaryDirectory() as td:
            sample = TimeBucketizer(datetime.timedelta(minutes=1), td, "hasher", "4")
            for _ in range(5):
                sample.add_record(SampleCSViableClass())
            sample.force_flush()
            fileContent = sample.get_records(
                datetime.datetime.now(),
                datetime.datetime.now(),
                "hasher",
                td,
                datetime.timedelta(minutes=1),
                SampleCSViableClass,
            )
            to_compare = [SampleCSViableClass()] * 5
            self.assertEqual(
                fileContent,
                to_compare,
                "Destroy method did not flush the remaining files stored in the buffer",
            )
    def test_destroy_empty_buffer(self):
        """force_flush() on an empty buffer writes nothing; get_records
        returns an empty list."""
        with tempfile.TemporaryDirectory() as td:
            sample = TimeBucketizer(datetime.timedelta(minutes=1), td, "hasher", "4")
            sample.force_flush()
            fileContent = sample.get_records(
                datetime.datetime.now(),
                datetime.datetime.now(),
                "hasher",
                td,
                datetime.timedelta(minutes=1),
                SampleCSViableClass,
            )
            self.assertEqual(
                fileContent,
                [],
                "Destroy method should not have executed as the buffer is empty",
            )
    @freeze_time("2012-08-13 14:04:00")
    def test_squash_content(self):
        """squash_content merges the per-bucketizer files: 5x10 flushed files
        (one per TimeBucketizer instance) collapse to 5 while all 150 records
        stay readable."""
        with tempfile.TemporaryDirectory() as td:
            with freeze_time(datetime.datetime.now()) as frozen_datetime:
                VALUE_1 = 5
                VALUE_2 = 10
                VALUE_3 = 3
                expected_records = []
                # NOTE(review): all three loops reuse the same variable `i`,
                # so the inner loops shadow the outer ones; only the innermost
                # value reaches str(i) in HashRecord.  Works, but fragile.
                for i in range(VALUE_1):
                    for i in range(VALUE_2):
                        sample = TimeBucketizer(
                            datetime.timedelta(minutes=1), td, "hasher", str(i)
                        )
                        for i in range(VALUE_3):
                            content = "".join(
                                random.choice(string.ascii_lowercase) for _ in range(10)
                            )
                            new_record = HashRecord(content, str(i))
                            sample.add_record(new_record)
                            expected_records.append(new_record)
                        sample.force_flush()
                    frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                # Move well past the last bucket so squashing can see it.
                frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                frozen_datetime.tick(delta=datetime.timedelta(minutes=1))
                file_count_prev = self.get_file_count(td)
                TimeBucketizer.squash_content(
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    datetime.datetime.now() - datetime.timedelta(days=1),
                    datetime.datetime.now() - datetime.timedelta(minutes=2),
                )
                records = TimeBucketizer.get_records(
                    datetime.datetime.now() - datetime.timedelta(minutes=10),
                    datetime.datetime.now(),
                    "hasher",
                    td,
                    datetime.timedelta(minutes=1),
                    HashRecord,
                )
                # NOTE(review): `now` is unused — candidate for removal.
                now = datetime.datetime(2012, 8, 13, 14, 4, 0)
                file_count = self.get_file_count(td)
                self.assertEqual(len(records), VALUE_1 * VALUE_2 * VALUE_3)
                self.assertCountEqual(records, expected_records)
                self.assertEqual(file_count_prev, VALUE_1 * VALUE_2)
self.assertEqual(file_count, VALUE_1) | 0.629319 | 0.230801 |
import argparse
import os
from pathlib import Path
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
import torch
from torch.utils.data import DataLoader
from package.data.tokenizers import RelationshipTokenizer
from package.data.label_encoders import LabelEncoder
from package.data.semeval import label_set
from package.data.dataset import RelationStatementDataset
from package.models import RelationshipEncoderLightningModule
def parse_args(sys_args):
    """Parse training CLI options.

    Defaults fall back to the SageMaker environment variables
    (SM_NUM_GPUS, SM_MODEL_DIR, SM_OUTPUT_DATA_DIR, SM_CHANNEL_TRAIN,
    SM_CHANNEL_TEST).  Unknown arguments are ignored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning-rate", type=float, default=0.0007)
    parser.add_argument("--gpus", type=int, default=os.environ.get("SM_NUM_GPUS", 0))
    parser.add_argument("--model-dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
    parser.add_argument(
        "--output-dir", type=str, default=os.environ.get("SM_OUTPUT_DATA_DIR")
    )
    parser.add_argument(
        "--train-data-dir", type=str, default=os.environ.get("SM_CHANNEL_TRAIN")
    )
    parser.add_argument(
        "--test-data-dir", type=str, default=os.environ.get("SM_CHANNEL_TEST")
    )
    known_args, _ = parser.parse_known_args(sys_args)
    return known_args
def train_fn(args):
    """Train the relationship encoder end to end.

    Loads the tokenizer and data, builds the label encoder and datasets,
    then runs a PyTorch Lightning training loop.  Tokenizer and label
    encoder are saved to args.model_dir so inference can reload them.
    """
    print(args)
    # load tokenizer
    tokenizer = RelationshipTokenizer.from_pretrained(
        pretrained_model_name_or_path='bert-base-uncased',
        contains_entity_tokens=False
    )
    # Persist the tokenizer next to the model artifacts.
    tokenizer.save(file_path=Path(args.model_dir, 'tokenizer.json'), pretty=True)
    # load data
    train_file_path = Path(args.train_data_dir, 'train.txt')
    test_file_path = Path(args.test_data_dir, 'test.txt')
    # construct label encoder (sorted so the label->index mapping is stable)
    labels = list(label_set(train_file_path))
    label_encoder = LabelEncoder.from_str_list(sorted(labels))
    print('Using the following label encoder mappings:\n\n', label_encoder)
    label_encoder.save(file_path=str(Path(args.model_dir, 'label_encoder.json')))
    # prepare datasets: pad/truncate every statement to a fixed length
    model_size = 512
    tokenizer.set_truncation(model_size)
    tokenizer.set_padding(model_size)
    train_dataset = RelationStatementDataset(
        file_path=train_file_path,
        tokenizer=tokenizer,
        label_encoder=label_encoder
    )
    test_dataset = RelationStatementDataset(
        file_path=test_file_path,
        tokenizer=tokenizer,
        label_encoder=label_encoder
    )
    batch_size = 16
    train_dataloader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        num_workers=4
    )
    test_dataloader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        num_workers=4
    )
    # create model
    relationship_encoder = RelationshipEncoderLightningModule(
        tokenizer,
        label_encoder,
        learning_rate=float(args.learning_rate)
    )
    checkpoint_callback = ModelCheckpoint(
        monitor='valid_loss',
        filepath=str(Path(args.model_dir, 'model'))
    )
    # train model
    # NOTE(review): fast_dev_run=True makes Lightning run only a smoke-test
    # pass, not real training — confirm this flag is intentional here.
    trainer = Trainer(
        default_root_dir=args.output_dir,
        accumulate_grad_batches=2,
        gradient_clip_val=1.0,
        max_epochs=1,
        weights_summary='full',
        gpus=args.gpus,
        checkpoint_callback=checkpoint_callback,
        fast_dev_run=True
    )
trainer.fit(relationship_encoder, train_dataloader, test_dataloader) | sagemaker_notebook_instance/containers/relationship_extraction/package/training.py | import argparse
import os
from pathlib import Path
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
import torch
from torch.utils.data import DataLoader
from package.data.tokenizers import RelationshipTokenizer
from package.data.label_encoders import LabelEncoder
from package.data.semeval import label_set
from package.data.dataset import RelationStatementDataset
from package.models import RelationshipEncoderLightningModule
def parse_args(sys_args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--learning-rate",
type=float,
default=0.0007
)
parser.add_argument(
"--gpus",
type=int,
default=os.environ.get("SM_NUM_GPUS", 0)
)
parser.add_argument(
"--model-dir",
type=str,
default=os.environ.get("SM_MODEL_DIR")
)
parser.add_argument(
"--output-dir",
type=str,
default=os.environ.get("SM_OUTPUT_DATA_DIR")
)
parser.add_argument(
"--train-data-dir",
type=str,
default=os.environ.get("SM_CHANNEL_TRAIN"),
)
parser.add_argument(
"--test-data-dir",
type=str,
default=os.environ.get("SM_CHANNEL_TEST")
)
args, _ = parser.parse_known_args(sys_args)
return args
def train_fn(args):
print(args)
# load tokenizer
tokenizer = RelationshipTokenizer.from_pretrained(
pretrained_model_name_or_path='bert-base-uncased',
contains_entity_tokens=False
)
tokenizer.save(file_path=Path(args.model_dir, 'tokenizer.json'), pretty=True)
# load data
train_file_path = Path(args.train_data_dir, 'train.txt')
test_file_path = Path(args.test_data_dir, 'test.txt')
# construct label encoder
labels = list(label_set(train_file_path))
label_encoder = LabelEncoder.from_str_list(sorted(labels))
print('Using the following label encoder mappings:\n\n', label_encoder)
label_encoder.save(file_path=str(Path(args.model_dir, 'label_encoder.json')))
# prepare datasets
model_size = 512
tokenizer.set_truncation(model_size)
tokenizer.set_padding(model_size)
train_dataset = RelationStatementDataset(
file_path=train_file_path,
tokenizer=tokenizer,
label_encoder=label_encoder
)
test_dataset = RelationStatementDataset(
file_path=test_file_path,
tokenizer=tokenizer,
label_encoder=label_encoder
)
batch_size = 16
train_dataloader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=batch_size,
num_workers=4
)
test_dataloader = torch.utils.data.DataLoader(
dataset=test_dataset,
batch_size=batch_size,
num_workers=4
)
# create model
relationship_encoder = RelationshipEncoderLightningModule(
tokenizer,
label_encoder,
learning_rate=float(args.learning_rate)
)
checkpoint_callback = ModelCheckpoint(
monitor='valid_loss',
filepath=str(Path(args.model_dir, 'model'))
)
# train model
trainer = Trainer(
default_root_dir=args.output_dir,
accumulate_grad_batches=2,
gradient_clip_val=1.0,
max_epochs=1,
weights_summary='full',
gpus=args.gpus,
checkpoint_callback=checkpoint_callback,
fast_dev_run=True
)
trainer.fit(relationship_encoder, train_dataloader, test_dataloader) | 0.719778 | 0.230541 |
from __future__ import absolute_import
from datetime import datetime
import json
from pyDataverse.exceptions import ApiAuthorizationError
from pyDataverse.exceptions import ApiResponseError
from pyDataverse.exceptions import ApiUrlError
from pyDataverse.exceptions import DataverseNotFoundError
from pyDataverse.exceptions import OperationFailedError
from requests import ConnectionError
from requests import delete
from requests import get
from requests import post
import subprocess as sp
"""
Connect and request the Dataverse API Endpoints. Save and use request results.
"""
class Api(object):
"""API class.
Parameters
----------
base_url : string
Base URL of Dataverse instance. Without trailing `/` at the end.
e.g. `http://demo.dataverse.org`
api_token : string
Authenication token for the api.
api_version : string
Dataverse API version. Default: `v1`
Attributes
----------
conn_started : datetime
Description of attribute `conn_started`.
native_base_url : type
Description of attribute `native_base_url`.
base_url
api_token
api_version
"""
def __init__(self, base_url, api_token=None, api_version='v1'):
"""Init an Api() class.
Scheme, host and path combined create the base-url for the API.
See more about url at https://en.wikipedia.org/wiki/URL
"""
# Check and set basic variables.
if not isinstance(base_url, ("".__class__, u"".__class__)):
raise ApiUrlError('base_url {0} is not a string.'.format(base_url))
self.base_url = base_url
if not isinstance(api_version, ("".__class__, u"".__class__)):
raise ApiUrlError('api_version {0} is not a string.'.format(
api_version))
self.api_version = api_version
if api_token:
if not isinstance(api_token, ("".__class__, u"".__class__)):
raise ApiAuthorizationError(
'Api token passed is not a string.')
self.api_token = api_token
self.conn_started = datetime.now()
# Test connection.
query_str = '/info/server'
if base_url and api_version:
self.native_api_base_url = '{0}/api/{1}'.format(self.base_url,
self.api_version)
url = '{0}{1}'.format(self.native_api_base_url, query_str)
try:
resp = get(url)
if resp:
self.status = resp.json()['status']
else:
self.status = 'ERROR'
raise ApiResponseError(
'No response from api request {0}.'.format(url)
)
except KeyError as e:
print('Key not in response {0} {1}.'.format(e, url))
except ConnectionError as e:
self.status = 'ERROR'
print('Could not establish connection to api {0} {1}.'.format(
url, e))
else:
self.status = 'ERROR'
self.native_api_base_url = None
def __str__(self):
"""Return name of Api() class for users.
Returns
-------
string
Naming of the Api() class.
"""
return 'pyDataverse API class'
    def make_get_request(self, query_str, params=None, auth=False):
        """Make a GET request.

        Parameters
        ----------
        query_str : string
            Query path appended to the native API base url.
        auth : bool
            Should an api token be used for authentication? By default = False.
        params : dict
            Dictionary of parameters to be passed with the request.
            Default: None

        Returns
        -------
        requests.Response
            Response object of request library.

        Raises
        ------
        ApiAuthorizationError
            If auth is requested without a token, or the server returns 401.
        OperationFailedError
            On any non-200 (and non-401) status code.
        ConnectionError
            If the host cannot be reached.
        """
        url = '{0}{1}'.format(self.native_api_base_url, query_str)
        if auth:
            if self.api_token:
                if not params:
                    params = {}
                # Dataverse accepts the token as a `key` query parameter.
                params['key'] = self.api_token
            else:
                raise ApiAuthorizationError(
                    'GET api token not available {}.'.format(url)
                )
        try:
            resp = get(
                url,
                params=params
            )
            if resp:
                if resp.status_code == 401:
                    raise ApiAuthorizationError(
                        'GET Authorization provided is invalid {}.'.format(url)
                    )
                elif resp.status_code != 200:
                    raise OperationFailedError(
                        'GET {} {} not working'.format(resp.status_code, url)
                    )
            return resp
        except ConnectionError:
            raise ConnectionError(
                'GET Could not establish connection to api {}.'.format(url)
            )
    def make_post_request(self, query_str, data, auth=False, headers=None,
                          params=None):
        """Make a POST request.

        Parameters
        ----------
        query_str : string
            Query path appended to the native API base url.
        data : string
            Request body (e.g. a JSON-formatted string) passed to requests.
        auth : bool
            Should an api token be used for authentication? By default = False.
        headers : dict
            Optional HTTP headers for the request.
        params : dict
            Dictionary of parameters to be passed with the request.
            Default: None

        Returns
        -------
        requests.Response
            Response object of request library.

        Raises
        ------
        OperationFailedError
            On any status code other than 201.
        ConnectionError
            If the host cannot be reached.
        """
        url = '{0}{1}'.format(self.native_api_base_url, query_str)
        if auth:
            if self.api_token:
                if not params:
                    params = {}
                params['key'] = self.api_token
            else:
                # Unlike the GET helper this only warns instead of raising —
                # NOTE(review): confirm whether that asymmetry is intended.
                print(
                    'POST api token not available {}.'.format(url)
                )
        try:
            resp = post(
                url,
                data=data,
                headers=headers,
                params=params
            )
            # Any non-201 status is treated as failure, so callers only ever
            # see a response object for successfully created resources.
            if resp.status_code != 201:
                raise OperationFailedError(
                    'POST {} {}'.format(resp.status_code, url)
                )
            return resp
        except ConnectionError:
            raise ConnectionError(
                'POST Could not establish connection to api {}.'.format(url)
            )
def make_delete_request(self, query_str, auth=False, params=None):
"""Make a DELETE request.
auth : bool
Should an api token be used for authentication? By default = False.
params : dict
Dictionary of parameters to be passed with the request.
Default: None
"""
url = '{0}{1}'.format(self.native_base_url, query_str)
if auth:
if self.api_token:
if not params:
params = {}
params['key'] = self.api_token
else:
print(
'DELETE api token not available {}.'.format(url)
)
try:
resp = delete(
url,
params={'key': self.api_token}
)
return resp
except ConnectionError:
raise ConnectionError(
'DELETE Could not establish connection to api {}.'.format(url)
)
def get_dataverse(self, identifier):
"""Get dataverse metadata by alias or id.
View data about the dataverse $identified by identifier. Identifier can
be the id number of the dataverse, its alias, or the special
value :root.
GET http://$SERVER/api/dataverses/$id
Parameters
----------
identifier : string
Can either be a dataverse id (long) or a dataverse alias (more
robust).
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/dataverses/{0}'.format(identifier)
resp = self.make_get_request(query_str)
return resp
    def create_dataverse(self, identifier, json, parent=':root'):
        """Create a dataverse.

        Generates a new dataverse under $id. Expects a JSON content describing
        the dataverse. If $id is omitted, a root dataverse is created. $id can
        either be a dataverse id (long) or a dataverse alias (more robust).

        POST http://$SERVER/api/dataverses/$id?key=$apiKey

        The fields name, alias, and dataverseContacts are required, see
        http://guides.dataverse.org/en/latest/
        _downloads/dataverse-complete.json

        Parameters
        ----------
        identifier : string
            Can either be a dataverse id (long) or a dataverse alias (more
            robust). Used only for the log messages below.
        json : string
            JSON-formatted string for upload.
            NOTE(review): this parameter shadows the module-level `json`
            import inside this method — consider renaming it.
        parent : string
            Parent dataverse if existing. Default is `:root`.

        Returns
        -------
        requests.Response
            Response object of request library.
        """
        if not parent:
            print('No parent dataverse passed.')
        query_str = '/dataverses/{0}'.format(parent)
        resp = self.make_post_request(query_str, json)
        # NOTE(review): make_post_request already raises OperationFailedError
        # for every non-201 status before returning, so the two error
        # branches below appear unreachable — confirm before relying on them.
        if resp.status_code == 404:
            raise DataverseNotFoundError(
                'Dataverse {0} was not found.'.format(parent))
        elif resp.status_code != 201:
            raise OperationFailedError(
                '{0} Dataverse could not be created.'.format(identifier)
            )
        else:
            print('{0} Dataverse has been created.'.format(identifier))
        return resp
def delete_dataverse(self, identifier):
"""Delete dataverse by alias or id.
Deletes the dataverse whose ID is given:
DELETE http://$SERVER/api/dataverses/$id?key=$apiKey
Parameters
----------
identifier : string
Can either be a dataverse id (long) or a dataverse alias (more
robust).
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/dataverses/{0}'.format(identifier)
resp = self.make_delete_request(query_str)
if resp.status_code == 401:
raise ApiAuthorizationError(
'Delete Dataverse {0} unauthorized.'.format(identifier)
)
elif resp.status_code == 404:
raise DataverseNotFoundError(
'Dataverse {0} was not found.'.format(identifier)
)
elif resp.status_code != 200:
raise OperationFailedError(
'Dataverse {0} could not be deleted.'.format(identifier)
)
elif resp.status_code == 200:
print('{0} Dataverse has been deleted.'.format(identifier))
else:
print('{0} Dataverse could not be deleted.'.format(identifier))
return resp
def get_dataset(self, identifier, is_doi=True):
"""Get metadata of a dataset.
With Dataverse identifier:
GET http://$SERVER/api/datasets/$identifier
With PID:
GET http://$SERVER/api/datasets/:persistentId/?persistentId=$ID
GET http://$SERVER/api/datasets/:persistentId/
?persistentId=doi:10.5072/FK2/J8SJZB
Parameters
----------
identifier : string
Doi of the dataset.
is_doi : bool
Is the identifier a Doi? Defaul: True, cause so far the module only
supports Doi's.
Returns
-------
requests.Response
Response object of requerst library.
"""
if is_doi:
query_str = '/datasets/:persistentId/?persistentId={0}'.format(
identifier)
else:
query_str = '/datasets/{0}'.format(identifier)
resp = self.make_get_request(query_str)
return resp
def get_dataset_export(self, export_format, identifier):
"""Get metadata of dataset exported in different formats.
CORS Export the metadata of the current published version of a dataset
in various formats:
Formats: 'ddi', 'oai_ddi', 'dcterms', 'oai_dc', 'schema.org',
'dataverse_json'
GET http://$SERVER/api/datasets/
export?exporter=ddi&persistentId=$persistentId
Parameters
----------
export_format : string
Export format as a string.
identifier : string
Doi of the dataset.
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/datasets/export?exporter={0}&persistentId={1}'.format(
export_format, identifier)
resp = self.make_get_request(query_str)
return resp
    def create_dataset(self, dataverse, json):
        """Add dataset to dataverse.

        POST http://$SERVER/api/dataverses/$dataverse/datasets

        To create a dataset, you must supply a JSON file containing all the
        metadata (see dataset-finch1.json in the Dataverse guides), and target
        a dataverse by alias (e.g. "root") or database id (e.g. "1"). The
        initial version state will be set to DRAFT:
        http://guides.dataverse.org/en/latest/_downloads/dataset-finch1.json

        Parameters
        ----------
        dataverse : string
            Alias (or id) of the target dataverse.
        json : string
            Dataset metadata as json-formatted string.
            NOTE(review): shadows the module-level `json` import here.

        Returns
        -------
        requests.Response
            Response object of request library.
        """
        query_str = '/dataverses/{0}/datasets'.format(dataverse)
        resp = self.make_post_request(query_str, json)
        # NOTE(review): make_post_request raises OperationFailedError for any
        # non-201 status, so only the 201 branch below can actually run.
        if resp.status_code == 404:
            print('Dataverse {0} was not found.'.format(dataverse))
        elif resp.status_code == 201:
            print('Dataset has been created.')
        else:
            print('Dataset could not be created.')
        return resp
def delete_dataset(self, identifier):
"""Delete dataset.
Delete the dataset whose id is passed:
DELETE http://$SERVER/api/datasets/$id?key=$apiKey
Parameters
----------
identifier : string
Dataverse id or alias.
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/datasets/:persistentId/?persistentId={0}'.format(
identifier)
resp = self.make_delete_request(query_str)
print(resp.status_code)
print(resp.text)
if resp.status_code == 404:
print('Dataset {0} was not found.'.format(identifier))
elif resp.status_code == 200:
print('{0} Dataset has been deleted.'.format(identifier))
elif resp.status_code == 405:
print(
'Published datasets can only be deleted from the GUI. For '
'more information, please refer to '
'https://github.com/IQSS/dataverse/issues/778'
)
else:
print('{0} Dataset could not be deleted.'.format(identifier))
return resp
def get_files(self, doi, version='1'):
"""List metadata of all files of a dataset.
http://guides.dataverse.org/en/latest/api/native-api.html#list-files-in-a-dataset
GET http://$SERVER/api/datasets/$id/versions/$versionId/
files?key=$apiKey
Parameters
----------
doi : string
Doi of dataset.
version : string
Version of dataset.
Returns
-------
requests.Response
Response object of requerst library.
"""
base_str = '/datasets/:persistentId/versions/'
query_str = base_str+'{0}/files?persistentId={1}'.format(version, doi)
resp = self.make_get_request(query_str)
return resp
def get_file(self, identifier):
"""Download a datafile.
File ID
GET /api/access/datafile/$id
DOI
GET http://$SERVER/api/access/datafile/
:persistentId/?persistentId=doi:10.5072/FK2/J8SJZB
Parameters
----------
identifier : string
Doi of datafile.
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/access/datafile/{0}'.format(identifier)
resp = self.make_get_request(query_str)
return resp
def get_file_bundle(self, identifier):
"""Download a datafile in all its formats.
GET /api/access/datafile/bundle/$id
Data Access API calls can now be made using persistent identifiers (in
addition to database ids). This is done by passing the constant
:persistentId where the numeric id of the file is expected, and then
passing the actual persistent id as a query parameter with the name
persistentId.
Parameters
----------
identifier : string
Doi of Datafile.
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/access/datafile/bundle/{0}'.format(identifier)
data = self.make_get_request(query_str)
return data
def upload_file(self, identifier, filename):
"""Add file to a dataset.
Add a file to an existing Dataset. Description and tags are optional:
POST http://$SERVER/api/datasets/$id/add?key=$apiKey
The upload endpoint checks the content of the file, compares it with
existing files and tells if already in the database (most likely via
hashing)
Parameters
----------
identifier : string
Doi of dataset.
filename : string
Full filename with path.
Returns
-------
dict
Response of CURL request, converted to dict().
"""
query_str = self.native_api_base_url
query_str += '/datasets/:persistentId/add?persistentId={0}'.format(
identifier)
shell_command = 'curl -H "X-Dataverse-key: {0}"'.format(
self.api_token)
shell_command += ' -X POST {0} -F file=@{2}'.format(
query_str, filename)
# TODO: is shell=True necessary?
result = sp.run(shell_command, shell=True, stdout=sp.PIPE)
resp = json.loads(result.stdout)
return resp
def get_info_version(self):
"""Get the Dataverse version and build number.
The response contains the version and build numbers.
Requires no api_token
GET http://$SERVER/api/info/version
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/info/version'
resp = self.make_get_request(query_str)
return resp
def get_info_server(self):
"""Get Dataverse Server Name.
This is useful when a Dataverse system is
composed of multiple Java EE servers behind a load balancer.
GET http://$SERVER/api/info/server
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/info/server'
resp = self.make_get_request(query_str)
return resp
def get_info_apiTermsOfUse(self):
"""Get API Terms of Use URL.
The response contains the text value inserted as API Terms of use which
uses the database setting :ApiTermsOfUse.
GET http://$SERVER/api/info/apiTermsOfUse
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/info/apiTermsOfUse'
resp = self.make_get_request(query_str)
return resp
def get_metadatablocks(self):
"""Get info about all metadata blocks.
Lists brief info about all metadata blocks registered in the system.
GET http://$SERVER/api/metadatablocks
Returns
-------
requests.Response
Response object of requerst library.
"""
query_str = '/metadatablocks'
resp = self.make_get_request(query_str)
return resp
    def get_metadatablock(self, identifier):
        """Get info about single metadata block.

        Returns data about the block whose identifier is passed. The
        identifier can either be the block's id or its name.
        GET http://$SERVER/api/metadatablocks/$identifier

        Parameters
        ----------
        identifier : string
            Can be the block's id, or its name.

        Returns
        -------
        requests.Response
            Response object of request library.
        """
        query_str = '/metadatablocks/{0}'.format(identifier)
        resp = self.make_get_request(query_str)
return resp | src/pyDataverse/api.py | from __future__ import absolute_import
from datetime import datetime
import json
from pyDataverse.exceptions import ApiAuthorizationError
from pyDataverse.exceptions import ApiResponseError
from pyDataverse.exceptions import ApiUrlError
from pyDataverse.exceptions import DataverseNotFoundError
from pyDataverse.exceptions import OperationFailedError
from requests import ConnectionError
from requests import delete
from requests import get
from requests import post
import subprocess as sp
"""
Connect and request the Dataverse API Endpoints. Save and use request results.
"""
class Api(object):
"""API class.
Parameters
----------
base_url : string
Base URL of Dataverse instance. Without trailing `/` at the end.
e.g. `http://demo.dataverse.org`
api_token : string
Authenication token for the api.
api_version : string
Dataverse API version. Default: `v1`
Attributes
----------
conn_started : datetime
Description of attribute `conn_started`.
native_base_url : type
Description of attribute `native_base_url`.
base_url
api_token
api_version
"""
def __init__(self, base_url, api_token=None, api_version='v1'):
"""Init an Api() class.
Scheme, host and path combined create the base-url for the API.
See more about url at https://en.wikipedia.org/wiki/URL
"""
# Check and set basic variables.
if not isinstance(base_url, ("".__class__, u"".__class__)):
raise ApiUrlError('base_url {0} is not a string.'.format(base_url))
self.base_url = base_url
if not isinstance(api_version, ("".__class__, u"".__class__)):
raise ApiUrlError('api_version {0} is not a string.'.format(
api_version))
self.api_version = api_version
if api_token:
if not isinstance(api_token, ("".__class__, u"".__class__)):
raise ApiAuthorizationError(
'Api token passed is not a string.')
self.api_token = api_token
self.conn_started = datetime.now()
# Test connection.
query_str = '/info/server'
if base_url and api_version:
self.native_api_base_url = '{0}/api/{1}'.format(self.base_url,
self.api_version)
url = '{0}{1}'.format(self.native_api_base_url, query_str)
try:
resp = get(url)
if resp:
self.status = resp.json()['status']
else:
self.status = 'ERROR'
raise ApiResponseError(
'No response from api request {0}.'.format(url)
)
except KeyError as e:
print('Key not in response {0} {1}.'.format(e, url))
except ConnectionError as e:
self.status = 'ERROR'
print('Could not establish connection to api {0} {1}.'.format(
url, e))
else:
self.status = 'ERROR'
self.native_api_base_url = None
def __str__(self):
"""Return name of Api() class for users.
Returns
-------
string
Naming of the Api() class.
"""
return 'pyDataverse API class'
def make_get_request(self, query_str, params=None, auth=False):
    """Make a GET request against the native API.

    Parameters
    ----------
    query_str : string
        Query path appended to the native API base url,
        e.g. `/info/server`.
    params : dict
        Dictionary of parameters to be passed with the request.
        Default: None
    auth : bool
        Should an api token be used for authentication? Default: False.

    Returns
    -------
    requests.Response
        Response object of the requests library.

    Raises
    ------
    ApiAuthorizationError
        If auth was requested but no token is set, or the server
        answered 401.
    OperationFailedError
        If the request returned any status code other than 200.
    ConnectionError
        If no connection to the api could be established.
    """
    url = '{0}{1}'.format(self.native_api_base_url, query_str)
    if auth:
        if self.api_token:
            if not params:
                params = {}
            params['key'] = self.api_token
        else:
            raise ApiAuthorizationError(
                'GET api token not available {}.'.format(url)
            )
    try:
        resp = get(
            url,
            params=params
        )
    except ConnectionError:
        raise ConnectionError(
            'GET Could not establish connection to api {}.'.format(url)
        )
    # BUG FIX: the original wrapped these checks in `if resp:`. A
    # requests.Response is falsy for any 4xx/5xx status, so the intended
    # 401 / non-200 error handling was unreachable and error responses
    # were returned silently. Check the status code directly instead.
    if resp.status_code == 401:
        raise ApiAuthorizationError(
            'GET Authorization provided is invalid {}.'.format(url)
        )
    elif resp.status_code != 200:
        raise OperationFailedError(
            'GET {} {} not working'.format(resp.status_code, url)
        )
    return resp
def make_post_request(self, query_str, data, auth=False, headers=None,
                      params=None):
    """Make a POST request against the native API.

    Parameters
    ----------
    query_str : string
        Query path appended to the native API base url.
    data : string
        Request body to upload (e.g. a JSON-formatted string).
    auth : bool
        Should an api token be used for authentication? Default: False.
    headers : dict
        HTTP headers to send with the request. Default: None.
    params : dict
        Dictionary of parameters to be passed with the request.
        Default: None

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    url = '{0}{1}'.format(self.native_api_base_url, query_str)
    if auth:
        # No token configured: warn but still attempt the request.
        if not self.api_token:
            print(
                'POST api token not available {}.'.format(url)
            )
        else:
            params = params or {}
            params['key'] = self.api_token
    try:
        resp = post(
            url,
            data=data,
            headers=headers,
            params=params
        )
    except ConnectionError:
        raise ConnectionError(
            'POST Could not establish connection to api {}.'.format(url)
        )
    # The API answers 201 Created on success; anything else is a failure.
    if resp.status_code != 201:
        raise OperationFailedError(
            'POST {} {}'.format(resp.status_code, url)
        )
    return resp
def make_delete_request(self, query_str, auth=False, params=None):
    """Make a DELETE request against the native API.

    Parameters
    ----------
    query_str : string
        Query path appended to the native API base url.
    auth : bool
        Should an api token be used for authentication? Default: False.
    params : dict
        Dictionary of parameters to be passed with the request.
        Default: None

    Returns
    -------
    requests.Response
        Response object of the requests library.

    Raises
    ------
    ConnectionError
        If no connection to the api could be established.
    """
    # BUG FIX: the original built the url from `self.native_base_url`,
    # an attribute that is never set (__init__ sets `native_api_base_url`),
    # so every call raised AttributeError.
    url = '{0}{1}'.format(self.native_api_base_url, query_str)
    if auth:
        if self.api_token:
            if not params:
                params = {}
            params['key'] = self.api_token
        else:
            print(
                'DELETE api token not available {}.'.format(url)
            )
    try:
        # BUG FIX: the original ignored the `params` dict built above and
        # always sent {'key': self.api_token}, leaking the token even when
        # auth=False and dropping any caller-supplied params.
        resp = delete(
            url,
            params=params
        )
        return resp
    except ConnectionError:
        raise ConnectionError(
            'DELETE Could not establish connection to api {}.'.format(url)
        )
def get_dataverse(self, identifier):
    """Get dataverse metadata by alias or id.

    View data about the dataverse identified by `identifier`, which can be
    the id number of the dataverse, its alias, or the special value
    `:root`.
    GET http://$SERVER/api/dataverses/$id

    Parameters
    ----------
    identifier : string
        Can either be a dataverse id (long) or a dataverse alias (more
        robust).

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request('/dataverses/{0}'.format(identifier))
def create_dataverse(self, identifier, json, parent=':root'):
    """Create a dataverse.

    Generates a new dataverse under $id. Expects a JSON content describing
    the dataverse. If $id is omitted, a root dataverse is created. $id can
    either be a dataverse id (long) or a dataverse alias (more robust).
    POST http://$SERVER/api/dataverses/$id?key=$apiKey

    Download the JSON example file and modify it to create dataverses to
    suit your needs. The fields name, alias, and dataverseContacts are
    required. http://guides.dataverse.org/en/latest/
    _downloads/dataverse-complete.json

    Parameters
    ----------
    identifier : string
        Can either be a dataverse id (long) or a dataverse alias (more
        robust). Only used in log/error messages here.
    json : string
        JSON-formatted string for upload.
    parent : string
        Parent dataverse if existing. Default is `:root`.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    # Default is ':root', so this only fires when the caller explicitly
    # passes a falsy parent (None or ''); the request is still attempted.
    if not parent:
        print('No parent dataverse passed.')
    query_str = '/dataverses/{0}'.format(parent)
    resp = self.make_post_request(query_str, json)
    # NOTE(review): make_post_request raises OperationFailedError for any
    # status != 201, so the 404 branch below looks unreachable and
    # DataverseNotFoundError may never be raised — confirm before relying
    # on it.
    if resp.status_code == 404:
        raise DataverseNotFoundError(
            'Dataverse {0} was not found.'.format(parent))
    elif resp.status_code != 201:
        raise OperationFailedError(
            '{0} Dataverse could not be created.'.format(identifier)
        )
    else:
        print('{0} Dataverse has been created.'.format(identifier))
    return resp
def delete_dataverse(self, identifier):
    """Delete dataverse by alias or id.

    Deletes the dataverse whose ID is given:
    DELETE http://$SERVER/api/dataverses/$id?key=$apiKey

    Parameters
    ----------
    identifier : string
        Can either be a dataverse id (long) or a dataverse alias (more
        robust).

    Returns
    -------
    requests.Response
        Response object of the requests library.

    Raises
    ------
    ApiAuthorizationError
        If the request was unauthorized (401).
    DataverseNotFoundError
        If the dataverse does not exist (404).
    OperationFailedError
        For any other non-200 status code.
    """
    query_str = '/dataverses/{0}'.format(identifier)
    resp = self.make_delete_request(query_str)
    if resp.status_code == 401:
        raise ApiAuthorizationError(
            'Delete Dataverse {0} unauthorized.'.format(identifier)
        )
    elif resp.status_code == 404:
        raise DataverseNotFoundError(
            'Dataverse {0} was not found.'.format(identifier)
        )
    elif resp.status_code != 200:
        raise OperationFailedError(
            'Dataverse {0} could not be deleted.'.format(identifier)
        )
    # BUG FIX: the original had `elif == 200` and a trailing `else` after
    # the `!= 200` raise above; the `else` was unreachable dead code.
    # Reaching this point means status_code == 200.
    print('{0} Dataverse has been deleted.'.format(identifier))
    return resp
def get_dataset(self, identifier, is_doi=True):
    """Get metadata of a dataset.

    With Dataverse identifier:
    GET http://$SERVER/api/datasets/$identifier
    With PID:
    GET http://$SERVER/api/datasets/:persistentId/?persistentId=$ID

    Parameters
    ----------
    identifier : string
        Doi of the dataset (or a plain dataset id when `is_doi=False`).
    is_doi : bool
        Is the identifier a Doi? Default: True, as so far the module only
        supports Doi's.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    template = (
        '/datasets/:persistentId/?persistentId={0}' if is_doi
        else '/datasets/{0}'
    )
    return self.make_get_request(template.format(identifier))
def get_dataset_export(self, export_format, identifier):
    """Get metadata of a dataset exported in a given format.

    CORS Export the metadata of the current published version of a dataset
    in various formats:
    Formats: 'ddi', 'oai_ddi', 'dcterms', 'oai_dc', 'schema.org',
    'dataverse_json'
    GET http://$SERVER/api/datasets/
    export?exporter=ddi&persistentId=$persistentId

    Parameters
    ----------
    export_format : string
        Export format as a string.
    identifier : string
        Doi of the dataset.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request(
        '/datasets/export?exporter={0}&persistentId={1}'.format(
            export_format, identifier)
    )
def create_dataset(self, dataverse, json):
    """Add a dataset to a dataverse.

    POST http://$SERVER/api/dataverses/$dataverse/datasets --upload-file
    FILENAME
    http://guides.dataverse.org/en/latest/api/native-api.html#create-a-dataset-in-a-dataverse

    To create a dataset, you must create a JSON file containing all the
    metadata you want (e.g. dataset-finch1.json), then target the chosen
    dataverse by alias (e.g. "root") or database id (e.g. "1"). The
    initial version state will be set to DRAFT.

    Parameters
    ----------
    dataverse : string
        Alias for dataverse.
    json : string
        Dataset metadata as a json-formatted string.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    resp = self.make_post_request(
        '/dataverses/{0}/datasets'.format(dataverse), json)
    status = resp.status_code
    if status == 404:
        print('Dataverse {0} was not found.'.format(dataverse))
    elif status == 201:
        print('Dataset has been created.')
    else:
        print('Dataset could not be created.')
    return resp
def delete_dataset(self, identifier):
    """Delete dataset.

    Delete the dataset whose persistent identifier is passed:
    DELETE http://$SERVER/api/datasets/$id?key=$apiKey

    Parameters
    ----------
    identifier : string
        Persistent identifier (Doi) of the dataset.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    query_str = '/datasets/:persistentId/?persistentId={0}'.format(
        identifier)
    resp = self.make_delete_request(query_str)
    # Removed two leftover debug prints of the raw status code and
    # response body that the original emitted on every call.
    if resp.status_code == 404:
        print('Dataset {0} was not found.'.format(identifier))
    elif resp.status_code == 200:
        print('{0} Dataset has been deleted.'.format(identifier))
    elif resp.status_code == 405:
        print(
            'Published datasets can only be deleted from the GUI. For '
            'more information, please refer to '
            'https://github.com/IQSS/dataverse/issues/778'
        )
    else:
        print('{0} Dataset could not be deleted.'.format(identifier))
    return resp
def get_files(self, doi, version='1'):
    """List metadata of all files of a dataset.

    GET http://$SERVER/api/datasets/$id/versions/$versionId/
    files?key=$apiKey
    http://guides.dataverse.org/en/latest/api/native-api.html#list-files-in-a-dataset

    Parameters
    ----------
    doi : string
        Doi of the dataset.
    version : string
        Version of the dataset. Default: '1'.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    query_str = (
        '/datasets/:persistentId/versions/'
        '{0}/files?persistentId={1}'.format(version, doi)
    )
    return self.make_get_request(query_str)
def get_file(self, identifier):
    """Download a datafile.

    File ID:
    GET /api/access/datafile/$id
    DOI:
    GET http://$SERVER/api/access/datafile/
    :persistentId/?persistentId=doi:10.5072/FK2/J8SJZB

    Parameters
    ----------
    identifier : string
        Identifier of the datafile.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request('/access/datafile/{0}'.format(identifier))
def get_file_bundle(self, identifier):
    """Download a datafile in all its formats.

    GET /api/access/datafile/bundle/$id

    Data Access API calls can also be made using persistent identifiers
    (in addition to database ids): pass the constant :persistentId where
    the numeric id of the file is expected, and the actual persistent id
    as the query parameter `persistentId`.

    Parameters
    ----------
    identifier : string
        Identifier of the datafile.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request(
        '/access/datafile/bundle/{0}'.format(identifier))
def upload_file(self, identifier, filename):
    """Add a file to a dataset.

    Add a file to an existing Dataset:
    POST http://$SERVER/api/datasets/$id/add?key=$apiKey

    The upload endpoint checks the content of the file, compares it with
    existing files and tells if already in the database (most likely via
    hashing).

    Parameters
    ----------
    identifier : string
        Persistent identifier (Doi) of the dataset.
    filename : string
        Full filename with path.

    Returns
    -------
    dict
        Response of the curl request, converted to dict().
    """
    url = '{0}/datasets/:persistentId/add?persistentId={1}'.format(
        self.native_api_base_url, identifier)
    # BUG FIX: the original built one shell string and formatted
    # '-F file=@{2}' with only two .format() arguments, raising IndexError
    # on every call. Passing an argument list (shell=False) fixes that and
    # also avoids shell injection via `identifier`/`filename`.
    command = [
        'curl',
        '-H', 'X-Dataverse-key: {0}'.format(self.api_token),
        '-X', 'POST',
        url,
        '-F', 'file=@{0}'.format(filename),
    ]
    result = sp.run(command, stdout=sp.PIPE)
    resp = json.loads(result.stdout)
    return resp
def get_info_version(self):
    """Get the Dataverse version and build number.

    The response contains the version and build numbers. Requires no
    api_token.
    GET http://$SERVER/api/info/version

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request('/info/version')
def get_info_server(self):
    """Get the Dataverse server name.

    This is useful when a Dataverse system is composed of multiple Java EE
    servers behind a load balancer.
    GET http://$SERVER/api/info/server

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request('/info/server')
def get_info_apiTermsOfUse(self):
    """Get the API Terms of Use.

    The response contains the text value inserted as API Terms of use,
    which uses the database setting :ApiTermsOfUse.
    GET http://$SERVER/api/info/apiTermsOfUse

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request('/info/apiTermsOfUse')
def get_metadatablocks(self):
    """Get info about all metadata blocks.

    Lists brief info about all metadata blocks registered in the system.
    GET http://$SERVER/api/metadatablocks

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    return self.make_get_request('/metadatablocks')
def get_metadatablock(self, identifier):
    """Get info about a single metadata block.

    Returns data about the block whose identifier is passed; the
    identifier can either be the block's id or its name.
    GET http://$SERVER/api/metadatablocks/$identifier

    Parameters
    ----------
    identifier : string
        Can be the block's id, or its name.

    Returns
    -------
    requests.Response
        Response object of the requests library.
    """
    resp = self.make_get_request('/metadatablocks/{0}'.format(identifier))
    return resp
from unittest import mock
import pytest
from submission import helpers
# send_email with an HTML body: checks subject, from (Django's
# DEFAULT_FROM_EMAIL), reply-to, recipients and the plain-text body on the
# message captured by Django's `mailoutbox` fixture.
def test_send_email_with_html(mailoutbox, settings):
    helpers.send_email(
        subject='this thing',
        reply_to=['<EMAIL>'],
        recipients=['<EMAIL>'],
        text_body='Hello',
        html_body='<a>Hello</a>',
    )
    message = mailoutbox[0]
    assert message.subject == 'this thing'
    assert message.from_email == settings.DEFAULT_FROM_EMAIL
    assert message.reply_to == ['<EMAIL>']
    assert message.to == ['<EMAIL>']
    assert message.body == 'Hello'


# Same checks without passing an html_body.
def test_send_email_without_html(mailoutbox, settings):
    helpers.send_email(
        subject='this thing',
        reply_to=['<EMAIL>'],
        recipients=['<EMAIL>'],
        text_body='Hello',
    )
    message = mailoutbox[0]
    assert message.subject == 'this thing'
    assert message.from_email == settings.DEFAULT_FROM_EMAIL
    assert message.reply_to == ['<EMAIL>']
    assert list(message.to) == ['<EMAIL>']
    assert message.body == 'Hello'
# ZendeskClient.get_or_create_user should call the underlying client's
# users.create_or_update exactly once, passing a ZendeskUser built from the
# given name/email (ZendeskUser itself is patched here).
@mock.patch('submission.helpers.ZendeskUser')
def test_zendesk_client_create_user(mock_user):
    client = helpers.ZendeskClient(
        email='<EMAIL>',
        token='token<PASSWORD>',
        subdomain='subdomain123',
        custom_field_id=123,
    )
    with mock.patch.object(client.client.users, 'create_or_update') as stub:
        client.get_or_create_user(
            full_name='<NAME>', email_address='<EMAIL>'
        )
    assert stub.call_count == 1
    assert stub.call_args == mock.call(
        mock_user(name='<NAME>', email='<EMAIL>')
    )
# Each parameter row is, in order: (subject, custom_field_id, payload,
# service_name, expected tags, expected custom_fields, expected
# description). Rows cover: a plain payload, an empty payload, extra
# `_custom_fields` entries merged after the service-name field, an empty
# `_custom_fields` list, `_tags` passed through, and an empty `_tags` list.
@pytest.mark.parametrize(
    'parameters',
    [
        [
            'subject123',
            123,
            {'field': 'value'},
            'some-service-name',
            None,
            [{'id': 123, 'value': 'some-service-name'}],
            'Field: value',
        ],
        [
            'subject123',
            123,
            {},
            'some-service-name',
            None,
            [{'id': 123, 'value': 'some-service-name'}],
            '',
        ],
        [
            'subject123',
            123,
            {
                'field': 'value',
                '_custom_fields': [
                    {'id': '11', 'value': 'v1'},
                    {'id': '22', 'value': 'v2'},
                ],
            },
            'some-service-name',
            None,
            [
                {'id': 123, 'value': 'some-service-name'},
                {'id': '11', 'value': 'v1'},
                {'id': '22', 'value': 'v2'},
            ],
            'Field: value',
        ],
        [
            'subject123',
            123,
            {'field': 'value', '_custom_fields': []},
            'some-service-name',
            None,
            [{'id': 123, 'value': 'some-service-name'}],
            'Field: value',
        ],
        [
            'subject123',
            123,
            {'field': 'value', '_tags': ['t1', 't2']},
            'some-service-name',
            ['t1', 't2'],
            [{'id': 123, 'value': 'some-service-name'}],
            'Field: value',
        ],
        [
            'subject123',
            '123',
            {'field': 'value', '_tags': []},
            'some-service-name',
            None,
            [{'id': '123', 'value': 'some-service-name'}],
            'Field: value',
        ],
    ],
)
@mock.patch('submission.helpers.Ticket')
def test_zendesk_client_create_ticket(mock_ticket, parameters, settings):
    (
        subject,
        custom_field_id,
        payload,
        service_name,
        tags,
        custom_fields,
        description,
    ) = parameters
    client = helpers.ZendeskClient(
        email='<EMAIL>',
        token='token<PASSWORD>',
        subdomain='subdomain123',
        custom_field_id=custom_field_id,
    )
    user = mock.Mock()
    # Replace the real zendesk client so no network calls happen.
    client.client = mock.Mock()
    client.create_ticket(
        subject=subject, payload=payload, zendesk_user=user, service_name=service_name
    )
    assert mock_ticket.call_count == 1
    assert mock_ticket.call_args == mock.call(
        subject=subject,
        description=description,
        submitter_id=user.id,
        requester_id=user.id,
        tags=tags,
        custom_fields=custom_fields,
    )
    assert client.client.tickets.create.call_args == mock.call(mock_ticket())
# create_zendesk_ticket should look up credentials for the default
# subdomain in settings.ZENDESK_CREDENTIALS, build one ZendeskClient with
# them, resolve the user, and pass the user into create_ticket.
@mock.patch('submission.helpers.ZendeskClient')
def test_create_zendesk_ticket(mock_zendesk_client, settings):
    zendesk_email = '<EMAIL>'
    zendesk_token = '<PASSWORD>'
    settings.ZENDESK_CREDENTIALS = {
        settings.ZENDESK_SUBDOMAIN_DEFAULT: {
            'token': zendesk_token,
            'email': zendesk_email,
            'custom_field_id': '1234',
        }
    }
    helpers.create_zendesk_ticket(
        subject='subject123',
        full_name='<NAME>',
        email_address='<EMAIL>',
        payload={'field': 'value'},
        service_name='some-service',
        subdomain=settings.ZENDESK_SUBDOMAIN_DEFAULT,
    )
    assert mock_zendesk_client.call_count == 1
    assert mock_zendesk_client.call_args == mock.call(
        email=zendesk_email,
        token=zendesk_token,
        subdomain=settings.ZENDESK_SUBDOMAIN_DEFAULT,
        custom_field_id='1234',
    )
    client = mock_zendesk_client()
    assert client.get_or_create_user.call_count == 1
    assert client.get_or_create_user.call_args == mock.call(
        full_name='<NAME>',
        email_address='<EMAIL>',
    )
    assert client.get_or_create_user.call_count == 1
    assert client.create_ticket.call_args == mock.call(
        subject='subject123',
        payload={'field': 'value'},
        zendesk_user=client.get_or_create_user(),
        service_name='some-service',
    )
# Credentials for a non-default subdomain key ('123') are used when that
# subdomain is requested.
@mock.patch('submission.helpers.ZendeskClient')
def test_create_zendesk_ticket_subdomain(mock_zendesk_client, settings):
    zendesk_email = '<EMAIL>'
    zendesk_token = '<PASSWORD>'
    settings.ZENDESK_CREDENTIALS = {
        '123': {
            'token': zendesk_token,
            'email': zendesk_email,
            'custom_field_id': '1234',
        }
    }
    helpers.create_zendesk_ticket(
        subject='subject123',
        full_name='<NAME>',
        email_address='<EMAIL>',
        payload={'field': 'value'},
        service_name='some-service',
        subdomain='123',
    )
    assert mock_zendesk_client.call_count == 1
    assert mock_zendesk_client.call_args == mock.call(
        email=zendesk_email,
        token=zendesk_token,
        subdomain='123',
        custom_field_id='1234',
    )


# A subdomain with no configured credentials raises NotImplementedError.
@mock.patch('submission.helpers.ZendeskClient')
def test_create_zendesk_ticket_unsupported_subdomain(mock_zendesk_client, settings):
    settings.ZENDESK_CREDENTIALS = {}
    with pytest.raises(NotImplementedError):
        helpers.create_zendesk_ticket(
            subject='subject123',
            full_name='<NAME>',
            email_address='<EMAIL>',
            payload={'field': 'value'},
            service_name='some-service',
            subdomain='1',
        )
# send_gov_notify_email must construct a NotificationsAPIClient with
# settings.GOV_NOTIFY_API_KEY and forward all kwargs to
# send_email_notification.
@mock.patch('submission.helpers.NotificationsAPIClient')
def test_send_gov_notify_email(mock_notify_client, settings):
    settings.GOV_NOTIFY_API_KEY = '123456'
    helpers.send_gov_notify_email(
        email_address='<EMAIL>',
        template_id='123-456-789',
        personalisation={'title': 'Mr'},
        email_reply_to_id='123',
    )
    assert mock_notify_client.call_count == 1
    assert mock_notify_client.call_args == mock.call('123456')
    assert mock_notify_client().send_email_notification.call_count == 1
    assert mock_notify_client().send_email_notification.call_args == mock.call(
        email_address='<EMAIL>',
        template_id='123-456-789',
        personalisation={'title': 'Mr'},
        email_reply_to_id='123',
    )


# Letter variant: uses the separate GOV_NOTIFY_LETTER_API_KEY and the
# send_letter_notification endpoint.
@mock.patch('submission.helpers.NotificationsAPIClient')
def test_send_gov_notify_letter(mock_notify_client, settings):
    settings.GOV_NOTIFY_LETTER_API_KEY = 'letterkey123'
    helpers.send_gov_notify_letter(
        template_id='123-456-789-2222',
        personalisation={
            'address_line_1': 'The Occupier',
            'address_line_2': '123 High Street',
            'postcode': 'SW14 6BF',
            'name': '<NAME>',
        },
    )
    assert mock_notify_client.call_count == 1
    assert mock_notify_client.call_args == mock.call('letterkey123')
    assert mock_notify_client().send_letter_notification.call_count == 1
    assert mock_notify_client().send_letter_notification.call_args == (
        mock.call(
            template_id='123-456-789-2222',
            personalisation={
                'address_line_1': 'The Occupier',
                'address_line_2': '123 High Street',
                'postcode': 'SW14 6BF',
                'name': '<NAME>',
            },
        )
    )
# send_pardot POSTs the payload to the given pardot url without following
# redirects.
@mock.patch('requests.post')
def test_send_pardot(mock_post):
    # Renamed from `test_send_pardor` (typo); pytest discovery is
    # unaffected since the name still matches `test_*`.
    helpers.send_pardot(
        pardot_url='http://www.example.com/some/submission/path/',
        payload={'field': 'value'},
    )
    assert mock_post.call_count == 1
    assert mock_post.call_args == mock.call(
        'http://www.example.com/some/submission/path/',
        {'field': 'value'},
        allow_redirects=False,
    )
# get_sender_email_address / get_recipient_email_address are exercised per
# action type via the `*_payload` fixtures (defined in conftest, not shown
# here); each fixture supplies a `meta` dict for one action kind.
def test_get_sender_email_address_email_action(email_action_payload):
    email = helpers.get_sender_email_address(email_action_payload['meta'])
    assert email == '<EMAIL>'


def test_get_sender_email_address_zendesk_action(zendesk_action_payload):
    email = helpers.get_sender_email_address(zendesk_action_payload['meta'])
    assert email == '<EMAIL>'


# Without an explicit 'sender' key the notify action still resolves a
# sender address.
def test_get_sender_email_address_notify_action(gov_notify_email_action_payload):
    del gov_notify_email_action_payload['meta']['sender']
    email = helpers.get_sender_email_address(gov_notify_email_action_payload['meta'])
    assert email == '<EMAIL>'


# Pardot submissions have no sender address.
def test_get_sender_email_address_pardot_action(pardot_action_payload):
    email = helpers.get_sender_email_address(pardot_action_payload['meta'])
    assert email is None


def test_get_sender_email_address_sender(gov_notify_email_action_payload):
    email = helpers.get_sender_email_address(gov_notify_email_action_payload['meta'])
    assert email == '<EMAIL>'


def test_get_recipient_email_address_notify_action(gov_notify_email_action_payload):
    email = helpers.get_recipient_email_address(gov_notify_email_action_payload['meta'])
    assert email == '<EMAIL>'


# Zendesk recipients are rendered as '<subdomain>:<service name>'.
def test_get_recipient_email_address_zendesk_action(zendesk_action_payload, settings):
    zendesk_action_payload['meta']['subdomain'] = settings.ZENDESK_SUBDOMAIN_DEFAULT
    email = helpers.get_recipient_email_address(zendesk_action_payload['meta'])
    assert email == f'{settings.ZENDESK_SUBDOMAIN_DEFAULT}:Market Access'


# Multiple email recipients are joined with a comma.
def test_get_recipient_email_address_email_action(email_action_payload):
    email = helpers.get_recipient_email_address(email_action_payload['meta'])
    assert email == '<EMAIL>,<EMAIL>'


def test_get_recipient_email_address_pardot_action(pardot_action_payload):
    email = helpers.get_recipient_email_address(pardot_action_payload['meta'])
    assert email is None


def test_get_recipient_email_address_letter_action(gov_notify_letter_action_payload):
    email = helpers.get_recipient_email_address(
        gov_notify_letter_action_payload['meta']
    )
    assert email is None
import pytest
from submission import helpers
def test_send_email_with_html(mailoutbox, settings):
helpers.send_email(
subject='this thing',
reply_to=['<EMAIL>'],
recipients=['<EMAIL>'],
text_body='Hello',
html_body='<a>Hello</a>',
)
message = mailoutbox[0]
assert message.subject == 'this thing'
assert message.from_email == settings.DEFAULT_FROM_EMAIL
assert message.reply_to == ['<EMAIL>']
assert message.to == ['<EMAIL>']
assert message.body == 'Hello'
def test_send_email_without_html(mailoutbox, settings):
helpers.send_email(
subject='this thing',
reply_to=['<EMAIL>'],
recipients=['<EMAIL>'],
text_body='Hello',
)
message = mailoutbox[0]
assert message.subject == 'this thing'
assert message.from_email == settings.DEFAULT_FROM_EMAIL
assert message.reply_to == ['<EMAIL>']
assert list(message.to) == ['<EMAIL>']
assert message.body == 'Hello'
@mock.patch('submission.helpers.ZendeskUser')
def test_zendesk_client_create_user(mock_user):
client = helpers.ZendeskClient(
email='<EMAIL>',
token='token<PASSWORD>',
subdomain='subdomain123',
custom_field_id=123,
)
with mock.patch.object(client.client.users, 'create_or_update') as stub:
client.get_or_create_user(
full_name='<NAME>', email_address='<EMAIL>'
)
assert stub.call_count == 1
assert stub.call_args == mock.call(
mock_user(name='<NAME>', email='<EMAIL>')
)
@pytest.mark.parametrize(
'parameters',
[
[
'subject123',
123,
{'field': 'value'},
'some-service-name',
None,
[{'id': 123, 'value': 'some-service-name'}],
'Field: value',
],
[
'subject123',
123,
{},
'some-service-name',
None,
[{'id': 123, 'value': 'some-service-name'}],
'',
],
[
'subject123',
123,
{
'field': 'value',
'_custom_fields': [
{'id': '11', 'value': 'v1'},
{'id': '22', 'value': 'v2'},
],
},
'some-service-name',
None,
[
{'id': 123, 'value': 'some-service-name'},
{'id': '11', 'value': 'v1'},
{'id': '22', 'value': 'v2'},
],
'Field: value',
],
[
'subject123',
123,
{'field': 'value', '_custom_fields': []},
'some-service-name',
None,
[{'id': 123, 'value': 'some-service-name'}],
'Field: value',
],
[
'subject123',
123,
{'field': 'value', '_tags': ['t1', 't2']},
'some-service-name',
['t1', 't2'],
[{'id': 123, 'value': 'some-service-name'}],
'Field: value',
],
[
'subject123',
'123',
{'field': 'value', '_tags': []},
'some-service-name',
None,
[{'id': '123', 'value': 'some-service-name'}],
'Field: value',
],
],
)
@mock.patch('submission.helpers.Ticket')
def test_zendesk_client_create_ticket(mock_ticket, parameters, settings):
(
subject,
custom_field_id,
payload,
service_name,
tags,
custom_fields,
description,
) = parameters
client = helpers.ZendeskClient(
email='<EMAIL>',
token='token<PASSWORD>',
subdomain='subdomain123',
custom_field_id=custom_field_id,
)
user = mock.Mock()
client.client = mock.Mock()
client.create_ticket(
subject=subject, payload=payload, zendesk_user=user, service_name=service_name
)
assert mock_ticket.call_count == 1
assert mock_ticket.call_args == mock.call(
subject=subject,
description=description,
submitter_id=user.id,
requester_id=user.id,
tags=tags,
custom_fields=custom_fields,
)
assert client.client.tickets.create.call_args == mock.call(mock_ticket())
@mock.patch('submission.helpers.ZendeskClient')
def test_create_zendesk_ticket(mock_zendesk_client, settings):
zendesk_email = '<EMAIL>'
zendesk_token = '<PASSWORD>'
settings.ZENDESK_CREDENTIALS = {
settings.ZENDESK_SUBDOMAIN_DEFAULT: {
'token': zendesk_token,
'email': zendesk_email,
'custom_field_id': '1234',
}
}
helpers.create_zendesk_ticket(
subject='subject123',
full_name='<NAME>',
email_address='<EMAIL>',
payload={'field': 'value'},
service_name='some-service',
subdomain=settings.ZENDESK_SUBDOMAIN_DEFAULT,
)
assert mock_zendesk_client.call_count == 1
assert mock_zendesk_client.call_args == mock.call(
email=zendesk_email,
token=zendesk_token,
subdomain=settings.ZENDESK_SUBDOMAIN_DEFAULT,
custom_field_id='1234',
)
client = mock_zendesk_client()
assert client.get_or_create_user.call_count == 1
assert client.get_or_create_user.call_args == mock.call(
full_name='<NAME>',
email_address='<EMAIL>',
)
assert client.get_or_create_user.call_count == 1
assert client.create_ticket.call_args == mock.call(
subject='subject123',
payload={'field': 'value'},
zendesk_user=client.get_or_create_user(),
service_name='some-service',
)
@mock.patch('submission.helpers.ZendeskClient')
def test_create_zendesk_ticket_subdomain(mock_zendesk_client, settings):
zendesk_email = '<EMAIL>'
zendesk_token = '<PASSWORD>'
settings.ZENDESK_CREDENTIALS = {
'123': {
'token': zendesk_token,
'email': zendesk_email,
'custom_field_id': '1234',
}
}
helpers.create_zendesk_ticket(
subject='subject123',
full_name='<NAME>',
email_address='<EMAIL>',
payload={'field': 'value'},
service_name='some-service',
subdomain='123',
)
assert mock_zendesk_client.call_count == 1
assert mock_zendesk_client.call_args == mock.call(
email=zendesk_email,
token=zendesk_token,
subdomain='123',
custom_field_id='1234',
)
@mock.patch('submission.helpers.ZendeskClient')
def test_create_zendesk_ticket_unsupported_subdomain(mock_zendesk_client, settings):
settings.ZENDESK_CREDENTIALS = {}
with pytest.raises(NotImplementedError):
helpers.create_zendesk_ticket(
subject='subject123',
full_name='<NAME>',
email_address='<EMAIL>',
payload={'field': 'value'},
service_name='some-service',
subdomain='1',
)
@mock.patch('submission.helpers.NotificationsAPIClient')
def test_send_gov_notify_email(mock_notify_client, settings):
settings.GOV_NOTIFY_API_KEY = '123456'
helpers.send_gov_notify_email(
email_address='<EMAIL>',
template_id='123-456-789',
personalisation={'title': 'Mr'},
email_reply_to_id='123',
)
assert mock_notify_client.call_count == 1
assert mock_notify_client.call_args == mock.call('123456')
assert mock_notify_client().send_email_notification.call_count == 1
assert mock_notify_client().send_email_notification.call_args == mock.call(
email_address='<EMAIL>',
template_id='123-456-789',
personalisation={'title': 'Mr'},
email_reply_to_id='123',
)
@mock.patch('submission.helpers.NotificationsAPIClient')
def test_send_gov_notify_letter(mock_notify_client, settings):
settings.GOV_NOTIFY_LETTER_API_KEY = 'letterkey123'
helpers.send_gov_notify_letter(
template_id='123-456-789-2222',
personalisation={
'address_line_1': 'The Occupier',
'address_line_2': '123 High Street',
'postcode': 'SW14 6BF',
'name': '<NAME>',
},
)
assert mock_notify_client.call_count == 1
assert mock_notify_client.call_args == mock.call('letterkey123')
assert mock_notify_client().send_letter_notification.call_count == 1
assert mock_notify_client().send_letter_notification.call_args == (
mock.call(
template_id='123-456-789-2222',
personalisation={
'address_line_1': 'The Occupier',
'address_line_2': '123 High Street',
'postcode': 'SW14 6BF',
'name': '<NAME>',
},
)
)
@mock.patch('requests.post')
def test_send_pardor(mock_post):
helpers.send_pardot(
pardot_url='http://www.example.com/some/submission/path/',
payload={'field': 'value'},
)
assert mock_post.call_count == 1
assert mock_post.call_args == mock.call(
'http://www.example.com/some/submission/path/',
{'field': 'value'},
allow_redirects=False,
)
def test_get_sender_email_address_email_action(email_action_payload):
email = helpers.get_sender_email_address(email_action_payload['meta'])
assert email == '<EMAIL>'
def test_get_sender_email_address_zendesk_action(zendesk_action_payload):
email = helpers.get_sender_email_address(zendesk_action_payload['meta'])
assert email == '<EMAIL>'
def test_get_sender_email_address_notify_action(gov_notify_email_action_payload):
del gov_notify_email_action_payload['meta']['sender']
email = helpers.get_sender_email_address(gov_notify_email_action_payload['meta'])
assert email == '<EMAIL>'
def test_get_sender_email_address_pardot_action(pardot_action_payload):
email = helpers.get_sender_email_address(pardot_action_payload['meta'])
assert email is None
def test_get_sender_email_address_sender(gov_notify_email_action_payload):
email = helpers.get_sender_email_address(gov_notify_email_action_payload['meta'])
assert email == '<EMAIL>'
def test_get_recipient_email_address_notify_action(gov_notify_email_action_payload):
email = helpers.get_recipient_email_address(gov_notify_email_action_payload['meta'])
assert email == '<EMAIL>'
def test_get_recipient_email_address_zendesk_action(zendesk_action_payload, settings):
zendesk_action_payload['meta']['subdomain'] = settings.ZENDESK_SUBDOMAIN_DEFAULT
email = helpers.get_recipient_email_address(zendesk_action_payload['meta'])
assert email == f'{settings.ZENDESK_SUBDOMAIN_DEFAULT}:Market Access'
def test_get_recipient_email_address_email_action(email_action_payload):
email = helpers.get_recipient_email_address(email_action_payload['meta'])
assert email == '<EMAIL>,<EMAIL>'
def test_get_recipient_email_address_pardot_action(pardot_action_payload):
email = helpers.get_recipient_email_address(pardot_action_payload['meta'])
assert email is None
def test_get_recipient_email_address_letter_action(gov_notify_letter_action_payload):
email = helpers.get_recipient_email_address(
gov_notify_letter_action_payload['meta']
)
assert email is None | 0.560974 | 0.376938 |