| text (string, lengths 0-1.25M) | meta (string, lengths 47-1.89k) |
|---|---|
Casey likes Willy Wonka and the Chocolate Factory and paperclips. If he wears a fake moustache, he resembles Kurt Vonnegut.
|
{"hexsha": "d48662727fe37ce0d2b85c60e434a42acbad39f2", "size": 136, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/CaseyMcGrath.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/CaseyMcGrath.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/CaseyMcGrath.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 68.0, "max_line_length": 135, "alphanum_fraction": 0.8161764706, "num_tokens": 35}
|
import unittest
import numpy as np
from stock_data_analysis_module.indicators import stochastic_oscillator
class StochasticOscillatorTestCase(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
self._empty_sequence = []
self._empty_ndarray = np.zeros((0,))
def test_invalid_length(self):
with self.assertRaises(ValueError):
stochastic_oscillator.stochastic_oscillator(self._empty_sequence,
self._empty_sequence,
self._empty_sequence,
period=10)
with self.assertRaises(ValueError):
stochastic_oscillator.stochastic_oscillator(self._empty_ndarray,
self._empty_ndarray,
self._empty_ndarray,
period=10)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "d17b70cbb507f68518c8f02038fe11a806828fb0", "size": 1110, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/test/stock_data_analysis_module/indicators/test_stochastic_oscillator.py", "max_stars_repo_name": "Freitacr/ML-StockAnalysisProject", "max_stars_repo_head_hexsha": "37411c1204ecf69040ba2a1658013e4bf71eef9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test/stock_data_analysis_module/indicators/test_stochastic_oscillator.py", "max_issues_repo_name": "Freitacr/ML-StockAnalysisProject", "max_issues_repo_head_hexsha": "37411c1204ecf69040ba2a1658013e4bf71eef9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-01-05T16:42:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-18T00:20:18.000Z", "max_forks_repo_path": "src/test/stock_data_analysis_module/indicators/test_stochastic_oscillator.py", "max_forks_repo_name": "Freitacr/ML-StockAnalysisProject", "max_forks_repo_head_hexsha": "37411c1204ecf69040ba2a1658013e4bf71eef9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-21T04:49:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-21T04:49:51.000Z", "avg_line_length": 37.0, "max_line_length": 78, "alphanum_fraction": 0.4981981982, "include": true, "reason": "import numpy", "num_tokens": 166}
|
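The indicator module under test is not shown here; below is a minimal sketch of a `stochastic_oscillator` consistent with these tests. The equal-length check and the %K formula are assumptions for illustration, not the project's actual implementation.

```python
import numpy as np

def stochastic_oscillator(high, low, close, period=10):
    """Hypothetical %K stochastic oscillator consistent with the tests above.

    Raises ValueError for inputs shorter than ``period``, which is what the
    empty-sequence tests expect.
    """
    high = np.asarray(high, dtype=float)
    low = np.asarray(low, dtype=float)
    close = np.asarray(close, dtype=float)
    if not (len(high) == len(low) == len(close)):
        raise ValueError("high, low and close must have equal length")
    if len(close) < period:
        raise ValueError("input length must be at least `period`")
    # %K = 100 * (close - lowest low) / (highest high - lowest low), windowed
    k = np.empty(len(close) - period + 1)
    for i in range(len(k)):
        lo = low[i:i + period].min()
        hi = high[i:i + period].max()
        k[i] = 100.0 * (close[i + period - 1] - lo) / (hi - lo)
    return k
```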
import os
import pickle
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from statsmodels.tsa.seasonal import STL
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "stl_test_results.csv")
results = pd.read_csv(file_path)
results.columns = [c.strip() for c in results.columns]
results.scenario = results.scenario.apply(str.strip)
results = results.set_index(["scenario", "idx"])
@pytest.fixture(scope="module", params=[True, False])
def robust(request):
return request.param
def default_kwargs_base():
file_path = os.path.join(cur_dir, "results", "stl_co2.csv")
co2 = np.asarray(pd.read_csv(file_path, header=None).iloc[:, 0])
y = co2
nobs = y.shape[0]
nperiod = 12
work = np.zeros((nobs + 2 * nperiod, 7))
rw = np.ones(nobs)
trend = np.zeros(nobs)
season = np.zeros(nobs)
return dict(
y=y,
n=y.shape[0],
np=nperiod,
ns=35,
nt=19,
nl=13,
no=2,
ni=1,
nsjump=4,
ntjump=2,
nljump=2,
isdeg=1,
itdeg=1,
ildeg=1,
rw=rw,
trend=trend,
season=season,
work=work,
)
@pytest.fixture(scope="function")
def default_kwargs():
return default_kwargs_base()
@pytest.fixture(scope="function")
def default_kwargs_short():
kwargs = default_kwargs_base()
y = kwargs["y"][:-1]
nobs = y.shape[0]
work = np.zeros((nobs + 2 * kwargs["np"], 7))
rw = np.ones(nobs)
trend = np.zeros(nobs)
season = np.zeros(nobs)
kwargs.update(
dict(y=y, n=nobs, rw=rw, trend=trend, season=season, work=work)
)
return kwargs
def _to_class_kwargs(kwargs, robust=False):
endog = kwargs["y"]
np = kwargs["np"]
ns = kwargs["ns"]
nt = kwargs["nt"]
nl = kwargs["nl"]
isdeg = kwargs["isdeg"]
itdeg = kwargs["itdeg"]
ildeg = kwargs["ildeg"]
nsjump = kwargs["nsjump"]
ntjump = kwargs["ntjump"]
nljump = kwargs["nljump"]
outer_iter = kwargs["no"]
inner_iter = kwargs["ni"]
class_kwargs = dict(
endog=endog,
period=np,
seasonal=ns,
trend=nt,
low_pass=nl,
seasonal_deg=isdeg,
trend_deg=itdeg,
low_pass_deg=ildeg,
robust=robust,
seasonal_jump=nsjump,
trend_jump=ntjump,
low_pass_jump=nljump,
)
return class_kwargs, outer_iter, inner_iter
def test_baseline_class(default_kwargs):
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs)
mod = STL(**class_kwargs)
res = mod.fit(outer_iter=outer, inner_iter=inner)
expected = results.loc["baseline"].sort_index()
assert_allclose(res.trend, expected.trend)
assert_allclose(res.seasonal, expected.season)
assert_allclose(res.weights, expected.rw)
resid = class_kwargs["endog"] - expected.trend - expected.season
assert_allclose(res.resid, resid)
def test_short_class(default_kwargs_short):
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs_short)
mod = STL(**class_kwargs)
res = mod.fit(outer_iter=outer, inner_iter=inner)
expected = results.loc["short"].sort_index()
assert_allclose(res.seasonal, expected.season)
assert_allclose(res.trend, expected.trend)
assert_allclose(res.weights, expected.rw)
def test_nljump_1_class(default_kwargs):
default_kwargs["nljump"] = 1
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs)
mod = STL(**class_kwargs)
res = mod.fit(outer_iter=outer, inner_iter=inner)
expected = results.loc["nljump-1"].sort_index()
assert_allclose(res.seasonal, expected.season)
assert_allclose(res.trend, expected.trend)
assert_allclose(res.weights, expected.rw)
def test_ntjump_1_class(default_kwargs):
default_kwargs["ntjump"] = 1
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs)
mod = STL(**class_kwargs)
res = mod.fit(outer_iter=outer, inner_iter=inner)
expected = results.loc["ntjump-1"].sort_index()
assert_allclose(res.seasonal, expected.season)
assert_allclose(res.trend, expected.trend)
assert_allclose(res.weights, expected.rw)
def test_nljump_1_ntjump_1_class(default_kwargs):
default_kwargs["nljump"] = 1
default_kwargs["ntjump"] = 1
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs)
mod = STL(**class_kwargs)
res = mod.fit(outer_iter=outer, inner_iter=inner)
expected = results.loc["nljump-1-ntjump-1"].sort_index()
assert_allclose(res.seasonal, expected.season)
assert_allclose(res.trend, expected.trend)
assert_allclose(res.weights, expected.rw)
def test_parameter_checks_period(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
endog = class_kwargs["endog"]
endog2 = np.hstack((endog[:, None], endog[:, None]))
period = class_kwargs["period"]
with pytest.raises(ValueError, match="y must be a 1d array"):
STL(endog=endog2, period=period)
match = "period must be a positive integer >= 2"
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=1)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=-12)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=4.0)
def test_parameter_checks_seasonal(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
endog = class_kwargs["endog"]
period = class_kwargs["period"]
match = "seasonal must be an odd positive integer >= 3"
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, seasonal=2)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, seasonal=-7)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, seasonal=13.0)
def test_parameter_checks_trend(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
endog = class_kwargs["endog"]
period = class_kwargs["period"]
match = "trend must be an odd positive integer >= 3 where trend > period"
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, trend=14)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, trend=11)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, trend=-19)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, trend=19.0)
def test_parameter_checks_low_pass(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
endog = class_kwargs["endog"]
period = class_kwargs["period"]
match = (
"low_pass must be an odd positive integer >= 3 where"
" low_pass > period"
)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, low_pass=14)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, low_pass=7)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, low_pass=-19)
with pytest.raises(ValueError, match=match):
STL(endog=endog, period=period, low_pass=19.0)
def test_jump_errors(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
endog = class_kwargs["endog"]
period = class_kwargs["period"]
with pytest.raises(ValueError, match="low_pass_jump must be a positve"):
STL(endog=endog, period=period, low_pass_jump=0)
with pytest.raises(ValueError, match="low_pass_jump must be a positve"):
STL(endog=endog, period=period, low_pass_jump=1.0)
with pytest.raises(ValueError, match="seasonal_jump must be a positve"):
STL(endog=endog, period=period, seasonal_jump=0)
with pytest.raises(ValueError, match="seasonal_jump must be a positve"):
STL(endog=endog, period=period, seasonal_jump=1.0)
with pytest.raises(ValueError, match="trend_jump must be a positve"):
STL(endog=endog, period=period, trend_jump=0)
with pytest.raises(ValueError, match="trend_jump must be a positve"):
STL(endog=endog, period=period, trend_jump=1.0)
def test_defaults_smoke(default_kwargs, robust):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs, robust)
endog = class_kwargs["endog"]
period = class_kwargs["period"]
mod = STL(endog=endog, period=period)
mod.fit()
def test_pandas(default_kwargs, robust):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs, robust)
endog = pd.Series(class_kwargs["endog"], name="y")
period = class_kwargs["period"]
mod = STL(endog=endog, period=period)
res = mod.fit()
assert isinstance(res.trend, pd.Series)
assert isinstance(res.seasonal, pd.Series)
assert isinstance(res.resid, pd.Series)
assert isinstance(res.weights, pd.Series)
def test_period_detection(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
mod = STL(**class_kwargs)
res = mod.fit()
del class_kwargs["period"]
endog = class_kwargs["endog"]
index = pd.date_range("1-1-1959", periods=348, freq="M")
class_kwargs["endog"] = pd.Series(endog, index=index)
mod = STL(**class_kwargs)
res_implicit_period = mod.fit()
assert_allclose(res.seasonal, res_implicit_period.seasonal)
def test_no_period(default_kwargs):
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
del class_kwargs["period"]
class_kwargs["endog"] = pd.Series(class_kwargs["endog"])
with pytest.raises(ValueError, match="Unable to determine period from"):
STL(**class_kwargs)
@pytest.mark.matplotlib
def test_plot(default_kwargs):
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs)
res = STL(**class_kwargs).fit(outer_iter=outer, inner_iter=inner)
res.plot()
class_kwargs["endog"] = pd.Series(class_kwargs["endog"], name="CO2")
res = STL(**class_kwargs).fit()
res.plot()
def test_default_trend(default_kwargs):
# GH 6686
class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
class_kwargs["seasonal"] = 17
class_kwargs["trend"] = None
mod = STL(**class_kwargs)
period = class_kwargs["period"]
seasonal = class_kwargs["seasonal"]
expected = int(np.ceil(1.5 * period / (1 - 1.5 / seasonal)))
expected += 1 if expected % 2 == 0 else 0
assert mod.config["trend"] == expected
class_kwargs["seasonal"] = 7
mod = STL(**class_kwargs)
period = class_kwargs["period"]
seasonal = class_kwargs["seasonal"]
expected = int(np.ceil(1.5 * period / (1 - 1.5 / seasonal)))
expected += 1 if expected % 2 == 0 else 0
assert mod.config["trend"] == expected
def test_pickle(default_kwargs):
class_kwargs, outer, inner = _to_class_kwargs(default_kwargs)
mod = STL(**class_kwargs)
res = mod.fit()
pkl = pickle.dumps(mod)
reloaded = pickle.loads(pkl)
res2 = reloaded.fit()
assert_allclose(res.trend, res2.trend)
assert_allclose(res.seasonal, res2.seasonal)
assert mod.config == reloaded.config
|
{"hexsha": "7321e0e781a6de50798fb5053cb80a64863523bc", "size": 11117, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/Lib/site-packages/statsmodels/tsa/tests/test_stl.py", "max_stars_repo_name": "EkremBayar/bayar", "max_stars_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-07-07T17:49:20.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-07T17:49:20.000Z", "max_issues_repo_path": "venv/Lib/site-packages/statsmodels/tsa/tests/test_stl.py", "max_issues_repo_name": "EkremBayar/bayar", "max_issues_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "venv/Lib/site-packages/statsmodels/tsa/tests/test_stl.py", "max_forks_repo_name": "EkremBayar/bayar", "max_forks_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-09-23T02:33:54.000Z", "max_forks_repo_forks_event_max_datetime": "2016-09-23T02:33:54.000Z", "avg_line_length": 33.2844311377, "max_line_length": 77, "alphanum_fraction": 0.683637672, "include": true, "reason": "import numpy,from numpy,from statsmodels", "num_tokens": 2888}
|
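For orientation, the statsmodels `STL` API exercised by these tests can be used directly as below; the synthetic monthly series is illustrative, any series with a seasonal period works:

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import STL

# A synthetic monthly series: 12-month cycle plus a slow drift and noise.
rng = np.random.default_rng(0)
idx = pd.date_range("1959-01-01", periods=348, freq="M")
y = pd.Series(
    np.sin(2 * np.pi * np.arange(348) / 12)
    + 0.01 * np.arange(348)
    + rng.normal(scale=0.1, size=348),
    index=idx,
)

# With a DatetimeIndex the period can be inferred; the tests pass it explicitly.
res = STL(y, period=12, seasonal=35, robust=True).fit()
components = res.trend, res.seasonal, res.resid, res.weights  # asserted on above
```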
from torch.utils import data
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import json
import train_weak_supervision.config as config
from src.utils.data_manipulation import resize, resize_generated, normalize_mean_variance
from src.utils.data_manipulation import generate_affinity, generate_target, generate_target_others
DEBUG = False
class DataLoaderMIX(data.Dataset):
"""
Dataloader to train weak-supervision providing a mix of SynthText and the dataset
"""
def __init__(self, type_, iteration):
self.type_ = type_
self.base_path_synth = config.DataLoaderSYNTH_base_path
self.base_path_other_images = config.Other_Dataset_Path + '/Images/' + type_
self.base_path_other_gt = config.Other_Dataset_Path + '/Generated/' + str(iteration)
if config.prob_synth != 0:
print('Loading Synthetic dataset')
        if DEBUG:  # Set DEBUG = True above to run on a small subset of SynthText
if not os.path.exists('cache.pkl'):
# Create cache of 1000 samples if it does not exist
with open('cache.pkl', 'wb') as f:
import pickle
from scipy.io import loadmat
mat = loadmat(config.DataLoaderSYNTH_mat)
pickle.dump([mat['imnames'][0][0:1000], mat['charBB'][0][0:1000], mat['txt'][0][0:1000]], f)
print('Created the pickle file, rerun the program')
exit(0)
else:
# Read the Cache
with open('cache.pkl', 'rb') as f:
import pickle
self.imnames, self.charBB, self.txt = pickle.load(f)
print('Loaded DEBUG')
else:
from scipy.io import loadmat
                mat = loadmat(config.DataLoaderSYNTH_mat)  # Loads the MATLAB .mat file as a dictionary of numpy arrays
# Read documentation of how synth-text dataset is stored to understand the processing at
# http://www.robots.ox.ac.uk/~vgg/data/scenetext/readme.txt
total_number = mat['imnames'][0].shape[0]
train_images = int(total_number * 0.9)
if self.type_ == 'train':
self.imnames = mat['imnames'][0][0:train_images]
self.charBB = mat['charBB'][0][0:train_images] # number of images, 2, 4, num_character
self.txt = mat['txt'][0][0:train_images]
else:
self.imnames = mat['imnames'][0][train_images:]
self.charBB = mat['charBB'][0][train_images:] # number of images, 2, 4, num_character
self.txt = mat['txt'][0][train_images:]
for no, i in enumerate(self.txt):
all_words = []
for j in i:
all_words += [k for k in ' '.join(j.split('\n')).split() if k != '']
                    # Collect all words from the paragraph-like text entries in SynthText
self.txt[no] = all_words
self.gt = []
for no, i in enumerate(sorted(os.listdir(self.base_path_other_gt))):
with open(self.base_path_other_gt+'/'+i, 'r') as f:
self.gt.append([i[:-5], json.load(f)])
def __getitem__(self, item_i):
# noinspection PyArgumentList
check = np.random.uniform()
if check < config.prob_synth and self.type_ == 'train':
# probability of picking a Synth-Text image vs Image from dataset
random_item = np.random.randint(len(self.imnames))
character = self.charBB[random_item].copy()
image = plt.imread(self.base_path_synth+'/'+self.imnames[random_item][0]) # Read the image
if len(image.shape) == 2:
image = np.repeat(image[:, :, None], repeats=3, axis=2)
elif image.shape[2] == 1:
image = np.repeat(image, repeats=3, axis=2)
else:
image = image[:, :, 0: 3]
height, width, channel = image.shape
image, character = resize(image, character) # Resize the image to (768, 768)
image = normalize_mean_variance(image).transpose(2, 0, 1)
# Generate character heatmap with weights
weight_character, weak_supervision_char = generate_target(image.shape, character.copy(), weight=1)
# Generate affinity heatmap with weights
weight_affinity, weak_supervision_affinity = generate_affinity(
image.shape, character.copy(),
self.txt[random_item].copy(),
weight=1)
dataset_name = 'SYNTH'
text_target = ''
else:
random_item = np.random.randint(len(self.gt))
image = plt.imread(self.base_path_other_images+'/'+self.gt[random_item][0]) # Read the image
if len(image.shape) == 2:
image = np.repeat(image[:, :, None], repeats=3, axis=2)
elif image.shape[2] == 1:
image = np.repeat(image, repeats=3, axis=2)
else:
image = image[:, :, 0: 3]
height, width, channel = image.shape
character = [
np.array(word_i).reshape([len(word_i), 4, 1, 2]) for word_i in self.gt[random_item][1]['characters'].copy()]
affinity = [
np.array(word_i).reshape([len(word_i), 4, 1, 2]) for word_i in self.gt[random_item][1]['affinity'].copy()]
assert len(character) == len(affinity), 'word length different in character and affinity'
# Resize the image to (768, 768)
image, character, affinity = resize_generated(image, character.copy(), affinity.copy())
image = normalize_mean_variance(image).transpose(2, 0, 1)
weights = np.array(self.gt[random_item][1]['weights'])
text_target = '#@#@#@'.join(self.gt[random_item][1]['text'])
assert len(self.gt[random_item][1]['text']) == len(self.gt[random_item][1]['word_bbox']), \
'Length of word_bbox != Length of text'
# Generate character heatmap with weights
weight_character, weak_supervision_char = generate_target_others(
image.shape, character.copy(), weights[:, 0].tolist())
# Generate affinity heatmap with weights
weight_affinity, weak_supervision_affinity = generate_target_others(
image.shape, affinity.copy(), weights[:, 1].tolist(), type_='aff')
# Get original word_bbox annotations
dataset_name = 'ICDAR'
return \
image.astype(np.float32), \
weight_character.astype(np.float32), \
weight_affinity.astype(np.float32), \
weak_supervision_char.astype(np.float32), \
weak_supervision_affinity.astype(np.float32), \
dataset_name, \
text_target, \
random_item, \
np.array([height, width])
def __len__(self):
if self.type_ == 'train':
return config.iterations
else:
return len(self.gt)
class DataLoaderEvalOther(data.Dataset):
"""
ICDAR 2013 dataloader
"""
def __init__(self, type_):
self.type_ = type_
if self.type_ == 'train':
self.base_path = config.Other_Dataset_Path + '/Images/'
else:
self.base_path = config.Test_Dataset_Path + '/Images/'
with open(self.base_path + self.type_ + '_gt.json', 'r') as f:
self.gt = json.load(f)
self.imnames = sorted(self.gt['annots'].keys())
self.unknown = self.gt['unknown']
def __getitem__(self, item):
"""
Function to read, resize and pre-process the image from the icdar 2013 dataset
:param item:
:return:
"""
image = plt.imread(self.base_path+self.type_+'/'+self.imnames[item])
if len(image.shape) == 2:
image = np.repeat(image[:, :, None], repeats=3, axis=2)
elif image.shape[2] == 1:
image = np.repeat(image, repeats=3, axis=2)
else:
image = image[:, :, 0: 3]
height, width, channel = image.shape
max_side = max(height, width)
new_resize = (int(width / max_side * 768), int(height / max_side * 768))
image = cv2.resize(image, new_resize)
big_image = np.ones([768, 768, 3], dtype=np.float32) * np.mean(image)
big_image[
(768 - image.shape[0]) // 2: (768 - image.shape[0]) // 2 + image.shape[0],
(768 - image.shape[1]) // 2: (768 - image.shape[1]) // 2 + image.shape[1]] = image
big_image = normalize_mean_variance(big_image).transpose(2, 0, 1)
return big_image.astype(np.float32), self.imnames[item], np.array([height, width]), item
def __len__(self):
return len(self.imnames)
|
{"hexsha": "ae96b044a70f2c70a98e38fece94c94316307c10", "size": 7570, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_weak_supervision/dataloader.py", "max_stars_repo_name": "ds-brx/CRAFT-Remade", "max_stars_repo_head_hexsha": "1d5d990ebdca44582cd5271f13c46a27b79e0249", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 180, "max_stars_repo_stars_event_min_datetime": "2019-07-23T15:07:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T00:48:59.000Z", "max_issues_repo_path": "train_weak_supervision/dataloader.py", "max_issues_repo_name": "ds-brx/CRAFT-Remade", "max_issues_repo_head_hexsha": "1d5d990ebdca44582cd5271f13c46a27b79e0249", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2019-07-25T15:00:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:54:41.000Z", "max_forks_repo_path": "train_weak_supervision/dataloader.py", "max_forks_repo_name": "ds-brx/CRAFT-Remade", "max_forks_repo_head_hexsha": "1d5d990ebdca44582cd5271f13c46a27b79e0249", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 52, "max_forks_repo_forks_event_min_datetime": "2019-08-15T15:58:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T14:21:41.000Z", "avg_line_length": 31.2809917355, "max_line_length": 112, "alphanum_fraction": 0.6780713342, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2106}
|
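The grayscale/alpha handling above is repeated three times across both dataloaders; a small helper (hypothetical, not part of the repo) would capture the same logic in one place:

```python
import numpy as np

def ensure_rgb(image):
    """Coerce an image array to 3-channel RGB, mirroring the repeated
    branches in DataLoaderMIX.__getitem__ and DataLoaderEvalOther.__getitem__."""
    if len(image.shape) == 2:                 # grayscale (H, W)
        return np.repeat(image[:, :, None], repeats=3, axis=2)
    elif image.shape[2] == 1:                 # single channel (H, W, 1)
        return np.repeat(image, repeats=3, axis=2)
    return image[:, :, 0:3]                   # drop alpha if present
```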
# Adapted from: https://gitlab.tubit.tu-berlin.de/rsim/bigearthnet-19-models/blob/93c3d398341a5ccc2c4f61d64e702901cd9340ab/prep_splits_BigEarthNet-19.py
import argparse
import os
import csv
import json
import pathlib
import gdal
import tensorflow as tf
import numpy as np
# Spectral band names to read related GeoTIFF files
band_names = ['B01', 'B02', 'B03', 'B04', 'B05',
'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12']
def fix_incomplete_data(band, dimension):
raveled = np.ravel(band)
if len(raveled) == dimension * dimension:
return raveled
elif len(band) != dimension:
return np.pad(raveled, (0, ((dimension * dimension) - len(raveled))))
else:
return np.ravel(np.vstack([np.pad(row, (0, dimension - len(row))) for row in band]))
def prep_example(bands, labels, labels_multi_hot, patch_name):
return tf.train.Example(
features=tf.train.Features(
feature={
'B01': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B01'], 20))),
'B02': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B02'], 120))),
'B03': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B03'], 120))),
'B04': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B04'], 120))),
'B05': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B05'], 60))),
'B06': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B06'], 60))),
'B07': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B07'], 60))),
'B08': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B08'], 120))),
'B8A': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B8A'], 60))),
'B09': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B09'], 20))),
'B11': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B11'], 60))),
'B12': tf.train.Feature(
int64_list=tf.train.Int64List(value=fix_incomplete_data(bands['B12'], 60))),
'BigEarthNet-19_labels': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[i.encode('utf-8') for i in labels])),
'BigEarthNet-19_labels_multi_hot': tf.train.Feature(
int64_list=tf.train.Int64List(value=labels_multi_hot)),
'patch_name': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[patch_name.encode('utf-8')]))
}))
def create_tfrecord(directory):
root_folder = f"{directory}/patches/"
patch_names = os.listdir(root_folder)
TFRecord_writer = tf.python_io.TFRecordWriter(f"{directory}/record.tfrecord")
progress_bar = tf.contrib.keras.utils.Progbar(target = len(patch_names))
for patch_idx, patch_name in enumerate(patch_names):
patch_folder_path = os.path.join(root_folder, patch_name)
bands = {}
for band_name in band_names:
# First finds related GeoTIFF path and reads values as an array
band_path = os.path.join(
patch_folder_path, patch_name + '_' + band_name + '.tif')
band_ds = gdal.Open(band_path, gdal.GA_ReadOnly)
raster_band = band_ds.GetRasterBand(1)
band_data = raster_band.ReadAsArray()
bands[band_name] = np.array(band_data)
BigEarthNet_19_labels = []
BigEarthNet_19_labels_multi_hot = np.zeros(19,dtype=int)
example = prep_example(
bands,
BigEarthNet_19_labels,
BigEarthNet_19_labels_multi_hot,
patch_name
)
TFRecord_writer.write(example.SerializeToString())
progress_bar.update(patch_idx)
TFRecord_writer.close()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Creates TFRecord files for BigEarthNet-19 patches')
    parser.add_argument('-d', '--dir', dest='directory',
                        help='path to the directory containing a "patches/" folder')
args = parser.parse_args()
absolute_dir = pathlib.Path(args.directory).resolve()
create_tfrecord(absolute_dir)
|
{"hexsha": "20b2ef7f354fd92b3dd6a183a6672f5af3c97624", "size": 4881, "ext": "py", "lang": "Python", "max_stars_repo_path": "prepare_data.py", "max_stars_repo_name": "tommygod3/fyp-scripts", "max_stars_repo_head_hexsha": "603e4f5025f297c2242f23b1ed56991606dff31d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "prepare_data.py", "max_issues_repo_name": "tommygod3/fyp-scripts", "max_issues_repo_head_hexsha": "603e4f5025f297c2242f23b1ed56991606dff31d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prepare_data.py", "max_forks_repo_name": "tommygod3/fyp-scripts", "max_forks_repo_head_hexsha": "603e4f5025f297c2242f23b1ed56991606dff31d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4857142857, "max_line_length": 152, "alphanum_fraction": 0.5925015366, "include": true, "reason": "import numpy", "num_tokens": 1133}
|
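A quick self-check of `fix_incomplete_data` exercises its three padding paths. It assumes the function above is importable from `prepare_data.py`; note that importing the module also pulls in `gdal` and TensorFlow:

```python
import numpy as np
from prepare_data import fix_incomplete_data  # the script above

# Path 1: complete band -> returned flattened, unchanged length
assert fix_incomplete_data(np.ones((3, 3), dtype=int), 3).shape == (9,)

# Path 2: wrong number of rows -> flattened, then zero-padded to dimension**2
assert fix_incomplete_data(np.ones((2, 3), dtype=int), 3).shape == (9,)

# Path 3: right row count but short rows -> each row zero-padded, then flattened
assert fix_incomplete_data(np.ones((3, 2), dtype=int), 3).shape == (9,)
```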
# k-means
## sklearn.cluster.KMeans
# related: hierarchical clustering analysis and EMSC (Extended Multiplicative Scattering Correction)
## https://scikit-learn.org/stable/modules/clustering.html#hierarchical-clustering
## https://github.com/RPCausin/EMSC/blob/master/EMSC.py: (Bassan, Konevskikh)
import enum
from typing import Union
import numpy as np
from sklearn.cluster import KMeans
from xicam.plugins.operationplugin import operation, categories, display_name, output_names, visible, input_names, intent
from xicam.core.intents import PlotIntent, ImageIntent
class init(enum.Enum):
k_means = 'k-means++'
random = 'random'
class algorithm(enum.Enum):
auto = 'auto'
full = 'full'
elkan = 'elkan'
@operation
# @categories('Clustering')
@categories(('Spectral Imaging', 'Clustering'))
@display_name('K-means Clustering')
@output_names('data')
@visible('data', False)
@input_names('data', 'Number of clusters', 'Init method', 'Number of init run', 'Max iter', 'Tolerance', 'Verbose', 'Copy', 'algorithm', 'Random State')
@intent(ImageIntent, 'K-means clusters', output_map={'image':'data'})
def kmeans(data: np.ndarray, n_clusters: int = 8, init: init = 'k-means++',
           n_init: int = 10, max_iter: int = 300, tol: float = 0.0001,
           verbose: int = 0, copy_x: bool = True, algorithm: algorithm = 'auto',
           random_state: int = None):
    km = KMeans(n_clusters, init=init, n_init=n_init, max_iter=max_iter, tol=tol,
                verbose=verbose, copy_x=copy_x, algorithm=algorithm,
                random_state=random_state)
row, col, n_w = data.shape
data = np.asarray(data).reshape(-1, n_w)
km.fit(data)
labels = km.predict(data)
return labels.reshape(row, col)
|
{"hexsha": "6d534a1b0372ba6d2aa5637623ab600c8b9dc442", "size": 1636, "ext": "py", "lang": "Python", "max_stars_repo_path": "xicam/spectral/operations/clustering.py", "max_stars_repo_name": "lchen23/Xi-cam.spectral", "max_stars_repo_head_hexsha": "db3046f269443a58a6a4958f3092bfad8b71dcfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xicam/spectral/operations/clustering.py", "max_issues_repo_name": "lchen23/Xi-cam.spectral", "max_issues_repo_head_hexsha": "db3046f269443a58a6a4958f3092bfad8b71dcfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xicam/spectral/operations/clustering.py", "max_forks_repo_name": "lchen23/Xi-cam.spectral", "max_forks_repo_head_hexsha": "db3046f269443a58a6a4958f3092bfad8b71dcfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2162162162, "max_line_length": 204, "alphanum_fraction": 0.7377750611, "include": true, "reason": "import numpy", "num_tokens": 444}
|
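Stripped of the Xi-cam plugin decorators, the core pattern in `kmeans` (flatten a `(row, col, bands)` cube to per-pixel spectra, cluster, then reshape the labels back into an image) looks like this; the cube here is random, for illustration:

```python
import numpy as np
from sklearn.cluster import KMeans

data = np.random.rand(32, 32, 10)       # hypothetical hyperspectral cube
row, col, n_w = data.shape
pixels = data.reshape(-1, n_w)          # (row*col, bands)
labels = KMeans(n_clusters=8, n_init=10).fit_predict(pixels)
cluster_map = labels.reshape(row, col)  # per-pixel cluster image
```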
### A Pluto.jl notebook ###
# v0.14.5
using Markdown
using InteractiveUtils
# ╔═╡ 774b5c68-c2e8-11eb-0c25-c124276640d6
begin
using Revise
using Pkg
Pkg.activate(tempname())
Pkg.develop(url="https://github.com/manuelbb-upb/RadialBasisFunctionModels.jl.git")
Pkg.add("Flux")
Pkg.add("MLDatasets")
using Flux, MLDatasets
using Random, Statistics
using RadialBasisFunctionModels
end
# ╔═╡ 8c51b60d-e1b5-439d-ab67-ef7813490684
using Flux.Optimise: update!
# ╔═╡ ec8e09e0-2238-4314-baf4-1209794bb4e4
md"Create some test data: `N_d` is the total number of samples."
# ╔═╡ a974f4de-47b2-4981-9381-717abd8ccda2
begin
F = x -> [
sum(x.^2);
exp(x[1]) + sum(x);
sin( sum(x)/(1+sum(x)) )
]
N_d = 100
features = [ rand(3) for i = 1 : N_d ]
labels = F.(features)
data_indices = eachindex(features)
# training data
training_percentage = .8
num_training = ceil(Int, N_d * training_percentage )
num_test = N_d - num_training
train_indices = randperm(N_d)[1:num_training]
X_train = features[ train_indices ]
Y_train = labels[ train_indices ]
test_indices = setdiff( data_indices, train_indices )
X_test = features[ test_indices ]
Y_test = labels[ test_indices ]
n_rbf_centers = ceil(Int, num_training/5)
center_indices = randperm(num_training)[ 1 : n_rbf_centers ]
nothing
end
# ╔═╡ 4b0db5f4-77cb-4391-a3bb-aae7d3c1c140
md"Build a model by chaining a dense neural net and an RBF Layer."
# ╔═╡ 1f80e0a3-71a3-483d-acc5-93c032655d4e
begin
function get_rbf_model( m̃ )
global X_train, Y_train
X̃ = m̃.( X_train )
rbf_centers = X̃[ center_indices ]
return RBFModel( X̃, Y_train, Multiquadric(), -1; centers = rbf_centers )
end
function get_model1()
n_out = 8
layer1 = Dense(3, 10)
layer2 = Dense(10, n_out)
m̃ = Chain( layer1, layer2 )
r = get_rbf_model( m̃ )
return Chain(m̃, r)
end
end
# ╔═╡ da1f41c7-12dd-4c8f-b267-a74bf99bc73b
begin
batch_size = 10
train_loader = Flux.Data.DataLoader((X_train, Y_train); batchsize = batch_size)
end
# ╔═╡ 44b9ab7e-c52e-4faa-b925-12eaf7c734da
Markdown.parse("""
## Minibatch Training
The training data is split into batches of size ``N_b = $(batch_size)``.
Suppose the labels have dimension ``k``.
The loss is calculated over the batch ``Y``:
```math
L(Y) = \\frac{1}{N_b} \\sum_{y_i ∈ Y} \\frac{1}{k} \\sum_{ℓ=1}^k (y_{i,ℓ} - \\hat{y}_{i,ℓ})^2
```
""")
# ╔═╡ 5cdfe543-d773-41ec-8840-2c79b767fb27
md"Training without affecting the rbf layer at all."
# ╔═╡ 10dc99e7-028b-4378-a549-c32b49f14a65
Flux.trainable(::RBFModel) = ()
# ╔═╡ 2ad4a6ab-57ff-45d6-9087-73bae6e81aff
begin
opt = ADAM()
end
# ╔═╡ c8f1ce7d-a17f-4d13-acb5-e03d6002a273
begin
m = get_model1()
loss_sample = (x, y) -> Flux.Losses.mse(m(x), y);
loss_batch = function( X, Y )
mean( loss_sample.(X,Y) )
end
ps = params(m) # not affecting RBF Layer
test_loss_before = loss_batch( X_test, Y_test )
Flux.@epochs 2 for (X_batch, Y_batch) ∈ train_loader
loss_val, pback = Flux.Zygote.pullback(ps) do
loss_batch(X_batch, Y_batch)
end
@show loss_val
gs = pback( one(loss_val) )
update!(opt, ps, gs )
end
test_loss_after = loss_batch( X_test, Y_test )
Markdown.parse("$(test_loss_before) -> \n$(test_loss_after)")
end
# ╔═╡ 80b2d7ed-1cb6-46b1-a3da-17e19b434569
begin
m̃ = get_model1()[1]
loss_whole_chain = function(X,Y)
local m
rbf = get_rbf_model( m̃ )
m = Chain( m̃, rbf )
loss_sample = (x, y) -> Flux.Losses.mse(m(x), y);
return mean( loss_sample.(X,Y) )
end
ps_2 = params(m̃) # not affecting RBF Layer
test_loss_before_2 = loss_whole_chain( X_test, Y_test )
Flux.@epochs 1 for (X_batch, Y_batch) ∈ train_loader
loss_val, pback = Flux.Zygote.pullback(ps_2) do
loss_whole_chain(X_batch, Y_batch)
end
@show loss_val
gs = pback( one(loss_val) )
update!(opt, ps_2, gs )
end
test_loss_after_2 = loss_whole_chain( X_test, Y_test )
Markdown.parse("$(test_loss_before_2) -> \n$(test_loss_after_2)")
end
# ╔═╡ 0123fe9d-352e-453c-8482-08c9230da47f
test_l = function(x)
M = RBFModel( X_train, Y_train; centers = X_train[center_indices] )
M(x, 1)
end
# ╔═╡ ed05ca08-9da5-48ad-8795-5ae6f3ee07dd
Flux.Zygote.gradient( test_l, rand(3))
# ╔═╡ 9f8ae0a4-adfb-49a9-8591-b3ba8a2f3409
M = RBFModel( X_train, Y_train; centers = X_train[center_indices] )
# ╔═╡ 589e6360-5d11-494b-9081-5ea30111b052
auto_grad(M, rand(3))
# ╔═╡ Cell order:
# ╠═774b5c68-c2e8-11eb-0c25-c124276640d6
# ╠═8c51b60d-e1b5-439d-ab67-ef7813490684
# ╟─ec8e09e0-2238-4314-baf4-1209794bb4e4
# ╠═a974f4de-47b2-4981-9381-717abd8ccda2
# ╟─4b0db5f4-77cb-4391-a3bb-aae7d3c1c140
# ╠═1f80e0a3-71a3-483d-acc5-93c032655d4e
# ╟─44b9ab7e-c52e-4faa-b925-12eaf7c734da
# ╠═da1f41c7-12dd-4c8f-b267-a74bf99bc73b
# ╟─5cdfe543-d773-41ec-8840-2c79b767fb27
# ╠═10dc99e7-028b-4378-a549-c32b49f14a65
# ╠═2ad4a6ab-57ff-45d6-9087-73bae6e81aff
# ╠═c8f1ce7d-a17f-4d13-acb5-e03d6002a273
# ╠═80b2d7ed-1cb6-46b1-a3da-17e19b434569
# ╠═0123fe9d-352e-453c-8482-08c9230da47f
# ╠═ed05ca08-9da5-48ad-8795-5ae6f3ee07dd
# ╠═9f8ae0a4-adfb-49a9-8591-b3ba8a2f3409
# ╠═589e6360-5d11-494b-9081-5ea30111b052
|
{"hexsha": "c61bfaf1f3573ed5a8cb513453a9857b880e5bcd", "size": 5062, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Flux_Notebook.jl", "max_stars_repo_name": "manuelbb-upb/RBFModels.jl", "max_stars_repo_head_hexsha": "d321761fd58c88a1b11f6f1cf0e82b6ab64531e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Flux_Notebook.jl", "max_issues_repo_name": "manuelbb-upb/RBFModels.jl", "max_issues_repo_head_hexsha": "d321761fd58c88a1b11f6f1cf0e82b6ab64531e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-06-18T00:28:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T07:46:46.000Z", "max_forks_repo_path": "test/Flux_Notebook.jl", "max_forks_repo_name": "manuelbb-upb/RadialBasisFunctionModels.jl", "max_forks_repo_head_hexsha": "d321761fd58c88a1b11f6f1cf0e82b6ab64531e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5656565657, "max_line_length": 95, "alphanum_fraction": 0.7058474911, "num_tokens": 2193}
|
import numpy as np
class BatchIterator:
def __init__(self, inputs, labels, batch_size):
self.inputs = inputs
self.labels = labels
self.batch_size = batch_size
self.size = self.inputs.shape[0]
self.epochs = 0
self.cursor = 0
self.shuffle()
def shuffle(self):
self.indices = np.random.permutation(self.size)
self.cursor = 0
def next_batch(self):
batch_inputs, batch_labels = self.shuffled_batch()
self.cursor += self.batch_size
if self.cursor + self.batch_size - 1 >= self.size:
self.epochs += 1
self.shuffle()
return batch_inputs, batch_labels
def shuffled_batch(self):
batch_indices = self.indices[self.cursor:self.cursor + self.batch_size]
return self.inputs[batch_indices], self.labels[batch_indices]
|
{"hexsha": "6edbbbeae1dfd4b86ca991a620f56895db850f92", "size": 880, "ext": "py", "lang": "Python", "max_stars_repo_path": "batch_iterator.py", "max_stars_repo_name": "suleymanaslan/open-set-identification-gan", "max_stars_repo_head_hexsha": "8ffa37790134394aaac9ed87d30d341c02f7228a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "batch_iterator.py", "max_issues_repo_name": "suleymanaslan/open-set-identification-gan", "max_issues_repo_head_hexsha": "8ffa37790134394aaac9ed87d30d341c02f7228a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "batch_iterator.py", "max_forks_repo_name": "suleymanaslan/open-set-identification-gan", "max_forks_repo_head_hexsha": "8ffa37790134394aaac9ed87d30d341c02f7228a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3448275862, "max_line_length": 79, "alphanum_fraction": 0.6215909091, "include": true, "reason": "import numpy", "num_tokens": 192}
|
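Typical use of `BatchIterator`: it reshuffles itself as soon as another full batch would overrun the data, so `epochs` counts completed passes (module path per the metadata above):

```python
import numpy as np
from batch_iterator import BatchIterator

inputs = np.arange(100, dtype=np.float32).reshape(50, 2)
labels = np.arange(50)
it = BatchIterator(inputs, labels, batch_size=8)

while it.epochs < 2:  # roughly two passes over the data
    batch_inputs, batch_labels = it.next_batch()
    # ... feed the batch to a model here ...
```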
import random
from collections import defaultdict
import copy
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models.keyedvectors import Vocab
from six import iteritems
from sklearn.metrics import auc, f1_score, precision_recall_curve, roc_auc_score
from tqdm import tqdm
from cogdl import options
from cogdl.datasets import build_dataset
from cogdl.models import build_model
from . import BaseTask, register_task
def get_score(embs, node1, node2):
vector1 = embs[int(node1)]
vector2 = embs[int(node2)]
return np.dot(vector1, vector2) / (
np.linalg.norm(vector1) * np.linalg.norm(vector2)
)
def evaluate(embs, true_edges, false_edges):
true_list = list()
prediction_list = list()
for edge in true_edges:
true_list.append(1)
prediction_list.append(get_score(embs, edge[0], edge[1]))
for edge in false_edges:
true_list.append(0)
prediction_list.append(get_score(embs, edge[0], edge[1]))
sorted_pred = prediction_list[:]
sorted_pred.sort()
threshold = sorted_pred[-len(true_edges)]
y_pred = np.zeros(len(prediction_list), dtype=np.int32)
for i in range(len(prediction_list)):
if prediction_list[i] >= threshold:
y_pred[i] = 1
y_true = np.array(true_list)
y_scores = np.array(prediction_list)
ps, rs, _ = precision_recall_curve(y_true, y_scores)
return roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps)
@register_task("multiplex_link_prediction")
class MultiplexLinkPrediction(BaseTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument("--hidden-size", type=int, default=200)
parser.add_argument("--negative-ratio", type=int, default=5)
parser.add_argument("--eval-type", type=str, default='all', nargs='+')
# fmt: on
def __init__(self, args):
super(MultiplexLinkPrediction, self).__init__(args)
dataset = build_dataset(args)
data = dataset[0]
self.data = data
print(self.data.keys)
# self.data = data.cuda()
if hasattr(dataset, 'num_features'):
args.num_features = dataset.num_features
model = build_model(args)
self.model = model
self.patience = args.patience
self.max_epoch = args.max_epoch
self.eval_type = args.eval_type
# edge_list = self.data.edge_index.cpu().numpy()
# edge_list = list(zip(edge_list[0], edge_list[1]))
# self.train_data, self.valid_data, self.test_data = divide_data(edge_list, [0.85, 0.05, 0.10])
# self.valid_data, self.test_data = gen_node_pairs(self.train_data, self.valid_data, self.test_data)
# edge_list = self.data.edge_index.cpu().numpy()
# G = nx.Graph()
# G.add_edges_from(edge_list.T.tolist())
# self.criterion = NEG_loss(
# len(self.data.x),
# args.negative_ratio,
# degree=np.array(list(dict(G.degree()).values())),
# )
# self.optimizer = torch.optim.Adam(
# self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay
# )
def train(self):
total_roc_auc, total_f1_score, total_pr_auc = [], [], []
for key in self.data.train_data.keys():
if self.eval_type == 'all' or key in self.eval_type:
G = nx.Graph()
G.add_edges_from(self.data.train_data[key])
embeddings = self.model.train(G)
embs = dict()
for vid, node in enumerate(G.nodes()):
embs[node] = embeddings[vid]
roc_auc, f1_score, pr_auc = evaluate(embs, self.data.test_data[key][0], self.data.test_data[key][1])
total_roc_auc.append(roc_auc)
total_f1_score.append(f1_score)
total_pr_auc.append(pr_auc)
assert len(total_roc_auc) > 0
roc_auc, f1_score, pr_auc = np.mean(total_roc_auc), np.mean(total_f1_score), np.mean(total_pr_auc)
print(
f"Test ROC-AUC = {roc_auc:.4f}, F1 = {f1_score:.4f}, PR-AUC = {pr_auc:.4f}"
)
return dict(
ROC_AUC=roc_auc,
PR_AUC=pr_auc,
F1=f1_score,
)
|
{"hexsha": "72a82d2509758280dba01e6327e17775c4f9766d", "size": 4378, "ext": "py", "lang": "Python", "max_stars_repo_path": "cogdl/tasks/multiplex_link_prediction.py", "max_stars_repo_name": "awesome-archive/cogdl", "max_stars_repo_head_hexsha": "0a354eaaaf851e7218197508e7e85a81d3fb5753", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-06-03T00:55:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T16:06:56.000Z", "max_issues_repo_path": "cogdl/tasks/multiplex_link_prediction.py", "max_issues_repo_name": "awesome-archive/cogdl", "max_issues_repo_head_hexsha": "0a354eaaaf851e7218197508e7e85a81d3fb5753", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cogdl/tasks/multiplex_link_prediction.py", "max_forks_repo_name": "awesome-archive/cogdl", "max_forks_repo_head_hexsha": "0a354eaaaf851e7218197508e7e85a81d3fb5753", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-06-03T00:55:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T01:14:36.000Z", "avg_line_length": 34.746031746, "max_line_length": 116, "alphanum_fraction": 0.6308816811, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1054}
|
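`get_score` is plain cosine similarity, and `evaluate` picks the decision threshold so that the number of predicted positives equals the number of true edges. On a perfectly separable toy case (assuming `evaluate` from the file above is in scope) all three metrics come out to 1.0:

```python
import numpy as np

embs = {0: np.array([1.0, 0.0]),
        1: np.array([0.9, 0.1]),   # similar to node 0
        2: np.array([0.0, 1.0])}   # orthogonal to node 0
true_edges = [(0, 1)]
false_edges = [(0, 2)]

roc_auc, f1, pr_auc = evaluate(embs, true_edges, false_edges)
print(roc_auc, f1, pr_auc)  # -> 1.0 1.0 1.0
```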
library(rstan)
library(loo)
rstan_options(auto_write = TRUE);
options(mc.cores = parallel::detectCores());
plotProfileFit <- function(fit, data, targetSco, numSamples = 10) {
true_value = extract(fit,'true_value')$true_value;
samplesToPlot = true_value[sample(1:(dim(true_value)[1]),numSamples),];
#Add the raw profile
plotData = t(rbind(subset(data, sco ==targetSco)[,"val"], samplesToPlot));
defaultWidth = 1;
lineWidths = rep.int(1, numSamples + 1);
lineWidths[1] = defaultWidth * 2;
matplot(plotData, lwd = lineWidths, type="l")
}
plotNoChangeFit <- function(fit, data, targetSco) {
relevant_data = subset(data, sco==targetSco);
minTime = min(relevant_data$time)
maxTime = max(relevant_data$time)
smmry = summary(fit)$summary;
val_mean = smmry["true_value","mean"];
val_25 = smmry["true_value","25%"];
val_75 = smmry["true_value","75%"];
val_2_5 = smmry["true_value","2.5%"];
val_97_5 = smmry["true_value","97.5%"];
ribbon1 = data.frame("x" = c(minTime,maxTime),"ymin" = c(val_2_5,val_2_5), "ymax" = c(val_97_5, val_97_5))
ribbon2 = data.frame("x" = c(minTime,maxTime),"ymin" = c(val_25,val_25), "ymax" = c(val_75, val_75))
ribbonAes = aes(x = x, ymin = ymin, ymax = ymax)
ggplot(relevant_data) +
geom_ribbon(mapping = ribbonAes, data = ribbon1, fill = "red", alpha = 0.3) +
geom_ribbon(mapping = ribbonAes, data = ribbon2, fill = "darkred", alpha = 0.3) +
geom_hline(yintercept = val_mean, color = "black", size = 2) +
geom_ribbon(aes(x=time, ymin = val - sd, ymax = val + sd ), alpha = 0.3, fill = "blue") +
geom_ribbon(aes(x=time, ymin = val - sd, ymax = val + sd ), alpha = 0.3, fill = "blue") +
geom_line(aes(x=time, y=val))
}
plotSco <- function(data, targetSco) {
relevant_data = subset(data, sco==targetSco);
ggplot(relevant_data, aes(x=time, y=val, ymin = val - sd, ymax = val + sd )) + geom_ribbon(alpha = 0.3, fill = "blue") + geom_line()
}
fitBySco <- function(long_data, target_sco, model='interpolate.stan', ...) {
relevant_data = subset(long_data, sco == target_sco);
relevant_data = relevant_data[sort.list(relevant_data$time),];
#normalize time
relevant_data$time = (relevant_data$time - min(relevant_data$time)) / (max(relevant_data$time) - min(relevant_data$time));
data = list(numData = length(relevant_data$sco), y = relevant_data[,"val"], sigma = relevant_data[,"sd"], time = relevant_data[,"time"]);
return(stan(file =model, data = data, ...));
}
compareFits <- function(data, target_sco) {
ow <- options("warn");
options(warn = 1);
cat("No change fit\n");
noChangeFit = fitBySco(data, target_sco, 'no-change.stan');
print(plotNoChangeFit(noChangeFit, data, target_sco));
cat("C. synth fit\n");
csynthFit = fitBySco(data, target_sco, 'constant-synthesis-euler.stan', control=list(adapt_delta = 0.98), iter = 5000);
print(plotProfileFit(csynthFit, data, target_sco));
cat("Compare > 0 means csynth better");
options(ow);
compare(loo(extract_log_lik(noChangeFit)), loo(extract_log_lik(csynthFit)));
}
|
{"hexsha": "89e226c1ac890a3a1f773e1a8dde0cca623c391a", "size": 3072, "ext": "r", "lang": "R", "max_stars_repo_path": "stan-experiments/interpolate-funcs.r", "max_stars_repo_name": "cas-bioinf/genexpi-stan", "max_stars_repo_head_hexsha": "1164ff1c44ed967574aace8f629e5315fa70e18b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-08-31T03:06:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T18:26:05.000Z", "max_issues_repo_path": "stan-experiments/interpolate-funcs.r", "max_issues_repo_name": "cas-bioinf/genexpi-stan", "max_issues_repo_head_hexsha": "1164ff1c44ed967574aace8f629e5315fa70e18b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stan-experiments/interpolate-funcs.r", "max_forks_repo_name": "cas-bioinf/genexpi-stan", "max_forks_repo_head_hexsha": "1164ff1c44ed967574aace8f629e5315fa70e18b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8961038961, "max_line_length": 139, "alphanum_fraction": 0.6702473958, "num_tokens": 992}
|
module TimePetriNets
using Requires
include("base.jl")
include("Monoflop.jl")
function __init__()
# Optional dependency for makie-based plotting
@require GraphMakie="1ecd5474-83a3-4783-bb4f-06765db800d2" begin
@require GLMakie="e9467ef8-e4e7-5192-8a1a-b1aee30e663a" begin
include("plotting_makie.jl")
end
end
end
end
|
{"hexsha": "96136384a378957f49ce863852d8169a2397de46", "size": 360, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TimePetriNets/TimePetriNets.jl", "max_stars_repo_name": "jleugeri/TimedAutomata.jl", "max_stars_repo_head_hexsha": "97305371cc4fd150069ea5634663e1b30e3a8987", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TimePetriNets/TimePetriNets.jl", "max_issues_repo_name": "jleugeri/TimedAutomata.jl", "max_issues_repo_head_hexsha": "97305371cc4fd150069ea5634663e1b30e3a8987", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TimePetriNets/TimePetriNets.jl", "max_forks_repo_name": "jleugeri/TimedAutomata.jl", "max_forks_repo_head_hexsha": "97305371cc4fd150069ea5634663e1b30e3a8987", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1764705882, "max_line_length": 69, "alphanum_fraction": 0.7138888889, "num_tokens": 127}
|
// Copyright Louis Dionne 2013-2016
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
#ifndef TEST_SUPPORT_SEQ_HPP
#define TEST_SUPPORT_SEQ_HPP
#include <boost/hana/fwd/at.hpp>
#include <boost/hana/fwd/concept/sequence.hpp>
#include <boost/hana/fwd/core/make.hpp>
#include <boost/hana/fwd/drop_front.hpp>
#include <boost/hana/fwd/fold_left.hpp>
#include <boost/hana/fwd/is_empty.hpp>
#include <boost/hana/fwd/length.hpp>
#include <boost/hana/tuple.hpp>
#include <boost/hana/unpack.hpp>
struct Seq;
template <typename Storage>
struct seq_type {
explicit constexpr seq_type(Storage s) : storage(s) { }
Storage storage;
using hana_tag = Seq;
};
struct seq_t {
template <typename ...Xs>
constexpr decltype(auto) operator()(Xs ...xs) const {
auto storage = boost::hana::make_tuple(xs...);
return seq_type<decltype(storage)>(storage);
}
};
constexpr seq_t seq{};
namespace boost { namespace hana {
//////////////////////////////////////////////////////////////////////////
// Foldable
//
// Define either one to select which MCD is used:
// BOOST_HANA_TEST_FOLDABLE_FOLD_LEFT_MCD
// BOOST_HANA_TEST_FOLDABLE_UNPACK_MCD
// BOOST_HANA_TEST_FOLDABLE_ITERABLE_MCD
//
// If neither is defined, the MCD used is unspecified.
//////////////////////////////////////////////////////////////////////////
#ifdef BOOST_HANA_TEST_FOLDABLE_FOLD_LEFT_MCD
template <>
struct fold_left_impl<Seq> {
template <typename Xs, typename S, typename F>
static constexpr auto apply(Xs xs, S s, F f) {
return hana::fold_left(xs.storage, s, f);
}
template <typename Xs, typename F>
static constexpr auto apply(Xs xs, F f) {
return hana::fold_left(xs.storage, f);
}
};
#elif defined(BOOST_HANA_TEST_FOLDABLE_ITERABLE_MCD)
template <>
struct length_impl<Seq> {
template <typename Xs>
static constexpr auto apply(Xs const& xs) {
return hana::length(xs.storage);
}
};
#else
template <>
struct unpack_impl<Seq> {
template <typename Xs, typename F>
static constexpr auto apply(Xs xs, F f)
{ return hana::unpack(xs.storage, f); }
};
#endif
//////////////////////////////////////////////////////////////////////////
// Iterable
//////////////////////////////////////////////////////////////////////////
template <>
struct at_impl<Seq> {
template <typename Xs, typename N>
static constexpr decltype(auto) apply(Xs&& xs, N&& n) {
return hana::at(static_cast<Xs&&>(xs).storage, n);
}
};
template <>
struct drop_front_impl<Seq> {
template <typename Xs, typename N>
static constexpr auto apply(Xs xs, N n) {
return hana::unpack(hana::drop_front(xs.storage, n), ::seq);
}
};
template <>
struct is_empty_impl<Seq> {
template <typename Xs>
static constexpr auto apply(Xs xs) {
return hana::is_empty(xs.storage);
}
};
//////////////////////////////////////////////////////////////////////////
// Sequence
//////////////////////////////////////////////////////////////////////////
template <>
struct Sequence<Seq> {
static constexpr bool value = true;
};
template <>
struct make_impl<Seq> {
template <typename ...Xs>
static constexpr auto apply(Xs&& ...xs) {
return ::seq(static_cast<Xs&&>(xs)...);
}
};
}} // end namespace boost::hana
#endif // !TEST_SUPPORT_SEQ_HPP
|
{"hexsha": "2819161dfb541349753298fe92722f3a471a001c", "size": 3832, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "thirdparty-cpp/boost_1_62_0/libs/hana/test/_include/support/seq.hpp", "max_stars_repo_name": "nxplatform/nx-mobile", "max_stars_repo_head_hexsha": "0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-12-15T19:57:24.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-15T19:57:24.000Z", "max_issues_repo_path": "thirdparty-cpp/boost_1_62_0/libs/hana/test/_include/support/seq.hpp", "max_issues_repo_name": "nxplatform/nx-mobile", "max_issues_repo_head_hexsha": "0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thirdparty-cpp/boost_1_62_0/libs/hana/test/_include/support/seq.hpp", "max_forks_repo_name": "nxplatform/nx-mobile", "max_forks_repo_head_hexsha": "0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1544715447, "max_line_length": 82, "alphanum_fraction": 0.5331419624, "num_tokens": 844}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import sys
if sys.version_info >= (3,0):
xrange = range
class MultiFieldPlotter:
"""
This class tries to make it easier to view functions of more than two
variables.
For each set of two variables (or 'fields', since this is part of the
CosmoTransitions package), this class will display a separate subplot in a
managed figure. Each subplot is a different slice through the
multi-dimensional space. By clicking on the subplots, the user can
dynamically change the offsets of the slices in the other subplots.
Parameters
----------
bounds : array_like
A list of ``(xmin, xmax)`` tuples for each dimension.
f : callable
The function to plot. The first argument must accept arrays of shape
``(..., Ndim)``, where `Ndim` is the number of dimensions.
    f_args : tuple, optional
        Extra arguments to pass to `f`.
nx : int, optional
Number of data points to plot in each dimension.
    contour_levs : int or array_like, optional
        If an array, a list of the contour levels to plot. If an int, the
        total number of contour levels across the bounding box (the contour
        levels are then calculated using :func:`calcContourLevels`).
plot_1d : bool, optional
If True, plot one-dimensional plots along with the contours. (not yet
implemented)
plot_flipped : bool, optional
If True, plot the flipped contour for each field (so that the subplots
form a square grid rather than a triangle).
Attributes
----------
figure : matplotlib.figure.Figure
offset : array_like
        Each slice intersects the point given by `offset`. Initially set to
the average of `bounds` and interactively modifiable by clicking on
the plots.
draws_offset : bool
Set to True if the plots should draw the offset point (as intersecting
lines).
Example
-------
The following example will make three contour plots whose offsets can be
changed interactively:
>>> from multi_field_plotting import MultiFieldPlotter
>>> def V(X): # Some potential that looks vaguely interesting
... x,y,z = X[...,0], X[...,1], X[...,2]
... return x*x - x**3 + x*y + y**2 - y*z**2 + z**4
>>> mfp = MultiFieldPlotter([[-1,1.],[-1,1],[-1,1]], V)
"""
def __init__(self, bounds, f, f_args=(), nx=40, contour_levs=50,
plot_1d=False, plot_flipped=False):
self.bounds = np.array(bounds)
self.f = f
self.f_args = f_args
self.nx = nx
        self.contour_levs = np.array(contour_levs)
if len(self.contour_levs.shape) == 0:
self.calcContourLevels(self.contour_levs)
self.plot_1d = plot_1d
self.plot_flipped = plot_flipped
self.figure = plt.figure()
# Make the offset the center of the data bounds
self.offset = np.average(bounds, axis=1)
        self.draws_offset = len(self.bounds) > 2
self.figure.canvas.mpl_connect('button_press_event', self._mouseDown)
self.drawSubplot()
def calcContourLevels(self, num_levs, nx=11):
"""
Find the contour levels which span the bounds. Store in
``self.contour_levs``.
Parameters
----------
num_levs : int
Desired number of contour levels.
nx : int, optional
The number of data points along each dimension that are used to
find the minimum and maximum levels.
"""
Ndim = len(self.bounds)
X = np.empty([nx]*Ndim + [Ndim])
for i in xrange(Ndim):
xmin, xmax = self.bounds[i]
Y = X.swapaxes(i, -2)
Y[...,i] = np.linspace(xmin,xmax,nx)
Z = self.f(X, *self.f_args)
fmin = np.min(Z.ravel())
fmax = np.max(Z.ravel())
df = fmax-fmin
        self.contour_levs = np.linspace(fmin-df*.1, fmax+df*.1, int(num_levs*1.2))
def drawSubplot(self, subplot='all'):
"""
Performs the actual drawing.
Parameters
----------
subplot : (int, int) or 'all'
            The subplot to redraw. If a tuple, it should be field indices of
the x and y axes.
"""
Ndim = len(self.bounds)
if subplot == 'all':
for i in xrange(Ndim):
for j in xrange(Ndim):
self.drawSubplot((i,j))
return
if not self.plot_1d and subplot[0] == subplot[1]:
return
if not self.plot_flipped and subplot[0] > subplot[1]:
return
if self.plot_1d or self.plot_flipped:
nrows_cols = Ndim
plot_num = 1+subplot[0] + nrows_cols*subplot[1]
else:
nrows_cols = Ndim - 1
plot_num = 1+subplot[0] + nrows_cols*(subplot[1]-1)
ax = self.figure.add_subplot(nrows_cols,nrows_cols,plot_num)
ax.clear()
ax.xfield, ax.yfield = subplot
if ax.yfield == Ndim-1:
ax.set_xlabel("$x_%i$" % ax.xfield)
if ax.xfield == 0:
if ax.yfield == 0:
ax.set_ylabel("$f(x_0)$")
else:
ax.set_ylabel("$x_%i$" % ax.yfield)
# Generate the data and make the plot
if ax.xfield == ax.yfield:
pass # 1d_plot
else:
X = np.empty((self.nx, self.nx, Ndim))
X[:] = self.offset
X[:,:,ax.xfield] = np.linspace(
self.bounds[ax.xfield,0],
self.bounds[ax.xfield,1], self.nx
)[:,np.newaxis] * np.ones((self.nx, self.nx))
X[:,:,ax.yfield] = np.linspace(
self.bounds[ax.yfield,0],
self.bounds[ax.yfield,1], self.nx
)[np.newaxis,:] * np.ones((self.nx, self.nx))
Z = self.f(X, *self.f_args)
ax.contour(
X[:,:,ax.xfield], X[:,:,ax.yfield], Z,
self.contour_levs, cmap=plt.cm.Spectral)
# ax.pcolormesh(X[:,:,ax.xfield], X[:,:,ax.yfield], Z,
# cmap=plt.cm.Spectral)
if self.draws_offset:
xbounds = self.bounds[ax.xfield]
ybounds = self.bounds[ax.yfield]
x0 = self.offset[ax.xfield]
y0 = self.offset[ax.yfield]
ax.plot(xbounds, [y0,y0], 'k', lw=1.)
ax.plot([x0,x0], ybounds, 'k', lw=1.)
self.figure.show()
def _mouseDown(self, event):
ax = event.inaxes
if not ax:
return
self.offset[ax.xfield] = event.xdata
self.offset[ax.yfield] = event.ydata
self.drawSubplot()
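# ---------------------------------------------------------------------
# Minimal interactive demo (a sketch mirroring the class docstring
# example; the potential V below is purely illustrative).
# ---------------------------------------------------------------------
if __name__ == '__main__':
    def V(X):  # some potential that looks vaguely interesting
        x, y, z = X[..., 0], X[..., 1], X[..., 2]
        return x*x - x**3 + x*y + y**2 - y*z**2 + z**4
    mfp = MultiFieldPlotter([[-1, 1.], [-1, 1], [-1, 1]], V)
    plt.show()  # keep the figure alive for interactive clicking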
|
{"hexsha": "7ec4f2ee284e4b8a0ea801e388162c3a2eb0f304", "size": 6894, "ext": "py", "lang": "Python", "max_stars_repo_path": "cosmoTransitions/multi_field_plotting.py", "max_stars_repo_name": "ycwu1030/CosmoTransitions", "max_stars_repo_head_hexsha": "dabf3fe02c05d13458571e84398a148aad5aec4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-11-24T15:26:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T09:08:42.000Z", "max_issues_repo_path": "cosmoTransitions/multi_field_plotting.py", "max_issues_repo_name": "ycwu1030/CosmoTransitions", "max_issues_repo_head_hexsha": "dabf3fe02c05d13458571e84398a148aad5aec4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2015-11-17T19:23:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-13T18:54:57.000Z", "max_forks_repo_path": "cosmoTransitions/multi_field_plotting.py", "max_forks_repo_name": "ycwu1030/CosmoTransitions", "max_forks_repo_head_hexsha": "dabf3fe02c05d13458571e84398a148aad5aec4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2015-09-14T03:59:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-05T01:18:44.000Z", "avg_line_length": 37.2648648649, "max_line_length": 80, "alphanum_fraction": 0.5726718886, "include": true, "reason": "import numpy", "num_tokens": 1756}
|
/*
Copyright [2017-2020] [IBM Corporation]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "ado.h"
#include "ado_proto.h"
#include "ado_ipc_proto.h"
#include "ado_proto_buffer.h"
#include "resource_unavailable.h"
#include <common/logging.h>
#include <common/exceptions.h>
#include <common/utils.h>
#include <common/fd_open.h>
#include <common/memory_mapped.h>
#include <common/dump_utils.h>
#include <api/interfaces.h>
#include <nupm/mcas_mod.h>
#include <boost/program_options.hpp>
#include <sys/mman.h>
#include <atomic>
#include <condition_variable>
#include <cstring>
#include <iostream>
#include <mutex>
#include <queue>
#include <map>
#include <sched.h>
#include <cstdio>
#include <cstdlib>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <thread>
#include <unistd.h>
#include <xpmem.h>
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
#ifdef PROFILE
#include <gperftools/profiler.h>
#endif
static constexpr unsigned MAP_LOG_GRAIN = 21U;
static constexpr std::size_t MAP_GRAIN = std::size_t(1) << MAP_LOG_GRAIN;
static constexpr int MAP_HUGE = MAP_LOG_GRAIN << MAP_HUGE_SHIFT;
using namespace component;
/* Note: currently the ADO does not support intake of new memory mappings
resulting from pool expansion - only single segment pools are supported
at the moment.
*/
/* Globals */
namespace global
{
static std::vector<std::tuple<void*, void*, size_t>> shared_memory_mappings;
static void * base_addr = nullptr;
static int64_t base_offset = 0; /* added to shard address gives local address */
}
/* Helpers */
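/* shard_to_local: translate a shard-space virtual address into the
   ADO-local address space by adding global::base_offset, which is
   recorded when the shard's pool memory is mapped (see the MAP_MEMORY
   handling in main below). */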
template <typename T = void>
auto shard_to_local(const void * shard_addr) {
return reinterpret_cast<T*>(reinterpret_cast<addr_t>(shard_addr) + global::base_offset);
}
bool check_xpmem_kernel_module()
{
  int fd = open("/dev/xpmem", O_RDWR, 0666);
  if (fd != -1)
    close(fd);
  return (fd != -1);
}
/**
* Class to manage plugins
*/
class ADO_plugin_mgr
{
public:
explicit ADO_plugin_mgr(const std::vector<std::string>& plugin_vector,
IADO_plugin::Callback_table cb_table)
: _i_plugins{}
{
for(const auto& ppath : plugin_vector) {
_i_plugins.push_back(make_itf_ref
(static_cast<IADO_plugin*>(load_component(ppath.c_str(),
interface::ado_plugin))));
if( ! _i_plugins.back() )
throw General_exception("unable to load ADO plugin (%s)", ppath.c_str());
_i_plugins.back()->register_callbacks(cb_table);
PLOG("ADO: plugin loaded OK! (%s)", ppath.c_str());
}
}
virtual ~ADO_plugin_mgr() {
PLOG("ADO: plugin mgr dtor.");
}
void shutdown() {
for(const auto &i: _i_plugins) i->shutdown();
}
status_t register_mapped_memory(void *shard_vaddr,
void *local_vaddr,
size_t len) {
status_t s = S_OK;
for(const auto &i: _i_plugins) {
s |= i->register_mapped_memory(shard_vaddr,
local_vaddr,
len);
}
return s;
}
status_t do_work(const uint64_t work_key,
const char * key,
size_t key_len,
IADO_plugin::value_space_t& values,
const void *in_work_request,
const size_t in_work_request_len,
const bool new_root,
IADO_plugin::response_buffer_vector_t& response_buffers) {
status_t s = S_OK;
for(const auto &i: _i_plugins) {
s |= i->do_work(work_key, key, key_len, values,
in_work_request,
in_work_request_len,
new_root,
response_buffers);
}
return s;
}
void launch_event(const uint64_t auth_id,
const std::string& pool_name,
const size_t pool_size,
const unsigned int pool_flags,
const unsigned int memory_type,
const size_t expected_obj_count,
const std::vector<std::string>& params) {
for(const auto &i: _i_plugins)
i->launch_event(auth_id,
pool_name,
pool_size,
pool_flags,
memory_type,
expected_obj_count,
params);
}
void notify_op_event(ADO_op op) {
for(const auto &i: _i_plugins)
i->notify_op_event(op);
}
void send_cluster_event(const std::string& sender,
const std::string& type,
const std::string& message) {
for(const auto &i: _i_plugins)
i->cluster_event(sender, type, message);
}
private:
std::vector<component::Itf_ref<IADO_plugin>> _i_plugins;
};
/**
* Main entry point
*
* @param argc
* @param argv
*
* @return
*/
int main(int argc, char* argv[])
{
try
{
std::string plugins, channel_id, base;
unsigned debug_level;
std::string cpu_mask;
std::vector<std::string> ado_params;
bool use_log = false;
try {
namespace po = boost::program_options;
po::options_description desc("Options");
desc.add_options()
("help", "Print help")
("plugins", po::value<std::string>(&plugins)->required(), "ADO plugins")
("channel_id", po::value<std::string>(&channel_id)->required(), "Channel (prefix) identifier")
("debug", po::value<unsigned>(&debug_level)->default_value(0), "Debug level")
("cpumask", po::value<std::string>(&cpu_mask), "Cores to restrict threads to (string form)")
("param", po::value<std::vector<std::string>>(&ado_params), "Plugin parameters")
("base", po::value<std::string>(&base), "Virtual base address for memory mapping into ADO space")
("log", "Redirect output to ado.log")
;
po::variables_map vm;
try {
po::store(po::parse_command_line(argc, argv, desc), vm);
if (vm.count("help")) {
std::cout << desc << std::endl;
return 0;
}
if (vm.count("base")) {
global::base_addr = reinterpret_cast<void*>(strtoull(vm["base"].as<std::string>().c_str(),nullptr, 16));
}
use_log = vm.count("log") > 0;
po::notify(vm);
}
catch (const po::error &e) {
std::cerr << e.what() << std::endl;
std::cerr << desc << std::endl;
return -1;
}
}
catch (const std::exception &e) {
std::cerr << e.what() << std::endl;
return -1;
}
/* configure process to die with parent */
if(prctl(PR_SET_PDEATHSIG, SIGKILL))
throw General_exception("prctl failed");
/* capture output from child process */
if(use_log) {
int fd = ::open("ado.log", O_CREAT|O_WRONLY, 0600);
::dup2(fd, 1);
::dup2(fd, 2);
::close(fd);
PMAJOR("ADO: redirected output to ado.log ...");
}
PMAJOR("ADO: launched");
ADO_protocol_builder ipc(debug_level, channel_id, ADO_protocol_builder::Role::ACCEPT);
PMAJOR("ADO: listening");
/* Callback functions */
auto ipc_create_key =
[&ipc] (const uint64_t work_request_id,
const std::string& key_name,
const size_t value_size,
const uint64_t flags,
void*& out_value_addr,
const char ** out_key_ptr,
component::IKVStore::key_t * out_key_handle) -> status_t
{
status_t rc = S_OK;
ipc.send_table_op_create(work_request_id, key_name, value_size, flags);
ipc.recv_table_op_response(rc, out_value_addr, nullptr /* value len */, out_key_ptr, out_key_handle);
return rc;
};
auto ipc_open_key =
[&ipc] (const uint64_t work_request_id,
const std::string& key_name,
const uint64_t flags,
void*& out_value_addr,
size_t& out_value_len,
const char** out_key_ptr,
component::IKVStore::key_t * out_key_handle) -> status_t
{
status_t rc = S_OK;
ipc.send_table_op_open(work_request_id, key_name, out_value_len, flags);
ipc.recv_table_op_response(rc, out_value_addr, &out_value_len, out_key_ptr, out_key_handle);
return rc;
};
auto ipc_erase_key =
[&ipc] (const std::string& key_name) -> status_t
{
status_t rc = S_OK;
void* na;
ipc.send_table_op_erase(key_name);
ipc.recv_table_op_response(rc, na);
return rc;
};
auto ipc_resize_value =
[&ipc] (const uint64_t work_request_id,
const std::string& key_name,
const size_t new_value_size,
void*& out_new_value_addr) -> status_t
{
status_t rc = S_OK;
ipc.send_table_op_resize(work_request_id, key_name, new_value_size);
ipc.recv_table_op_response(rc, out_new_value_addr);
return rc;
};
auto ipc_allocate_pool_memory =
[&ipc] (const size_t size,
const size_t alignment,
void *&out_new_addr) -> status_t
{
status_t rc = S_OK;
ipc.send_table_op_allocate_pool_memory(size, alignment);
ipc.recv_table_op_response(rc, out_new_addr);
return rc;
};
auto ipc_free_pool_memory =
[&ipc] (const size_t size,
const void * addr) -> status_t
{
status_t rc = S_OK;
void * na;
ipc.send_table_op_free_pool_memory(addr, size);
ipc.recv_table_op_response(rc, na);
return rc;
};
auto ipc_find_key =
[&ipc] (const std::string& key_expression,
const offset_t begin_position,
const component::IKVIndex::find_t find_type,
offset_t& out_matched_position,
std::string& out_matched_key) -> status_t
{
status_t rc = S_OK;
ipc.send_find_index_request(key_expression,
begin_position,
find_type);
ipc.recv_find_index_response(rc,
out_matched_position,
out_matched_key);
return rc;
};
auto ipc_get_reference_vector =
[&ipc] (const common::epoch_time_t t_begin,
const common::epoch_time_t t_end,
IADO_plugin::Reference_vector& out_vector) -> status_t
{
status_t rc = S_OK;
ipc.send_vector_request(t_begin, t_end);
ipc.recv_vector_response(rc, out_vector);
return rc;
};
auto ipc_get_pool_info =
[&ipc] (std::string& out_response) -> status_t
{
status_t rc = S_OK;
ipc.send_pool_info_request();
ipc.recv_pool_info_response(rc, out_response);
return rc;
};
auto ipc_iterate =
[&ipc] (const common::epoch_time_t t_begin,
const common::epoch_time_t t_end,
component::IKVStore::pool_iterator_t& iterator,
component::IKVStore::pool_reference_t& reference) -> status_t
{
status_t rc = S_OK;
ipc.send_iterate_request(t_begin, t_end, iterator);
ipc.recv_iterate_response(rc, iterator, reference);
return rc;
};
auto ipc_unlock =
[&ipc] (const uint64_t work_id,
component::IKVStore::key_t key_handle) -> status_t
{
status_t rc = S_OK;
if(work_id == 0 || key_handle == nullptr) return E_INVAL;
ipc.send_unlock_request(work_id, key_handle);
if(!ipc.recv_unlock_response(rc))
throw General_exception("ipc_unclock recv unexpected response");
return rc;
};
auto ipc_configure = [&ipc](const uint64_t options) -> status_t
{
status_t rc = S_OK;
ipc.send_configure_request(options);
if(!ipc.recv_configure_response(rc))
throw General_exception("ipc_configure recv unexpected response");
return rc;
};
for(auto a: ado_params) { PLOG("ado_param:%s", a.c_str()); }
/* load plugin and register callbacks */
std::vector<std::string> plugin_vector;
char *plugin_name = std::strtok(const_cast<char*>(plugins.c_str()), ",");
while (plugin_name) {
plugin_vector.push_back(plugin_name);
plugin_name = std::strtok(NULL, ",");
}
/* load plugins */
ADO_plugin_mgr plugin_mgr(plugin_vector,
IADO_plugin::Callback_table{
ipc_create_key,
ipc_open_key,
ipc_erase_key,
ipc_resize_value,
ipc_allocate_pool_memory,
ipc_free_pool_memory,
ipc_get_reference_vector,
ipc_find_key,
ipc_get_pool_info,
ipc_iterate,
ipc_unlock,
ipc_configure});
/* main loop */
unsigned long count = 0;
bool exit = false;
unsigned int memory_type = 0xFF;
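  /* 0xFF is a sentinel for "not yet known": the real memory type is
     delivered later by the BOOTSTRAP_REQUEST message. */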
if(cpu_mask.empty() == false) {
cpu_mask_t m;
if(string_to_mask(cpu_mask, m) == S_OK) {
if(debug_level > 0)
PLOG("ADO process configured with cpu mask: %s", cpu_mask.c_str());
if (set_cpu_affinity_mask(m) == -1)
throw Logic_exception("bad mask parameter");
}
if( 2 < debug_level ) {
PLOG("CPU_MASK: ADO process mask: [%s]", m.string_form().c_str());
}
}
PLOG("ADO process: main thread (%lu) debug_level:%d", pthread_self(), debug_level);
#ifdef PROFILE
PMAJOR("ADO: starting profiler");
ProfilerStart("/tmp/ADO_cpu_profile.prof");
#endif
while (!exit) {
/* main loop servicing incoming IPC requests */
if(debug_level > 2)
PLOG("ADO process: waiting for message (%lu)", count);
Buffer_header * buffer = nullptr; /* recv will dequeue this */
/* poll until there is a request, sleep on too much polling */
auto st = ipc.poll_recv_sleep(buffer);
if(st != S_OK) throw Logic_exception(__FILE__ " ADO: ipc.poll_recv_sleep failed unexpectedly");
assert(buffer);
/*---------------------------------------*/
/* custom IPC message protocol - staging */
/*---------------------------------------*/
using namespace mcas::ipc;
if(mcas::ipc::Message::is_valid(buffer)) {
switch(mcas::ipc::Message::type(buffer))
{
case mcas::ipc::MSG_TYPE::CHIRP: {
auto chirp = reinterpret_cast<mcas::ipc::Chirp*>(buffer);
switch(chirp->type)
{
case chirp_t::SHUTDOWN:
PMAJOR("ADO: received Shutdown chirp in %p",
common::p_fmt(buffer));
plugin_mgr.shutdown();
exit = true;
break;
default:
throw Protocol_exception("unknown chirp");
}
break;
}
case mcas::ipc::MSG_TYPE::MAP_MEMORY: {
auto * mm = reinterpret_cast<Map_memory*>(buffer);
assert(memory_type != 0xFF);
/* set base address for mapping */
auto mapping_base = global::base_addr ? global::base_addr : mm->shard_addr;
void * mm_addr;
if(memory_type == 1) { /* DRAM case, e.g. mapstore */
if(!check_xpmem_kernel_module()) {
PERR("inaccessible XPMEM kernel module");
throw General_exception("inaccessible XPMEM kernel module");
}
xpmem_addr seg = {0,0};
seg.apid = xpmem_get(xpmem_segid_t(mm->token),
XPMEM_RDWR,
XPMEM_PERMIT_MODE,
reinterpret_cast<void *>(0666));
if(seg.apid == -1)
throw General_exception("xpmem_get: failed unexpectedly.");
mm_addr = xpmem_attach(seg,
mm->size,
mapping_base);
}
else {
if(!nupm::check_mcas_kernel_module()) {
PERR("inaccessible MCAS kernel module");
throw General_exception("inaccessible MCAS kernel module");
}
mm_addr = nupm::mmap_exposed_memory(mm->token,
mm->size,
mapping_base);
}
if(mm_addr == MAP_FAILED)
throw General_exception("mcasmod: mmap_exposed_memory failed unexpectly (base=%p).", mapping_base);
PMAJOR("ADO: mapped memory %lx size:%lu addr=%p", mm->token, mm->size, mm_addr);
/* record mapping information for clean up */
global::shared_memory_mappings.push_back(std::make_tuple(mm->shard_addr,
mm_addr,
mm->size));
global::base_offset =
reinterpret_cast<addr_t>(mm_addr) - reinterpret_cast<addr_t>(mm->shard_addr);
/* register memory with plugins */
if(plugin_mgr.register_mapped_memory(mm->shard_addr, mm_addr, mm->size) != S_OK)
throw General_exception("calling register_mapped_memory on ADO plugin failed");
break;
}
case mcas::ipc::MSG_TYPE::MAP_MEMORY_NAMED: {
auto * mm = static_cast<Map_memory_named*>(static_cast<void *>(buffer));
assert(memory_type != 0xFF);
common::Fd_open fd(std::string(mm->pool_name(), mm->pool_name_len).c_str(), O_RDWR);
int flags = MAP_SHARED_VALIDATE | MAP_FIXED | MAP_SYNC | MAP_HUGE;
common::memory_mapped mme(mm->iov,
PROT_READ|PROT_WRITE,
flags,
fd.fd(),
mm->offset);
if ( ! mme )
{
flags &= ~MAP_SYNC;
mme = common::memory_mapped(mm->iov,
PROT_READ|PROT_WRITE,
flags,
fd.fd(),
mm->offset);
}
if ( ! mme )
{
throw General_exception(
"%s: %.*s mmap(%p, 0x%zx, %s, 0x%x=%s, %i, 0x%zu) failed unexpectly: %zu/%s"
, __func__, int(mm->pool_name_len), mm->pool_name()
, ::base(mm->iov), ::size(mm->iov), "PROT_READ|PROT_WRITE", flags
, "MAP_SHARED_VALIDATE|MAP_FIXED", fd.fd(), mm->offset
, ::size(mme), ::strerror(int(::size(mme)))
);
}
PMAJOR("ADO: mapped region %u pool %.*s addr=%p:%zu",
unsigned(mm->region_id), int(mm->pool_name_len),
mm->pool_name(), ::base(mm->iov), ::size(mm->iov));
/* ADO does not use common::memory_mapped */
auto mme_local = mme.release();
/* record mapping information for clean up */
global::shared_memory_mappings.push_back(std::make_tuple(::base(mm->iov), // CLEM to check
::base(mme_local),
::size(mme_local)));
/* register memory with plugins */
if(plugin_mgr.register_mapped_memory(::base(mm->iov), ::base(mme_local), ::size(mm->iov)) != S_OK)
throw General_exception("calling register_mapped_memory on ADO plugin failed");
break;
}
case mcas::ipc::MSG_TYPE::WORK_REQUEST: {
component::IADO_plugin::response_buffer_vector_t response_buffers;
auto * wr = reinterpret_cast<Work_request*>(buffer);
if(debug_level > 1)
PLOG("ADO process: RECEIVED Work_request: key=(%p:%.*s) value=%p "
"value_len=%lu invocation_len=%lu detached_value=%p (%.*s) len=%lu new=%d",
shard_to_local(wr->get_key()),
boost::numeric_cast<int>(wr->get_key_len()),
shard_to_local<char>(wr->get_key()),
shard_to_local(wr->get_value_addr()),
wr->value_len,
wr->invocation_data_len,
shard_to_local(wr->get_detached_value_addr()),
int(wr->detached_value_len),
shard_to_local<char>(wr->get_detached_value_addr()),
wr->detached_value_len,
wr->new_root);
auto work_request_id = wr->work_key;
IADO_plugin::value_space_t values;
values.append(shard_to_local(wr->get_value_addr()), wr->value_len);
if(wr->detached_value_len > 0) {
assert(wr->get_detached_value_addr() != nullptr);
values.append(shard_to_local(wr->get_detached_value_addr()),
wr->detached_value_len);
}
/* forward to plugins */
status_t rc =
plugin_mgr.do_work(work_request_id,
shard_to_local<const char>(wr->get_key()),
wr->get_key_len(),
values,
wr->get_invocation_data(),
wr->invocation_data_len,
wr->new_root,
response_buffers);
/* pass back response data */
ipc.send_work_response(rc,
work_request_id,
response_buffers);
break;
}
case mcas::ipc::MSG_TYPE::BOOTSTRAP_REQUEST: {
auto boot_req = reinterpret_cast<Bootstrap_request*>(buffer);
std::string pool_name(boot_req->pool_name, boot_req->pool_name_len);
if(debug_level > 2)
PLOG("ADO process: bootstrap_request: (%s, %lu, %u, %u, %lu)",
pool_name.c_str(), boot_req->pool_size,
boot_req->pool_flags, boot_req->memory_type,
boot_req->expected_obj_count);
memory_type = boot_req->memory_type;
/* call the plugin */
plugin_mgr.launch_event(boot_req->auth_id,
pool_name,
boot_req->pool_size,
boot_req->pool_flags,
boot_req->memory_type,
boot_req->expected_obj_count,
ado_params);
ipc.send_bootstrap_response();
break;
}
case mcas::ipc::MSG_TYPE::OP_EVENT: {
auto event = reinterpret_cast<Op_event*>(buffer);
if(debug_level > 1)
PLOG("ADO_process: received op event (%s)",
to_str(event->op).c_str());
/* invoke plugin then return completion */
plugin_mgr.notify_op_event(event->op);
ipc.send_op_event_response(event->op);
break;
}
case mcas::ipc::MSG_TYPE::CLUSTER_EVENT: {
auto event = reinterpret_cast<Cluster_event*>(buffer);
PLOG("ADO_process: received cluster event (%s,%s,%s)",
event->sender(), event->type(), event->message());
plugin_mgr.send_cluster_event(event->sender(),
event->type(),
event->message());
break;
}
default: {
throw Logic_exception("ADO_process: unknown mcas::ipc message type");
}
}
ipc.free_ipc_buffer(buffer);
count++;
}
} // end of while loop
PMAJOR("ADO: exiting.");
/* clean up: free shared memory mappings */
/* TODO do we need to unregister with kernel module ? */
for(auto& mp : global::shared_memory_mappings) {
if(::munmap(std::get<1>(mp), std::get<2>(mp)) != 0)
throw Logic_exception("unmap of shared memory failed");
}
#ifdef PROFILE
ProfilerStop();
PMAJOR("ADO: stopped profiler");
#endif
return 0;
}
catch (const std::exception &e) {
std::cerr << e.what() << std::endl;
return -1;
}
catch (const Exception &e) {
std::cerr << e.cause() << std::endl;
return -1;
}
return -1;
}
|
{"hexsha": "d9d05113de278f6de927e44fac0e8e261cfcdde4", "size": 26713, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/server/ado/src/ado.cpp", "max_stars_repo_name": "omriarad/mcas", "max_stars_repo_head_hexsha": "f47aab12754c91ebd75b0e1881c8a7cc7aa81278", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 60.0, "max_stars_repo_stars_event_min_datetime": "2020-04-28T08:15:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T10:35:15.000Z", "max_issues_repo_path": "src/server/ado/src/ado.cpp", "max_issues_repo_name": "omriarad/mcas", "max_issues_repo_head_hexsha": "f47aab12754c91ebd75b0e1881c8a7cc7aa81278", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 66.0, "max_issues_repo_issues_event_min_datetime": "2020-09-03T23:40:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T20:34:52.000Z", "max_forks_repo_path": "src/server/ado/src/ado.cpp", "max_forks_repo_name": "omriarad/mcas", "max_forks_repo_head_hexsha": "f47aab12754c91ebd75b0e1881c8a7cc7aa81278", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2019-11-02T06:30:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T01:56:42.000Z", "avg_line_length": 35.522606383, "max_line_length": 116, "alphanum_fraction": 0.5179126268, "num_tokens": 5505}
|
/*++
BDcpp -- Simple Bjontegaard Delta metric implementation for C++.
MIT License
Copyright (c) 2022 Tim Bruylants
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--*/
#include "bdcpp.h"
#include <Eigen/Dense>
#include <cmath>
namespace bdcpp
{
namespace details
{
// Fit a polynomial of given order on the curve (minimizing squared distance).
std::vector<value_type> polyFit(const curve_data_type& curve, const size_t order)
{
const size_t numCoefficients = order + 1;
const size_t nCount = curve.size();
Eigen::MatrixX<value_type> X(nCount, numCoefficients);
Eigen::MatrixX<value_type> Y(nCount, 1);
// fill X and Y matrices (X is a Vandermonde matrix)
for (size_t row = 0; row < nCount; ++row)
{
Y(row, 0) = curve[row].second;
value_type v = (value_type)1;
for (size_t col = 0; col < numCoefficients; ++col)
{
X(row, col) = v;
v *= curve[row].first;
}
}
// Solve for the polynomial coefficients (one column) and return.
const Eigen::VectorX<value_type> coefficients = X.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Y);
return std::vector<value_type>(coefficients.data(), coefficients.data() + numCoefficients);
}
// Calculates Y(x), where the polynomial coefficients are given.
value_type polyVal(const std::vector<value_type>& coefficients, const value_type x)
{
assert(!coefficients.empty());
size_t c = coefficients.size();
value_type r = coefficients[--c];
while (c != 0)
{
r *= x;
r += coefficients[--c];
}
return r;
}
std::vector<value_type> polyIntegrate(const std::vector<value_type>& coefficients, const value_type constant = 0)
{
const size_t numCoefficients = coefficients.size();
std::vector<value_type> ic(numCoefficients + 1);
ic[0] = constant;
for (size_t c = 0; c < numCoefficients; ++c)
{
ic[c + 1] = coefficients[c] / (c + 1);
}
return ic;
}
// The main Bjontegaard calculation to get the area surface difference between two curves.
value_type bdDiff(const curve_data_type& curveA, const curve_data_type& curveB, const int polyOrder)
{
assert(polyOrder >= 3);
// Take lowest and highest X values (assumes sorted curves and relevant range overlap).
const auto lowX = std::max(curveA.front().first, curveB.front().first);
const auto highX = std::min(curveA.back().first, curveB.back().first);
// Fit curves as polynomials and integrate them.
const auto iCoefficientsA = polyIntegrate(polyFit(curveA, (size_t)polyOrder));
const auto iCoefficientsB = polyIntegrate(polyFit(curveB, (size_t)polyOrder));
// Calculate the definite integrals.
const auto intA = polyVal(iCoefficientsA, highX) - polyVal(iCoefficientsA, lowX);
const auto intB = polyVal(iCoefficientsB, highX) - polyVal(iCoefficientsB, lowX);
// Return the BD diff (as the area over range).
return (intB - intA) / (highX - lowX);
}
// Prepare curve points for BD calculations.
template<bool TRANSPOSE>
curve_data_type prepareCurve(const curve_data_type& curve)
{
assert(curve.size() >= 4);
auto newCurve(curve);
std::sort(newCurve.begin(), newCurve.end(), [](const curve_data_point_type& vA, const curve_data_point_type& vB)
{
return vA.first < vB.first;
});
std::for_each(newCurve.begin(), newCurve.end(), [](curve_data_point_type& v)
{
v.first = std::log(v.first);
if constexpr (TRANSPOSE)
{
std::swap(v.first, v.second);
}
});
return newCurve;
}
}
// Calculate the BD-SNR for the two given curves (returns a distortion improvement in dB).
value_type bdsnr(const curve_data_type& curveA, const curve_data_type& curveB, const int polyOrder)
{
return details::bdDiff(details::prepareCurve<false>(curveA), details::prepareCurve<false>(curveB), polyOrder);
}
// Calculate the BD-BR for the two given curves (returns a rate improvement in %).
value_type bdbr(const curve_data_type& curveA, const curve_data_type& curveB, const int polyOrder)
{
return (std::exp(details::bdDiff(details::prepareCurve<true>(curveA), details::prepareCurve<true>(curveB), polyOrder)) - 1) * 100;
}
}
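/*
Example usage (a minimal sketch; assumes curve_data_type is a vector of
(rate, distortion) pairs with at least four points per curve, as required
by prepareCurve above):

    bdcpp::curve_data_type a{{100, 30.0}, {200, 33.1}, {400, 36.0}, {800, 38.5}};
    bdcpp::curve_data_type b{{ 90, 30.5}, {180, 33.8}, {360, 36.6}, {720, 39.0}};
    const auto dSnr = bdcpp::bdsnr(a, b, 3); // distortion improvement in dB
    const auto dBr  = bdcpp::bdbr(a, b, 3);  // rate improvement in %
*/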
|
{"hexsha": "dba67f1114390e57ae37931f331e903acb2035fe", "size": 5301, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "bdcpp/bdcpp.cpp", "max_stars_repo_name": "tbr/bjontegaard_cpp", "max_stars_repo_head_hexsha": "d06302497d8af0734d69ce93590acab9a4998817", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bdcpp/bdcpp.cpp", "max_issues_repo_name": "tbr/bjontegaard_cpp", "max_issues_repo_head_hexsha": "d06302497d8af0734d69ce93590acab9a4998817", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bdcpp/bdcpp.cpp", "max_forks_repo_name": "tbr/bjontegaard_cpp", "max_forks_repo_head_hexsha": "d06302497d8af0734d69ce93590acab9a4998817", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3309859155, "max_line_length": 134, "alphanum_fraction": 0.6891152613, "num_tokens": 1323}
|
#########################################################################################
# WaveformQSRCNN.py: QSRCNN for G711/ADPCM/AMR/EVS using Waveform features
# Author: Huijun Liu
# Time: 10.05.2017
# Location: TU Braunschweig IfN
#########################################################################################
import os
import time
import math
import scipy.io as sio
import tensorflow as tf
from keras.models import Model
from keras import backend as K
from keras.layers import Input, Add, Activation
from keras.layers.convolutional import Conv1D, MaxPooling1D, UpSampling1D
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger, TensorBoard, LearningRateScheduler
from weightnorm import AdamWithWeightnorm
from tensorflow.python.framework import ops
# -------------------------------------------------------------------------------
# 0. define metric and activation function
# -------------------------------------------------------------------------------
def snr(y_true, y_pred):
"""
SNR is Signal to Noise Ratio
"""
return 10.0 * K.log((K.sum(K.square(y_true))) / (K.sum(K.square(y_pred - y_true)))) / K.log(10.0)
def selu(x):
    with ops.name_scope('selu') as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
"""
def step_decay(epoch):
initial_lrate = 0.001
drop = 0.25
epochs_drop = 3.0
lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
return lrate
"""
# -------------------------------------------------------------------------------
# 1. define Waveform-QSRCNN Model
# -------------------------------------------------------------------------------
class WaveformQSRCNN(object):
def __init__(self, opt_params={'lr': 5e-4, 'batch_size': 32, 'nb_epochs': 100},
model_params={'n1': 16, 'n2': 32, 'n3': 16, 'frame_len': 32},
codec_type_params={'weights_dir': "./model_weights", 'logdir': "./log"}):
self.learning_rate = opt_params['lr']
self.batch_size = opt_params['batch_size']
self.nb_epochs = opt_params['nb_epochs']
self.log_dir = codec_type_params['logdir']
if not (os.path.exists(self.log_dir)):
os.makedirs(self.log_dir)
self.weights_dir = codec_type_params['weights_dir']
if not (os.path.exists(self.weights_dir)):
os.makedirs(self.weights_dir)
self.frame_len = model_params['frame_len']
self.model = self.create_model(model_params)
# -------------------------------------------------------------------------------
# Load the Weights of the Model
# -------------------------------------------------------------------------------
def load_weights(self, file_path=""):
if file_path == "":
file_path = self.weights_dir + '/' + 'G711_WaveformQSRCNN_Weights_Best_bs' + \
str(self.batch_size) + '_lr' + str(self.learning_rate) + '.h5'
file_path = os.path.normcase(file_path)
self.model.load_weights(file_path)
# -------------------------------------------------------------------------------
# Save the Weights of the Model
# -------------------------------------------------------------------------------
def save_weights(self):
file_path = self.weights_dir + '/' + 'G711_WaveformQSRCNN_Weights_Final_bs' + \
str(self.batch_size) + '_lr' + str(self.learning_rate) + '.h5'
file_path = os.path.normcase(file_path)
self.model.save_weights(file_path)
# -------------------------------------------------------------------------------
# 1. define model
# -------------------------------------------------------------------------------
def create_model(self, model_params={'n1': 32, 'n2': 64, 'n3': 32, 'frame_len': 80}):
frame_len = self.frame_len
n1 = model_params['n1']
n2 = model_params['n2']
n3 = model_params['n3']
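        # Encoder-decoder with additive skip connections: two conv blocks
        # with max-pooling, a bottleneck conv, then two upsampling blocks
        # whose outputs are added back to the matching encoder features.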
input_sque = Input(shape=(frame_len, 1))
c1 = Conv1D(n1, 3, padding='same')(input_sque)
c1 = Activation(selu)(c1)
c1 = Conv1D(n1, 3, padding='same')(c1)
c1 = Activation(selu)(c1)
x = MaxPooling1D(2)(c1)
c2 = Conv1D(n2, 3, padding='same')(x)
c2 = Activation(selu)(c2)
c2 = Conv1D(n2, 3, padding='same')(c2)
c2 = Activation(selu)(c2)
x = MaxPooling1D(2)(c2)
c3 = Conv1D(n3, 3, padding='same')(x)
c3 = Activation(selu)(c3)
x = UpSampling1D(2)(c3)
c2_2 = Conv1D(n2, 3, padding='same')(x)
c2_2 = Activation(selu)(c2_2)
c2_2 = Conv1D(n2, 3, padding='same')(c2_2)
c2_2 = Activation(selu)(c2_2)
m1 = Add()([c2, c2_2])
m1 = UpSampling1D(2)(m1)
c1_2 = Conv1D(n1, 3, padding='same')(m1)
c1_2 = Activation(selu)(c1_2)
c1_2 = Conv1D(n1, 3, padding='same')(c1_2)
c1_2 = Activation(selu)(c1_2)
m2 = Add()([c1, c1_2])
decoded = Conv1D(1, 5, padding='same', activation='linear')(m2)
model = Model(input_sque, decoded)
model.summary()
learning_rate = self.learning_rate
# adam = optimizers.Adam(lr=learning_rate)
# model.compile(optimizer=adam, loss='mse', metrics=[SNRLoss])
adam_wn = AdamWithWeightnorm(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=adam_wn, loss='mse', metrics=[snr])
return model
# -------------------------------------------------------------------------------
# 2. Fit the model
# -------------------------------------------------------------------------------
def step_decay(self, epoch):
initial_lrate = self.learning_rate
drop = 0.25
epochs_drop = 4.0
lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
old_lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, lrate)
lrate = K.get_value(self.model.optimizer.lr)
print("> Ir reduced from %f to %f" % (old_lr, lrate))
return lrate
def fit(self, x_train_noisy, x_train, x_train_noisy_vali, x_train_vali):
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
print("> Training model ...")
nb_epochs = self.nb_epochs
batch_size = self.batch_size
learning_rate = self.learning_rate
# ---------------------------------------------------------
# 1. define callback functions
# ---------------------------------------------------------
        # Stop training if val_snr has not improved for 16 epochs
        stop_str = EarlyStopping(monitor='val_snr', patience=16, verbose=1, mode='max')
        # Reduce the learning rate (lr = lr * factor) when val_snr stops improving
        reduce_LR = ReduceLROnPlateau(monitor='val_snr', factor=0.5, patience=2, verbose=1, mode='max', epsilon=0.0001, cooldown=0, min_lr=0)
best_weights = self.weights_dir + '/' + 'G711_WaveformQSRCNN_Weights_Best_bs' + \
str(batch_size) + '_lr' + str(learning_rate) + '.h5'
best_weights = os.path.normcase(best_weights)
model_save = ModelCheckpoint(best_weights, monitor='val_snr', save_best_only=True, mode='max', save_weights_only=True, period=1)
logger_name = self.log_dir + '/' + 'G711_WaveformQSRCNN_log_bs' + \
str(batch_size) + '_lr' + str(learning_rate) + '.csv'
logger_name = os.path.normcase(logger_name)
logger = CSVLogger(logger_name, separator=',', append=False)
tensor_board = TensorBoard(log_dir=self.log_dir, histogram_freq=1)
lrate = LearningRateScheduler(self.step_decay)
start = time.time()
# ---------------------------------------------------------
# 2. fit the model
# ---------------------------------------------------------
print("> Training model " + "using Batch-size: " + str(batch_size) + ", Learning_rate: " + str(learning_rate) + "...")
hist = self.model.fit(x_train_noisy, x_train, epochs=nb_epochs, batch_size=batch_size, shuffle=True,
validation_data=[x_train_noisy_vali, x_train_vali],
callbacks=[lrate, reduce_LR, stop_str, model_save, logger])
print("> Saving Completed, Time : ", time.time() - start)
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
return hist
# -------------------------------------------------------------------------------
# 3. Save loss snr val_loss val_snr as .mat File
# -------------------------------------------------------------------------------
def save_training_curves(self, hist):
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
print("> Saving Training and Validation loss-metric curve ...")
start = time.time()
trian_curve_root = "./Opting_Results"
if not(os.path.exists(trian_curve_root)):
os.makedirs(trian_curve_root)
# ---------------------------------------------------------
# 1. Saving Training Loss
# ---------------------------------------------------------
TrainLossVec = trian_curve_root + '/' + 'G711_WaveformQSRCNN_TrainLoss_bs' + \
str(self.batch_size) + '_lr' + str(self.learning_rate) + '.mat'
TrainLossVec = os.path.normcase(TrainLossVec)
sio.savemat(TrainLossVec, {'Train_Loss_Vec': hist.history['loss']})
# ---------------------------------------------------------
# 2. Saving Training Metric
# ---------------------------------------------------------
TrainSNRVec = trian_curve_root + '/' + 'G711_WaveformQSRCNN_TrainMetrice_bs' + \
str(self.batch_size) + '_lr' + str(self.learning_rate) + '.mat'
TrainSNRVec = os.path.normcase(TrainSNRVec)
sio.savemat(TrainSNRVec, {'Train_SNR_Vec': hist.history['snr']}) # snr
# ---------------------------------------------------------
# 3. Saving Validation Loss
# ---------------------------------------------------------
ValiLossVec = trian_curve_root + '/' + 'G711_WaveformDDQSRCNN_ValiLoss_bs' + \
str(self.batch_size) + '_lr' + str(self.learning_rate) + '.mat'
ValiLossVec = os.path.normcase(ValiLossVec)
sio.savemat(ValiLossVec, {'Vali_Loss_Vec': hist.history['val_loss']})
# ---------------------------------------------------------
# 4. Saving Validation Metric
# ---------------------------------------------------------
ValiSNRVec = trian_curve_root + '/' + 'G711_WaveformQSRCNN_ValiMetrice_bs' + \
str(self.batch_size) + '_lr' + str(self.learning_rate) + '.mat'
ValiSNRVec = os.path.normcase(ValiSNRVec)
sio.savemat(ValiSNRVec, {'Vali_SNR_Vec': hist.history['val_snr']}) # val_snr
print("> Saving Completed, Time : ", time.time() - start)
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
# -------------------------------------------------------------------------------
# 4. Evaluate the Trained Model
# -------------------------------------------------------------------------------
def evaluation_model(self, x_test_noisy, weights_path=""):
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
print("> Evaluation of the Trained Model ...")
# ---------------------------------------------------------
# 1. Load Model Weights
# ---------------------------------------------------------
print('> 1. Loading the Weights of the Model ...')
self.load_weights(weights_path)
# ---------------------------------------------------------
# 2. Evaluate the Model
# ---------------------------------------------------------
start = time.time()
print('> 2. Evaluating the Model, Please wait for a Moment ...')
predicted = self.model.predict(x_test_noisy)
print('> 2. Evaluating Completed, Time : ' + str(time.time() - start))
# ---------------------------------------------------------
# 3. Saving the Evaluation Result
# ---------------------------------------------------------
print('> 3. Saving the Evaluation Result ...')
start = time.time()
pre_file_root = "./Test_Outputs"
if not (os.path.exists(pre_file_root)):
os.makedirs(pre_file_root)
preOutput = pre_file_root + "/" + "G711_CNN_testplan_vec.mat"
preOutput = os.path.normcase(preOutput)
sio.savemat(preOutput, {'predictions': predicted})
print('> 3. Evaluation Result Saving Completed, Time : ' + str(time.time() - start))
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
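# -------------------------------------------------------------------------------
# Minimal training sketch (random dummy frames stand in for real coded/clean
# waveform pairs; shapes follow the (frame_len, 1) model input used above)
# -------------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    frame_len = 32  # must be divisible by 4 (two MaxPooling1D(2) stages)
    x_clean = np.random.randn(256, frame_len, 1).astype('float32')
    x_coded = x_clean + 0.1 * np.random.randn(256, frame_len, 1).astype('float32')
    net = WaveformQSRCNN(opt_params={'lr': 5e-4, 'batch_size': 32, 'nb_epochs': 2},
                         model_params={'n1': 16, 'n2': 32, 'n3': 16, 'frame_len': frame_len},
                         codec_type_params={'weights_dir': "./model_weights", 'logdir': "./log"})
    net.fit(x_coded, x_clean, x_coded[:64], x_clean[:64])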
|
{"hexsha": "bdf6dbd1774eb6ae48319521169e3b1a485ff2cb", "size": 13097, "ext": "py", "lang": "Python", "max_stars_repo_path": "CepstralCNN/WaveformQSRCNN.py", "max_stars_repo_name": "ansleliu/ConvolutionaNeuralNetworksToEnhanceCodedSpeech", "max_stars_repo_head_hexsha": "2f0852ef2d97338a8cf42fe7e20231f38c0613de", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2018-04-25T22:41:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-13T14:49:54.000Z", "max_issues_repo_path": "CepstralCNN/WaveformQSRCNN.py", "max_issues_repo_name": "ansleliu/QuantizedSpeechReconstructionUsingCNN", "max_issues_repo_head_hexsha": "2f0852ef2d97338a8cf42fe7e20231f38c0613de", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CepstralCNN/WaveformQSRCNN.py", "max_forks_repo_name": "ansleliu/QuantizedSpeechReconstructionUsingCNN", "max_forks_repo_head_hexsha": "2f0852ef2d97338a8cf42fe7e20231f38c0613de", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-07-19T11:12:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-24T04:27:58.000Z", "avg_line_length": 44.3966101695, "max_line_length": 141, "alphanum_fraction": 0.47468886, "include": true, "reason": "import scipy", "num_tokens": 2950}
|
# import numpy as np
from keras import backend as K
def mean_squared_error(y_true, y_pred):
return K.mean(K.square(y_pred - y_true))
def root_mean_square_error(y_true, y_pred):
return mean_squared_error(y_true, y_pred) ** 0.5
def rmse(y_true, y_pred):
return mean_squared_error(y_true, y_pred) ** 0.5
# aliases
mse = MSE = mean_squared_error
# rmse = RMSE = root_mean_square_error
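# Usage sketch (as Keras loss/metric on a compiled model):
# model.compile(optimizer='adam', loss=mse, metrics=[rmse])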
|
{"hexsha": "82438b7d262769522ea2122a7ecc377871c0b076", "size": 397, "ext": "py", "lang": "Python", "max_stars_repo_path": "star/metrics.py", "max_stars_repo_name": "hongnianwang/STAR", "max_stars_repo_head_hexsha": "11cad8a01c6661bbaec7da511a1c9cb960896b00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-09-18T11:13:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:24:09.000Z", "max_issues_repo_path": "star/metrics.py", "max_issues_repo_name": "hongnianwang/STAR", "max_issues_repo_head_hexsha": "11cad8a01c6661bbaec7da511a1c9cb960896b00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-19T07:00:36.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-29T10:01:31.000Z", "max_forks_repo_path": "star/metrics.py", "max_forks_repo_name": "hongnianwang/STAR", "max_forks_repo_head_hexsha": "11cad8a01c6661bbaec7da511a1c9cb960896b00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-02T08:46:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-17T14:51:53.000Z", "avg_line_length": 24.8125, "max_line_length": 52, "alphanum_fraction": 0.7455919395, "include": true, "reason": "import numpy", "num_tokens": 112}
|
import numpy as np
NG_EXACT = 'exact'
NG_BD = 'block_diagonal'
NG_BTD = 'block_tri_diagonal'
NG_KFAC = 'kfac'
__all__ = [
'batch_analytic_kernel',
'analytic_kernel'
]
def batch_analytic_kernel(x1, x2, w_var, ng_type, batch_size):
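    """Evaluate analytic_kernel over batch_size x batch_size blocks of
    (x1, x2) and stitch the resulting blocks back together, bounding
    peak memory for large inputs. N1 and N2 must be divisible by
    batch_size."""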
N1 = x1.shape[0]
N2 = x2.shape[0]
assert N1 % batch_size == 0 and N2 % batch_size == 0
n_batch1 = int(N1 / batch_size)
n_batch2 = int(N2 / batch_size)
x1s = np.split(x1, n_batch1)
x2s = np.split(x2, n_batch2)
kernel = []
for i in range(n_batch1):
kernel_i = []
for j in range(n_batch2):
rst = analytic_kernel(x1s[i], x2s[j], w_var, ng_type)
kernel_i.append(rst)
if isinstance(kernel_i[0], list):
kernel_i = [np.hstack([k[idx] for k in kernel_i]) for idx in range(len(kernel_i[0]))]
else:
kernel_i = np.hstack(kernel_i)
kernel.append(kernel_i)
if isinstance(kernel[0], list):
return [np.vstack([k[idx] for k in kernel]) for idx in range(len(kernel[0]))]
else:
return np.vstack(kernel)
def analytic_kernel(x1, x2, w_var, ng_type):
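    """Closed-form kernel for an infinitely wide ReLU network with three
    weight layers, weight variance w_var, and no bias. The forward pass
    follows the degree-1 arc-cosine recursion for activation covariances
    (A, Q); the backward pass builds the gradient covariances (B, K).
    Depending on ng_type, returns the per-layer blocks (block-diagonal /
    block-tri-diagonal), the K-FAC factors, or the full summed kernel."""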
def _sqrt(x):
return np.sqrt(np.maximum(x, 0))
b_var = 0 # assume no bias
if x2 is None:
x2 = x1
N1 = x1.shape[0]
N2 = x2.shape[0]
ones = np.ones((N1, N2))
pi = np.pi
M0 = x1.shape[-1]
# forward
q0 = 2 / M0
A0 = np.matmul(x1, x2.T) / M0
q1 = w_var / 2 * q0 + b_var
Q1 = w_var * A0 + b_var * ones
P1 = Q1 / q1
P1 = np.clip(P1, -1, 1)
arcsin_P1 = np.arcsin(P1)
A1 = (q1 / 2 / pi) * (_sqrt(ones - P1 ** 2) + (pi / 2) * P1 + P1 * arcsin_P1)
q2 = w_var / 2 * q1 + b_var
Q2 = w_var * A1 + b_var * ones
P2 = Q2 / q2
P2 = np.clip(P2, -1, 1)
arcsin_P2 = np.arcsin(P2)
A2 = (q2 / 2 / pi) * (_sqrt(ones - P2 ** 2) + (pi / 2) * P2 + P2 * arcsin_P2)
Q3 = w_var * A2 + b_var * ones
# backward
B3 = ones
K2 = (1 / 2 / pi) * (arcsin_P2 + (pi / 2) * ones)
B2 = w_var * B3 * K2
K1 = (1 / 2 / pi) * (arcsin_P1 + (pi / 2) * ones)
B1 = w_var * B2 * K1
if ng_type in [NG_BD, NG_BTD]:
Th1 = w_var * B1 * A0 + b_var * B1
Th2 = w_var * B2 * A1 + b_var * B2
Th3 = w_var * B3 * A2 + b_var * B3
return [Th1, Th2, Th3]
if ng_type == NG_KFAC:
return [Q1, B1, Q2, B2, Q3, B3]
Th = w_var * B1 * A0 + b_var * B1
Th += w_var * B2 * A1 + b_var * B2
Th += w_var * B3 * A2 + b_var * B3
return Th
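# Usage sketch (toy random inputs; w_var plays the role of the weight
# initialization variance assumed by the recursions above):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(8, 10)
    Th = analytic_kernel(x, None, w_var=2.0, ng_type=NG_EXACT)
    print(Th.shape)  # (8, 8)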
|
{"hexsha": "393197a6c411caf67487c794065becd056182f7c", "size": 2321, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpy-based/analytical.py", "max_stars_repo_name": "kazukiosawa/ngd_in_wide_nn", "max_stars_repo_head_hexsha": "1da1a708f8486c16010856d4b43c640b86a14893", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-02-06T18:28:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T04:07:29.000Z", "max_issues_repo_path": "numpy-based/analytical.py", "max_issues_repo_name": "kazukiosawa/ngd_in_wide_nn", "max_issues_repo_head_hexsha": "1da1a708f8486c16010856d4b43c640b86a14893", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-20T13:19:55.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-20T13:19:55.000Z", "max_forks_repo_path": "numpy-based/analytical.py", "max_forks_repo_name": "kazukiosawa/ngd_in_wide_nn", "max_forks_repo_head_hexsha": "1da1a708f8486c16010856d4b43c640b86a14893", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-09T23:14:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T23:14:39.000Z", "avg_line_length": 22.9801980198, "max_line_length": 91, "alphanum_fraction": 0.5730288669, "include": true, "reason": "import numpy", "num_tokens": 970}
|
# -*- coding:utf-8 -*-
__author__ = 'boredbird'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from datetime import datetime
from sklearn.svm import l1_min_c
from woe.eval import compute_ks
import pickle
import time
"""
Search for the optimal regularization hyperparameter C in LogisticRegression
"""
def grid_search_lr_c(X_train,y_train,cs,df_coef_path=False
,pic_coefpath_title='Logistic Regression Path',pic_coefpath=False
,pic_performance_title='Logistic Regression Performance',pic_performance=False):
"""
    Grid search the regularization parameter C for the best KS performance.
:param X_train: features dataframe
:param y_train: target
:param cs: list of regularization parameter c
:param df_coef_path: the file path for logistic regression coefficient dataframe
:param pic_coefpath_title: the pic title for coefficient path picture
:param pic_coefpath: the file path for coefficient path picture
:param pic_performance_title: the pic title for ks performance picture
:param pic_performance: the file path for ks performance picture
:return: a tuple of c and ks value with the best ks performance
"""
# init a LogisticRegression model
clf_l1_LR = LogisticRegression(C=0.1, penalty='l1', tol=0.01,class_weight='balanced')
# cs = l1_min_c(X_train, y_train, loss='log') * np.logspace(0, 9,200)
print("Computing regularization path ...")
start = datetime.now()
print(start)
coefs_ = []
ks = []
for c in cs:
clf_l1_LR.set_params(C=c)
clf_l1_LR.fit(X_train, y_train)
coefs_.append(clf_l1_LR.coef_.ravel().copy())
proba = clf_l1_LR.predict_proba(X_train)[:,1]
ks.append(compute_ks(proba,y_train))
end = datetime.now()
print(end)
print("This took ", end - start)
coef_cv_df = pd.DataFrame(coefs_,columns=X_train.columns)
coef_cv_df['ks'] = ks
coef_cv_df['c'] = cs
if df_coef_path:
file_name = df_coef_path if isinstance(df_coef_path, str) else None
coef_cv_df.to_csv(file_name)
coefs_ = np.array(coefs_)
fig1 = plt.figure('fig1')
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title(pic_coefpath_title)
plt.axis('tight')
if pic_coefpath:
file_name = pic_coefpath if isinstance(pic_coefpath, str) else None
plt.savefig(file_name)
else:
plt.show()
fig2 = plt.figure('fig2')
plt.plot(np.log10(cs), ks)
plt.xlabel('log(C)')
plt.ylabel('ks score')
plt.title(pic_performance_title)
plt.axis('tight')
if pic_performance:
file_name = pic_performance if isinstance(pic_performance, str) else None
plt.savefig(file_name)
else:
plt.show()
flag = coefs_<0
idx = np.array(ks)[flag.sum(axis=1) == 0].argmax()
return (cs[idx],ks[idx])
def grid_search_lr_c_validation(X_train,y_train,validation_dataset_list,cs=[0.01],df_coef_path=False
,pic_coefpath_title='Logistic Regression Path',pic_coefpath=False
,pic_performance_title='Logistic Regression Performance',pic_performance=False):
"""
    Grid search the regularization parameter C for the best KS performance.
:param X_train: features dataframe
:param y_train: target
    :param cs: list of regularization parameter C values
:param df_coef_path: the file path for logistic regression coefficient dataframe
:param pic_coefpath_title: the pic title for coefficient path picture
:param pic_coefpath: the file path for coefficient path picture
:param pic_performance_title: the pic title for ks performance picture
:param pic_performance: the file path for ks performance picture
:return: a tuple of c and ks value with the best ks performance
"""
# init a LogisticRegression model
clf_l1_LR = LogisticRegression(C=0.1, penalty='l1', tol=0.01,class_weight='balanced')
print("Computing regularization path ...")
start = datetime.now()
print(start)
coefs_ = []
ks = []
ks_validation1 = []
ks_validation2 = []
counter = 0
for c in cs:
print('time: ',time.asctime(time.localtime(time.time())),'counter: ',counter, ' c: ',c)
clf_l1_LR.set_params(C=c)
clf_l1_LR.fit(X_train, y_train)
coefs_.append(clf_l1_LR.coef_.ravel().copy())
proba = clf_l1_LR.predict_proba(X_train)[:,1]
validation_proba1 = clf_l1_LR.predict_proba(validation_dataset_list[0][X_train.columns])[:,1]
ks.append(compute_ks(proba,y_train))
ks_validation1.append(compute_ks(validation_proba1,validation_dataset_list[0]['target']))
print('ks:\t',ks[-1],'ks_validation1:\t',ks_validation1[-1])
counter += 1
end = datetime.now()
print(end)
print("This took ", end - start)
coef_cv_df = pd.DataFrame(coefs_,columns=X_train.columns)
coef_cv_df['ks'] = ks
coef_cv_df['ks_validation1'] = ks_validation1
coef_cv_df['c'] = cs
if df_coef_path:
file_name = df_coef_path if isinstance(df_coef_path, str) else None
coef_cv_df.to_csv(file_name)
coefs_ = np.array(coefs_)
fig1 = plt.figure('fig1')
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title(pic_coefpath_title)
plt.axis('tight')
if pic_coefpath:
file_name = pic_coefpath if isinstance(pic_coefpath, str) else None
plt.savefig(file_name)
plt.close()
else:
pass
# plt.show()
# plt.close()
fig2 = plt.figure('fig2')
plt.plot(np.log10(cs), ks)
plt.xlabel('log(C)')
plt.ylabel('ks score')
plt.title(pic_performance_title)
plt.axis('tight')
if pic_performance:
file_name = pic_performance if isinstance(pic_performance, str) else None
plt.savefig(file_name)
plt.close()
else:
pass
# plt.show()
# plt.close()
flag = coefs_<0
    if len(np.array(ks)[flag.sum(axis=1) == 0]) > 0:
idx = np.array(ks)[flag.sum(axis=1) == 0].argmax()
else:
idx = np.array(ks).argmax()
return (cs[idx],ks[idx])
def grid_search_lr_c_main(params):
print('run into grid_search_lr_c_main:')
dataset_path = params['dataset_path']
validation_path = params['validation_path']
config_path = params['config_path']
df_coef_path = params['df_coef_path']
pic_coefpath = params['pic_coefpath']
pic_performance = params['pic_performance']
pic_coefpath_title = params['pic_coefpath_title']
pic_performance_title = params['pic_performance_title']
dataset_train = pd.read_csv(dataset_path)
cfg = pd.read_csv(config_path)
candidate_var_list = cfg[cfg['is_modelfeature'] == 1]['var_name']
b = [var for var in dataset_train.columns if sum(dataset_train[var].isnull()) == 0]
candidate_var_list = list(set(candidate_var_list).intersection(set(b)))
var_list_specfied = params['var_list_specfied']
    if len(var_list_specfied) > 0:
        candidate_var_list = list(set(candidate_var_list).intersection(set(var_list_specfied)))
    print('candidate_var_list length:\n', len(candidate_var_list))
    print('candidate_var_list:\n', candidate_var_list)
print('change dtypes:float64 to float32')
for var in candidate_var_list:
dataset_train[var] = dataset_train[var].astype(np.float32)
X_train = dataset_train[dataset_train.target >=0][candidate_var_list]
y_train = dataset_train[dataset_train.target >=0]['target']
validation_cols_keep = [var for var in candidate_var_list]
validation_cols_keep.append('target')
validation_dataset_list = []
validation_dataset = pd.read_csv(validation_path)
# fillna
for var in candidate_var_list:
validation_dataset.loc[validation_dataset[var].isnull(), (var)] = 0
validation_dataset_list.append(validation_dataset[validation_cols_keep])
cs = params['cs']
print('cs',cs)
c,ks = grid_search_lr_c_validation(X_train,y_train,validation_dataset_list,cs,df_coef_path,pic_coefpath_title,pic_coefpath
,pic_performance_title,pic_performance)
print('pic_coefpath:\n',pic_coefpath)
print('pic_performance:\n',pic_performance)
print('ks performance on the c:')
print(c,ks)
return (c,ks)
def fit_single_lr(dataset_path,config_path,var_list_specfied,out_model_path,c=0.01):
dataset_train = pd.read_csv(dataset_path)
cfg = pd.read_csv(config_path)
candidate_var_list = cfg[cfg['is_modelfeature'] == 1]['var_name']
b = [var for var in dataset_train.columns if sum(dataset_train[var].isnull()) == 0]
candidate_var_list = list(set(candidate_var_list).intersection(set(b)))
    if len(var_list_specfied) > 0:
        candidate_var_list = list(set(candidate_var_list).intersection(set(var_list_specfied)))
    print('candidate_var_list length:\n', len(candidate_var_list))
    print('candidate_var_list:\n', candidate_var_list)
print('change dtypes:float64 to float32')
for var in candidate_var_list:
dataset_train[var] = dataset_train[var].astype(np.float32)
X_train = dataset_train[dataset_train.target >=0][candidate_var_list]
y_train = dataset_train[dataset_train.target >=0]['target']
print('c:',c)
clf_lr_a = LogisticRegression(C=c, penalty='l1', tol=0.01,class_weight='balanced')
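    # NOTE: newer scikit-learn releases require an explicit solver for L1
    # regularization (e.g. solver='liblinear' or 'saga'); the modern default
    # solver 'lbfgs' rejects penalty='l1'.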
clf_lr_a.fit(X_train, y_train)
coefs = clf_lr_a.coef_.ravel().copy()
proba = clf_lr_a.predict_proba(X_train)[:,1]
ks = compute_ks(proba,y_train)
model = {}
model['clf'] = clf_lr_a
model['features_list'] = candidate_var_list
model['coefs'] = coefs
model['ks'] = ks
output = open(out_model_path, 'wb')
pickle.dump(model,output)
output.close()
return model
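# compute_ks used above is defined elsewhere in this package. For reference, it
# returns the KS statistic: the maximum gap between the cumulative score
# distributions of the two classes. A minimal sketch (hypothetical helper,
# assuming a 1-D score array and 0/1 targets) could look like:
def _ks_sketch(proba, target):
    order = np.argsort(proba)
    target = np.asarray(target)[order]
    n_bad = max(target.sum(), 1)
    n_good = max(len(target) - target.sum(), 1)
    cum_bad = np.cumsum(target) / n_bad        # fraction of bads below each score
    cum_good = np.cumsum(1 - target) / n_good  # fraction of goods below each score
    return float(np.abs(cum_good - cum_bad).max())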
|
{"hexsha": "90bb7085359c8182866971e431350e5705f6665b", "size": 9955, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/woe/GridSearch.py", "max_stars_repo_name": "Jie-Yuan/woe", "max_stars_repo_head_hexsha": "335e9ec2a521d3bbccb0ad5d915128119e4d0ca6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 263, "max_stars_repo_stars_event_min_datetime": "2017-09-12T06:24:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T01:08:52.000Z", "max_issues_repo_path": "build/lib/woe/GridSearch.py", "max_issues_repo_name": "Jie-Yuan/woe", "max_issues_repo_head_hexsha": "335e9ec2a521d3bbccb0ad5d915128119e4d0ca6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-01-31T03:26:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-12T04:39:24.000Z", "max_forks_repo_path": "build/lib/woe/GridSearch.py", "max_forks_repo_name": "Jie-Yuan/woe", "max_forks_repo_head_hexsha": "335e9ec2a521d3bbccb0ad5d915128119e4d0ca6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 109, "max_forks_repo_forks_event_min_datetime": "2017-09-12T12:23:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T05:31:32.000Z", "avg_line_length": 35.1766784452, "max_line_length": 126, "alphanum_fraction": 0.6857860372, "include": true, "reason": "import numpy", "num_tokens": 2490}
|
"""
POI_grid
--------
POI: probability of instantiation
"""
import numpy as np
import os
from .constants import ROW, COL, create_deepcopy
import copy
from multigrids import TemporalMultiGrid
class POIGrid (TemporalMultiGrid):
""" Class doc """
def __init__ (self, *args, **kwargs):
"""This class represents each cohorts POI for the model grid at
each time step
.. note:: Note on grid coordinates
Origin (Y,X) is top left. rows = Y, cols = X
Object will store dimensional(resolution, dimensions)
metadata as a tuple (Y val, X val).
Parameters
----------
Config: Dict
            should have keys 'start_year', 'grid_shape', 'model length',
            and '_FAST_get_cohorts' (see the constructor below)
Attributes
----------
shape : tuple of ints
Shape of the grid (y,x) (rows,columns)
grid : array
This 3d array is the grid data at each time step.
The first dimension is the time step with 0 being the initial data.
The second dimension is the flat grid for given cohort, mapped using
key_to_index. The third dimension is the grid element. Each cohort
can be reshaped using shape to get the proper grid
init_grid: np.ndarray
starting POI grid
key_to_index : dict
Maps canon cohort names to the index for that cohort in the
data object
"""
config = args [0]
if type(config) is str:
super(POIGrid , self).__init__(*args, **kwargs)
else:
            grid_names = config['_FAST_get_cohorts']  # i.e. config['cohorts']
args = [
config['grid_shape'][ROW], config['grid_shape'][COL],
len(grid_names), config['model length']
]
kwargs = create_deepcopy(config)
kwargs['data_type'] = 'float32'
kwargs['mode'] = 'r+'
kwargs['grid_names'] = grid_names
super(POIGrid , self).__init__(*args, **kwargs)
self.config['start_timestep'] = config['start_year']
# self.start_year = int(config['initialization year'])
# self.shape = config['shape']
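        # A minimal construction sketch (assumption: the keys mirror the ones
        # read in __init__ above; the values are made up for illustration):
        #
        #     config = {
        #         'grid_shape': (50, 50),        # (rows, cols)
        #         'model length': 100,           # number of time steps
        #         '_FAST_get_cohorts': ['cohort_a', 'cohort_b'],
        #         'start_year': 1900,
        #     }
        #     poi = POIGrid(config)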
|
{"hexsha": "38c77c920c890cca2c4c7892675a5fe84d8d6244", "size": 2306, "ext": "py", "lang": "Python", "max_stars_repo_path": "atm/grids/poi_grid.py", "max_stars_repo_name": "gina-alaska/arctic_thermokarst_model", "max_stars_repo_head_hexsha": "7a3dbedb72b133670bb6e476fc3f5788bbcdbca4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "atm/grids/poi_grid.py", "max_issues_repo_name": "gina-alaska/arctic_thermokarst_model", "max_issues_repo_head_hexsha": "7a3dbedb72b133670bb6e476fc3f5788bbcdbca4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "atm/grids/poi_grid.py", "max_forks_repo_name": "gina-alaska/arctic_thermokarst_model", "max_forks_repo_head_hexsha": "7a3dbedb72b133670bb6e476fc3f5788bbcdbca4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9480519481, "max_line_length": 82, "alphanum_fraction": 0.5490026019, "include": true, "reason": "import numpy", "num_tokens": 502}
|
[STATEMENT]
lemma term_variants_pred_param:
assumes "term_variants_pred P t s"
and fg: "f = g \<or> g \<in> set (P f)"
shows "term_variants_pred P (Fun f (S@t#T)) (Fun g (S@s#T))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
[PROOF STEP]
have 1: "length (S@t#T) = length (S@s#T)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (S @ t # T) = length (S @ s # T)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length (S @ t # T) = length (S @ s # T)
goal (1 subgoal):
1. term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
[PROOF STEP]
have "term_variants_pred P (T ! i) (T ! i)" "term_variants_pred P (S ! i) (S ! i)" for i
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. term_variants_pred P (T ! i) (T ! i) &&& term_variants_pred P (S ! i) (S ! i)
[PROOF STEP]
by (metis term_variants_pred_refl)+
[PROOF STATE]
proof (state)
this:
term_variants_pred P (T ! ?i) (T ! ?i)
term_variants_pred P (S ! ?i) (S ! ?i)
goal (1 subgoal):
1. term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
[PROOF STEP]
hence 2: "term_variants_pred P ((S@t#T) ! i) ((S@s#T) ! i)" for i
[PROOF STATE]
proof (prove)
using this:
term_variants_pred P (T ! ?i) (T ! ?i)
term_variants_pred P (S ! ?i) (S ! ?i)
goal (1 subgoal):
1. term_variants_pred P ((S @ t # T) ! i) ((S @ s # T) ! i)
[PROOF STEP]
by (simp add: assms nth_Cons' nth_append)
[PROOF STATE]
proof (state)
this:
term_variants_pred P ((S @ t # T) ! ?i) ((S @ s # T) ! ?i)
goal (1 subgoal):
1. term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
[PROOF STEP]
by (metis term_variants_Fun[OF 1 2] term_variants_P[OF 1 2] fg)
[PROOF STATE]
proof (state)
this:
term_variants_pred P (Fun f (S @ t # T)) (Fun g (S @ s # T))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 994, "file": "Automated_Stateful_Protocol_Verification_Term_Variants", "length": 10}
|
"""
Module to train DBN on MNIST.
"""
from os import path
from pylearn2.config import yaml_parse
from pylearn2.utils import serial
from theano import config
#config.exception_verbosity='high'
#config.optimizer='fast_compile'
#config.mode='DEBUG_MODE'
def train_yaml(yaml_file):
train = yaml_parse.load(yaml_file)
train.main_loop()
def train(yaml_file, save_path, nvis, hidden, pretrain=None):
    with open(yaml_file, "r") as f:
        yaml = f.read()
input_dim = 784 # MNIST input size
hyperparams = {"nvis": nvis,
"batch_size": 50,
"detector_layer_dim": hidden,
"monitoring_batches": 10,
"train_stop": 50000,
"max_epochs": 10,
"save_path": save_path,
"pretrain": pretrain
}
yaml = yaml % hyperparams
train_yaml(yaml)
def train_rbm():
hiddens = [200, 200, 200, 10]
names = ["rbm", "dbn1", "dbn2", "mlp"]
retrains = [False, False, True, True]
p = path.abspath(path.dirname(__file__))
for i, (hidden, name, retrain) in enumerate(zip(hiddens, names, retrains)):
yaml_file = path.join(p, name + ".yaml")
pkl_file = path.join(p, name + ".pkl")
if not path.isfile(pkl_file):
retrain = True
if not retrain:
continue
if i == 0 or name == "mlp":
nvis = 784
else:
nvis = hiddens[i-1]
if i > 0:
pretrain = path.join(p, names[i-1] + ".pkl")
else:
pretrain = None
save_path = p
train(yaml_file, save_path, nvis, hidden, pretrain)
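        # Greedy layer-wise pretraining: each stage loads the previous stage's
        # .pkl via `pretrain` (rbm -> dbn1 -> dbn2), and the final mlp is
        # fine-tuned on the raw 784-dim input, initialized from dbn2.pkl.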
if __name__ == "__main__":
train_rbm()
|
{"hexsha": "8781f41dac482d1fe15c07dd1168f16135f135a8", "size": 1702, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylearn2/scripts/tutorials/dbn_demo/train.py", "max_stars_repo_name": "rdevon/pylearn2", "max_stars_repo_head_hexsha": "f7b9a6ea0e2498176b47202f5bb83aec4976e1dd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-10-29T06:18:35.000Z", "max_stars_repo_stars_event_max_datetime": "2017-10-29T06:18:35.000Z", "max_issues_repo_path": "pylearn2/scripts/tutorials/dbn_demo/train.py", "max_issues_repo_name": "rdevon/pylearn2", "max_issues_repo_head_hexsha": "f7b9a6ea0e2498176b47202f5bb83aec4976e1dd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pylearn2/scripts/tutorials/dbn_demo/train.py", "max_forks_repo_name": "rdevon/pylearn2", "max_forks_repo_head_hexsha": "f7b9a6ea0e2498176b47202f5bb83aec4976e1dd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8474576271, "max_line_length": 79, "alphanum_fraction": 0.5616921269, "include": true, "reason": "from theano", "num_tokens": 458}
|
[STATEMENT]
lemma to_list_tree_single: "v \<in> verts to_list_tree \<Longrightarrow> \<exists>x. v = [x] \<and> x \<in> verts T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<in> verts to_list_tree \<Longrightarrow> \<exists>x. v = [x] \<and> x \<in> verts T
[PROOF STEP]
unfolding to_list_tree_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<in> verts \<lparr>verts = (\<lambda>x. [x]) ` verts T, arcs = arcs T, tail = \<lambda>x. [tail T x], head = \<lambda>x. [head T x]\<rparr> \<Longrightarrow> \<exists>x. v = [x] \<and> x \<in> verts T
[PROOF STEP]
by auto
|
{"llama_tokens": 248, "file": "Query_Optimization_Directed_Tree_Additions", "length": 2}
|
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from sklearn.metrics import mean_squared_error
from datetime import datetime, timedelta
import os
import time
from config import *
from dataset.wrapped_dataloader import *
from model.seq2seq.encoder import Encoder
from model.seq2seq.decoder import Decoder
from model.seq2seq.seq2seq import Seq2Seq
from loss.rmse_loss import RMSELoss
def train(model, dataloader, optimizer, criterion):
model.train()
total_loss = 0
for i, (x, y) in enumerate(dataloader):
x = x.to(torch.float32).to(device)
y = y.to(torch.float32).to(device)
optimizer.zero_grad()
y_pred = model(x, y).to(device)
loss = criterion(y_pred, y)
loss.backward()
total_loss += loss.item()
optimizer.step()
return total_loss / len(dataloader)
def evaluate(model, dataloader, criterion):
model.eval()
total_loss = 0
with torch.no_grad():
for i, (x, y) in enumerate(dataloader):
x = x.to(torch.float32).to(device)
y = y.to(torch.float32).to(device)
y_pred = model(x, y, teacher_forcing_ratio=0).to(device)
loss = criterion(y_pred, y)
total_loss += loss.item()
if i == len(dataloader) - 1:
real_y = preprocessor.inverse(y[:, 0, :].detach().cpu().numpy(), 'y')
real_y_pred = preprocessor.inverse(y_pred[:, 0, :].detach().cpu().numpy(), 'y')
return real_y, real_y_pred, total_loss / len(dataloader)
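# Note: evaluate() calls the model with teacher_forcing_ratio=0, so the decoder
# feeds back its own predictions instead of the ground-truth sequence.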
# You can write code above the if-main block.
if __name__ == '__main__':
# You should not modify this part, but additional arguments are allowed.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--training',
default='training_data.csv',
help='input training data file name')
parser.add_argument('--output',
default='submission.csv',
help='output file name')
args = parser.parse_args()
Constant.RESERVE_MARGIN = args.training
Constant.OUTPUT_FILE = args.output
# # The following part is an example.
# # You can modify it at will.
if os.path.exists(Constant.RESERVE_MARGIN_TEST):
os.remove(Constant.RESERVE_MARGIN_TEST)
if os.path.exists(Constant.OUTPUT_FILE):
os.remove(Constant.OUTPUT_FILE)
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
device = torch.device(device)
encoder = Encoder()
decoder = Decoder()
model = Seq2Seq(encoder, decoder).to(device)
print(model)
optimizer = optim.Adam(model.parameters(), Hyperparameter.LEARNING_RATE, betas=(0.5, 0.999))
criterion = RMSELoss()
min_loss = 1
trigger_count = 0
for e in range(Hyperparameter.EPOCH):
train_loss = train(model, train_loader, optimizer, criterion)
y, y_pred, val_loss = evaluate(model, val_loader, criterion)
if val_loss > min_loss:
trigger_count += 1
else:
min_loss = val_loss
trigger_count = 0
print('\nEpoch:', e)
print('train loss:', train_loss)
print('eva loss:', val_loss)
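        # 5474 is assumed to be the scale that maps the normalized loss back to
        # MW (a guess: the factor comes from the preprocessing, not shown here)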
print('actual loss:', val_loss * 5474)
print('trigger:', trigger_count)
# print(y)
# print(y_pred)
# if trigger_count >= Hyperparameter.PATIENCE:
# break
if val_loss * 5474 < 740:
break
torch.save(model, 'model.pt')
from selenium import webdriver
from selenium.webdriver.common.by import By
import pandas as pd
url = 'https://data.gov.tw/dataset/25850'
chrome_options = webdriver.ChromeOptions()
prefs = {'download.default_directory': os.getcwd() + '\\data'}
chrome_options.add_experimental_option('prefs', prefs)
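    # `chrome_options=` was deprecated in newer Selenium releases in favor of
    # `options=`, used below.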
    browser = webdriver.Chrome(options=chrome_options)
browser.get(url)
ele = browser.find_element(By.CLASS_NAME, 'download-item')
ele.find_element(By.CLASS_NAME, 'el-button--primary').click()
browser.close()
time.sleep(5)
df1 = pd.read_csv(Constant.RESERVE_MARGIN)
df2 = pd.read_csv(Constant.RESERVE_MARGIN_TEST)
df = pd.concat([df1, df2], axis=0).drop_duplicates().reset_index(drop=True)
x = preprocessor.preprocessing(np.expand_dims(df['備轉容量(萬瓩)'].to_numpy(), axis=1) * 10, 'x')
test_index = df.index[df['日期'] == Constant.START_DATE].tolist()[0]
data = []
result = []
hp = Hyperparameter
for i in range(test_index, len(x)):
if i - test_index > 13:
break
data.append(x[i - hp.INPUT_SEQ_LEN + 1:i + 1])
data = torch.from_numpy(np.array(data))
    output = []
pred_date = datetime.strptime(Constant.START_DATE, '%Y/%m/%d') + timedelta(days=1)
for each in data:
each = each.unsqueeze(2).to(torch.float32).to(device)
        r = model(each, torch.zeros((Hyperparameter.OUTPUT_SEQ_LEN, 1, 1)).to(device), teacher_forcing_ratio=-1)
for each_r in preprocessor.inverse(r[:, 0, :].cpu().detach().numpy(), 'y'):
output.append([
pred_date.strftime('%Y%m%d'),
int(each_r[0])
])
pred_date = pred_date + timedelta(days=1)
output = pd.DataFrame(output, columns=['date', 'operating_reserve(MW)'])
output.to_csv(Constant.OUTPUT_FILE, index=False)
|
{"hexsha": "1502725f78e357fbdf2c1b4dab0d2b459576f80b", "size": 5457, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "Aquarium1222/Electricity-Forecasting", "max_stars_repo_head_hexsha": "9f945d3fd8006e5d77da08ff7861577965109ec8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "Aquarium1222/Electricity-Forecasting", "max_issues_repo_head_hexsha": "9f945d3fd8006e5d77da08ff7861577965109ec8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "Aquarium1222/Electricity-Forecasting", "max_forks_repo_head_hexsha": "9f945d3fd8006e5d77da08ff7861577965109ec8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2743902439, "max_line_length": 101, "alphanum_fraction": 0.6303829943, "include": true, "reason": "import numpy", "num_tokens": 1309}
|
[STATEMENT]
lemma fixp_strong_induct_uc:
fixes F :: "'c \<Rightarrow> 'c"
and U :: "'c \<Rightarrow> 'b \<Rightarrow> 'a"
and C :: "('b \<Rightarrow> 'a) \<Rightarrow> 'c"
and P :: "('b \<Rightarrow> 'a) \<Rightarrow> bool"
assumes mono: "\<And>x. mono_body (\<lambda>f. U (F (C f)) x)"
and eq: "f \<equiv> C (fixp_fun (\<lambda>f. U (F (C f))))"
and inverse: "\<And>f. U (C f) = f"
and adm: "ccpo.admissible lub_fun le_fun P"
and bot: "P (\<lambda>_. lub {})"
and step: "\<And>f'. \<lbrakk> P (U f'); le_fun (U f') (U f) \<rbrakk> \<Longrightarrow> P (U (F f'))"
shows "P (U f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P (U f)
[PROOF STEP]
unfolding eq inverse
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P (fixp_fun (\<lambda>f. U (F (C f))))
[PROOF STEP]
apply (rule ccpo.fixp_strong_induct[OF ccpo adm])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. monotone le_fun le_fun (\<lambda>f. U (F (C f)))
2. P (lub_fun {})
3. \<And>x. \<lbrakk>le_fun x (fixp_fun (\<lambda>f. U (F (C f)))); P x\<rbrakk> \<Longrightarrow> P (U (F (C x)))
[PROOF STEP]
apply (insert mono, auto simp: monotone_def fun_ord_def bot fun_lub_def)[2]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>le_fun x (fixp_fun (\<lambda>f. U (F (C f)))); P x\<rbrakk> \<Longrightarrow> P (U (F (C x)))
[PROOF STEP]
apply (rule_tac f'5="C x" in step)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. \<lbrakk>le_fun x (fixp_fun (\<lambda>f. U (F (C f)))); P x\<rbrakk> \<Longrightarrow> P (U (C x))
2. \<And>x. \<lbrakk>le_fun x (fixp_fun (\<lambda>f. U (F (C f)))); P x\<rbrakk> \<Longrightarrow> le_fun (U (C x)) (U f)
[PROOF STEP]
apply (simp_all add: inverse eq)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 796, "file": null, "length": 6}
|
#!/usr/bin/python
from __future__ import division, print_function
import h5py
from struct import pack
import sys
import os
import numpy as np
from glob import glob
from argparse import ArgumentParser
def convert_ptype(inbase, outbase, ptype='PartType1', overwrite=False, verbose=False):
files = glob(inbase+'*')
files.sort()
ptype_index = int(ptype[-1])
if len(files) == 0:
raise OSError("No files found starting with {}".format(inbase))
solo = len(files) == 1
for fname in files:
if solo:
outname = outbase
else:
outname = outbase + '/' + fname.split('/')[-1]
if outname.endswith('.hdf5'):
outname = outname[:-5]
if outname.endswith('.h5'):
outname = outname[:-3]
if os.path.isfile(outname) and not overwrite:
print("{0} already exists; skipping {1}".format(outname, fname))
continue
print("Dumping {} particles from {} to {}".format(ptype, fname, outname))
with h5py.File(fname, 'r') as f, open(outname, 'wb') as out:
fhead = f['Header'].attrs
nfile = fhead['NumPart_ThisFile'][:]
ntot = nfile[ptype_index]
masstable = fhead['MassTable'][:]
part = f[ptype]
# start with the header:
packedheader = b""
ar = np.zeros(6, dtype='I')
ar[ptype_index] = fhead['NumPart_ThisFile'][ptype_index]
packedheader = packedheader + ar.tostring()
            mtable = np.zeros(6, dtype='d')
            mtable[ptype_index] = masstable[ptype_index]
            if mtable[ptype_index] == 0:
                # check if we can just fix the mass table
                if (part['Masses'][:] == part['Masses'][0]).all():
                    mtable[ptype_index] = part['Masses'][0]
                    nmass = 0
                # otherwise write the masses
                else:
                    nmass = part['Masses'].shape[0]
            else:
                nmass = 0
packedheader = packedheader + mtable.tostring()
ar = np.array(fhead['Time'], dtype='d')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Redshift'], dtype='d')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Flag_Sfr'], dtype='i')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Flag_Feedback'], dtype='i')
packedheader = packedheader + ar.tostring()
ar = np.zeros(6, dtype='i')
ar[ptype_index] = fhead['NumPart_Total'][ptype_index]
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Flag_Cooling'], dtype='i')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['NumFilesPerSnapshot'], dtype='i')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['BoxSize'], dtype='d')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Omega0'], dtype='d')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['OmegaLambda'], dtype='d')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['HubbleParam'], dtype='d')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Flag_StellarAge'], dtype='i')
packedheader = packedheader + ar.tostring()
ar = np.array(fhead['Flag_Metals'], dtype='i')
packedheader = packedheader + ar.tostring()
ar = np.zeros(6, dtype='i')
try:
ar[ptype_index] = fhead['NumPart_Total_HW'][ptype_index]
except KeyError:
ar[ptype_index] = fhead['NumPart_Total_HighWord'][ptype_index]
packedheader = packedheader + ar.tostring()
if 'Flag_Entropy_ICs' in list(fhead.keys()):
                try: # This is an array in at least one file that I have, so attempt to do it that way, and if it fails, do it as a scalar
ar = np.array(fhead['Flag_Entropy_ICs'][:], dtype='i')
packedheader = packedheader + ar.tostring()
except TypeError:
ar = np.array(fhead['Flag_Entropy_ICs'], dtype='i')
packedheader = packedheader + ar.tostring()
else:
# print("Using Flag_IC_Info instead of Flag_Entropy_ICs.")
ar = np.array(fhead['Flag_IC_Info'], dtype='i')
packedheader = packedheader + ar.tostring()
header_bytes_left = 256 - len(packedheader)
for i in range(header_bytes_left):
packedheader = packedheader + pack('<x')
# now to write it out into a binary file
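            # Gadget snapshots use Fortran-style unformatted records: every block
            # is framed by a leading and a trailing 4-byte payload size, hence the
            # pack('<I', 256) on either side of the 256-byte header.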
out.write(pack('<I', 256))
out.write(packedheader)
out.write(pack('<I', 256))
# Now to do coordinates, order of gas halo disk bulge star boundary
vec_size = np.array([12*ntot], dtype='I')
out.write(vec_size.tostring())
ar = np.array(part['Coordinates'][:], dtype='f')
out.write(ar.tostring())
out.write(vec_size.tostring())
if verbose:
print("Finished with coordinates")
# Now for velocities
out.write(vec_size.tostring())
ar = np.array(part['Velocities'][:], dtype='f')
out.write(ar.tostring())
out.write(vec_size.tostring())
if verbose:
print("Finished with velocities")
# Now for particle IDs:
float_size = np.array([4*ntot], dtype='I')
out.write(float_size.tostring())
ar = np.array(part['ParticleIDs'][:], dtype='I')
out.write(ar.tostring())
out.write(float_size.tostring())
if verbose:
print("Finished with particle IDs")
# Now I have to check if there are variable particle masses
if nmass > 0:
nmass_size = np.array([4*nmass], dtype='I')
out.write(nmass_size.tostring())
ar = np.array(part['Masses'][:], dtype='f')
out.write(ar.tostring())
out.write(nmass_size.tostring())
if verbose:
print(
"Done writing masses for {0} particles".format(nmass))
print("Finished with "+fname)
return
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
'inbase', help="Path (when appended with *) to the files to convert. e.g. output/snapdir_600/snapshot")
    parser.add_argument('outbase', help="""Output file path. Will be the exact name if only
    one file; otherwise, should be a directory and file name will be made programmatically. e.g. gadget-output/snapdir_600/""")
parser.add_argument(
'--ptype', help="Particle Type to convert.", default='PartType1')
parser.add_argument('--overwrite', help="Overwrite existing files.",
default=False, action='store_true')
parser.add_argument('--verbose', help="print when we finish each particle.",
default=False, action='store_true')
args = parser.parse_args()
convert_ptype(**args.__dict__)
|
{"hexsha": "0a3881472cbc16385c6b7e6b0853d0f31af12128", "size": 7358, "ext": "py", "lang": "Python", "max_stars_repo_path": "sgklibs/convert_ptype_to_gbin.py", "max_stars_repo_name": "sheagk/sgklibs", "max_stars_repo_head_hexsha": "47e99f7f2e24dfabff039f005847e702a12138b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sgklibs/convert_ptype_to_gbin.py", "max_issues_repo_name": "sheagk/sgklibs", "max_issues_repo_head_hexsha": "47e99f7f2e24dfabff039f005847e702a12138b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sgklibs/convert_ptype_to_gbin.py", "max_forks_repo_name": "sheagk/sgklibs", "max_forks_repo_head_hexsha": "47e99f7f2e24dfabff039f005847e702a12138b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5408163265, "max_line_length": 138, "alphanum_fraction": 0.5519162816, "include": true, "reason": "import numpy", "num_tokens": 1676}
|
import numpy as np
import pickle
class Expert_data(object):
def __init__(self, fpath):
self.load_data(fpath)
self.pointer = 0
def load_data(self, fpath):
with open(fpath, 'rb') as f:
data = pickle.load(f)
self.obs = data['observations']
self.acts = data['actions']
self.acts = self.acts.reshape(len(self.acts), -1)
self.rewards = data['ep_ret']
self.size = len(self.obs)
print(f'total transitions: {self.size}, rewards: {np.mean(self.rewards)}+-{np.std(self.rewards)}')
indexes = [i for i in range(self.size)]
np.random.shuffle(indexes)
self.obs = self.obs[indexes]
self.acts = self.acts[indexes]
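        # hold out 30% of the shuffled transitions for evaluation; the rest are
        # served to behavior cloning through bc_sample_batch below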
self.eval_size = int(0.3 * self.size)
self.obs_eval = self.obs[: self.eval_size]
self.acts_eval = self.acts[: self.eval_size]
self.bc_obs = self.obs[self.eval_size:]
self.bc_acts = self.acts[self.eval_size:]
self.bc_size = self.size - self.eval_size
def bc_sample_batch(self, batch_size=32):
if self.pointer + batch_size > self.bc_size:
self.pointer = 0
batch_obs = self.bc_obs[self.pointer: self.pointer + batch_size]
batch_acts = self.bc_acts[self.pointer: self.pointer + batch_size]
self.pointer += batch_size
return batch_obs, batch_acts
if __name__ == '__main__':
expert_data = Expert_data('expert_data/Hopper-v2.pkl')
obs, acts = expert_data.bc_sample_batch()
print(obs.shape, acts.shape)
|
{"hexsha": "c1abfb89bcc71c64910f49600767109b349007e2", "size": 1418, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw1/data_set.py", "max_stars_repo_name": "AnxietyYoungPoet/CS294-122", "max_stars_repo_head_hexsha": "8e813c62fc1ef0bac18f97f3045d60bec4d02b72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw1/data_set.py", "max_issues_repo_name": "AnxietyYoungPoet/CS294-122", "max_issues_repo_head_hexsha": "8e813c62fc1ef0bac18f97f3045d60bec4d02b72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw1/data_set.py", "max_forks_repo_name": "AnxietyYoungPoet/CS294-122", "max_forks_repo_head_hexsha": "8e813c62fc1ef0bac18f97f3045d60bec4d02b72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.976744186, "max_line_length": 102, "alphanum_fraction": 0.6805359661, "include": true, "reason": "import numpy", "num_tokens": 384}
|
"""
~~~ REDIMENSIONALIZE THE SIMULATION AND DATA FILES ~~~
Re dimensionalizing and plotting the CQ Data (sims AND experimental)
will be done in a few steps
1. Read in the 3 experimental data files
2. Read in the 3*4 = 12 simulation data files
3. Dimensionalize P, M, wp, wm, and sb for both the data and the simulations
4. Use the old plotting program to plot everything together in 2 figures.
5. Save the results to data files.
"""
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
### STEP 1: READ IN THE EXPERIMENTAL DATA FILES
# Define the dictionaries
P = {}
M = {}
PM = {}
sb = {}
wp = {}
MasterDict = [M,P,PM,sb,wp]
# Start reading in the data
whichset = 'JulyData/'
dir = whichset+'Data CQs/NonDim CQ Values'
files = listdirNH(dir)
key1 = 'Data'
l = 0
for k in files:
Dict = MasterDict[l]
data = np.transpose(np.loadtxt(k).view(float))
Dict[key1]=data
l=l+1
### STEP 2: READ IN THE SIMULATION DATA
dir = whichset+'Simulations/Conserved Quantities'
key2 = ['dNLS CQ', 'Dysthe CQ', 'NLS CQ', 'vDysthe CQ']
dk = 0
for subdir in key2:
files = listdirNH(dir+'/'+subdir)
l = 0
for k in files:
Dict = MasterDict[l]
data = np.transpose(np.loadtxt(k).view(float))
Dict[key2[dk]]=data
l=l+1
dk = dk+1
### STEP 3: DIMENSIONALIZE THE DATA
# Define dimensionalization constants
g = 9.81
epsilon = 7.84869668208853e-05
w0 = 0.05864306286700947
k0 = w0**2/g
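# The redimensionalization applied below, in terms of the constants above:
#   x       = xi / (epsilon**2 * k0)      # nondimensional xi -> meters
#   P_dim   = (epsilon**3 * w0 / k0**2) * P
#   M_dim   = (epsilon / k0)**2 * M
#   w_m     = (P_dim / M_dim) * 1000      # mHz
#   a_dim   = (epsilon / k0) * a          # sideband amplitudes, meters
#   w_p     = peak + w0 * 1000            # mHz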
# Dim P
dim_P = {}
for key in P:
ent = P[key]
xi = ent[0]
p = ent[1]
x = xi/(epsilon**2*k0)
dim_p = epsilon**3*w0/k0**2*p
dim_P[key] = np.append([x],[dim_p],axis = 0)
# Dim M
dim_M = {}
for key in M:
ent = M[key]
xi = ent[0]
m = ent[1]
x = xi/(epsilon**2*k0)
dim_m = (epsilon/k0)**2*m
dim_M[key] = np.append([x],[dim_m],axis = 0)
# Dim PM
dim_PM = {}
for key in PM:
ent = PM[key]
Pent = dim_P[key]
Ment = dim_M[key]
m = Ment[1]
p = Pent[1]
xi = ent[0]
x = xi/(epsilon**2*k0)
dim_pm = (p/m)*1000 # Gives mHz
dim_PM[key] = np.append([x],[dim_pm],axis = 0)
# Dim sb
dim_sb = {}
for key in sb:
ent = sb[key]
xi = ent[0]
x = xi/(epsilon**2*k0)
sbv = np.zeros((len(ent)-1,len(xi)))
for j in range(1,len(ent)):
sideband = ent[j]
dim_sideband = (epsilon/k0)*sideband
sbv[j-1]=dim_sideband
dim_sb[key] = np.vstack([x,sbv])
# Dim wp
dim_wp = {}
for key in wp:
ent = wp[key]
xi = ent[0]
peak = ent[1]
x = xi/(epsilon**2*k0)
dim_peak = peak+w0*1000 # Gives mHz
dim_wp[key] = np.append([x],[dim_peak],axis = 0)
### STEP 4: PLOT THE RESULTS
# Initialize for plotting
plotter1 = [dim_M,dim_P,dim_PM,dim_wp]
key2[:0] = [key1]
titles1 = ['CQ M', 'CQ P', r'$\omega_m$', r'$\omega_p$']
titles2 = np.loadtxt(os.getcwd()+'/'+whichset+'sidebandnums.txt').view(float)
y1 = ['M (m'+r'$^2$'+')','P (m'+r'$^2$'+'/s)',r'$\omega_m$'+' (mHz)',r'$\omega_p$'+' (mHz)']
#y2 = [r'$|a_{-3}|$'+' (m)',r'$|a_{-2}|$'+' (m)',r'$|a_{-1}|$'+' (m)',r'$|a_0|$'+' (m)',r'$|a_1|$'+' (m)',r'$|a_2|$'+' (m)',r'$|a_3|$'+' (m)']
disp = ['.k',':m','-.g','--r','-c']
sizes = [13,1,1,1,1]
# Begin plotting
fig1, ax1 = plt.subplots(4,1,figsize = (11,6.5))
fig1.suptitle('Quantities of Interest',fontsize=16)
dispind = 0
for key in key2:
ax1 = ax1.flatten()
for i in range(len(plotter1)):
dict = plotter1[i]
VALUES = dict[key]
x = VALUES[0]
y = VALUES[1]
ax1[i].plot(x,y,disp[dispind],markersize = sizes[dispind])
ax1[i].set_title(titles1[i])
ax1[i].set_ylabel(y1[i])
ax1[i].set_xlabel('Location (m)')
ax1[i].ticklabel_format(style='sci',scilimits=(-1,1),axis='both')
dispind += 1
ax1[0].legend(key2,bbox_to_anchor=(1, 1))
fig1.tight_layout()
fig1.subplots_adjust(top=0.88)
plt.savefig(whichset+'Final Figures/CQResultFig.png',dpi=500)
fig2, ax2 = plt.subplots(len(titles2),sharex=True,figsize = (7,1.625*len(titles2)))
fig2.suptitle('Select Fourier Amplitudes',fontsize=16)
dispind = 0
for key in key2:
sbvals = dim_sb[key]
x = sbvals[0,:]
sideband7=np.delete(sbvals, 0, 0)
for po in range(len(titles2)):
ax2[po].plot(x,sideband7[po],disp[dispind],markersize = sizes[dispind])
ax2[po].set_ylabel('a'+ r'$_{'+str(int(titles2[po]))+'}$')
fig2.tight_layout()
fig2.subplots_adjust(top=0.97)
dispind += 1
plt.savefig(whichset+'Final Figures/FAResultFig.png',dpi=500)
### STEP 5: SAVE THE RESULTS
# Save P, M, wp, wm
md =[dim_P,dim_M,dim_PM,dim_wp]
val = ['dimP','dimM','dimPM','dimwp','dimsb']
#key2 = ['dNLS CQ', 'Dysthe CQ', 'NLS CQ', 'vDysthe CQ']
o = 0
for cqval in md:
# Save the Data Values
for ky in cqval:
if ky == 'Data':
np.savetxt(whichset+'Data CQs/Dim CQ Values/'+val[o]+'.txt',np.transpose(cqval[ky]).view(float))
else:
np.savetxt(whichset+'Simulations/Dimensional Results/'+str(ky)[:-3]+' dimCQ/'+val[o]+'.txt',np.transpose(cqval[ky]).view(float))
o=o+1
# Save sidebands
for ky in dim_sb:
if ky == 'Data':
np.savetxt(whichset+'Data CQs/Dim CQ Values/'+val[-1]+'.txt',np.transpose(dim_sb[ky]).view(float))
else:
np.savetxt(whichset+'Simulations/Dimensional Results/'+str(ky)[:-3]+' dimCQ/'+val[-1]+'.txt',np.transpose(dim_sb[ky]).view(float))
|
{"hexsha": "0fbe33565366a50b20fef92ae8a562438aec352e", "size": 5520, "ext": "py", "lang": "Python", "max_stars_repo_path": "Old Python Code/ReDim.py", "max_stars_repo_name": "CRZaug/NonlinearWaves", "max_stars_repo_head_hexsha": "2adfc2cc5e0c18576c6b73420a913ef1ce23000d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Old Python Code/ReDim.py", "max_issues_repo_name": "CRZaug/NonlinearWaves", "max_issues_repo_head_hexsha": "2adfc2cc5e0c18576c6b73420a913ef1ce23000d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Old Python Code/ReDim.py", "max_forks_repo_name": "CRZaug/NonlinearWaves", "max_forks_repo_head_hexsha": "2adfc2cc5e0c18576c6b73420a913ef1ce23000d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6666666667, "max_line_length": 142, "alphanum_fraction": 0.6030797101, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1928}
|
import cv2
from google.colab.patches import cv2_imshow
from fmd.mark_dataset.util import draw_marks
import numpy as np
from face_detector.detector import Detector
import tensorflow as tf
class predictor():
def __init__(self, detector_face_model_path, cnn_model_path, threshold=0.7):
self.detector_face_model = Detector(detector_face_model_path)
self.cnn_model = tf.keras.models.load_model(cnn_model_path)
self.threshold = threshold
def predict(self, image_path):
image = cv2.imread(image_path)
_image = self.detector_face_model.preprocess(image)
        boxes, scores, _ = self.detector_face_model.predict(_image, self.threshold)
# Transform the boxes into squares.
boxes = self.detector_face_model.transform_to_square(
boxes, scale=1.22, offset=(0, 0.13))
# Clip the boxes if they cross the image boundaries.
boxes, _ = self.detector_face_model.clip_boxes(
boxes, (0, 0, image.shape[0], image.shape[1]))
boxes = boxes.astype(np.int32)
if boxes.size > 0:
for facebox in boxes:
# Crop the face image
top, left, bottom, right = facebox
top, left, bottom, right = int(top), int(left), int(bottom), int(right)
face_image = image[top:bottom, left:right]
# Preprocess it.
face_image = cv2.resize(face_image, (128, 128))
face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
cv2_imshow(cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB))
out = self.cnn_model.predict(np.expand_dims(face_image, axis=0))[0]
                output_points = [[out[i], out[i + 1]] for i in range(0, out.shape[0], 2)]
draw_marks(face_image,output_points)
cv2_imshow(cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB))
|
{"hexsha": "d4a285bcfb939208141126913a0f7ed0572efee8", "size": 1771, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict.py", "max_stars_repo_name": "keshav47/cnn-facial-landmark", "max_stars_repo_head_hexsha": "2c4011db2917428ebebf70d7fc9ad59c89002bfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict.py", "max_issues_repo_name": "keshav47/cnn-facial-landmark", "max_issues_repo_head_hexsha": "2c4011db2917428ebebf70d7fc9ad59c89002bfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-07T10:19:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-07T10:19:43.000Z", "max_forks_repo_path": "predict.py", "max_forks_repo_name": "keshav47/cnn-facial-landmark", "max_forks_repo_head_hexsha": "2c4011db2917428ebebf70d7fc9ad59c89002bfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8958333333, "max_line_length": 82, "alphanum_fraction": 0.6905702993, "include": true, "reason": "import numpy", "num_tokens": 450}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .losses import *
from .modules.layers import *
from .modules.context_module import *
from .modules.attention_module import *
from .modules.decoder_module import *
from .backbones.Res2Net_v1b import res2net50_v1b_26w_4s
class PraNet(nn.Module):
# res2net based encoder decoder
def __init__(self, opt):
super(PraNet, self).__init__()
self.resnet = res2net50_v1b_26w_4s(pretrained=opt.pretrained, output_stride=opt.output_stride)
self.context2 = RFB(512, opt.channel)
self.context3 = RFB(1024, opt.channel)
self.context4 = RFB(2048, opt.channel)
self.decoder = PPD(opt.channel)
self.attention2 = reverse_attention(512, 64, 2, 3)
self.attention3 = reverse_attention(1024, 64, 2, 3)
self.attention4 = reverse_attention(2048, 256, 3, 5)
self.loss_fn = bce_iou_loss
def forward(self, x, y=None):
base_size = x.shape[-2:]
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x1 = self.resnet.layer1(x)
x2 = self.resnet.layer2(x1)
x3 = self.resnet.layer3(x2)
x4 = self.resnet.layer4(x3)
x2_context = self.context2(x2)
x3_context = self.context3(x3)
x4_context = self.context4(x4)
_, a5 = self.decoder(x4_context, x3_context, x2_context)
out5 = F.interpolate(a5, size=base_size, mode='bilinear', align_corners=False)
_, a4 = self.attention4(x4, a5)
out4 = F.interpolate(a4, size=base_size, mode='bilinear', align_corners=False)
_, a3 = self.attention3(x3, a4)
out3 = F.interpolate(a3, size=base_size, mode='bilinear', align_corners=False)
_, a2 = self.attention2(x2, a3)
out2 = F.interpolate(a2, size=base_size, mode='bilinear', align_corners=False)
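        # deep supervision: given ground truth y, the BCE+IoU loss is applied to
        # all four side outputs and summed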
if y is not None:
loss5 = self.loss_fn(out5, y)
loss4 = self.loss_fn(out4, y)
loss3 = self.loss_fn(out3, y)
loss2 = self.loss_fn(out2, y)
loss = loss2 + loss3 + loss4 + loss5
else:
loss = 0
return {'pred': out2, 'loss': loss}
|
{"hexsha": "5d9181ef47d024e14ff501b336d0f6339187288f", "size": 2275, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/PraNet.py", "max_stars_repo_name": "Inovasyon-ve-Analiz/uacanet", "max_stars_repo_head_hexsha": "4f6567840360ece53868888acd72a16de8279dc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-01T14:14:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-01T14:14:53.000Z", "max_issues_repo_path": "lib/PraNet.py", "max_issues_repo_name": "Inovasyon-ve-Analiz/uacanet", "max_issues_repo_head_hexsha": "4f6567840360ece53868888acd72a16de8279dc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/PraNet.py", "max_forks_repo_name": "Inovasyon-ve-Analiz/uacanet", "max_forks_repo_head_hexsha": "4f6567840360ece53868888acd72a16de8279dc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5, "max_line_length": 102, "alphanum_fraction": 0.6272527473, "include": true, "reason": "import numpy", "num_tokens": 656}
|
#!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import os
import glob
import numpy as np
import torch
import ros_utils.rosbag_pcl_extractor
class RosbagDatasetPreprocessor():
def __init__(self, config, dataset_name, topic, preprocessing_fct):
self.config = config
self.rosbag_dir = self.config[dataset_name]["data_path"]
self.identifier = self.config[dataset_name]["data_identifier"]
# Look at file
        self.rosbag_file = sorted(glob.glob(
            os.path.join(self.rosbag_dir, format(self.identifier, '02d') + '*')))
if len(self.rosbag_file) > 1:
raise Exception(
"Identifier does not uniquely define a rosbag. There are multiple files containing "
+ format(self.identifier, '02d') + ".")
elif len(self.rosbag_file) == 0:
raise Exception(
"Rosbag corresponding to data identifier "
+ str(self.identifier) + " must include " + format(self.identifier, '02d') + ".")
self.rosbag_file = self.rosbag_file[0]
# Use rosbag tool
self.rosbag_extractor = ros_utils.rosbag_pcl_extractor.RosbagToPCLExtractor(
rosbag_file=self.rosbag_file, topic=topic, config=self.config, preprocessing_fct=preprocessing_fct)
def preprocess(self):
self.rosbag_extractor.preprocess_rosbag()
|
{"hexsha": "12c01a21d331579da99fcc7aef62de83cc7ef138", "size": 1574, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/rosbag_scans.py", "max_stars_repo_name": "leggedrobotics/DeLORA", "max_stars_repo_head_hexsha": "909948d63a9517e6dd54bedcf099f6b39ded2cb4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 154, "max_stars_repo_stars_event_min_datetime": "2020-11-12T19:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:48:47.000Z", "max_issues_repo_path": "src/data/rosbag_scans.py", "max_issues_repo_name": "leggedrobotics/DeLORA", "max_issues_repo_head_hexsha": "909948d63a9517e6dd54bedcf099f6b39ded2cb4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-01-13T11:23:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T19:21:31.000Z", "max_forks_repo_path": "src/data/rosbag_scans.py", "max_forks_repo_name": "leggedrobotics/DeLORA", "max_forks_repo_head_hexsha": "909948d63a9517e6dd54bedcf099f6b39ded2cb4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2020-11-21T07:11:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T15:07:18.000Z", "avg_line_length": 38.3902439024, "max_line_length": 111, "alphanum_fraction": 0.6702668361, "include": true, "reason": "import numpy", "num_tokens": 373}
|
import tensorflow as tf
import numpy as np
from models import TransWeight
from tests import TestBase
class TransWeightTest(TestBase):
"""
This class tests the functionality of the TransWeight model.
    This test suite can be run with:
python -m unittest -q tests.TransWeightTest
"""
def setUp(self):
super(TransWeightTest, self).setUp()
self._comp_model = TransWeight(embedding_size=2, nonlinearity=tf.identity, dropout_rate=0.0, transforms=2)
def test_transformation(self):
"""
Tests that the t transformations are correctly performed.
        transformations_tensor contains t transformation matrices of size 2n x n,
        where n is the size of the input vectors u and v
"""
u = np.array([[1, 1, 1]], dtype='float32')
v = np.array([[1, 0, 0]], dtype='float32')
transformations_tensor = np.full(shape=(2,6,3), fill_value=0.0, dtype='float32')
two_eyes = np.concatenate((np.eye(3), np.eye(3)), axis=0)
transformations_tensor[0] = np.copy(two_eyes)
transformations_tensor[0][2][2] = 0
transformations_tensor[1] = np.copy(two_eyes)
transformations_tensor[1][0][0] = 0
transformations_tensor[1][1][1] = 0
transformations_tensor[1][3][0] = 0
transformations_bias = np.full(shape=(2,3), fill_value=0.0, dtype='float32')
# the two transformation matrices are different, so the resulting
# transformations are also expected to be different,
# even if they start off with the same inputs u and v
expected_t = np.array([[[2, 1, 0], [0, 0, 1]]], dtype='float32')
with tf.Session() as sess:
t = sess.run(
self.comp_model.transform(
u=u,
v=v,
transformations_tensor=transformations_tensor,
transformations_bias=transformations_bias))
np.testing.assert_allclose(t, expected_t)
def test_weighting(self):
"""
Test that the weighting is correctly performed
If [[A B C] are two transformed representations and [[[g h i] are the transformations weighting matrices
[D E F]] [j k l]
[m n p]]
[[q r s]
[t u v]
[x y z]]]
        then the elements of the composed representation [p_0 p_1 p_2] are obtained as:
p_0 = A*g + B*j + C*m + D*q + E*t + F*x
p_1 = A*h + B*k + C*n + D*r + E*u + F*y
p_2 = A*i + B*l + C*p + D*s + E*v + F*z
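        Equivalently, the weighting is a tensor contraction; as a hypothetical
        NumPy check: p = np.einsum('bti,tij->bj', t, W) + b.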
"""
t = np.array([[[2, 1, 0], [0, 0, 1]]], dtype='float32')
W = np.full(shape=(2,3,3), fill_value=0.0, dtype='float32')
W[0][0][0] = 1
W[0][0][1] = 1
W[0][0][2] = 2
W[0][1][0] = 2
W[0][1][1] = 2
W[0][1][2] = 0
W[0][2][0] = 0
W[0][2][1] = 2
W[0][2][2] = 2
W[1][0][0] = 2
W[1][0][1] = 0
W[1][0][2] = 0
W[1][1][0] = 0
W[1][1][1] = 1
W[1][1][2] = 0
W[1][2][0] = 1
W[1][2][1] = 2
W[1][2][2] = 2
b = np.full(shape=(3,), fill_value=0.0, dtype='float32')
expected_p = np.array([[5, 6, 6]])
with tf.Session() as sess:
p = sess.run(
self.comp_model.weight(
reg_uv=t,
W=W,
b=b))
np.testing.assert_allclose(p, expected_p)
def test_composition(self):
"""
        Testing composition for TW is done by testing the two steps in test_transformation and
        test_weighting.
"""
pass
|
{"hexsha": "d4cff420573d60bfd9539ab3e609e54dbf138f17", "size": 3974, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_transweight.py", "max_stars_repo_name": "larsvansoest/commix", "max_stars_repo_head_hexsha": "c5629a94d8657e67919fdbb42da99026e85023e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-08-02T12:40:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-15T08:58:52.000Z", "max_issues_repo_path": "tests/test_transweight.py", "max_issues_repo_name": "larsvansoest/commix", "max_issues_repo_head_hexsha": "c5629a94d8657e67919fdbb42da99026e85023e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_transweight.py", "max_forks_repo_name": "larsvansoest/commix", "max_forks_repo_head_hexsha": "c5629a94d8657e67919fdbb42da99026e85023e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-07-12T04:31:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-10T14:56:23.000Z", "avg_line_length": 33.6779661017, "max_line_length": 114, "alphanum_fraction": 0.4886763966, "include": true, "reason": "import numpy", "num_tokens": 1069}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 14:28:26 2019
@author: Emma
"""
import numpy as np
import os,sys
sys.path.append(os.pardir)
from dataset.mnist import load_mnist
import matplotlib.pylab as plt
def cross_entropy_error(y,t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
batch_size=y.shape[0]
delta=1e-7
return -np.sum(t*np.log(y+delta))/batch_size
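# Central-difference partial derivative along coordinate i:
# (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta)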
def numerical_diff(f,x,i):
delta=1e-4
x2=x.copy()
x1=x.copy()
x2[i]=x2[i]+delta
x1[i]=x1[i]-delta
return (f(x2)-f(x1))/(2*delta)
def numerical_gradient(f,x):
grad=np.zeros_like(x)
for i in range(x.size):
grad[i]=numerical_diff(f,x,i)
return grad
def gradient_descent(f,init_x,lr=0.1,step_num=100):
x=init_x
for i in range(step_num):
grad=numerical_gradient(f,x)
x-=lr*grad
return x
def function_1(x):
return 0.01*x**2+0.1*x
def function_2(x):
return np.sum(x**2)
if __name__=="__main__":
# (x_train,t_train),(x_test,t_test)=load_mnist(normalize=True,one_hot_label=True)
# train_size=x_train.shape[0]
# batch_size=10
# mask_batch=np.random.choice(train_size,batch_size)
# x_batch=x_train[mask_batch]
# t_batch=t_train[mask_batch]
# x=np.arange(-20,10,0.1)
# y=function_1(x)
# plt.xlabel("x")
# plt.ylabel("f(x)")
# plt.plot(x,y)
# plt.show()
#
print(numerical_diff(function_2,np.array([4.5,3.6]),0))
print(numerical_diff(function_2,np.array([4.5,3.6]),1))
print(numerical_gradient(function_2,np.array([4.5,3.6])))
print(gradient_descent(function_2,np.array([-3.0,4.0])))
|
{"hexsha": "b3d81f42886e6f8d04dca76a6d13968994da7238", "size": 1676, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch04/practice.py", "max_stars_repo_name": "py0922/deepLearn", "max_stars_repo_head_hexsha": "32d0020ac98c1c9e65acd943087cec6167808c14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ch04/practice.py", "max_issues_repo_name": "py0922/deepLearn", "max_issues_repo_head_hexsha": "32d0020ac98c1c9e65acd943087cec6167808c14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ch04/practice.py", "max_forks_repo_name": "py0922/deepLearn", "max_forks_repo_head_hexsha": "32d0020ac98c1c9e65acd943087cec6167808c14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9589041096, "max_line_length": 84, "alphanum_fraction": 0.6330548926, "include": true, "reason": "import numpy", "num_tokens": 542}
|
using LinearAlgebra, Zygote, Optim, IterativeSolvers, Arpack
using CSV, DataFrames
include("model.jl")
function data()
abalone = CSV.File(open("warpedgp/abalone.csv"))
abalone = DataFrame(abalone)
X = Matrix(Matrix(abalone[1:200,2:8])')
y = Vector{Float64}(abalone[1:200, 9])
f = Linear()
g = Identity()
k = Gaussian()
d = data(f, X, y)
return f, g, k, d
end
function testfun()
f, g, k, d = data()
unpack = x -> (x[1],)
fg! = make_fg(unpack) do ℓ
ks = kernelsystem(f, k, g, d, UniformScaling(ℓ), (1.0,), 1e-6, ())
-logprob(ks, (), 0)
end
return Optim.optimize(Optim.only_fg!(fg!), rand(1), ConjugateGradient())
end
function datavanilla()
abalone = CSV.File(open("warpedgp/abalone.csv"))
abalone = DataFrame(abalone)
X = Matrix(Matrix(abalone[1:200,2:6])')
y = Vector(abalone[1:200, 7])
f = Linear()
g = Identity()
k = Gaussian()
d = data(f, X, y)
return f, g, k, d
end
function testfunvanilla()
f, g, k, d = datavanilla()
unpack = x -> (x[1:5], x[6], x[7])
fg! = make_fg(unpack) do ℓ, ϵ, amp
ks = kernelsystem(f, k, g, d, Diagonal(ℓ), (amp,), ϵ, ())
-logprob(ks, (), 0)
end
return Optim.optimize(Optim.only_fg!(fg!), rand(7), ConjugateGradient())
end
function datavanillanoτ()
abalone = CSV.File(open("abalone.csv"))
abalone = DataFrame(abalone)
X = Matrix(Matrix(abalone[1:200,2:6])')
y = Vector(abalone[1:200, 7])
f = Linear()
g = Identity()
k = Gaussian()
d = data(f, X, y)
return f, g, k, d
end
function testfunvanillanoτ()
f, g, k, d = datavanillanoτ()
unpack = x -> (x[1:5], x[6])
fg! = make_fg(unpack) do ℓ, ϵ
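        # amp = 1/(1+ϵ) and noise = ϵ/(1+ϵ) sum to one, i.e. the overall scale
        # τ is fixed rather than optimized (hence the "noτ" in the name)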
ks = kernelsystem(f, k, g, d, Diagonal(ℓ), (1 / (1 + ϵ),), ϵ / (1 + ϵ), ())
-logprob(ks, (), 0)
end
return Optim.optimize(Optim.only_fg!(fg!), rand(6), ConjugateGradient())
end
|
{"hexsha": "6a7cfd23b8a7c04d1bacebcb47c30d6df3c3b342", "size": 1911, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "BayesTransformedGaussian/src/various_mle_things/warpedgp/run.jl", "max_stars_repo_name": "dbindel/btg", "max_stars_repo_head_hexsha": "83616be1d1c4d80f385fa95cef38753f39a3a4c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-09-18T04:52:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-03T18:58:55.000Z", "max_issues_repo_path": "BayesTransformedGaussian/src/various_mle_things/warpedgp/run.jl", "max_issues_repo_name": "dbindel/btg", "max_issues_repo_head_hexsha": "83616be1d1c4d80f385fa95cef38753f39a3a4c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BayesTransformedGaussian/src/various_mle_things/warpedgp/run.jl", "max_forks_repo_name": "dbindel/btg", "max_forks_repo_head_hexsha": "83616be1d1c4d80f385fa95cef38753f39a3a4c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-06T06:07:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-06T06:07:50.000Z", "avg_line_length": 26.9154929577, "max_line_length": 82, "alphanum_fraction": 0.5740450026, "num_tokens": 680}
|
[STATEMENT]
lemma word_shift_nonzero:
"\<lbrakk> (x::'a::len word) \<le> 2 ^ m; m + n < LENGTH('a::len); x \<noteq> 0\<rbrakk>
\<Longrightarrow> x << n \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<le> 2 ^ m; m + n < LENGTH('a); x \<noteq> 0\<rbrakk> \<Longrightarrow> x << n \<noteq> 0
[PROOF STEP]
apply (simp only: word_neq_0_conv word_less_nat_alt
shiftl_t2n mod_0 unat_word_ariths
unat_power_lower word_le_nat_alt)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x mod 2 ^ LENGTH('a)
[PROOF STEP]
apply (subst mod_less)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 2 ^ n * unat x < 2 ^ LENGTH('a)
2. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply (rule order_le_less_trans)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 2 ^ n * unat x \<le> ?y5
2. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> ?y5 < 2 ^ LENGTH('a)
3. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply (erule mult_le_mono2)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 2 ^ n * 2 ^ m < 2 ^ LENGTH('a)
2. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply (subst power_add[symmetric])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 2 ^ (n + m) < 2 ^ LENGTH('a)
2. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply (rule power_strict_increasing)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> n + m < LENGTH('a)
2. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 1 < 2
3. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 1 < 2
2. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>unat x \<le> 2 ^ m; m + n < LENGTH('a); 0 < unat x\<rbrakk> \<Longrightarrow> 0 < 2 ^ n * unat x
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1468, "file": "Word_Lib_Word_Lemmas", "length": 10}
|
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.atom import Atom
import cvxpy.interface as intf
import numpy as np
import scipy.sparse as sp
class sum_largest(Atom):
"""Sum of the largest k values in the matrix X.
"""
def __init__(self, x, k):
self.k = k
super(sum_largest, self).__init__(x)
def validate_arguments(self):
"""Verify that k is a positive integer.
"""
if int(self.k) != self.k or self.k <= 0:
raise ValueError("Second argument must be a positive integer.")
super(sum_largest, self).validate_arguments()
def numeric(self, values):
"""Returns the sum of the k largest entries of the matrix.
"""
value = values[0].flatten()
indices = np.argsort(-value)[:int(self.k)]
return value[indices].sum()
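# Illustrative check of this method (hypothetical values, not from cvxpy's tests):
# for X = [[3, 1], [2, 4]] and k = 2, the two largest entries are 4 and 3, so
# the sum returned here is 7.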
def _grad(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
# Grad: 1 for each of k largest indices.
value = intf.from_2D_to_1D(values[0].flatten().T)
indices = np.argsort(-value)[:int(self.k)]
D = np.zeros((self.args[0].shape[0]*self.args[0].shape[1], 1))
D[indices] = 1
return [sp.csc_matrix(D)]
def shape_from_args(self):
"""Returns the (row, col) shape of the expression.
"""
return tuple()
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
# Same as argument.
return (self.args[0].is_nonneg(), self.args[0].is_nonpos())
def is_atom_convex(self):
"""Is the atom convex?
"""
return True
def is_atom_concave(self):
"""Is the atom concave?
"""
return False
def is_incr(self, idx):
"""Is the composition non-decreasing in argument idx?
"""
return True
def is_decr(self, idx):
"""Is the composition non-increasing in argument idx?
"""
return False
def get_data(self):
"""Returns the parameter k.
"""
return [self.k]
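# Hedged usage sketch of the atom (illustrative only; assumes the surrounding
# cvxpy package is installed):
# import cvxpy as cp
# x = cp.Variable(5)
# objective = cp.Minimize(sum_largest(x, 2))  # convex atom, safe to minimize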
|
{"hexsha": "e1c9998d7fb25319beef04ee215b062a89237814", "size": 2864, "ext": "py", "lang": "Python", "max_stars_repo_path": "cvxpy/atoms/sum_largest.py", "max_stars_repo_name": "jasondark/cvxpy", "max_stars_repo_head_hexsha": "56aaa01b0e9d98ae5a91a923708129a7b37a6f18", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-06-03T01:33:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T01:48:49.000Z", "max_issues_repo_path": "cvxpy/atoms/sum_largest.py", "max_issues_repo_name": "Toby-Gao/cvxpy", "max_issues_repo_head_hexsha": "bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-09T13:13:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T13:13:44.000Z", "max_forks_repo_path": "cvxpy/atoms/sum_largest.py", "max_forks_repo_name": "Toby-Gao/cvxpy", "max_forks_repo_head_hexsha": "bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-22T01:35:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T10:48:51.000Z", "avg_line_length": 29.2244897959, "max_line_length": 75, "alphanum_fraction": 0.6201117318, "include": true, "reason": "import numpy,import scipy,import cvxpy,from cvxpy", "num_tokens": 668}
|
function psnr=compute_psnr(im1,im2)
if size(im1, 3) == 3,
im1 = rgb2ycbcr(im1);
im1 = im1(:, :, 1);
end
if size(im2, 3) == 3,
im2 = rgb2ycbcr(im2);
im2 = im2(:, :, 1);
end
imdff = double(im1) - double(im2);
imdff = imdff(:);
rmse = sqrt(mean(imdff.^2));
psnr = 20*log10(255/rmse);
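% Illustrative usage (hypothetical images, not part of the original repo):
% im = uint8(255*rand(32));     % random 8-bit image
% compute_psnr(im, im)          % identical inputs: rmse = 0, psnr = Inf
% compute_psnr(im, im + 1)      % off-by-one everywhere: psnr ~= 48 dB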
|
{"author": "tyshiwo", "repo": "DRRN_CVPR17", "sha": "cafe98bc73997c10947911de74279d63cb786b8a", "save_path": "github-repos/MATLAB/tyshiwo-DRRN_CVPR17", "path": "github-repos/MATLAB/tyshiwo-DRRN_CVPR17/DRRN_CVPR17-cafe98bc73997c10947911de74279d63cb786b8a/test/evaluation_func/compute_psnr.m"}
|
import calendar
from datetime import datetime as dt
from datetime import timedelta
import logging
import time
from time import sleep
import sys
import numpy as np
import requests
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')
LOG = logging.getLogger("main")
def utcnow():
return calendar.timegm(time.gmtime())
DEBUG = False # if True and "where" sent to the bot, replies with the real location
PARAMS = {'tolerance': 100., # Distance from the final goal
'attempts': 10., # How many guesses
'distance_error': 25., # Reported distance is within this much of the real one
}
def get_updates(token, offset=None):
result = requests.get('https://api.telegram.org/bot%s/getUpdates' % token, json=({'offset': offset} if offset else {}))
result.raise_for_status()
return result.json()
def send_message(message, token, chat):
result = requests.post('https://api.telegram.org/bot%s/sendMessage' % token, json={'chat_id': chat,
'text': message})
return result
def send_location(location, token, chat):
result = requests.post('https://api.telegram.org/bot%s/sendLocation' % token, json={'chat_id': chat,
'latitude': location[0],
'longitude': location[1]})
return result
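# Illustrative calls of the helpers above (hypothetical token and chat id):
# send_message("hello", "123456:ABC-token", 987654321)
# send_location((51.5074, -0.1278), "123456:ABC-token", 987654321)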
def convert_dms(d, m, s, negate=False):
# Convert degree-minute-second coordinates into decimal
return (-1 if negate else 1) * (d + m/60. + s/3600.)
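# Exact-arithmetic examples (illustrative): convert_dms(51, 30, 0) == 51.5 and
# convert_dms(0, 11, 0, True) == -11/60 ~= -0.18333 (negate flags west/south).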
def get_distance(lat1, lon1, lat2, lon2):
# Haversine formula
lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(np.sqrt(a))
return 6367000. * c
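# Sanity check (illustrative coordinates, not from the original file): two points
# roughly a kilometre apart in central London, e.g.
# get_distance(51.5131, -0.1589, 51.5027, -0.1527)  # ~1200 (metres)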
class State(object):
def __init__(self, token, chat):
self.entry_time = utcnow()
self.token = token
self.chat = chat
def process_message(self, message):
pass
def enter(self):
pass
def process_time(self):
pass
def send_message(self, message):
send_message(message, self.token, self.chat)
def send_location(self, location):
send_location(location, self.token, self.chat)
class TreasureHunt(State):
def __init__(self, token, chat, location, return_to):
State.__init__(self, token, chat)
self.location = location
self.return_to = return_to
self.attempts = PARAMS['attempts']
def process_message(self, message):
if 'text' in message and 'where' in message['text'] and DEBUG:
self.send_message('here')
self.send_location(self.location)
return
if 'location' in message:
location = (message['location']['latitude'], message['location']['longitude'])
distance = get_distance(location[0], location[1],
self.location[0], self.location[1])
if distance < PARAMS['tolerance']:
self.send_message("Congratulations!")
self.send_location(self.location)
self.send_message("The actual treasure was here, but close enough.")
return self.return_to
else:
reported_distance = np.random.uniform(low=distance - PARAMS['distance_error'],
high=distance + PARAMS['distance_error'])
self.send_message("You are %.0f metres away." % reported_distance)
self.attempts -= 1
if self.attempts > 0:
self.send_message("You have %d attempt%s remaining." % (self.attempts, ('' if self.attempts == 1 else 's')))
else:
self.send_message("You've lost!")
self.send_location(self.location)
self.send_message("The treasure was here the whole time!")
def enter(self):
self.send_message("The treasure is buried somewhere around...")
self.send_message("Send me your location and I'll tell you how far you are.")
self.send_message("You have %d tries." % self.attempts)
if PARAMS['distance_error'] > 0.:
self.send_message("Also, the distance I give you will be within %d metres of the real distance." % PARAMS['distance_error'])
def __repr__(self):
return "<TreasureHunt(location=%r, attempts=%r)>" % (self.location, self.attempts)
class TreasureHuntIntro(State):
def _get_random_coords(self):
return (np.random.uniform(self.bounds[0], self.bounds[1]),
np.random.uniform(self.bounds[2], self.bounds[3]))
def _in_bounds(self, coords):
return coords[0] >= self.bounds[0] and coords[0] <= self.bounds[1]\
and coords[1] >= self.bounds[2] and coords[1] <= self.bounds[3]
def _get_random_near_player(self, coords, sigma):
while True:
result = (coords[0] + np.random.randn() * sigma,
coords[1] + np.random.randn() * sigma)
if self._in_bounds(result):
return result
def __init__(self, token, chat, bounds):
State.__init__(self, token, chat)
self.bounds = bounds
def enter(self):
self.send_message("Welcome to the treasure hunt!")
self.send_message("Type 'hunt' to begin.")
def process_message(self, message):
if "text" in message and "hunt" in message['text']:
self.send_message("OK")
location = self._get_random_coords()
return TreasureHunt(self.token, self.chat, location, self)
def __repr__(self):
return "<TreasureHuntIntro(bounds=%r)>" % (self.bounds,)
# Rough coordinates of the corners of Hyde Park (west edge bumped to avoid Kensington Palace)
PARK_RECTANGLE = convert_dms(51, 30, 11), convert_dms(51, 30, 35),\
convert_dms(0, 11, 0, True), convert_dms(0, 9, 33, True)
if __name__ == '__main__':
print(sys.argv)
if len(sys.argv) != 3:
LOG.error("Usage: indiana.py <bot token> <chat ID>")
sys.exit(1)
token, chat = sys.argv[1], sys.argv[2]
start_state = TreasureHuntIntro(token, chat, PARK_RECTANGLE)
start_time = utcnow()
last_update_id = 0
start_state.enter()
current_state = start_state
while True:
sleep(5)
updates = get_updates(token, offset=last_update_id)
for update in updates['result']:
if update['update_id'] <= last_update_id:
continue
last_update_id = update['update_id']
if update['message']['date'] <= start_time:
continue
LOG.info("Received update %r" % update)
new_state = current_state.process_message(update['message'])
if new_state and new_state != current_state:
LOG.info("New state %r" % new_state)
current_state = new_state
new_state.enter()
new_state = current_state.process_time()
if new_state and new_state != current_state:
LOG.info("New state %r" % new_state)
current_state = new_state
new_state.enter()
|
{"hexsha": "6dba61caafbf98e65ae5933dda72eb00d3b3601a", "size": 7645, "ext": "py", "lang": "Python", "max_stars_repo_path": "indiana.py", "max_stars_repo_name": "mildbyte/indiana", "max_stars_repo_head_hexsha": "fa35d4f1a08f92b77b698105f92054086ee11883", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-21T21:41:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-21T21:41:02.000Z", "max_issues_repo_path": "indiana.py", "max_issues_repo_name": "mildbyte/indiana", "max_issues_repo_head_hexsha": "fa35d4f1a08f92b77b698105f92054086ee11883", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "indiana.py", "max_forks_repo_name": "mildbyte/indiana", "max_forks_repo_head_hexsha": "fa35d4f1a08f92b77b698105f92054086ee11883", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2926829268, "max_line_length": 136, "alphanum_fraction": 0.5748855461, "include": true, "reason": "import numpy", "num_tokens": 1708}
|
import os
import pickle
import uuid
from time import sleep
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import (Activation, Conv2D, Dense, Dropout, Flatten,
MaxPooling2D)
from keras.models import Sequential, load_model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.preprocessing.image import (ImageDataGenerator, array_to_img,
img_to_array, load_img)
from PIL import Image
def converter(x):
# x has shape (batch, width, height, channels)
return (0.21 * x[:,:,:,:1]) + (0.72 * x[:,:,:,1:2]) + (0.07 * x[:,:,:,-1:])
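# Illustrative check (hypothetical tensor, not part of the original file): a
# pure-green batch maps to 0.72 under these luminance weights, e.g.
# converter(np.ones((1, 1, 1, 3)) * np.array([0.0, 1.0, 0.0]))  # -> [[[[0.72]]]]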
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def get_classname(classes, index):
for class_name, lindex in classes.items(): # for name, age in list.items(): (for Python 3.x)
if lindex == index:
return class_name
def predict(resources_dir, img_width, img_height):
model = load_model(os.path.join(resources_dir,'model.please'))
model.load_weights(os.path.join(resources_dir,'weights.please'))
classes = load_obj(os.path.join(resources_dir,'classes'))
# 0 means the default video capture device in OS
video_capture = cv2.VideoCapture(0)
# infinite loop, break by key ESC
while True:
if not video_capture.isOpened():
sleep(5)
# Capture frame-by-frame
ret, img_orig = video_capture.read()
img_resize = cv2.resize(img_orig, (img_height,img_width))
img_grey = cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)
img_tensor = image.img_to_array(img_grey) # (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
img_tensor /= 255.
predicted_class = model.predict_classes(img_tensor)
class_name = get_classname(classes, predicted_class)
try:
print (class_name)
img_grey = cv2.cvtColor(img_grey, cv2.COLOR_GRAY2RGB)
im_to_display = np.concatenate((img_resize, img_grey), axis=1)
im_to_display = cv2.resize(im_to_display, (1280, 480))
cv2.putText(im_to_display,class_name,(10,460),cv2.FONT_HERSHEY_SIMPLEX ,1,(0,255,255),2)
cv2.imshow("Guesstimator", im_to_display)
except Exception as e:
print (str(e))
if cv2.waitKey(1) == 27:
break # esc to quit
cv2.destroyAllWindows()
|
{"hexsha": "6b243c5a16c2dcafc6e4356193c3a26ca651b092", "size": 2740, "ext": "py", "lang": "Python", "max_stars_repo_path": "predictors/from_webcam.py", "max_stars_repo_name": "marmightygood/image_classifier", "max_stars_repo_head_hexsha": "8dfc94efdb15206a45d6956e115e5291c2485b15", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predictors/from_webcam.py", "max_issues_repo_name": "marmightygood/image_classifier", "max_issues_repo_head_hexsha": "8dfc94efdb15206a45d6956e115e5291c2485b15", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-11-10T20:22:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T14:55:57.000Z", "max_forks_repo_path": "predictors/from_webcam.py", "max_forks_repo_name": "marmightygood/image_classifier", "max_forks_repo_head_hexsha": "8dfc94efdb15206a45d6956e115e5291c2485b15", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0555555556, "max_line_length": 187, "alphanum_fraction": 0.6467153285, "include": true, "reason": "import numpy", "num_tokens": 658}
|
export Periodic, Bloch, DirichletNeumann, NeumannDirichlet, NeumannNeumann, DirichletDirichlet
abstract type BCType end
struct Periodic <: BCType end
struct Bloch <: BCType end
struct DirichletNeumann <: BCType end
struct NeumannDirichlet <: BCType end
struct NeumannNeumann <: BCType end
struct DirichletDirichlet <: BCType end
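# Illustrative dispatch on these marker types (hypothetical function, not part
# of the package):
# describe(::Periodic) = "field wraps around the domain"
# describe(::DirichletNeumann) = "fixed value on one boundary, fixed flux on the other"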
|
{"hexsha": "846d79b3b3212c47caa5e3951a55f2a10231e441", "size": 333, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Interface/BC.jl", "max_stars_repo_name": "MKAbdElrahman/Photon.jl", "max_stars_repo_head_hexsha": "f75c0b572233094c8a049d3bf572489b6d5d23d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-06-27T10:59:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-11T18:16:15.000Z", "max_issues_repo_path": "src/Interface/BC.jl", "max_issues_repo_name": "MKAbdElrahman/Photon.jl", "max_issues_repo_head_hexsha": "f75c0b572233094c8a049d3bf572489b6d5d23d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-06-12T00:26:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-10T00:28:45.000Z", "max_forks_repo_path": "src/Interface/BC.jl", "max_forks_repo_name": "MKAbdElrahman/Photon.jl", "max_forks_repo_head_hexsha": "f75c0b572233094c8a049d3bf572489b6d5d23d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0, "max_line_length": 98, "alphanum_fraction": 0.8138138138, "num_tokens": 96}
|
import math, cmath
import numpy as np
from openmdao.api import ImplicitComponent, Group
from openmdao.api import DirectSolver, BoundsEnforceLS, NewtonSolver
from pyLF.LF_elements.generator import ACgenerator, DCgenerator
from pyLF.LF_elements.load import ACload, DCload
class InverterCalcs(ImplicitComponent):
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('mode', default='Phase', desc='Control Mode: Phase or PF')
def setup(self):
nn = self.options['num_nodes']
ar = np.arange(nn)
mode = self.options['mode']
if not (mode=="Phase" or mode=="PF"):
raise ValueError("mode must be 'Phase' or 'PF', but '{}' was given.".format(mode))
self.add_input('M', val=np.ones(nn), units=None, desc='Inverter modulation index (V_ac/V_dc)')
self.add_input('eff', val=np.ones(nn), units=None, desc='Inverter efficiency (P_ac/P_dc)')
self.add_input('P_ac', val=np.ones(nn), units='W', desc='Real power leaving inverter')
self.add_input('V_dc', val=np.ones(nn), units='V', desc='Voltage entering inverter')
self.add_output('P_dc', val=np.ones(nn), units='W', desc='Power entering inverter')
self.add_output('Vm_ac', val=np.ones(nn), units='V', desc='Voltage magnitude leaving inverter')
self.add_output('thetaV_bus', val=np.zeros(nn), units='deg', desc='Voltage phase angle')
self.add_output('P_loss', val=np.ones(nn), units='W', desc='Power lost through inverter')
self.declare_partials('P_dc', 'eff', rows=ar, cols=ar)
self.declare_partials('P_dc', 'P_ac', rows=ar, cols=ar)
self.declare_partials('P_dc', 'P_dc', rows=ar, cols=ar,val=1.0)
self.declare_partials('Vm_ac', 'V_dc', rows=ar, cols=ar)
self.declare_partials('Vm_ac', 'M', rows=ar, cols=ar)
self.declare_partials('Vm_ac', 'Vm_ac', rows=ar, cols=ar,val=-1.0)
self.declare_partials('P_loss','P_ac', rows=ar, cols=ar, val=1.0)
self.declare_partials('P_loss','P_dc', rows=ar, cols=ar, val=1.0)
self.declare_partials('P_loss','P_loss', rows=ar, cols=ar, val=-1.0)
if mode == 'Phase':
self.add_input('thetaV_target', val=np.zeros(nn), units='deg', desc='Target voltage phase angle output')
self.declare_partials('thetaV_bus', 'thetaV_target', rows=ar, cols=ar, val=1.0)
self.declare_partials('thetaV_bus', 'thetaV_bus', rows=ar, cols=ar, val=-1.0)
else:
self.add_input('PF', val=np.ones(nn), units=None, desc='Inverter power factor')
self.add_input('Q_ac', val=np.ones(nn), units='V*A', desc='Reactive power leaving inverter')
self.declare_partials('thetaV_bus', 'P_ac', rows=ar, cols=ar)
self.declare_partials('thetaV_bus', 'Q_ac', rows=ar, cols=ar)
self.declare_partials('thetaV_bus', 'PF', rows=ar, cols=ar, val=-1.0)
def apply_nonlinear(self, inputs, outputs, resids):
mode = self.options['mode']
resids['P_dc'] = inputs['P_ac'] / inputs['eff'] + outputs['P_dc']
resids['Vm_ac'] = inputs['V_dc'] * inputs['M'] - outputs['Vm_ac']
resids['P_loss'] = inputs['P_ac'] + outputs['P_dc'] - outputs['P_loss']
if mode == 'Phase':
resids['thetaV_bus'] = inputs['thetaV_target'] - outputs['thetaV_bus']
else:
resids['thetaV_bus'] = inputs['P_ac'] / (inputs['P_ac']**2 + inputs['Q_ac']**2)**0.5 - inputs['PF']
def solve_nonlinear(self, inputs, outputs):
mode = self.options['mode']
outputs['P_dc'] = -inputs['P_ac'] / inputs['eff']
outputs['Vm_ac'] = inputs['V_dc'] * inputs['M']
outputs['P_loss'] = inputs['P_ac'] + outputs['P_dc']
if mode == 'Phase':
outputs['thetaV_bus'] = inputs['thetaV_target']
def linearize(self, inputs, outputs, J):
mode = self.options['mode']
J['P_dc', 'eff'] = -inputs['P_ac'] / inputs['eff']**2
J['P_dc', 'P_ac'] = 1 / inputs['eff']
J['Vm_ac', 'V_dc'] = inputs['M']
J['Vm_ac', 'M'] = inputs['V_dc']
if mode == 'PF':
J['thetaV_bus', 'P_ac'] = inputs['Q_ac']**2 / (inputs['P_ac']**2 + inputs['Q_ac']**2)**1.5
J['thetaV_bus', 'Q_ac'] = -inputs['P_ac'] * inputs['Q_ac'] / (inputs['P_ac']**2 + inputs['Q_ac']**2)**1.5
class Inverter(Group):
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('mode', default='Phase', desc='Control Mode: Phase or PF')
self.options.declare('Q_min', allow_none=True, default=None, desc='Lower bound for reactive power (Q)')
self.options.declare('Q_max', allow_none=True, default=None, desc='Upper bound for reactive power (Q)')
self.options.declare('Vbase', default=5000.0, desc='Base voltage in units of volts')
self.options.declare('Sbase', default=10.0E6, desc='Base power in units of watts')
def setup(self):
nn = self.options['num_nodes']
mode = self.options['mode']
Q_min = self.options['Q_min']
Q_max = self.options['Q_max']
Vbase = self.options['Vbase']
Sbase = self.options['Sbase']
self.add_subsystem('load', DCload(num_nodes=nn), promotes=[('P','P_dc'),('V_in','V_dc'),('I_in','I_dc')])
self.add_subsystem('gen', ACgenerator(num_nodes=nn, mode='Slack', Q_min=Q_min, Q_max=Q_max, Vbase=Vbase, Sbase=Sbase),
promotes=[('Vm_bus','Vm_ac'),('Vr_out','Vr_ac'),('Vi_out','Vi_ac'),
('Ir_out','Ir_ac'),('Ii_out','Ii_ac'),('P_out','P_ac'),('Q_out','Q_ac'),
'thetaV_bus','P_guess'])
if mode == 'Phase':
self.add_subsystem('calcs', InverterCalcs(num_nodes=nn, mode=mode), promotes=['eff','M','P_ac','V_dc','P_dc',
'Vm_ac','thetaV_bus','thetaV_target'])
else:
self.add_subsystem('calcs', InverterCalcs(num_nodes=nn, mode=mode), promotes=['eff','M','P_ac','V_dc','P_dc',
'Vm_ac','thetaV_bus','Q_ac','PF'])
# newton = self.nonlinear_solver = NewtonSolver()
# newton.options['atol'] = 1e-4
# newton.options['rtol'] = 1e-4
# newton.options['iprint'] = 2
# newton.options['maxiter'] = 10
# newton.options['solve_subsystems'] = True
# newton.options['max_sub_solves'] = 3
# newton.linesearch = BoundsEnforceLS()
# newton.linesearch.options['bound_enforcement'] = 'scalar'
# newton.linesearch.options['print_bound_enforce'] = True
# newton.linesearch.options['iprint'] = -1
# self.linear_solver = DirectSolver(assemble_jac=True)
class InverterCalcs2(ImplicitComponent):
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('mode', default='Lead', values=['Lead', 'Lag'], desc='Specifies whether AC current leads or lags the voltage')
def setup(self):
nn = self.options['num_nodes']
ar = np.arange(nn)
mode = self.options['mode']
self.add_input('M', val=np.ones(nn), units=None, desc='Inverter modulation index (V_ac/V_dc)')
self.add_input('eff', val=np.ones(nn), units=None, desc='Inverter efficiency (P_ac/P_dc)')
self.add_input('PF', val=np.ones(nn), units=None, desc='Inverter power factor')
self.add_input('V_dc', val=np.ones(nn), units='V', desc='Voltage entering inverter')
self.add_input('Vr_ac', val=np.ones(nn), units='V', desc='Real voltage leaving inverter')
self.add_input('Vi_ac', val=np.ones(nn), units='V', desc='Imaginary voltage leaving inverter')
self.add_input('P_dc_guess', val=np.zeros(nn), units='W', desc='Guess for power entering inverter')
self.add_output('P_dc', val=np.ones(nn), units='W', desc='Power entering inverter')
self.add_output('P_ac', val=np.ones(nn), units='W', desc='Real power leaving inverter')
self.add_output('Q_ac', val=np.ones(nn), units='V*A', desc='Reactive power leaving inverter')
self.add_output('P_loss', val=np.ones(nn), units='W', desc='Power lost through inverter')
self.declare_partials('P_dc', 'V_dc', rows=ar, cols=ar)
self.declare_partials('P_dc', 'M', rows=ar, cols=ar)
self.declare_partials('P_dc', 'Vr_ac', rows=ar, cols=ar)
self.declare_partials('P_dc', 'Vi_ac', rows=ar, cols=ar)
self.declare_partials('P_ac', 'P_ac', rows=ar, cols=ar, val=1.0)
self.declare_partials('P_ac', 'P_dc', rows=ar, cols=ar)
self.declare_partials('P_ac', 'eff', rows=ar, cols=ar)
self.declare_partials('Q_ac', 'P_ac', rows=ar, cols=ar)
self.declare_partials('Q_ac', 'PF', rows=ar, cols=ar)
self.declare_partials('P_loss','P_ac', rows=ar, cols=ar, val=1.0)
self.declare_partials('P_loss','P_dc', rows=ar, cols=ar, val=1.0)
self.declare_partials('P_loss','P_loss', rows=ar, cols=ar, val=-1.0)
if mode == 'Lead':
self.declare_partials('Q_ac', 'Q_ac', rows=ar, cols=ar, val=1.0)
else:
self.declare_partials('Q_ac', 'Q_ac', rows=ar, cols=ar, val=-1.0)
def apply_nonlinear(self, inputs, outputs, resids):
mode = self.options['mode']
resids['P_dc'] = inputs['V_dc'] * inputs['M'] - (inputs['Vr_ac']**2 + inputs['Vi_ac']**2)**0.5
resids['P_ac'] = outputs['P_ac'] + outputs['P_dc'] * inputs['eff']
resids['P_loss'] = outputs['P_ac'] + outputs['P_dc'] - outputs['P_loss']
if mode=='Lead':
resids['Q_ac'] = outputs['P_ac'] * ((1.0 / inputs['PF'])**2 - 1.0)**0.5 + outputs['Q_ac']
else:
resids['Q_ac'] = outputs['P_ac'] * ((1.0 / inputs['PF'])**2 - 1.0)**0.5 - outputs['Q_ac']
def guess_nonlinear(self, inputs, outputs, resids):
mode = self.options['mode']
outputs['P_dc'] = inputs['P_dc_guess']
outputs['P_ac'] = -outputs['P_dc'] * inputs['eff']
if mode == 'Lead':
outputs['Q_ac'] = -outputs['P_ac'] * ((1.0 / inputs['PF'])**2 - 1.0)**0.5
else:
outputs['Q_ac'] = outputs['P_ac'] * ((1.0 / inputs['PF'])**2 - 1.0)**0.5
def solve_nonlinear(self, inputs, outputs):
mode = self.options['mode']
outputs['P_ac'] = -outputs['P_dc'] * inputs['eff']
outputs['P_loss'] = outputs['P_ac'] + outputs['P_dc']
if mode == 'Lead':
outputs['Q_ac'] = -outputs['P_ac'] * ((1.0 / inputs['PF'])**2 - 1.0)**0.5
else:
outputs['Q_ac'] = outputs['P_ac'] * ((1.0 / inputs['PF'])**2 - 1.0)**0.5
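# Numeric sanity check (illustrative): with PF = 0.95 the reactive-to-real power
# ratio is sqrt(1/0.95**2 - 1) ~= 0.3287, so 'Lead' mode gives Q_ac ~= -0.3287*P_ac
# and 'Lag' mode gives Q_ac ~= +0.3287*P_ac.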
def linearize(self, inputs, outputs, J):
mode = self.options['mode']
J['P_dc', 'V_dc'] = inputs['M']
J['P_dc', 'M'] = inputs['V_dc']
J['P_dc', 'Vr_ac'] = -inputs['Vr_ac'] / (inputs['Vr_ac']**2 + inputs['Vi_ac']**2)**0.5
J['P_dc', 'Vi_ac'] = -inputs['Vi_ac'] / (inputs['Vr_ac']**2 + inputs['Vi_ac']**2)**0.5
J['P_ac', 'P_dc'] = inputs['eff']
J['P_ac', 'eff'] = outputs['P_dc']
J['Q_ac', 'P_ac'] = ((1.0 / inputs['PF'])**2 - 1.0)**0.5
J['Q_ac', 'PF'] = -outputs['P_ac'] / ((1.0 / inputs['PF'])**2 - 1.0)**0.5 / inputs['PF']**3
class Inverter2(Group):
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('mode', default='Lead', values=['Lead', 'Lag'], desc='Specifies whether AC current leads or lags the voltage')
def setup(self):
nn = self.options['num_nodes']
mode = self.options['mode']
self.add_subsystem('dc_load', DCload(num_nodes=nn), promotes=[('P','P_dc'),('V_in','V_dc'),('I_in','I_dc')])
self.add_subsystem('ac_load', ACload(num_nodes=nn), promotes=[('P','P_ac'),('Q','Q_ac'),('Vr_in','Vr_ac'),('Vi_in','Vi_ac'),
('Ir_in','Ir_ac'),('Ii_in','Ii_ac')])
self.add_subsystem('calcs', InverterCalcs2(num_nodes=nn, mode=mode), promotes=['eff','M','PF','V_dc','Vr_ac','Vi_ac',
'P_dc','P_ac','Q_ac','P_dc_guess'])
# newton = self.nonlinear_solver = NewtonSolver()
# newton.options['atol'] = 1e-4
# newton.options['rtol'] = 1e-4
# newton.options['iprint'] = 2
# newton.options['maxiter'] = 10
# newton.options['solve_subsystems'] = True
# newton.options['max_sub_solves'] = 3
# newton.linesearch = BoundsEnforceLS()
# newton.linesearch.options['bound_enforcement'] = 'scalar'
# newton.linesearch.options['print_bound_enforce'] = True
# newton.linesearch.options['iprint'] = -1
# self.linear_solver = DirectSolver(assemble_jac=True)
if __name__ == "__main__":
from openmdao.api import Problem, Group, IndepVarComp
p = Problem()
p.model = Group()
des_vars = p.model.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])
# des_vars.add_output('Vr_ac', 1.05, units='V')
des_vars.add_output('Vr_ac', 0.990032064216588*np.ones(3), units='V')
des_vars.add_output('Vi_ac', 0.0624540777134769*np.ones(3), units='V')
des_vars.add_output('V_dc', 1.0020202020202*np.ones(3), units='V')
des_vars.add_output('M', 0.99*np.ones(3), units=None)
des_vars.add_output('eff', 0.98*np.ones(3), units=None)
des_vars.add_output('PF', 0.95*np.ones(3), units=None)
des_vars.add_output('thetaV_target', 0.0*np.ones(3), units='deg')
# p.model.add_subsystem('con', Inverter2(num_nodes=3, mode='Lead'), promotes=['*'])
p.model.add_subsystem('con', Inverter(num_nodes=3, mode='Phase'), promotes=['*'])
p.setup(check=False)
p.final_setup()
# p.view_model()
p.check_partials(compact_print=False)
|
{"hexsha": "cd2591ad8d4d4bc12fee83408543dabdb8696b22", "size": 13947, "ext": "py", "lang": "Python", "max_stars_repo_path": "zappy/LF_elements/inverter.py", "max_stars_repo_name": "OpenMDAO/zappy", "max_stars_repo_head_hexsha": "2c72048b4c4e0ce0ae83221e4ee5788978254340", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-18T22:41:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T22:41:37.000Z", "max_issues_repo_path": "zappy/LF_elements/inverter.py", "max_issues_repo_name": "OpenMDAO/zappy", "max_issues_repo_head_hexsha": "2c72048b4c4e0ce0ae83221e4ee5788978254340", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zappy/LF_elements/inverter.py", "max_forks_repo_name": "OpenMDAO/zappy", "max_forks_repo_head_hexsha": "2c72048b4c4e0ce0ae83221e4ee5788978254340", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.845659164, "max_line_length": 140, "alphanum_fraction": 0.590234459, "include": true, "reason": "import numpy", "num_tokens": 4064}
|
# This Source Code Form is subject to the terms of the MIT
# License. If a copy of the same was not distributed with this
# file, You can obtain one at
# https://github.com/akhilpandey95/scholarlyimpact/blob/master/LICENSE.
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
# function for computing sigmoid of a value
def sigmoid(value, derivative=False):
"""
Return the sigmoid of a numeric value
Parameters
----------
arg1 | value: int
The numeric value intended to convert into a continuous range
Returns
-------
Float
float
"""
try:
# compute the sigmoid
result = 1. / (1. + np.exp(-value))
# check if derivative is required
if derivative:
# return the sigmoid
return result * (1. - result)
# return the sigmoid
return result
except:
# return zero
return np.zeros(1)[0]
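# Illustrative values (for orientation only): sigmoid(0.0) returns 0.5, and
# sigmoid(0.0, derivative=True) returns 0.5 * (1 - 0.5) = 0.25.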
# function for processing the dataset
def data_processing(file_path):
"""
Process the dataset and prepare it for using it against the neural network model
Parameters
----------
arg1 | file_path: str
The file path indicating the location of the dataset
Returns
-------
Dataframe
pandas.DataFrame
"""
try:
# read the dataset
data = pd.read_csv(file_path, low_memory=False)
# create the label encoder
encoder = LabelEncoder()
# transform all the columns
data.Type = encoder.fit_transform(data.Type)
# transform the column profession
data.Profession = encoder.fit_transform(data.Profession)
# transform the column academic status
data.AcademicStatus = encoder.fit_transform(data.AcademicStatus)
# transform the column platform with max mentions
data.PlatformWithMaxMentions = encoder.fit_transform(data.PlatformWithMaxMentions)
# create a target variable for the first experiment
data = data.assign(target_exp_1 =
list(map(lambda x: 1 if x > 0 else 0, tqdm(data['citations']))))
# create a target variable for the second experiment
data = data.assign(target_exp_2 =
list(map(lambda x: 1 if x > 9 else 0, tqdm(data['citations']))))
# create a target variable for the third experiment
data = data.assign(target_exp_3 =
list(map(lambda x: np.log(1 + x), tqdm(data['citations']))))
# create a target variable for the third experiment
data = data.assign(target_exp_4 = list(map(sigmoid, tqdm(data['citations']))))
# drop the unnecessary columns
data = data.drop(columns=['Type', 'citations', 'citations(Log_Transformed)'])
# return the dataframe
return data
except:
return pd.DataFrame()
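# Illustrative targets (hypothetical article, not from the dataset): a paper with
# 9 citations gets target_exp_1 = 1 (9 > 0), target_exp_2 = 0 (9 is not > 9), and
# target_exp_3 = log(1 + 9) ~= 2.303.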
# function for preparing the X & Y for the dataset
def prepare_X_Y(data_frame, target):
"""
Process the dataframe and return the X and Y for the experiment
Parameters
----------
arg1 | data_frame: pandas.DataFrame
A loaded dataframe for preparing X and Y
arg2 | target: str
The intended target variable for the experiment
Returns
-------
Tuple
numpy.ndarray, numpy.ndarray
"""
try:
# the following data columns will be considered as features
data_columns = ['mendeley', 'citeulike', 'News', 'Blogs',
'Reddit', 'Twitter', 'Facebook',
'GooglePlus', 'PeerReviews','Wikipedia',
'TotalPlatforms', 'SincePublication','PlatformWithMaxMentions',
'Countries', 'MaxFollowers','Profession',
'AcademicStatus', 'PostLength', 'HashTags', 'Mentions',
'AuthorCount']
# set the X column
X = data_frame[data_columns].to_numpy()
# set the target variable
Y = data_frame[target]
# return the tuple
return X, Y
except:
return np.zeros((len(data_frame), 21)), np.zeros((len(data_frame), 21))
|
{"hexsha": "686ffc27dec9d30aac5f98d02724579efd281ae0", "size": 4187, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data.py", "max_stars_repo_name": "akhilpandey95/scholarlyimpact", "max_stars_repo_head_hexsha": "215ae832c90f0564fa0301e4c3f1c99525617625", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data.py", "max_issues_repo_name": "akhilpandey95/scholarlyimpact", "max_issues_repo_head_hexsha": "215ae832c90f0564fa0301e4c3f1c99525617625", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-02-20T23:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-20T04:05:43.000Z", "max_forks_repo_path": "src/data.py", "max_forks_repo_name": "akhilpandey95/scholarlyimpact", "max_forks_repo_head_hexsha": "215ae832c90f0564fa0301e4c3f1c99525617625", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0148148148, "max_line_length": 91, "alphanum_fraction": 0.6102221161, "include": true, "reason": "import numpy", "num_tokens": 908}
|
# using StaticArrays: later package all types as static/mutable fixed size arrays/matrices
"""
Definition of abstract spatial types to be used across the package
"""
# todo: make types parametrized: {N(dims), T(type)}
# todo: look for packages to depend on: GeometryTypes
# todo: quaternions?
abstract type AbstractPoint <: AbstractVector{Float64} end # coords
abstract type AbstractFrame <: AbstractVector{Float64} end # location vec, orientation vec
abstract type AbstractPose <: AbstractVector{Float64} end # transl & rot components
abstract type AbstractTransform <: AbstractMatrix{Float64} end
abstract type SO2 <: AbstractTransform end
abstract type SE2 <: AbstractTransform end
abstract type SO3 <: AbstractTransform end
abstract type SE3 <: AbstractTransform end
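# Illustrative concrete subtype (a sketch, not part of the package): subtypes of
# AbstractPoint must also implement the AbstractVector interface, e.g.
# struct Point3 <: AbstractPoint
#     coords::Vector{Float64}
# end
# Base.size(::Point3) = (3,)
# Base.getindex(p::Point3, i::Int) = p.coords[i]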
|
{"hexsha": "43546a92d81109967509aeee3f77de1a5655b455", "size": 780, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/spatialmaths/spatial_types.jl", "max_stars_repo_name": "pranshumalik14/RoboticsToolkit.jl", "max_stars_repo_head_hexsha": "652daaafc8f5ab27e2817252007a28ff3427a311", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/spatialmaths/spatial_types.jl", "max_issues_repo_name": "pranshumalik14/RoboticsToolkit.jl", "max_issues_repo_head_hexsha": "652daaafc8f5ab27e2817252007a28ff3427a311", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/spatialmaths/spatial_types.jl", "max_forks_repo_name": "pranshumalik14/RoboticsToolkit.jl", "max_forks_repo_head_hexsha": "652daaafc8f5ab27e2817252007a28ff3427a311", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3333333333, "max_line_length": 92, "alphanum_fraction": 0.7846153846, "num_tokens": 174}
|
@testset "1.1.2.3 (a+b x^2)^p (c+d x^2)^q" begin
(a, b, c, d, m, p, q, x, ) = @variables a b c d m p q x
#= ::Package:: =#
#= ::Title:: =#
#=Integrands*of*the*form*(a+b*x^2)^p*(c+d*x^2)^q=#
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^p*(c+d*x^2)^q=#
#= ::Subsubsection::Closed:: =#
#=p>0=#
@test_int [(a + b*x^2)*(c + d*x^2)^4, x, 2, a*c^4*x + (1/3)*c^3*(b*c + 4*a*d)*x^3 + (2/5)*c^2*d*(2*b*c + 3*a*d)*x^5 + (2/7)*c*d^2*(3*b*c + 2*a*d)*x^7 + (1/9)*d^3*(4*b*c + a*d)*x^9 + (1/11)*b*d^4*x^11]
@test_int [(a + b*x^2)*(c + d*x^2)^3, x, 2, a*c^3*x + (1/3)*c^2*(b*c + 3*a*d)*x^3 + (3/5)*c*d*(b*c + a*d)*x^5 + (1/7)*d^2*(3*b*c + a*d)*x^7 + (1/9)*b*d^3*x^9]
@test_int [(a + b*x^2)*(c + d*x^2)^2, x, 2, a*c^2*x + (1/3)*c*(b*c + 2*a*d)*x^3 + (1/5)*d*(2*b*c + a*d)*x^5 + (1/7)*b*d^2*x^7]
@test_int [(a + b*x^2)*(c + d*x^2)^1, x, 2, a*c*x + (1/3)*(b*c + a*d)*x^3 + (1/5)*b*d*x^5]
@test_int [(a + b*x^2)/(c + d*x^2)^1, x, 2, (b*x)/d - ((b*c - a*d)*atan((sqrt(d)*x)/sqrt(c)))/(sqrt(c)*d^(3/2))]
@test_int [(a + b*x^2)/(c + d*x^2)^2, x, 2, -(((b*c - a*d)*x)/(2*c*d*(c + d*x^2))) + ((b*c + a*d)*atan((sqrt(d)*x)/sqrt(c)))/(2*c^(3/2)*d^(3/2))]
@test_int [(a + b*x^2)/(c + d*x^2)^3, x, 3, -(((b*c - a*d)*x)/(4*c*d*(c + d*x^2)^2)) + ((b*c + 3*a*d)*x)/(8*c^2*d*(c + d*x^2)) + ((b*c + 3*a*d)*atan((sqrt(d)*x)/sqrt(c)))/(8*c^(5/2)*d^(3/2))]
@test_int [(a + b*x^2)^2*(c + d*x^2)^3, x, 2, a^2*c^3*x + (1/3)*a*c^2*(2*b*c + 3*a*d)*x^3 + (1/5)*c*(b^2*c^2 + 6*a*b*c*d + 3*a^2*d^2)*x^5 + (1/7)*d*(3*b^2*c^2 + 6*a*b*c*d + a^2*d^2)*x^7 + (1/9)*b*d^2*(3*b*c + 2*a*d)*x^9 + (1/11)*b^2*d^3*x^11]
@test_int [(a + b*x^2)^2*(c + d*x^2)^2, x, 2, a^2*c^2*x + (2/3)*a*c*(b*c + a*d)*x^3 + (1/5)*(b^2*c^2 + 4*a*b*c*d + a^2*d^2)*x^5 + (2/7)*b*d*(b*c + a*d)*x^7 + (1/9)*b^2*d^2*x^9]
@test_int [(a + b*x^2)^2*(c + d*x^2)^1, x, 2, a^2*c*x + (1/3)*a*(2*b*c + a*d)*x^3 + (1/5)*b*(b*c + 2*a*d)*x^5 + (1/7)*b^2*d*x^7]
@test_int [(a + b*x^2)^2/(c + d*x^2)^1, x, 3, -((b*(b*c - 2*a*d)*x)/d^2) + (b^2*x^3)/(3*d) + ((b*c - a*d)^2*atan((sqrt(d)*x)/sqrt(c)))/(sqrt(c)*d^(5/2))]
@test_int [(a + b*x^2)^2/(c + d*x^2)^2, x, 4, (b^2*x)/d^2 + ((b*c - a*d)^2*x)/(2*c*d^2*(c + d*x^2)) - ((b*c - a*d)*(3*b*c + a*d)*atan((sqrt(d)*x)/sqrt(c)))/(2*c^(3/2)*d^(5/2))]
@test_int [(a + b*x^2)^2/(c + d*x^2)^3, x, 3, -(((b*c - a*d)*x*(a + b*x^2))/(4*c*d*(c + d*x^2)^2)) + (3*(a^2/c^2 - b^2/d^2)*x)/(8*(c + d*x^2)) + ((3*b^2*c^2 + 2*a*b*c*d + 3*a^2*d^2)*atan((sqrt(d)*x)/sqrt(c)))/(8*c^(5/2)*d^(5/2))]
@test_int [(a + b*x^2)^3*(c + d*x^2)^3, x, 2, a^3*c^3*x + a^2*c^2*(b*c + a*d)*x^3 + (3/5)*a*c*(b^2*c^2 + 3*a*b*c*d + a^2*d^2)*x^5 + (1/7)*(b*c + a*d)*(b^2*c^2 + 8*a*b*c*d + a^2*d^2)*x^7 + (1/3)*b*d*(b^2*c^2 + 3*a*b*c*d + a^2*d^2)*x^9 + (3/11)*b^2*d^2*(b*c + a*d)*x^11 + (1/13)*b^3*d^3*x^13]
@test_int [(a + b*x^2)^3*(c + d*x^2)^2, x, 2, a^3*c^2*x + (1/3)*a^2*c*(3*b*c + 2*a*d)*x^3 + (1/5)*a*(3*b^2*c^2 + 6*a*b*c*d + a^2*d^2)*x^5 + (1/7)*b*(b^2*c^2 + 6*a*b*c*d + 3*a^2*d^2)*x^7 + (1/9)*b^2*d*(2*b*c + 3*a*d)*x^9 + (1/11)*b^3*d^2*x^11]
@test_int [(a + b*x^2)^3*(c + d*x^2)^1, x, 2, a^3*c*x + (1/3)*a^2*(3*b*c + a*d)*x^3 + (3/5)*a*b*(b*c + a*d)*x^5 + (1/7)*b^2*(b*c + 3*a*d)*x^7 + (1/9)*b^3*d*x^9]
@test_int [(a + b*x^2)^3/(c + d*x^2)^1, x, 3, (b*(b^2*c^2 - 3*a*b*c*d + 3*a^2*d^2)*x)/d^3 - (b^2*(b*c - 3*a*d)*x^3)/(3*d^2) + (b^3*x^5)/(5*d) - ((b*c - a*d)^3*atan((sqrt(d)*x)/sqrt(c)))/(sqrt(c)*d^(7/2))]
@test_int [(a + b*x^2)^3/(c + d*x^2)^2, x, 4, -((b^2*(2*b*c - 3*a*d)*x)/d^3) + (b^3*x^3)/(3*d^2) - ((b*c - a*d)^3*x)/(2*c*d^3*(c + d*x^2)) + ((b*c - a*d)^2*(5*b*c + a*d)*atan((sqrt(d)*x)/sqrt(c)))/(2*c^(3/2)*d^(7/2))]
@test_int [(a + b*x^2)^3/(c + d*x^2)^3, x, 5, (b^3*x)/d^3 - ((b*c - a*d)^3*x)/(4*c*d^3*(c + d*x^2)^2) + (3*(b*c - a*d)^2*(3*b*c + a*d)*x)/(8*c^2*d^3*(c + d*x^2)) - (3*(b*c - a*d)*(4*b^2*c^2 + (b*c + a*d)^2)*atan((sqrt(d)*x)/sqrt(c)))/(8*c^(5/2)*d^(7/2))]
#= ::Subsubsection::Closed:: =#
#=p<0=#
@test_int [1/(a + b*x^2)*(c + d*x^2)^4, x, 3, (d*(2*b*c - a*d)*(2*b^2*c^2 - 2*a*b*c*d + a^2*d^2)*x)/b^4 + (d^2*(6*b^2*c^2 - 4*a*b*c*d + a^2*d^2)*x^3)/(3*b^3) + (d^3*(4*b*c - a*d)*x^5)/(5*b^2) + (d^4*x^7)/(7*b) + ((b*c - a*d)^4*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*b^(9/2))]
@test_int [1/(a + b*x^2)*(c + d*x^2)^3, x, 3, (d*(3*b^2*c^2 - 3*a*b*c*d + a^2*d^2)*x)/b^3 + (d^2*(3*b*c - a*d)*x^3)/(3*b^2) + (d^3*x^5)/(5*b) + ((b*c - a*d)^3*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*b^(7/2))]
@test_int [1/(a + b*x^2)*(c + d*x^2)^2, x, 3, (d*(2*b*c - a*d)*x)/b^2 + (d^2*x^3)/(3*b) + ((b*c - a*d)^2*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*b^(5/2))]
@test_int [1/(a + b*x^2)*(c + d*x^2)^1, x, 2, (d*x)/b + ((b*c - a*d)*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*b^(3/2))]
@test_int [1/(a + b*x^2)/(c + d*x^2)^1, x, 3, (sqrt(b)*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*(b*c - a*d)) - (sqrt(d)*atan((sqrt(d)*x)/sqrt(c)))/(sqrt(c)*(b*c - a*d))]
@test_int [1/(a + b*x^2)/(c + d*x^2)^2, x, 4, -((d*x)/(2*c*(b*c - a*d)*(c + d*x^2))) + (b^(3/2)*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*(b*c - a*d)^2) - (sqrt(d)*(3*b*c - a*d)*atan((sqrt(d)*x)/sqrt(c)))/(2*c^(3/2)*(b*c - a*d)^2)]
@test_int [1/(a + b*x^2)/(c + d*x^2)^3, x, 5, -((d*x)/(4*c*(b*c - a*d)*(c + d*x^2)^2)) - (d*(7*b*c - 3*a*d)*x)/(8*c^2*(b*c - a*d)^2*(c + d*x^2)) + (b^(5/2)*atan((sqrt(b)*x)/sqrt(a)))/(sqrt(a)*(b*c - a*d)^3) - (sqrt(d)*(15*b^2*c^2 - 10*a*b*c*d + 3*a^2*d^2)*atan((sqrt(d)*x)/sqrt(c)))/(8*c^(5/2)*(b*c - a*d)^3)]
@test_int [1/(a + b*x^2)^2*(c + d*x^2)^5, x, 4, (d^2*(10*b^3*c^3 - 20*a*b^2*c^2*d + 15*a^2*b*c*d^2 - 4*a^3*d^3)*x)/b^5 + (d^3*(10*b^2*c^2 - 10*a*b*c*d + 3*a^2*d^2)*x^3)/(3*b^4) + (d^4*(5*b*c - 2*a*d)*x^5)/(5*b^3) + (d^5*x^7)/(7*b^2) + ((b*c - a*d)^5*x)/(2*a*b^5*(a + b*x^2)) + ((b*c - a*d)^4*(b*c + 9*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*b^(11/2))]
@test_int [1/(a + b*x^2)^2*(c + d*x^2)^4, x, 4, (d^2*(6*b^2*c^2 - 8*a*b*c*d + 3*a^2*d^2)*x)/b^4 + (2*d^3*(2*b*c - a*d)*x^3)/(3*b^3) + (d^4*x^5)/(5*b^2) + ((b*c - a*d)^4*x)/(2*a*b^4*(a + b*x^2)) + ((b*c - a*d)^3*(b*c + 7*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*b^(9/2))]
@test_int [1/(a + b*x^2)^2*(c + d*x^2)^3, x, 4, (d^2*(3*b*c - 2*a*d)*x)/b^3 + (d^3*x^3)/(3*b^2) + ((b*c - a*d)^3*x)/(2*a*b^3*(a + b*x^2)) + ((b*c - a*d)^2*(b*c + 5*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*b^(7/2))]
@test_int [1/(a + b*x^2)^2*(c + d*x^2)^2, x, 4, (d^2*x)/b^2 + ((b*c - a*d)^2*x)/(2*a*b^2*(a + b*x^2)) + ((b*c - a*d)*(b*c + 3*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*b^(5/2))]
@test_int [1/(a + b*x^2)^2*(c + d*x^2)^1, x, 2, ((b*c - a*d)*x)/(2*a*b*(a + b*x^2)) + ((b*c + a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*b^(3/2))]
@test_int [1/(a + b*x^2)^2/(c + d*x^2)^1, x, 4, (b*x)/(2*a*(b*c - a*d)*(a + b*x^2)) + (sqrt(b)*(b*c - 3*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*(b*c - a*d)^2) + (d^(3/2)*atan((sqrt(d)*x)/sqrt(c)))/(sqrt(c)*(b*c - a*d)^2)]
@test_int [1/(a + b*x^2)^2/(c + d*x^2)^2, x, 5, (d*(b*c + a*d)*x)/(2*a*c*(b*c - a*d)^2*(c + d*x^2)) + (b*x)/(2*a*(b*c - a*d)*(a + b*x^2)*(c + d*x^2)) + (b^(3/2)*(b*c - 5*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*(b*c - a*d)^3) + (d^(3/2)*(5*b*c - a*d)*atan((sqrt(d)*x)/sqrt(c)))/(2*c^(3/2)*(b*c - a*d)^3)]
@test_int [1/(a + b*x^2)^2/(c + d*x^2)^3, x, 6, (d*(2*b*c + a*d)*x)/(4*a*c*(b*c - a*d)^2*(c + d*x^2)^2) + (b*x)/(2*a*(b*c - a*d)*(a + b*x^2)*(c + d*x^2)^2) + (d*(4*b*c - a*d)*(b*c + 3*a*d)*x)/(8*a*c^2*(b*c - a*d)^3*(c + d*x^2)) + (b^(5/2)*(b*c - 7*a*d)*atan((sqrt(b)*x)/sqrt(a)))/(2*a^(3/2)*(b*c - a*d)^4) + (d^(3/2)*(35*b^2*c^2 - 14*a*b*c*d + 3*a^2*d^2)*atan((sqrt(d)*x)/sqrt(c)))/(8*c^(5/2)*(b*c - a*d)^4)]
@test_int [1/(a + b*x^2)^3*(c + d*x^2)^5, x, 5, (d^3*(10*b^2*c^2 - 15*a*b*c*d + 6*a^2*d^2)*x)/b^5 + (d^4*(5*b*c - 3*a*d)*x^3)/(3*b^4) + (d^5*x^5)/(5*b^3) + ((b*c - a*d)^5*x)/(4*a*b^5*(a + b*x^2)^2) + ((b*c - a*d)^4*(3*b*c + 17*a*d)*x)/(8*a^2*b^5*(a + b*x^2)) + ((b*c - a*d)^3*(3*b^2*c^2 + 14*a*b*c*d + 63*a^2*d^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*b^(11/2))]
@test_int [1/(a + b*x^2)^3*(c + d*x^2)^4, x, 5, (d^3*(4*b*c - 3*a*d)*x)/b^4 + (d^4*x^3)/(3*b^3) + ((b*c - a*d)^4*x)/(4*a*b^4*(a + b*x^2)^2) + ((b*c - a*d)^3*(3*b*c + 13*a*d)*x)/(8*a^2*b^4*(a + b*x^2)) + ((b*c - a*d)^2*(3*b^2*c^2 + 10*a*b*c*d + 35*a^2*d^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*b^(9/2))]
@test_int [1/(a + b*x^2)^3*(c + d*x^2)^3, x, 5, (d^3*x)/b^3 + ((b*c - a*d)^3*x)/(4*a*b^3*(a + b*x^2)^2) + (3*(b*c - a*d)^2*(b*c + 3*a*d)*x)/(8*a^2*b^3*(a + b*x^2)) + (3*(b*c - a*d)*(4*a^2*d^2 + (b*c + a*d)^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*b^(7/2))]
@test_int [1/(a + b*x^2)^3*(c + d*x^2)^2, x, 3, (3*(c^2/a^2 - d^2/b^2)*x)/(8*(a + b*x^2)) + ((b*c - a*d)*x*(c + d*x^2))/(4*a*b*(a + b*x^2)^2) + ((3*b^2*c^2 + 2*a*b*c*d + 3*a^2*d^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*b^(5/2))]
@test_int [1/(a + b*x^2)^3*(c + d*x^2)^1, x, 3, ((b*c - a*d)*x)/(4*a*b*(a + b*x^2)^2) + ((3*b*c + a*d)*x)/(8*a^2*b*(a + b*x^2)) + ((3*b*c + a*d)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*b^(3/2))]
@test_int [1/(a + b*x^2)^3/(c + d*x^2)^1, x, 5, (b*x)/(4*a*(b*c - a*d)*(a + b*x^2)^2) + (b*(3*b*c - 7*a*d)*x)/(8*a^2*(b*c - a*d)^2*(a + b*x^2)) + (sqrt(b)*(3*b^2*c^2 - 10*a*b*c*d + 15*a^2*d^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*(b*c - a*d)^3) - (d^(5/2)*atan((sqrt(d)*x)/sqrt(c)))/(sqrt(c)*(b*c - a*d)^3)]
@test_int [1/(a + b*x^2)^3/(c + d*x^2)^2, x, 6, (d*(b*c - 4*a*d)*(3*b*c + a*d)*x)/(8*a^2*c*(b*c - a*d)^3*(c + d*x^2)) + (b*x)/(4*a*(b*c - a*d)*(a + b*x^2)^2*(c + d*x^2)) + (3*b*(b*c - 3*a*d)*x)/(8*a^2*(b*c - a*d)^2*(a + b*x^2)*(c + d*x^2)) + (b^(3/2)*(3*b^2*c^2 - 14*a*b*c*d + 35*a^2*d^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*(b*c - a*d)^4) - (d^(5/2)*(7*b*c - a*d)*atan((sqrt(d)*x)/sqrt(c)))/(2*c^(3/2)*(b*c - a*d)^4)]
@test_int [1/(a + b*x^2)^3/(c + d*x^2)^3, x, 7, (d*(3*b^2*c^2 - 13*a*b*c*d - 2*a^2*d^2)*x)/(8*a^2*c*(b*c - a*d)^3*(c + d*x^2)^2) + (b*x)/(4*a*(b*c - a*d)*(a + b*x^2)^2*(c + d*x^2)^2) + (b*(3*b*c - 11*a*d)*x)/(8*a^2*(b*c - a*d)^2*(a + b*x^2)*(c + d*x^2)^2) + (3*d*(b*c + a*d)*(b^2*c^2 - 6*a*b*c*d + a^2*d^2)*x)/(8*a^2*c^2*(b*c - a*d)^4*(c + d*x^2)) + (3*b^(5/2)*(b^2*c^2 - 6*a*b*c*d + 21*a^2*d^2)*atan((sqrt(b)*x)/sqrt(a)))/(8*a^(5/2)*(b*c - a*d)^5) - (3*d^(5/2)*(21*b^2*c^2 - 6*a*b*c*d + a^2*d^2)*atan((sqrt(d)*x)/sqrt(c)))/(8*c^(5/2)*(b*c - a*d)^5)]
@test_int [(-1 + x^2)^3/(1 + x^2)^4, x, 3, -((x*(1 - x^2)^2)/(3*(1 + x^2)^3)) - (2*x)/(3*(1 + x^2))]
@test_int [(-1 + x^2)^4/(1 + x^2)^5, x, 5, (x*(1 - x^2)^3)/(4*(1 + x^2)^4) + (3*x*(1 - x^2))/(8*(1 + x^2)^2) + (3*atan(x))/8]
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/2)*(c+d*x^2)^q=#
#= ::Subsubsection::Closed:: =#
#=p>0=#
@test_int [(a + b*x^2)^(1/2)*(c + d*x^2)^3, x, 6, ((64*b^3*c^3 - 48*a*b^2*c^2*d + 24*a^2*b*c*d^2 - 5*a^3*d^3)*x*sqrt(a + b*x^2))/(128*b^3) + (d*(72*b^2*c^2 - 52*a*b*c*d + 15*a^2*d^2)*x*(a + b*x^2)^(3/2))/(192*b^3) + (d*(12*b*c - 5*a*d)*x*(a + b*x^2)^(3/2)*(c + d*x^2))/(48*b^2) + (d*x*(a + b*x^2)^(3/2)*(c + d*x^2)^2)/(8*b) + (a*(64*b^3*c^3 - 48*a*b^2*c^2*d + 24*a^2*b*c*d^2 - 5*a^3*d^3)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(128*b^(7/2))]
@test_int [(a + b*x^2)^(1/2)*(c + d*x^2)^2, x, 5, ((8*b^2*c^2 - 4*a*b*c*d + a^2*d^2)*x*sqrt(a + b*x^2))/(16*b^2) + (d*(8*b*c - 3*a*d)*x*(a + b*x^2)^(3/2))/(24*b^2) + (d*x*(a + b*x^2)^(3/2)*(c + d*x^2))/(6*b) + (a*(8*b^2*c^2 - 4*a*b*c*d + a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(16*b^(5/2))]
@test_int [(a + b*x^2)^(1/2)*(c + d*x^2)^1, x, 4, ((4*b*c - a*d)*x*sqrt(a + b*x^2))/(8*b) + (d*x*(a + b*x^2)^(3/2))/(4*b) + (a*(4*b*c - a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(8*b^(3/2))]
@test_int [(a + b*x^2)^(1/2)*(c + d*x^2)^0, x, 3, (1/2)*x*sqrt(a + b*x^2) + (a*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*sqrt(b))]
@test_int [(a + b*x^2)^(1/2)/(c + d*x^2)^1, x, 5, (sqrt(b)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/d - (sqrt(b*c - a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(sqrt(c)*d)]
@test_int [(a + b*x^2)^(1/2)/(c + d*x^2)^2, x, 3, (x*sqrt(a + b*x^2))/(2*c*(c + d*x^2)) + (a*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(2*c^(3/2)*sqrt(b*c - a*d))]
@test_int [(a + b*x^2)^(1/2)/(c + d*x^2)^3, x, 4, -((d*x*(a + b*x^2)^(3/2))/(4*c*(b*c - a*d)*(c + d*x^2)^2)) + ((4*b*c - 3*a*d)*x*sqrt(a + b*x^2))/(8*c^2*(b*c - a*d)*(c + d*x^2)) + (a*(4*b*c - 3*a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(8*c^(5/2)*(b*c - a*d)^(3/2))]
@test_int [(a + b*x^2)^(1/2)/(c + d*x^2)^4, x, 6, (x*sqrt(a + b*x^2))/(6*c*(c + d*x^2)^3) + ((4*b*c - 5*a*d)*x*sqrt(a + b*x^2))/(24*c^2*(b*c - a*d)*(c + d*x^2)^2) + ((2*b*c - 5*a*d)*(4*b*c - 3*a*d)*x*sqrt(a + b*x^2))/(48*c^3*(b*c - a*d)^2*(c + d*x^2)) + (a*(8*b^2*c^2 - 12*a*b*c*d + 5*a^2*d^2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(16*c^(7/2)*(b*c - a*d)^(5/2))]
@test_int [(a + b*x^2)^(3/2)*(c + d*x^2)^3, x, 7, (3*a*(4*b*c - a*d)*(8*b^2*c^2 - 2*a*b*c*d + a^2*d^2)*x*sqrt(a + b*x^2))/(256*b^3) + ((4*b*c - a*d)*(8*b^2*c^2 - 2*a*b*c*d + a^2*d^2)*x*(a + b*x^2)^(3/2))/(128*b^3) + (d*(36*b^2*c^2 - 20*a*b*c*d + 5*a^2*d^2)*x*(a + b*x^2)^(5/2))/(160*b^3) + (d*(14*b*c - 5*a*d)*x*(a + b*x^2)^(5/2)*(c + d*x^2))/(80*b^2) + (d*x*(a + b*x^2)^(5/2)*(c + d*x^2)^2)/(10*b) + (3*a^2*(4*b*c - a*d)*(8*b^2*c^2 - 2*a*b*c*d + a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(256*b^(7/2))]
@test_int [(a + b*x^2)^(3/2)*(c + d*x^2)^2, x, 6, (a*(48*b^2*c^2 - 16*a*b*c*d + 3*a^2*d^2)*x*sqrt(a + b*x^2))/(128*b^2) + ((48*b^2*c^2 - 16*a*b*c*d + 3*a^2*d^2)*x*(a + b*x^2)^(3/2))/(192*b^2) + (d*(10*b*c - 3*a*d)*x*(a + b*x^2)^(5/2))/(48*b^2) + (d*x*(a + b*x^2)^(5/2)*(c + d*x^2))/(8*b) + (a^2*(48*b^2*c^2 - 16*a*b*c*d + 3*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(128*b^(5/2))]
@test_int [(a + b*x^2)^(3/2)*(c + d*x^2)^1, x, 5, (a*(6*b*c - a*d)*x*sqrt(a + b*x^2))/(16*b) + ((6*b*c - a*d)*x*(a + b*x^2)^(3/2))/(24*b) + (d*x*(a + b*x^2)^(5/2))/(6*b) + (a^2*(6*b*c - a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(16*b^(3/2))]
@test_int [(a + b*x^2)^(3/2)*(c + d*x^2)^0, x, 4, (3/8)*a*x*sqrt(a + b*x^2) + (1/4)*x*(a + b*x^2)^(3/2) + (3*a^2*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(8*sqrt(b))]
@test_int [(a + b*x^2)^(3/2)/(c + d*x^2)^1, x, 6, (b*x*sqrt(a + b*x^2))/(2*d) - (sqrt(b)*(2*b*c - 3*a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*d^2) + ((b*c - a*d)^(3/2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(sqrt(c)*d^2)]
@test_int [(a + b*x^2)^(3/2)/(c + d*x^2)^2, x, 6, -(((b*c - a*d)*x*sqrt(a + b*x^2))/(2*c*d*(c + d*x^2))) + (b^(3/2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/d^2 - (sqrt(b*c - a*d)*(2*b*c + a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(2*c^(3/2)*d^2)]
@test_int [(a + b*x^2)^(3/2)/(c + d*x^2)^3, x, 4, (x*(a + b*x^2)^(3/2))/(4*c*(c + d*x^2)^2) + (3*a*x*sqrt(a + b*x^2))/(8*c^2*(c + d*x^2)) + (3*a^2*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(8*c^(5/2)*sqrt(b*c - a*d))]
@test_int [(a + b*x^2)^(3/2)/(c + d*x^2)^4, x, 5, -((d*x*(a + b*x^2)^(5/2))/(6*c*(b*c - a*d)*(c + d*x^2)^3)) + ((6*b*c - 5*a*d)*x*(a + b*x^2)^(3/2))/(24*c^2*(b*c - a*d)*(c + d*x^2)^2) + (a*(6*b*c - 5*a*d)*x*sqrt(a + b*x^2))/(16*c^3*(b*c - a*d)*(c + d*x^2)) + (a^2*(6*b*c - 5*a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(16*c^(7/2)*(b*c - a*d)^(3/2))]
@test_int [(a + b*x^2)^(3/2)/(c + d*x^2)^5, x, 7, -(((b*c - a*d)*x*sqrt(a + b*x^2))/(8*c*d*(c + d*x^2)^4)) + ((2*b*c + 7*a*d)*x*sqrt(a + b*x^2))/(48*c^2*d*(c + d*x^2)^3) + ((8*b^2*c^2 + 24*a*b*c*d - 35*a^2*d^2)*x*sqrt(a + b*x^2))/(192*c^3*d*(b*c - a*d)*(c + d*x^2)^2) + ((16*b^3*c^3 + 40*a*b^2*c^2*d - 170*a^2*b*c*d^2 + 105*a^3*d^3)*x*sqrt(a + b*x^2))/(384*c^4*d*(b*c - a*d)^2*(c + d*x^2)) + (a^2*(48*b^2*c^2 - 80*a*b*c*d + 35*a^2*d^2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(128*c^(9/2)*(b*c - a*d)^(5/2))]
@test_int [(a + b*x^2)^(5/2)*(c + d*x^2)^3, x, 8, (a^2*(320*b^3*c^3 - 120*a*b^2*c^2*d + 36*a^2*b*c*d^2 - 5*a^3*d^3)*x*sqrt(a + b*x^2))/(1024*b^3) + (a*(320*b^3*c^3 - 120*a*b^2*c^2*d + 36*a^2*b*c*d^2 - 5*a^3*d^3)*x*(a + b*x^2)^(3/2))/(1536*b^3) + ((320*b^3*c^3 - 120*a*b^2*c^2*d + 36*a^2*b*c*d^2 - 5*a^3*d^3)*x*(a + b*x^2)^(5/2))/(1920*b^3) + (d*(152*b^2*c^2 - 68*a*b*c*d + 15*a^2*d^2)*x*(a + b*x^2)^(7/2))/(960*b^3) + (d*(16*b*c - 5*a*d)*x*(a + b*x^2)^(7/2)*(c + d*x^2))/(120*b^2) + (d*x*(a + b*x^2)^(7/2)*(c + d*x^2)^2)/(12*b) + (a^3*(320*b^3*c^3 - 120*a*b^2*c^2*d + 36*a^2*b*c*d^2 - 5*a^3*d^3)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(1024*b^(7/2))]
@test_int [(a + b*x^2)^(5/2)*(c + d*x^2)^2, x, 7, (a^2*(80*b^2*c^2 - 20*a*b*c*d + 3*a^2*d^2)*x*sqrt(a + b*x^2))/(256*b^2) + (a*(80*b^2*c^2 - 20*a*b*c*d + 3*a^2*d^2)*x*(a + b*x^2)^(3/2))/(384*b^2) + ((80*b^2*c^2 - 20*a*b*c*d + 3*a^2*d^2)*x*(a + b*x^2)^(5/2))/(480*b^2) + (3*d*(4*b*c - a*d)*x*(a + b*x^2)^(7/2))/(80*b^2) + (d*x*(a + b*x^2)^(7/2)*(c + d*x^2))/(10*b) + (a^3*(80*b^2*c^2 - 20*a*b*c*d + 3*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(256*b^(5/2))]
@test_int [(a + b*x^2)^(5/2)*(c + d*x^2)^1, x, 6, (5*a^2*(8*b*c - a*d)*x*sqrt(a + b*x^2))/(128*b) + (5*a*(8*b*c - a*d)*x*(a + b*x^2)^(3/2))/(192*b) + ((8*b*c - a*d)*x*(a + b*x^2)^(5/2))/(48*b) + (d*x*(a + b*x^2)^(7/2))/(8*b) + (5*a^3*(8*b*c - a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(128*b^(3/2))]
@test_int [(a + b*x^2)^(5/2)*(c + d*x^2)^0, x, 5, (5/16)*a^2*x*sqrt(a + b*x^2) + (5/24)*a*x*(a + b*x^2)^(3/2) + (1/6)*x*(a + b*x^2)^(5/2) + (5*a^3*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(16*sqrt(b))]
@test_int [(a + b*x^2)^(5/2)/(c + d*x^2)^1, x, 7, -((b*(4*b*c - 7*a*d)*x*sqrt(a + b*x^2))/(8*d^2)) + (b*x*(a + b*x^2)^(3/2))/(4*d) + (sqrt(b)*(8*b^2*c^2 - 20*a*b*c*d + 15*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(8*d^3) - ((b*c - a*d)^(5/2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(sqrt(c)*d^3)]
@test_int [(a + b*x^2)^(5/2)/(c + d*x^2)^2, x, 7, (b*(2*b*c - a*d)*x*sqrt(a + b*x^2))/(2*c*d^2) - ((b*c - a*d)*x*(a + b*x^2)^(3/2))/(2*c*d*(c + d*x^2)) - (b^(3/2)*(4*b*c - 5*a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*d^3) + ((b*c - a*d)^(3/2)*(4*b*c + a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(2*c^(3/2)*d^3)]
@test_int [(a + b*x^2)^(5/2)/(c + d*x^2)^3, x, 7, -(((b*c - a*d)*x*(a + b*x^2)^(3/2))/(4*c*d*(c + d*x^2)^2)) - ((b*c - a*d)*(4*b*c + 3*a*d)*x*sqrt(a + b*x^2))/(8*c^2*d^2*(c + d*x^2)) + (b^(5/2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/d^3 - (sqrt(b*c - a*d)*(8*b^2*c^2 + 4*a*b*c*d + 3*a^2*d^2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(8*c^(5/2)*d^3)]
@test_int [(a + b*x^2)^(5/2)/(c + d*x^2)^4, x, 5, (x*(a + b*x^2)^(5/2))/(6*c*(c + d*x^2)^3) + (5*a*x*(a + b*x^2)^(3/2))/(24*c^2*(c + d*x^2)^2) + (5*a^2*x*sqrt(a + b*x^2))/(16*c^3*(c + d*x^2)) + (5*a^3*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(16*c^(7/2)*sqrt(b*c - a*d))]
@test_int [(a + b*x^2)^(5/2)/(c + d*x^2)^5, x, 6, -((d*x*(a + b*x^2)^(7/2))/(8*c*(b*c - a*d)*(c + d*x^2)^4)) + ((8*b*c - 7*a*d)*x*(a + b*x^2)^(5/2))/(48*c^2*(b*c - a*d)*(c + d*x^2)^3) + (5*a*(8*b*c - 7*a*d)*x*(a + b*x^2)^(3/2))/(192*c^3*(b*c - a*d)*(c + d*x^2)^2) + (5*a^2*(8*b*c - 7*a*d)*x*sqrt(a + b*x^2))/(128*c^4*(b*c - a*d)*(c + d*x^2)) + (5*a^3*(8*b*c - 7*a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(128*c^(9/2)*(b*c - a*d)^(3/2))]
@test_int [sqrt(1 - x^2)/(1 + x^2), x, 4, -asin(x) + sqrt(2)*atan((sqrt(2)*x)/sqrt(1 - x^2))]
@test_int [sqrt(1 + x^2)/(-1 + x^2), x, 4, asinh(x) - sqrt(2)*atanh((sqrt(2)*x)/sqrt(1 + x^2))]
@test_int [sqrt(1 - x^2)/(-1 + 2*x^2), x, 4, -(asin(x)/2) - (1/2)*atanh(x/sqrt(1 - x^2))]
#= ::Subsubsection::Closed:: =#
#=p<0=#
@test_int [1/(a + b*x^2)^(1/2)*(c + d*x^2)^3, x, 5, (d*(44*b^2*c^2 - 44*a*b*c*d + 15*a^2*d^2)*x*sqrt(a + b*x^2))/(48*b^3) + (5*d*(2*b*c - a*d)*x*sqrt(a + b*x^2)*(c + d*x^2))/(24*b^2) + (d*x*sqrt(a + b*x^2)*(c + d*x^2)^2)/(6*b) + ((2*b*c - a*d)*(8*b^2*c^2 - 8*a*b*c*d + 5*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(16*b^(7/2))]
@test_int [1/(a + b*x^2)^(1/2)*(c + d*x^2)^2, x, 4, (3*d*(2*b*c - a*d)*x*sqrt(a + b*x^2))/(8*b^2) + (d*x*sqrt(a + b*x^2)*(c + d*x^2))/(4*b) + ((8*b^2*c^2 - 8*a*b*c*d + 3*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(8*b^(5/2))]
@test_int [1/(a + b*x^2)^(1/2)*(c + d*x^2)^1, x, 3, (d*x*sqrt(a + b*x^2))/(2*b) + ((2*b*c - a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*b^(3/2))]
@test_int [1/(a + b*x^2)^(1/2)*(c + d*x^2)^0, x, 2, atanh((sqrt(b)*x)/sqrt(a + b*x^2))/sqrt(b)]
@test_int [1/(a + b*x^2)^(1/2)/(c + d*x^2)^1, x, 2, atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2)))/(sqrt(c)*sqrt(b*c - a*d))]
@test_int [1/(a + b*x^2)^(1/2)/(c + d*x^2)^2, x, 3, -((d*x*sqrt(a + b*x^2))/(2*c*(b*c - a*d)*(c + d*x^2))) + ((2*b*c - a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(2*c^(3/2)*(b*c - a*d)^(3/2))]
@test_int [1/(a + b*x^2)^(1/2)/(c + d*x^2)^3, x, 5, -((d*x*sqrt(a + b*x^2))/(4*c*(b*c - a*d)*(c + d*x^2)^2)) - (3*d*(2*b*c - a*d)*x*sqrt(a + b*x^2))/(8*c^2*(b*c - a*d)^2*(c + d*x^2)) + ((8*b^2*c^2 - 8*a*b*c*d + 3*a^2*d^2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(8*c^(5/2)*(b*c - a*d)^(5/2))]
@test_int [1/(a + b*x^2)^(3/2)*(c + d*x^2)^4, x, 6, -((d*(48*b^3*c^3 - 248*a*b^2*c^2*d + 290*a^2*b*c*d^2 - 105*a^3*d^3)*x*sqrt(a + b*x^2))/(48*a*b^4)) - (d*(24*b^2*c^2 - 64*a*b*c*d + 35*a^2*d^2)*x*sqrt(a + b*x^2)*(c + d*x^2))/(24*a*b^3) - (d*(6*b*c - 7*a*d)*x*sqrt(a + b*x^2)*(c + d*x^2)^2)/(6*a*b^2) + ((b*c - a*d)*x*(c + d*x^2)^3)/(a*b*sqrt(a + b*x^2)) + (d*(64*b^3*c^3 - 144*a*b^2*c^2*d + 120*a^2*b*c*d^2 - 35*a^3*d^3)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(16*b^(9/2))]
@test_int [1/(a + b*x^2)^(3/2)*(c + d*x^2)^3, x, 5, -((d*(2*b*c - 5*a*d)*(4*b*c - 3*a*d)*x*sqrt(a + b*x^2))/(8*a*b^3)) - (d*(4*b*c - 5*a*d)*x*sqrt(a + b*x^2)*(c + d*x^2))/(4*a*b^2) + ((b*c - a*d)*x*(c + d*x^2)^2)/(a*b*sqrt(a + b*x^2)) + (3*d*(8*b^2*c^2 - 12*a*b*c*d + 5*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(8*b^(7/2))]
@test_int [1/(a + b*x^2)^(3/2)*(c + d*x^2)^2, x, 4, ((b*c - a*d)^2*x)/(a*b^2*sqrt(a + b*x^2)) + (d^2*x*sqrt(a + b*x^2))/(2*b^2) + (d*(4*b*c - 3*a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*b^(5/2)), -((d*(2*b*c - 3*a*d)*x*sqrt(a + b*x^2))/(2*a*b^2)) + ((b*c - a*d)*x*(c + d*x^2))/(a*b*sqrt(a + b*x^2)) + (d*(4*b*c - 3*a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*b^(5/2))]
@test_int [1/(a + b*x^2)^(3/2)*(c + d*x^2)^1, x, 3, ((b*c - a*d)*x)/(a*b*sqrt(a + b*x^2)) + (d*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/b^(3/2)]
@test_int [1/(a + b*x^2)^(3/2)*(c + d*x^2)^0, x, 1, x/(a*sqrt(a + b*x^2))]
@test_int [1/(a + b*x^2)^(3/2)/(c + d*x^2)^1, x, 3, (b*x)/(a*(b*c - a*d)*sqrt(a + b*x^2)) - (d*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(sqrt(c)*(b*c - a*d)^(3/2))]
@test_int [1/(a + b*x^2)^(3/2)/(c + d*x^2)^2, x, 5, (b*(2*b*c + a*d)*x)/(2*a*c*(b*c - a*d)^2*sqrt(a + b*x^2)) - (d*x)/(2*c*(b*c - a*d)*sqrt(a + b*x^2)*(c + d*x^2)) - (d*(4*b*c - a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(2*c^(3/2)*(b*c - a*d)^(5/2))]
@test_int [1/(a + b*x^2)^(3/2)/(c + d*x^2)^3, x, 6, -((d*x)/(4*c*(b*c - a*d)*sqrt(a + b*x^2)*(c + d*x^2)^2)) + (b*(4*b*c + a*d)*x)/(4*a*c*(b*c - a*d)^2*sqrt(a + b*x^2)*(c + d*x^2)) + (d*(4*b*c - a*d)*(2*b*c + 3*a*d)*x*sqrt(a + b*x^2))/(8*a*c^2*(b*c - a*d)^3*(c + d*x^2)) - (3*d*(8*b^2*c^2 - 4*a*b*c*d + a^2*d^2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(8*c^(5/2)*(b*c - a*d)^(7/2))]
@test_int [1/(a + b*x^2)^(5/2)*(c + d*x^2)^4, x, 6, -((d*(16*b^3*c^3 + 40*a*b^2*c^2*d - 170*a^2*b*c*d^2 + 105*a^3*d^3)*x*sqrt(a + b*x^2))/(24*a^2*b^4)) - (d*(8*b^2*c^2 + 24*a*b*c*d - 35*a^2*d^2)*x*sqrt(a + b*x^2)*(c + d*x^2))/(12*a^2*b^3) + ((b*c - a*d)*(2*b*c + 7*a*d)*x*(c + d*x^2)^2)/(3*a^2*b^2*sqrt(a + b*x^2)) + ((b*c - a*d)*x*(c + d*x^2)^3)/(3*a*b*(a + b*x^2)^(3/2)) + (d^2*(48*b^2*c^2 - 80*a*b*c*d + 35*a^2*d^2)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(8*b^(9/2))]
@test_int [1/(a + b*x^2)^(5/2)*(c + d*x^2)^3, x, 5, -((d*(4*b^2*c^2 + 8*a*b*c*d - 15*a^2*d^2)*x*sqrt(a + b*x^2))/(6*a^2*b^3)) + ((b*c - a*d)*(2*b*c + 5*a*d)*x*(c + d*x^2))/(3*a^2*b^2*sqrt(a + b*x^2)) + ((b*c - a*d)*x*(c + d*x^2)^2)/(3*a*b*(a + b*x^2)^(3/2)) + (d^2*(6*b*c - 5*a*d)*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/(2*b^(7/2))]
@test_int [1/(a + b*x^2)^(5/2)*(c + d*x^2)^2, x, 4, ((b*c - a*d)*(2*b*c + 3*a*d)*x)/(3*a^2*b^2*sqrt(a + b*x^2)) + ((b*c - a*d)*x*(c + d*x^2))/(3*a*b*(a + b*x^2)^(3/2)) + (d^2*atanh((sqrt(b)*x)/sqrt(a + b*x^2)))/b^(5/2)]
@test_int [1/(a + b*x^2)^(5/2)*(c + d*x^2)^1, x, 2, (2*c*x)/(3*a^2*sqrt(a + b*x^2)) + (x*(c + d*x^2))/(3*a*(a + b*x^2)^(3/2))]
@test_int [1/(a + b*x^2)^(5/2)*(c + d*x^2)^0, x, 2, x/(3*a*(a + b*x^2)^(3/2)) + (2*x)/(3*a^2*sqrt(a + b*x^2))]
@test_int [1/(a + b*x^2)^(5/2)/(c + d*x^2)^1, x, 5, (b*x)/(3*a*(b*c - a*d)*(a + b*x^2)^(3/2)) + (b*(2*b*c - 5*a*d)*x)/(3*a^2*(b*c - a*d)^2*sqrt(a + b*x^2)) + (d^2*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(sqrt(c)*(b*c - a*d)^(5/2))]
@test_int [1/(a + b*x^2)^(5/2)/(c + d*x^2)^2, x, 6, (b*(2*b*c + 3*a*d)*x)/(6*a*c*(b*c - a*d)^2*(a + b*x^2)^(3/2)) + (b*(4*b^2*c^2 - 16*a*b*c*d - 3*a^2*d^2)*x)/(6*a^2*c*(b*c - a*d)^3*sqrt(a + b*x^2)) - (d*x)/(2*c*(b*c - a*d)*(a + b*x^2)^(3/2)*(c + d*x^2)) + (d^2*(6*b*c - a*d)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(2*c^(3/2)*(b*c - a*d)^(7/2))]
@test_int [1/(a + b*x^2)^(5/2)/(c + d*x^2)^3, x, 7, -((d*x)/(4*c*(b*c - a*d)*(a + b*x^2)^(3/2)*(c + d*x^2)^2)) + (b*(4*b*c + 3*a*d)*x)/(12*a*c*(b*c - a*d)^2*(a + b*x^2)^(3/2)*(c + d*x^2)) + (b*(8*b^2*c^2 - 40*a*b*c*d - 3*a^2*d^2)*x)/(12*a^2*c*(b*c - a*d)^3*sqrt(a + b*x^2)*(c + d*x^2)) + (d*(16*b^3*c^3 - 88*a*b^2*c^2*d - 42*a^2*b*c*d^2 + 9*a^3*d^3)*x*sqrt(a + b*x^2))/(24*a^2*c^2*(b*c - a*d)^4*(c + d*x^2)) + (d^2*(48*b^2*c^2 - 16*a*b*c*d + 3*a^2*d^2)*atanh((sqrt(b*c - a*d)*x)/(sqrt(c)*sqrt(a + b*x^2))))/(8*c^(5/2)*(b*c - a*d)^(9/2))]
@test_int [(a + b*x^2)^3/(c + d*x^2)^(11/2), x, 5, -((d*x*(a + b*x^2)^4)/(9*c*(b*c - a*d)*(c + d*x^2)^(9/2))) + ((9*b*c - 8*a*d)*x*(a + b*x^2)^3)/(63*c^2*(b*c - a*d)*(c + d*x^2)^(7/2)) + (2*a*(9*b*c - 8*a*d)*x*(a + b*x^2)^2)/(105*c^3*(b*c - a*d)*(c + d*x^2)^(5/2)) + (8*a^2*(9*b*c - 8*a*d)*x*(a + b*x^2))/(315*c^4*(b*c - a*d)*(c + d*x^2)^(3/2)) + (16*a^3*(9*b*c - 8*a*d)*x)/(315*c^5*(b*c - a*d)*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^2/(c + d*x^2)^(9/2), x, 4, -((d*x*(a + b*x^2)^3)/(7*c*(b*c - a*d)*(c + d*x^2)^(7/2))) + ((7*b*c - 6*a*d)*x*(a + b*x^2)^2)/(35*c^2*(b*c - a*d)*(c + d*x^2)^(5/2)) + (4*a*(7*b*c - 6*a*d)*x*(a + b*x^2))/(105*c^3*(b*c - a*d)*(c + d*x^2)^(3/2)) + (8*a^2*(7*b*c - 6*a*d)*x)/(105*c^4*(b*c - a*d)*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^1/(c + d*x^2)^(7/2), x, 3, -(((b*c - a*d)*x)/(5*c*d*(c + d*x^2)^(5/2))) + ((b*c + 4*a*d)*x)/(15*c^2*d*(c + d*x^2)^(3/2)) + (2*(b*c + 4*a*d)*x)/(15*c^3*d*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^0/(c + d*x^2)^(5/2), x, 2, x/(3*c*(c + d*x^2)^(3/2)) + (2*x)/(3*c^2*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^1*(c + d*x^2)^(3/2)), x, 3, -((d*x)/(c*(b*c - a*d)*sqrt(c + d*x^2))) + (b*atan((sqrt(b*c - a*d)*x)/(sqrt(a)*sqrt(c + d*x^2))))/(sqrt(a)*(b*c - a*d)^(3/2))]
@test_int [1/((a + b*x^2)^2*(c + d*x^2)^(1/2)), x, 3, (b*x*sqrt(c + d*x^2))/(2*a*(b*c - a*d)*(a + b*x^2)) + ((b*c - 2*a*d)*atan((sqrt(b*c - a*d)*x)/(sqrt(a)*sqrt(c + d*x^2))))/(2*a^(3/2)*(b*c - a*d)^(3/2))]
@test_int [(c + d*x^2)^(1/2)/(a + b*x^2)^3, x, 4, ((3*b*c - 4*a*d)*x*sqrt(c + d*x^2))/(8*a^2*(b*c - a*d)*(a + b*x^2)) + (b*x*(c + d*x^2)^(3/2))/(4*a*(b*c - a*d)*(a + b*x^2)^2) + (c*(3*b*c - 4*a*d)*atan((sqrt(b*c - a*d)*x)/(sqrt(a)*sqrt(c + d*x^2))))/(8*a^(5/2)*(b*c - a*d)^(3/2))]
@test_int [(c + d*x^2)^(3/2)/(a + b*x^2)^4, x, 5, (c*(5*b*c - 6*a*d)*x*sqrt(c + d*x^2))/(16*a^3*(b*c - a*d)*(a + b*x^2)) + ((5*b*c - 6*a*d)*x*(c + d*x^2)^(3/2))/(24*a^2*(b*c - a*d)*(a + b*x^2)^2) + (b*x*(c + d*x^2)^(5/2))/(6*a*(b*c - a*d)*(a + b*x^2)^3) + (c^2*(5*b*c - 6*a*d)*atan((sqrt(b*c - a*d)*x)/(sqrt(a)*sqrt(c + d*x^2))))/(16*a^(7/2)*(b*c - a*d)^(3/2))]
@test_int [1/((b*c/d + b*x^2)*sqrt(c + d*x^2)), x, 2, (d*x)/(b*c*sqrt(c + d*x^2))]
@test_int [1/((1 + x^2)*sqrt(1 - x^2)), x, 2, atan((sqrt(2)*x)/sqrt(1 - x^2))/sqrt(2)]
@test_int [1/((a + b*x^2)*sqrt(c + d*x^2)), x, 2, atan((sqrt(b*c - a*d)*x)/(sqrt(a)*sqrt(c + d*x^2)))/(sqrt(a)*sqrt(b*c - a*d))]
@test_int [(-1 + x^2)/(1 + x^2)^(3/2), x, 2, (-2*x)/sqrt(1 + x^2) + asinh(x)]
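#= Editorial sketch: the purely algebraic entry 1/(c + d*x^2)^(5/2) above can
   be confirmed by hand; differentiating x/(3*c*(c + d*x^2)^(3/2)) +
   2*x/(3*c^2*sqrt(c + d*x^2)) term by term gives
   (c - 2*d*x^2 + 2*(c + d*x^2))/(3*c*(c + d*x^2)^(5/2)) = 1/(c + d*x^2)^(5/2).
   The numeric check below uses illustrative values c = 2.0, d = 3.0, x = 0.7. =#
let c = 2.0, d = 3.0, x = 0.7, h = 1e-6
    f(t) = 1/(c + d*t^2)^(5/2)                                    # integrand
    F(t) = t/(3*c*(c + d*t^2)^(3/2)) + 2*t/(3*c^2*sqrt(c + d*t^2)) # antiderivative
    @assert abs((F(x + h) - F(x - h))/(2h) - f(x)) < 1e-6
end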
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/3)*(c+d*x^2)^q=#
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/3)*(c+d*x^2)^q*when*b*c+3*a*d=0=#
#= ::Subsubsection::Closed:: =#
#=p>0=#
@test_int [(a - b*x^2)^(2/3)*(3*a + b*x^2)^3, x, 8, (18144*a^3*x*(a - b*x^2)^(2/3))/1235 - (23544*a^2*x*(a - b*x^2)^(5/3))/6175 - (378/475)*a*x*(a - b*x^2)^(5/3)*(3*a + b*x^2) - (3/25)*x*(a - b*x^2)^(5/3)*(3*a + b*x^2)^2 - (72576*a^4*x)/(1235*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (36288*3^(1/4)*sqrt(2 + sqrt(3))*a^(13/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1235*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (24192*sqrt(2)*3^(3/4)*a^(13/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1235*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(2/3)*(3*a + b*x^2)^2, x, 7, (7776*a^2*x*(a - b*x^2)^(2/3))/1729 - (252/247)*a*x*(a - b*x^2)^(5/3) - (3/19)*x*(a - b*x^2)^(5/3)*(3*a + b*x^2) - (31104*a^3*x)/(1729*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (15552*3^(1/4)*sqrt(2 + sqrt(3))*a^(10/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1729*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (10368*sqrt(2)*3^(3/4)*a^(10/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1729*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(2/3)*(3*a + b*x^2)^1, x, 6, (18/13)*a*x*(a - b*x^2)^(2/3) - (3/13)*x*(a - b*x^2)^(5/3) - (72*a^2*x)/(13*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (36*3^(1/4)*sqrt(2 + sqrt(3))*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(13*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (24*sqrt(2)*3^(3/4)*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(13*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(2/3)/(3*a + b*x^2)^1, x, 6, (3*x)/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3)) + (2^(1/3)*a^(1/6)*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(sqrt(3)*sqrt(b)) + (2^(1/3)*a^(1/6)*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(sqrt(3)*sqrt(b)) - (2^(1/3)*a^(1/6)*atanh((sqrt(b)*x)/sqrt(a)))/(3*sqrt(b)) + (2^(1/3)*a^(1/6)*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/sqrt(b) + (3*3^(1/4)*sqrt(2 + sqrt(3))*a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(2*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (sqrt(2)*3^(3/4)*a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(2/3)/(3*a + b*x^2)^2, x, 6, (x*(a - b*x^2)^(2/3))/(6*a*(3*a + b*x^2)) - x/(6*a*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(4*3^(3/4)*a^(2/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + ((a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(3*sqrt(2)*3^(1/4)*a^(2/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(2/3)/(3*a + b*x^2)^3, x, 8, (x*(a - b*x^2)^(2/3))/(12*a*(3*a + b*x^2)^2) + (x*(a - b*x^2)^(2/3))/(36*a^2*(3*a + b*x^2)) - x/(36*a^2*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(72*2^(2/3)*sqrt(3)*a^(11/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(72*2^(2/3)*sqrt(3)*a^(11/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(216*2^(2/3)*a^(11/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(72*2^(2/3)*a^(11/6)*sqrt(b)) - (sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(24*3^(3/4)*a^(5/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + ((a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(18*sqrt(2)*3^(1/4)*a^(5/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(2/3)/(3*a + b*x^2)^4, x, 9, (x*(a - b*x^2)^(2/3))/(18*a*(3*a + b*x^2)^3) + (x*(a - b*x^2)^(2/3))/(54*a^2*(3*a + b*x^2)^2) + (x*(a - b*x^2)^(2/3))/(144*a^3*(3*a + b*x^2)) - x/(144*a^3*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (7*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(1296*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) + (7*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(1296*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) - (7*atanh((sqrt(b)*x)/sqrt(a)))/(3888*2^(2/3)*a^(17/6)*sqrt(b)) + (7*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/(1296*2^(2/3)*a^(17/6)*sqrt(b)) - (sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(96*3^(3/4)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + ((a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(72*sqrt(2)*3^(1/4)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(5/3)*(3*a + b*x^2)^3, x, 9, (2809728*a^4*x*(a - b*x^2)^(2/3))/267995 + (1404864*a^3*x*(a - b*x^2)^(5/3))/191425 - (33264*a^2*x*(a - b*x^2)^(8/3))/14725 - (432/775)*a*x*(a - b*x^2)^(8/3)*(3*a + b*x^2) - (3/31)*x*(a - b*x^2)^(8/3)*(3*a + b*x^2)^2 - (11238912*a^5*x)/(267995*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (5619456*3^(1/4)*sqrt(2 + sqrt(3))*a^(16/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(267995*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (3746304*sqrt(2)*3^(3/4)*a^(16/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(267995*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(5/3)*(3*a + b*x^2)^2, x, 8, (28512*a^3*x*(a - b*x^2)^(2/3))/8645 + (14256*a^2*x*(a - b*x^2)^(5/3))/6175 - (306/475)*a*x*(a - b*x^2)^(8/3) - (3/25)*x*(a - b*x^2)^(8/3)*(3*a + b*x^2) - (114048*a^4*x)/(8645*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (57024*3^(1/4)*sqrt(2 + sqrt(3))*a^(13/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(8645*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (38016*sqrt(2)*3^(3/4)*a^(13/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(8645*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(5/3)*(3*a + b*x^2)^1, x, 7, (1800*a^2*x*(a - b*x^2)^(2/3))/1729 + (180/247)*a*x*(a - b*x^2)^(5/3) - (3/19)*x*(a - b*x^2)^(8/3) - (7200*a^3*x)/(1729*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (3600*3^(1/4)*sqrt(2 + sqrt(3))*a^(10/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1729*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (2400*sqrt(2)*3^(3/4)*a^(10/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1729*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(5/3)/(3*a + b*x^2)^1, x, 7, (-(3/7))*x*(a - b*x^2)^(2/3) + (96*a*x)/(7*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (4*2^(1/3)*a^(7/6)*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(sqrt(3)*sqrt(b)) + (4*2^(1/3)*a^(7/6)*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(sqrt(3)*sqrt(b)) - (4*2^(1/3)*a^(7/6)*atanh((sqrt(b)*x)/sqrt(a)))/(3*sqrt(b)) + (4*2^(1/3)*a^(7/6)*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/sqrt(b) + (48*3^(1/4)*sqrt(2 + sqrt(3))*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (32*sqrt(2)*3^(3/4)*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(5/3)/(3*a + b*x^2)^2, x, 7, (2*x*(a - b*x^2)^(2/3))/(3*(3*a + b*x^2)) - (11*x)/(3*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (2^(1/3)*a^(1/6)*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(sqrt(3)*sqrt(b)) - (2^(1/3)*a^(1/6)*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(sqrt(3)*sqrt(b)) + (2^(1/3)*a^(1/6)*atanh((sqrt(b)*x)/sqrt(a)))/(3*sqrt(b)) - (2^(1/3)*a^(1/6)*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/sqrt(b) - (11*sqrt(2 + sqrt(3))*a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(2*3^(3/4)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (11*sqrt(2)*a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(3*3^(1/4)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(a - b*x^2)^(5/3)/(3*a + b*x^2)^3, x, 9, (x*(a - b*x^2)^(2/3))/(3*(3*a + b*x^2)^2) - (x*(a - b*x^2)^(2/3))/(18*a*(3*a + b*x^2)) + x/(18*a*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(18*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(18*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(54*2^(2/3)*a^(5/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(18*2^(2/3)*a^(5/6)*sqrt(b)) + (sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(12*3^(3/4)*a^(2/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - ((a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(9*sqrt(2)*3^(1/4)*a^(2/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
#= ::Subsubsection::Closed:: =#
#=p<0=#
@test_int [(3*a + b*x^2)^4/(a - b*x^2)^(1/3), x, 8, -((1552608*a^3*x*(a - b*x^2)^(2/3))/43225) - (36288*a^2*x*(a - b*x^2)^(2/3)*(3*a + b*x^2))/6175 - (18/19)*a*x*(a - b*x^2)^(2/3)*(3*a + b*x^2)^2 - (3/25)*x*(a - b*x^2)^(2/3)*(3*a + b*x^2)^3 - (3794688*a^4*x)/(8645*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (1897344*3^(1/4)*sqrt(2 + sqrt(3))*a^(13/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(8645*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (1264896*sqrt(2)*3^(3/4)*a^(13/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(8645*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^3/(a - b*x^2)^(1/3), x, 7, -((15768*a^2*x*(a - b*x^2)^(2/3))/1729) - (324/247)*a*x*(a - b*x^2)^(2/3)*(3*a + b*x^2) - (3/19)*x*(a - b*x^2)^(2/3)*(3*a + b*x^2)^2 - (215136*a^3*x)/(1729*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (107568*3^(1/4)*sqrt(2 + sqrt(3))*a^(10/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1729*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (71712*sqrt(2)*3^(3/4)*a^(10/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(1729*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^2/(a - b*x^2)^(1/3), x, 6, (-(198/91))*a*x*(a - b*x^2)^(2/3) - (3/13)*x*(a - b*x^2)^(2/3)*(3*a + b*x^2) - (3240*a^2*x)/(91*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (1620*3^(1/4)*sqrt(2 + sqrt(3))*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(91*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (1080*sqrt(2)*3^(3/4)*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(91*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^1/(a - b*x^2)^(1/3), x, 5, (-(3/7))*x*(a - b*x^2)^(2/3) - (72*a*x)/(7*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (36*3^(1/4)*sqrt(2 + sqrt(3))*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (24*sqrt(2)*3^(3/4)*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^1*(a - b*x^2)^(1/3)), x, 1, atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(6*2^(2/3)*a^(5/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(2*2^(2/3)*a^(5/6)*sqrt(b))]
@test_int [1/((3*a + b*x^2)^2*(a - b*x^2)^(1/3)), x, 7, (x*(a - b*x^2)^(2/3))/(24*a^2*(3*a + b*x^2)) - x/(24*a^2*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(8*2^(2/3)*sqrt(3)*a^(11/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(8*2^(2/3)*sqrt(3)*a^(11/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(24*2^(2/3)*a^(11/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(8*2^(2/3)*a^(11/6)*sqrt(b)) - (sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(16*3^(3/4)*a^(5/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + ((a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(12*sqrt(2)*3^(1/4)*a^(5/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^3*(a - b*x^2)^(1/3)), x, 8, (x*(a - b*x^2)^(2/3))/(48*a^2*(3*a + b*x^2)^2) + (5*x*(a - b*x^2)^(2/3))/(288*a^3*(3*a + b*x^2)) - (5*x)/(288*a^3*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (5*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(144*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) + (5*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(144*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) - (5*atanh((sqrt(b)*x)/sqrt(a)))/(432*2^(2/3)*a^(17/6)*sqrt(b)) + (5*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/(144*2^(2/3)*a^(17/6)*sqrt(b)) - (5*sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(192*3^(3/4)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (5*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(144*sqrt(2)*3^(1/4)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^3/(a - b*x^2)^(4/3), x, 7, (2538/91)*a*x*(a - b*x^2)^(2/3) + (81/13)*x*(a - b*x^2)^(2/3)*(3*a + b*x^2) + (6*x*(3*a + b*x^2)^2)/(a - b*x^2)^(1/3) + (20088*a^2*x)/(91*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (10044*3^(1/4)*sqrt(2 + sqrt(3))*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(91*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (6696*sqrt(2)*3^(3/4)*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(91*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^2/(a - b*x^2)^(4/3), x, 6, (45/7)*x*(a - b*x^2)^(2/3) + (6*x*(3*a + b*x^2))/(a - b*x^2)^(1/3) + (324*a*x)/(7*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (162*3^(1/4)*sqrt(2 + sqrt(3))*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (108*sqrt(2)*3^(3/4)*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^1/(a - b*x^2)^(4/3), x, 5, (6*x)/(a - b*x^2)^(1/3) + (9*x)/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3)) + (9*3^(1/4)*sqrt(2 + sqrt(3))*a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(2*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (3*sqrt(2)*3^(3/4)*a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^1*(a - b*x^2)^(4/3)), x, 7, (3*x)/(8*a^2*(a - b*x^2)^(1/3)) + (3*x)/(8*a^2*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(8*2^(2/3)*sqrt(3)*a^(11/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(8*2^(2/3)*sqrt(3)*a^(11/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(24*2^(2/3)*a^(11/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(8*2^(2/3)*a^(11/6)*sqrt(b)) + (3*3^(1/4)*sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(16*a^(5/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (3^(3/4)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(4*sqrt(2)*a^(5/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^2*(a - b*x^2)^(4/3)), x, 8, x/(12*a^3*(a - b*x^2)^(1/3)) + x/(24*a^2*(a - b*x^2)^(1/3)*(3*a + b*x^2)) + x/(12*a^3*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(16*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(16*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(48*2^(2/3)*a^(17/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(16*2^(2/3)*a^(17/6)*sqrt(b)) + (sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(8*3^(3/4)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - ((a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(6*sqrt(2)*3^(1/4)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^3*(a - b*x^2)^(4/3)), x, 9, x/(48*a^2*(a - b*x^2)^(1/3)*(3*a + b*x^2)^2) + (17*x)/(192*a^3*(a - b*x^2)^(1/3)*(3*a + b*x^2)) - (19*x*(a - b*x^2)^(2/3))/(1152*a^4*(3*a + b*x^2)) + (19*x)/(1152*a^4*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (7*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(288*2^(2/3)*sqrt(3)*a^(23/6)*sqrt(b)) + (7*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(288*2^(2/3)*sqrt(3)*a^(23/6)*sqrt(b)) - (7*atanh((sqrt(b)*x)/sqrt(a)))/(864*2^(2/3)*a^(23/6)*sqrt(b)) + (7*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/(288*2^(2/3)*a^(23/6)*sqrt(b)) + (19*sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(768*3^(3/4)*a^(11/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (19*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(576*sqrt(2)*3^(1/4)*a^(11/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^4/(a - b*x^2)^(7/3), x, 8, (-(3240/91))*a*x*(a - b*x^2)^(2/3) - (81/13)*x*(a - b*x^2)^(2/3)*(3*a + b*x^2) - (9*x*(3*a + b*x^2)^2)/(2*(a - b*x^2)^(1/3)) + (3*x*(3*a + b*x^2)^3)/(2*(a - b*x^2)^(4/3)) - (36936*a^2*x)/(91*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (18468*3^(1/4)*sqrt(2 + sqrt(3))*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(91*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (12312*sqrt(2)*3^(3/4)*a^(7/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(91*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^3/(a - b*x^2)^(7/3), x, 7, (-(27/14))*x*(a - b*x^2)^(2/3) + (3*x*(3*a + b*x^2)^2)/(2*(a - b*x^2)^(4/3)) - (324*a*x)/(7*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) - (162*3^(1/4)*sqrt(2 + sqrt(3))*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) + (108*sqrt(2)*3^(3/4)*a^(4/3)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(7*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [(3*a + b*x^2)^2/(a - b*x^2)^(7/3), x, 2, (9*x)/(2*(a - b*x^2)^(1/3)) + (3*x*(3*a + b*x^2))/(2*(a - b*x^2)^(4/3))]
@test_int [(3*a + b*x^2)^1/(a - b*x^2)^(7/3), x, 6, (3*x)/(2*(a - b*x^2)^(4/3)) + (9*x)/(4*a*(a - b*x^2)^(1/3)) + (9*x)/(4*a*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (9*3^(1/4)*sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(8*a^(2/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (3*3^(3/4)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(2*sqrt(2)*a^(2/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^1*(a - b*x^2)^(7/3)), x, 8, (3*x)/(32*a^2*(a - b*x^2)^(4/3)) + (21*x)/(64*a^3*(a - b*x^2)^(1/3)) + (21*x)/(64*a^3*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(32*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(32*2^(2/3)*sqrt(3)*a^(17/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(96*2^(2/3)*a^(17/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(32*2^(2/3)*a^(17/6)*sqrt(b)) + (21*3^(1/4)*sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(128*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (7*3^(3/4)*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(32*sqrt(2)*a^(8/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((3*a + b*x^2)^2*(a - b*x^2)^(7/3)), x, 9, (5*x)/(384*a^3*(a - b*x^2)^(4/3)) + (79*x)/(768*a^4*(a - b*x^2)^(1/3)) + x/(24*a^2*(a - b*x^2)^(4/3)*(3*a + b*x^2)) + (79*x)/(768*a^4*((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))) + (sqrt(3)*atan((sqrt(3)*sqrt(a))/(sqrt(b)*x)))/(128*2^(2/3)*a^(23/6)*sqrt(b)) + (sqrt(3)*atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(128*2^(2/3)*a^(23/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(128*2^(2/3)*a^(23/6)*sqrt(b)) + (3*atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3)))))/(128*2^(2/3)*a^(23/6)*sqrt(b)) + (79*sqrt(2 + sqrt(3))*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.E(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(512*3^(3/4)*a^(11/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2))) - (79*(a^(1/3) - (a - b*x^2)^(1/3))*sqrt((a^(2/3) + a^(1/3)*(a - b*x^2)^(1/3) + (a - b*x^2)^(2/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)*Elliptic.F(asin(((1 + sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))), -7 + 4*sqrt(3)))/(384*sqrt(2)*3^(1/4)*a^(11/3)*b*x*sqrt(-((a^(1/3)*(a^(1/3) - (a - b*x^2)^(1/3)))/((1 - sqrt(3))*a^(1/3) - (a - b*x^2)^(1/3))^2)))]
@test_int [1/((-a + b*x^2)^(1/3)*(-3*a - b*x^2)), x, 1, -(atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*(-a)^(1/3)*sqrt(a)*sqrt(b))) - atan((sqrt(3)*sqrt(a)*((-a)^(1/3) - 2^(1/3)*(-a + b*x^2)^(1/3)))/((-a)^(1/3)*sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*(-a)^(1/3)*sqrt(a)*sqrt(b)) + atanh((sqrt(b)*x)/sqrt(a))/(6*2^(2/3)*(-a)^(1/3)*sqrt(a)*sqrt(b)) - atanh(((-a)^(1/3)*sqrt(b)*x)/(sqrt(a)*((-a)^(1/3) + 2^(1/3)*(-a + b*x^2)^(1/3))))/(2*2^(2/3)*(-a)^(1/3)*sqrt(a)*sqrt(b))]
@test_int [1/((a + b*x^2)^(1/3)*(3*a - b*x^2)), x, 1, -(atan((sqrt(b)*x)/sqrt(a))/(6*2^(2/3)*a^(5/6)*sqrt(b))) + atan((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a + b*x^2)^(1/3))))/(2*2^(2/3)*a^(5/6)*sqrt(b)) - atanh((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) - atanh((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a + b*x^2)^(1/3)))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b))]
@test_int [1/((c + 3*d*x^2)^(1/3)*(c - d*x^2)), x, 1, -(atan((sqrt(3)*sqrt(d)*x)/sqrt(c))/(2*2^(2/3)*sqrt(3)*c^(5/6)*sqrt(d))) + (sqrt(3)*atan((sqrt(3)*sqrt(d)*x)/(c^(1/6)*(c^(1/3) + 2^(1/3)*(c + 3*d*x^2)^(1/3)))))/(2*2^(2/3)*c^(5/6)*sqrt(d)) - atanh(sqrt(c)/(sqrt(d)*x))/(2*2^(2/3)*c^(5/6)*sqrt(d)) - atanh((c^(1/6)*(c^(1/3) - 2^(1/3)*(c + 3*d*x^2)^(1/3)))/(sqrt(d)*x))/(2*2^(2/3)*c^(5/6)*sqrt(d))]
@test_int [1/((a - b*x^2)^(1/3)*(3*a + b*x^2)), x, 1, atan((sqrt(3)*sqrt(a))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) + atan((sqrt(3)*a^(1/6)*(a^(1/3) - 2^(1/3)*(a - b*x^2)^(1/3)))/(sqrt(b)*x))/(2*2^(2/3)*sqrt(3)*a^(5/6)*sqrt(b)) - atanh((sqrt(b)*x)/sqrt(a))/(6*2^(2/3)*a^(5/6)*sqrt(b)) + atanh((sqrt(b)*x)/(a^(1/6)*(a^(1/3) + 2^(1/3)*(a - b*x^2)^(1/3))))/(2*2^(2/3)*a^(5/6)*sqrt(b))]
@test_int [1/((c - 3*d*x^2)^(1/3)*(c + d*x^2)), x, 1, atan(sqrt(c)/(sqrt(d)*x))/(2*2^(2/3)*c^(5/6)*sqrt(d)) + atan((c^(1/6)*(c^(1/3) - 2^(1/3)*(c - 3*d*x^2)^(1/3)))/(sqrt(d)*x))/(2*2^(2/3)*c^(5/6)*sqrt(d)) - atanh((sqrt(3)*sqrt(d)*x)/sqrt(c))/(2*2^(2/3)*sqrt(3)*c^(5/6)*sqrt(d)) + (sqrt(3)*atanh((sqrt(3)*sqrt(d)*x)/(c^(1/6)*(c^(1/3) + 2^(1/3)*(c - 3*d*x^2)^(1/3)))))/(2*2^(2/3)*c^(5/6)*sqrt(d))]
@test_int [1/((1 - x^2)^(1/3)*(3 + x^2)), x, 1, atan(sqrt(3)/x)/(2*2^(2/3)*sqrt(3)) + atan((sqrt(3)*(1 - 2^(1/3)*(1 - x^2)^(1/3)))/x)/(2*2^(2/3)*sqrt(3)) - atanh(x)/(6*2^(2/3)) + atanh(x/(1 + 2^(1/3)*(1 - x^2)^(1/3)))/(2*2^(2/3))]
@test_int [1/((3 - x^2)*(1 + x^2)^(1/3)), x, 1, -(atan(x)/(6*2^(2/3))) + atan(x/(1 + 2^(1/3)*(1 + x^2)^(1/3)))/(2*2^(2/3)) - atanh(sqrt(3)/x)/(2*2^(2/3)*sqrt(3)) - atanh((sqrt(3)*(1 - 2^(1/3)*(1 + x^2)^(1/3)))/x)/(2*2^(2/3)*sqrt(3))]
@test_int [(3 - x)/((1 - x^2)^(1/3)*(3 + x^2)), x, 1, -((sqrt(3)*atan(1/sqrt(3) - (2^(2/3)*(1 + x)^(2/3))/(sqrt(3)*(1 - x)^(1/3))))/2^(2/3)) - log(3 + x^2)/(2*2^(2/3)) + (3*log(2^(1/3)*(1 - x)^(1/3) + (1 + x)^(2/3)))/(2*2^(2/3))]
@test_int [(3 + x)/((1 - x^2)^(1/3)*(3 + x^2)), x, 1, (sqrt(3)*atan(1/sqrt(3) - (2^(2/3)*(1 - x)^(2/3))/(sqrt(3)*(1 + x)^(1/3))))/2^(2/3) + log(3 + x^2)/(2*2^(2/3)) - (3*log((1 - x)^(2/3) + 2^(1/3)*(1 + x)^(1/3)))/(2*2^(2/3))]
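#= Editorial sketch: the concrete 1/((1 - x^2)^(1/3)*(3 + x^2)) entry above is
   the a = b = 1 instance of the general (a - b*x^2)^(1/3) rule earlier in this
   subsubsection, and its derivative can be spot-checked numerically at a point
   where every inverse-trig branch is smooth. Sample point x = 0.5 is an
   illustrative choice; the tabulated antiderivative is taken on trust. =#
let x = 0.5, h = 1e-6
    f(t) = 1/((1 - t^2)^(1/3)*(3 + t^2))
    F(t) = atan(sqrt(3)/t)/(2*2^(2/3)*sqrt(3)) +
           atan(sqrt(3)*(1 - 2^(1/3)*(1 - t^2)^(1/3))/t)/(2*2^(2/3)*sqrt(3)) -
           atanh(t)/(6*2^(2/3)) +
           atanh(t/(1 + 2^(1/3)*(1 - t^2)^(1/3)))/(2*2^(2/3))
    @assert abs((F(x + h) - F(x - h))/(2h) - f(x)) < 1e-6
end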
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/3)*(c+d*x^2)^q*when*b*c-9*a*d=0=#
@test_int [1/((a + b*x^2)^(1/3)*(9*a*d/b + d*x^2)), x, 1, (sqrt(b)*atan((sqrt(b)*x)/(3*sqrt(a))))/(12*a^(5/6)*d) + (sqrt(b)*atan((a^(1/3) - (a + b*x^2)^(1/3))^2/(3*a^(1/6)*sqrt(b)*x)))/(12*a^(5/6)*d) - (sqrt(b)*atanh((sqrt(3)*a^(1/6)*(a^(1/3) - (a + b*x^2)^(1/3)))/(sqrt(b)*x)))/(4*sqrt(3)*a^(5/6)*d)]
@test_int [1/((a - b*x^2)^(1/3)*(-9*a*d/b + d*x^2)), x, 1, -((sqrt(b)*atan((sqrt(3)*a^(1/6)*(a^(1/3) - (a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(4*sqrt(3)*a^(5/6)*d)) - (sqrt(b)*atanh((sqrt(b)*x)/(3*sqrt(a))))/(12*a^(5/6)*d) + (sqrt(b)*atanh((a^(1/3) - (a - b*x^2)^(1/3))^2/(3*a^(1/6)*sqrt(b)*x)))/(12*a^(5/6)*d)]
@test_int [1/((-a + b*x^2)^(1/3)*(-9*a*d/b + d*x^2)), x, 1, (sqrt(b)*atan((sqrt(3)*a^(1/6)*(a^(1/3) + (-a + b*x^2)^(1/3)))/(sqrt(b)*x)))/(4*sqrt(3)*a^(5/6)*d) + (sqrt(b)*atanh((sqrt(b)*x)/(3*sqrt(a))))/(12*a^(5/6)*d) - (sqrt(b)*atanh((a^(1/3) + (-a + b*x^2)^(1/3))^2/(3*a^(1/6)*sqrt(b)*x)))/(12*a^(5/6)*d)]
@test_int [1/((-a - b*x^2)^(1/3)*(9*a*d/b + d*x^2)), x, 1, -((sqrt(b)*atan((sqrt(b)*x)/(3*sqrt(a))))/(12*a^(5/6)*d)) - (sqrt(b)*atan((a^(1/3) + (-a - b*x^2)^(1/3))^2/(3*a^(1/6)*sqrt(b)*x)))/(12*a^(5/6)*d) + (sqrt(b)*atanh((sqrt(3)*a^(1/6)*(a^(1/3) + (-a - b*x^2)^(1/3)))/(sqrt(b)*x)))/(4*sqrt(3)*a^(5/6)*d)]
@test_int [1/((2 + b*x^2)^(1/3)*((18*d)/b + d*x^2)), x, 1, (sqrt(b)*atan((sqrt(b)*x)/(3*sqrt(2))))/(12*2^(5/6)*d) + (sqrt(b)*atan((2^(1/3) - (2 + b*x^2)^(1/3))^2/(3*2^(1/6)*sqrt(b)*x)))/(12*2^(5/6)*d) - (sqrt(b)*atanh((2^(1/6)*sqrt(3)*(2^(1/3) - (2 + b*x^2)^(1/3)))/(sqrt(b)*x)))/(4*2^(5/6)*sqrt(3)*d)]
@test_int [1/((-2 + b*x^2)^(1/3)*(-((18*d)/b) + d*x^2)), x, 1, (sqrt(b)*atan((2^(1/6)*sqrt(3)*(2^(1/3) + (-2 + b*x^2)^(1/3)))/(sqrt(b)*x)))/(4*2^(5/6)*sqrt(3)*d) + (sqrt(b)*atanh((sqrt(b)*x)/(3*sqrt(2))))/(12*2^(5/6)*d) - (sqrt(b)*atanh((2^(1/3) + (-2 + b*x^2)^(1/3))^2/(3*2^(1/6)*sqrt(b)*x)))/(12*2^(5/6)*d)]
@test_int [1/((2 + 3*x^2)^(1/3)*((18*d)/3 + d*x^2)), x, 1, atan(x/sqrt(6))/(4*2^(5/6)*sqrt(3)*d) + atan((2^(1/3) - (2 + 3*x^2)^(1/3))^2/(3*2^(1/6)*sqrt(3)*x))/(4*2^(5/6)*sqrt(3)*d) - atanh((2^(1/6)*(2^(1/3) - (2 + 3*x^2)^(1/3)))/x)/(4*2^(5/6)*d)]
@test_int [1/((2 - 3*x^2)^(1/3)*(-((18*d)/3) + d*x^2)), x, 1, -(atan((2^(1/6)*(2^(1/3) - (2 - 3*x^2)^(1/3)))/x)/(4*2^(5/6)*d)) - atanh(x/sqrt(6))/(4*2^(5/6)*sqrt(3)*d) + atanh((2^(1/3) - (2 - 3*x^2)^(1/3))^2/(3*2^(1/6)*sqrt(3)*x))/(4*2^(5/6)*sqrt(3)*d)]
@test_int [1/((-2 + 3*x^2)^(1/3)*(-((18*d)/3) + d*x^2)), x, 1, atan((2^(1/6)*(2^(1/3) + (-2 + 3*x^2)^(1/3)))/x)/(4*2^(5/6)*d) + atanh(x/sqrt(6))/(4*2^(5/6)*sqrt(3)*d) - atanh((2^(1/3) + (-2 + 3*x^2)^(1/3))^2/(3*2^(1/6)*sqrt(3)*x))/(4*2^(5/6)*sqrt(3)*d)]
@test_int [1/((-2 - 3*x^2)^(1/3)*((18*d)/3 + d*x^2)), x, 1, -(atan(x/sqrt(6))/(4*2^(5/6)*sqrt(3)*d)) - atan((2^(1/3) + (-2 - 3*x^2)^(1/3))^2/(3*2^(1/6)*sqrt(3)*x))/(4*2^(5/6)*sqrt(3)*d) + atanh((2^(1/6)*(2^(1/3) + (-2 - 3*x^2)^(1/3)))/x)/(4*2^(5/6)*d)]
@test_int [1/((1 + x^2)^(1/3)*(9 + x^2)), x, 1, (1/12)*atan(x/3) + (1/12)*atan((1 - (1 + x^2)^(1/3))^2/(3*x)) - atanh((sqrt(3)*(1 - (1 + x^2)^(1/3)))/x)/(4*sqrt(3))]
@test_int [1/((1 + b*x^2)^(1/3)*(9 + b*x^2)), x, 1, atan((sqrt(b)*x)/3)/(12*sqrt(b)) + atan((1 - (1 + b*x^2)^(1/3))^2/(3*sqrt(b)*x))/(12*sqrt(b)) - atanh((sqrt(3)*(1 - (1 + b*x^2)^(1/3)))/(sqrt(b)*x))/(4*sqrt(3)*sqrt(b))]
@test_int [1/((1 - x^2)^(1/3)*(9 - x^2)), x, 1, atan((sqrt(3)*(1 - (1 - x^2)^(1/3)))/x)/(4*sqrt(3)) + (1/12)*atanh(x/3) - (1/12)*atanh((1 - (1 - x^2)^(1/3))^2/(3*x))]
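#= Editorial sketch: for the b*c - 9*a*d = 0 specialization, the concrete
   1/((1 + x^2)^(1/3)*(9 + x^2)) entry above agrees with the general
   a = b = d = 1 instance of the first rule in this subsection, and a finite
   difference of its antiderivative should reproduce the integrand away from
   x = 0 (where two of the terms are singular). x = 0.5 is illustrative. =#
let x = 0.5, h = 1e-6
    f(t) = 1/((1 + t^2)^(1/3)*(9 + t^2))
    F(t) = atan(t/3)/12 + atan((1 - (1 + t^2)^(1/3))^2/(3t))/12 -
           atanh(sqrt(3)*(1 - (1 + t^2)^(1/3))/t)/(4*sqrt(3))
    @assert abs((F(x + h) - F(x - h))/(2h) - f(x)) < 1e-6
end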
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/2)*(c+d*x^2)^(q/2)=#
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/2)*(c+d*x^2)^(q/2)*when*b*c-a*d=0=#
@test_int [sqrt(-1 + c^2*x^2)/(d - c^2*d*x^2)^(5/2), x, 3, (x*sqrt(-1 + c^2*x^2))/(2*d*(d - c^2*d*x^2)^(3/2)) + (sqrt(-1 + c^2*x^2)*atanh(c*x))/(2*c*d^2*sqrt(d - c^2*d*x^2)), (x*sqrt(-1 + c^2*x^2))/(2*d^2*(1 - c^2*x^2)*sqrt(d - c^2*d*x^2)) + (sqrt(-1 + c^2*x^2)*atanh(c*x))/(2*c*d^2*sqrt(d - c^2*d*x^2))]
@test_int [1/((-1 + c^2*x^2)^(3/2)*sqrt(d - c^2*d*x^2)), x, 3, (d*x*sqrt(-1 + c^2*x^2))/(2*(d - c^2*d*x^2)^(3/2)) + (sqrt(-1 + c^2*x^2)*atanh(c*x))/(2*c*sqrt(d - c^2*d*x^2)), (x*(d - c^2*d*x^2)^(3/2))/(2*d^2*(1 - c^2*x^2)*(-1 + c^2*x^2)^(3/2)) + ((d - c^2*d*x^2)^(3/2)*atanh(c*x))/(2*c*d^2*(-1 + c^2*x^2)^(3/2))]
@test_int [1/((-1 + c^2*x^2)^(1/2)*(d - c^2*d*x^2)^(3/2)), x, 3, -((x*sqrt(-1 + c^2*x^2))/(2*(d - c^2*d*x^2)^(3/2))) - (sqrt(-1 + c^2*x^2)*atanh(c*x))/(2*c*d*sqrt(d - c^2*d*x^2)), (x*sqrt(d - c^2*d*x^2))/(2*d^2*(1 - c^2*x^2)*sqrt(-1 + c^2*x^2)) + (sqrt(d - c^2*d*x^2)*atanh(c*x))/(2*c*d^2*sqrt(-1 + c^2*x^2))]
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/2)*(c+d*x^2)^(q/2)=#
#= ::Subsubsection::Closed:: =#
#=q>0=#
@test_int [(a + b*x^2)^(3/2)*sqrt(c + d*x^2), x, 6, ((7*a*c - (2*b*c^2)/d + (3*a^2*d)/b)*x*sqrt(a + b*x^2))/(15*sqrt(c + d*x^2)) - (2*(b*c - 3*a*d)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(15*d) + (b*x*sqrt(a + b*x^2)*(c + d*x^2)^(3/2))/(5*d) + (sqrt(c)*(2*b^2*c^2 - 7*a*b*c*d - 3*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*b*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (c^(3/2)*(b*c - 9*a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(1/2)*sqrt(c + d*x^2), x, 5, ((b*c + a*d)*x*sqrt(a + b*x^2))/(3*b*sqrt(c + d*x^2)) + (1/3)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2) - (sqrt(c)*(b*c + a*d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*b*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (2*c^(3/2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(c + d*x^2)/(a + b*x^2)^(1/2), x, 4, (d*x*sqrt(a + b*x^2))/(b*sqrt(c + d*x^2)) - (sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(b*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (c^(3/2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(c + d*x^2)/(a + b*x^2)^(3/2), x, 1, (sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(sqrt(a)*sqrt(b)*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2))))]
@test_int [sqrt(c + d*x^2)/(a + b*x^2)^(5/2), x, 4, (x*sqrt(c + d*x^2))/(3*a*(a + b*x^2)^(3/2)) + ((2*b*c - a*d)*sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(3*a^(3/2)*sqrt(b)*(b*c - a*d)*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2)))) - (c^(3/2)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*a^2*(b*c - a*d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(c + d*x^2)/(a + b*x^2)^(7/2), x, 5, (x*sqrt(c + d*x^2))/(5*a*(a + b*x^2)^(5/2)) + ((4*b*c - 3*a*d)*x*sqrt(c + d*x^2))/(15*a^2*(b*c - a*d)*(a + b*x^2)^(3/2)) + ((8*b^2*c^2 - 13*a*b*c*d + 3*a^2*d^2)*sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(15*a^(5/2)*sqrt(b)*(b*c - a*d)^2*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2)))) - (2*c^(3/2)*sqrt(d)*(2*b*c - 3*a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*a^3*(b*c - a*d)^2*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(3/2)*(c + d*x^2)^(3/2), x, 7, -((2*(b*c + a*d)*(b^2*c^2 - 6*a*b*c*d + a^2*d^2)*x*sqrt(a + b*x^2))/(35*b^2*d*sqrt(c + d*x^2))) + (1/35)*(9*a*c + (b*c^2)/d - (2*a^2*d)/b)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2) + (2*(4*b*c - a*d)*x*(a + b*x^2)^(3/2)*sqrt(c + d*x^2))/(35*b) + (d*x*(a + b*x^2)^(5/2)*sqrt(c + d*x^2))/(7*b) + (2*sqrt(c)*(b*c + a*d)*(b^2*c^2 - 6*a*b*c*d + a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(35*b^2*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (c^(3/2)*(b^2*c^2 - 18*a*b*c*d + a^2*d^2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(35*b*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(1/2)*(c + d*x^2)^(3/2), x, 6, ((3*b^2*c^2 + 7*a*b*c*d - 2*a^2*d^2)*x*sqrt(a + b*x^2))/(15*b^2*sqrt(c + d*x^2)) + (2*(3*b*c - a*d)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(15*b) + (d*x*(a + b*x^2)^(3/2)*sqrt(c + d*x^2))/(5*b) - (sqrt(c)*(3*b^2*c^2 + 7*a*b*c*d - 2*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*b^2*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (c^(3/2)*(9*b*c - a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*b*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(c + d*x^2)^(3/2)/(a + b*x^2)^(1/2), x, 5, (2*d*(2*b*c - a*d)*x*sqrt(a + b*x^2))/(3*b^2*sqrt(c + d*x^2)) + (d*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(3*b) - (2*sqrt(c)*sqrt(d)*(2*b*c - a*d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*b^2*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (c^(3/2)*(3*b*c - a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*a*b*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(c + d*x^2)^(3/2)/(a + b*x^2)^(3/2), x, 5, -((d*(b*c - 2*a*d)*x*sqrt(a + b*x^2))/(a*b^2*sqrt(c + d*x^2))) + ((b*c - a*d)*x*sqrt(c + d*x^2))/(a*b*sqrt(a + b*x^2)) + (sqrt(c)*sqrt(d)*(b*c - 2*a*d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*b^2*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (c^(3/2)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*b*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(c + d*x^2)^(3/2)/(a + b*x^2)^(5/2), x, 4, ((b*c - a*d)*x*sqrt(c + d*x^2))/(3*a*b*(a + b*x^2)^(3/2)) + (2*(b*c + a*d)*sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(3*a^(3/2)*b^(3/2)*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2)))) - (c^(3/2)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*a^2*b*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(c + d*x^2)^(3/2)/(a + b*x^2)^(7/2), x, 5, ((b*c - a*d)*x*sqrt(c + d*x^2))/(5*a*b*(a + b*x^2)^(5/2)) + (2*(2*b*c + a*d)*x*sqrt(c + d*x^2))/(15*a^2*b*(a + b*x^2)^(3/2)) + ((8*b^2*c^2 - 3*a*b*c*d - 2*a^2*d^2)*sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(15*a^(5/2)*b^(3/2)*(b*c - a*d)*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2)))) - (c^(3/2)*sqrt(d)*(4*b*c - a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*a^3*b*(b*c - a*d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(2 + b*x^2)*sqrt(3 + d*x^2), x, 5, ((3*b + 2*d)*x*sqrt(2 + b*x^2))/(3*b*sqrt(3 + d*x^2)) + (1/3)*x*sqrt(2 + b*x^2)*sqrt(3 + d*x^2) - (sqrt(2)*(3*b + 2*d)*sqrt(2 + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(3*b*sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2)) + (2*sqrt(2)*sqrt(2 + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2))]
@test_int [sqrt(2 + 4*x^2)*sqrt(3 - 6*x^2), x, 3, sqrt(2/3)*x*sqrt(1 - 4*x^4) + (2*Elliptic.F(asin(sqrt(2)*x), -1))/sqrt(3)]
@test_int [sqrt(2 + 4*x^2)*sqrt(3 + 6*x^2), x, 2, sqrt(6)*x + 2*sqrt(2/3)*x^3]
@test_int [sqrt(2 + b*x^2)/sqrt(3 + d*x^2), x, 4, (x*sqrt(2 + b*x^2))/sqrt(3 + d*x^2) - (sqrt(2)*sqrt(2 + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2)) + (sqrt(2)*sqrt(2 + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2))]
@test_int [sqrt(4 - x^2)/sqrt(c + d*x^2), x, 5, -((sqrt(c + d*x^2)*Elliptic.E(asin(x/2), -((4*d)/c)))/(d*sqrt(1 + (d*x^2)/c))) + ((c + 4*d)*sqrt(1 + (d*x^2)/c)*Elliptic.F(asin(x/2), -((4*d)/c)))/(d*sqrt(c + d*x^2))]
@test_int [sqrt(4 + x^2)/sqrt(c + d*x^2), x, 4, (x*sqrt(c + d*x^2))/(d*sqrt(4 + x^2)) - (sqrt(c + d*x^2)*Elliptic.E(atan(x/2), 1 - (4*d)/c))/(d*sqrt(4 + x^2)*sqrt((c + d*x^2)/(c*(4 + x^2)))) + (4*sqrt(c + d*x^2)*Elliptic.F(atan(x/2), 1 - (4*d)/c))/(c*sqrt(4 + x^2)*sqrt((c + d*x^2)/(c*(4 + x^2))))]
@test_int [sqrt(1 - x^2)/sqrt(2 - 3*x^2), x, 1, Elliptic.E(asin(sqrt(3/2)*x), 2/3)/sqrt(3)]
@test_int [sqrt(4 - x^2)/sqrt(2 - 3*x^2), x, 1, (2*Elliptic.E(asin(sqrt(3/2)*x), 1/6))/sqrt(3)]
@test_int [sqrt(1 - 4*x^2)/sqrt(2 - 3*x^2), x, 1, Elliptic.E(asin(sqrt(3/2)*x), 8/3)/sqrt(3)]
@test_int [sqrt(1 + x^2)/sqrt(1 - x^2), x, 1, Elliptic.E(asin(x), -1)]
@test_int [sqrt(1 + x^2)/sqrt(2 - 3*x^2), x, 1, Elliptic.E(asin(sqrt(3/2)*x), -2/3)/sqrt(3)]
@test_int [sqrt(4 + x^2)/sqrt(2 - 3*x^2), x, 1, (2*Elliptic.E(asin(sqrt(3/2)*x), -1/6))/sqrt(3)]
@test_int [sqrt(1 + 4*x^2)/sqrt(2 - 3*x^2), x, 1, Elliptic.E(asin(sqrt(3/2)*x), -8/3)/sqrt(3)]
@test_int [sqrt(1 - x^2)/sqrt(1 + x^2), x, 4, -Elliptic.E(asin(x), -1) + 2*Elliptic.F(asin(x), -1)]
@test_int [sqrt(1 - x^2)/sqrt(2 + 3*x^2), x, 3, (-(1/3))*sqrt(2)*Elliptic.E(asin(x), -(3/2)) + (5*Elliptic.F(asin(x), -(3/2)))/(3*sqrt(2))]
@test_int [sqrt(4 - x^2)/sqrt(2 + 3*x^2), x, 3, (-(1/3))*sqrt(2)*Elliptic.E(asin(x/2), -6) + (7/3)*sqrt(2)*Elliptic.F(asin(x/2), -6)]
@test_int [sqrt(1 - 4*x^2)/sqrt(2 + 3*x^2), x, 3, (-(2/3))*sqrt(2)*Elliptic.E(asin(2*x), -(3/8)) + (11*Elliptic.F(asin(2*x), -(3/8)))/(6*sqrt(2))]
@test_int [sqrt(1 + x^2)/sqrt(2 + 3*x^2), x, 4, (x*sqrt(2 + 3*x^2))/(3*sqrt(1 + x^2)) - (sqrt(2)*sqrt(2 + 3*x^2)*Elliptic.E(atan(x), -(1/2)))/(3*sqrt(1 + x^2)*sqrt((2 + 3*x^2)/(1 + x^2))) + (sqrt(2 + 3*x^2)*Elliptic.F(atan(x), -(1/2)))/(sqrt(2)*sqrt(1 + x^2)*sqrt((2 + 3*x^2)/(1 + x^2)))]
@test_int [sqrt(4 + x^2)/sqrt(2 + 3*x^2), x, 4, (x*sqrt(2 + 3*x^2))/(3*sqrt(4 + x^2)) - (sqrt(2)*sqrt(2 + 3*x^2)*Elliptic.E(atan(x/2), -5))/(3*sqrt(4 + x^2)*sqrt((2 + 3*x^2)/(4 + x^2))) + (2*sqrt(2)*sqrt(2 + 3*x^2)*Elliptic.F(atan(x/2), -5))/(sqrt(4 + x^2)*sqrt((2 + 3*x^2)/(4 + x^2)))]
@test_int [sqrt(1 + 4*x^2)/sqrt(2 + 3*x^2), x, 4, (4*x*sqrt(2 + 3*x^2))/(3*sqrt(1 + 4*x^2)) - (2*sqrt(2)*sqrt(2 + 3*x^2)*Elliptic.E(atan(2*x), 5/8))/(3*sqrt((2 + 3*x^2)/(1 + 4*x^2))*sqrt(1 + 4*x^2)) + (sqrt(2 + 3*x^2)*Elliptic.F(atan(2*x), 5/8))/(2*sqrt(2)*sqrt((2 + 3*x^2)/(1 + 4*x^2))*sqrt(1 + 4*x^2))]
@test_int [sqrt(1 - x^2)/sqrt(-1 + 2*x^2), x, 2, (sqrt(1 - 2*x^2)*Elliptic.E(asin(sqrt(2)*x), 1/2))/(sqrt(2)*sqrt(-1 + 2*x^2))]
#= ::Subsubsection::Closed:: =#
#=q<0=#
@test_int [(a + b*x^2)^(7/2)/sqrt(c + d*x^2), x, 7, -((8*(b*c - 2*a*d)*(6*b^2*c^2 - 11*a*b*c*d + 11*a^2*d^2)*x*sqrt(a + b*x^2))/(105*d^3*sqrt(c + d*x^2))) + (b*(24*b^2*c^2 - 71*a*b*c*d + 71*a^2*d^2)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(105*d^3) - (6*b*(b*c - 2*a*d)*x*(a + b*x^2)^(3/2)*sqrt(c + d*x^2))/(35*d^2) + (b*x*(a + b*x^2)^(5/2)*sqrt(c + d*x^2))/(7*d) + (8*sqrt(c)*(b*c - 2*a*d)*(6*b^2*c^2 - 11*a*b*c*d + 11*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(105*d^(7/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (sqrt(c)*(3*b*c - 7*a*d)*(8*b^2*c^2 - 11*a*b*c*d + 15*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(105*d^(7/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(5/2)/sqrt(c + d*x^2), x, 6, ((8*b^2*c^2 - 23*a*b*c*d + 23*a^2*d^2)*x*sqrt(a + b*x^2))/(15*d^2*sqrt(c + d*x^2)) - (4*b*(b*c - 2*a*d)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(15*d^2) + (b*x*(a + b*x^2)^(3/2)*sqrt(c + d*x^2))/(5*d) - (sqrt(c)*(8*b^2*c^2 - 23*a*b*c*d + 23*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*d^(5/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (sqrt(c)*(4*b^2*c^2 - 11*a*b*c*d + 15*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*d^(5/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(3/2)/sqrt(c + d*x^2), x, 5, -((2*(b*c - 2*a*d)*x*sqrt(a + b*x^2))/(3*d*sqrt(c + d*x^2))) + (b*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(3*d) + (2*sqrt(c)*(b*c - 2*a*d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (sqrt(c)*(b*c - 3*a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(1/2)/sqrt(c + d*x^2), x, 4, (x*sqrt(a + b*x^2))/sqrt(c + d*x^2) - (sqrt(c)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(1/2)*sqrt(c + d*x^2)), x, 1, (sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(3/2)*sqrt(c + d*x^2)), x, 6, -((d*x*sqrt(a + b*x^2))/(a*(b*c - a*d)*sqrt(c + d*x^2))) + (b*x*sqrt(c + d*x^2))/(a*(b*c - a*d)*sqrt(a + b*x^2)) + (sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*(b*c - a*d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*(b*c - a*d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(5/2)*sqrt(c + d*x^2)), x, 4, (b*x*sqrt(c + d*x^2))/(3*a*(b*c - a*d)*(a + b*x^2)^(3/2)) + (2*sqrt(b)*(b*c - 2*a*d)*sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(3*a^(3/2)*(b*c - a*d)^2*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2)))) - (sqrt(c)*sqrt(d)*(b*c - 3*a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*a^2*(b*c - a*d)^2*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(7/2)*sqrt(c + d*x^2)), x, 5, (b*x*sqrt(c + d*x^2))/(5*a*(b*c - a*d)*(a + b*x^2)^(5/2)) + (4*b*(b*c - 2*a*d)*x*sqrt(c + d*x^2))/(15*a^2*(b*c - a*d)^2*(a + b*x^2)^(3/2)) + (sqrt(b)*(8*b^2*c^2 - 23*a*b*c*d + 23*a^2*d^2)*sqrt(c + d*x^2)*Elliptic.E(atan((sqrt(b)*x)/sqrt(a)), 1 - (a*d)/(b*c)))/(15*a^(5/2)*(b*c - a*d)^3*sqrt(a + b*x^2)*sqrt((a*(c + d*x^2))/(c*(a + b*x^2)))) - (sqrt(c)*sqrt(d)*(4*b^2*c^2 - 11*a*b*c*d + 15*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*a^3*(b*c - a*d)^3*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(7/2)/(c + d*x^2)^(3/2), x, 7, ((48*b^3*c^3 - 128*a*b^2*c^2*d + 103*a^2*b*c*d^2 - 15*a^3*d^3)*x*sqrt(a + b*x^2))/(15*c*d^3*sqrt(c + d*x^2)) - ((b*c - a*d)*x*(a + b*x^2)^(5/2))/(c*d*sqrt(c + d*x^2)) - (b*(24*b^2*c^2 - 43*a*b*c*d + 15*a^2*d^2)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(15*c*d^3) + (b*(6*b*c - 5*a*d)*x*(a + b*x^2)^(3/2)*sqrt(c + d*x^2))/(5*c*d^2) - ((48*b^3*c^3 - 128*a*b^2*c^2*d + 103*a^2*b*c*d^2 - 15*a^3*d^3)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*sqrt(c)*d^(7/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (b*sqrt(c)*(24*b^2*c^2 - 61*a*b*c*d + 45*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(15*d^(7/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(5/2)/(c + d*x^2)^(3/2), x, 6, -(((8*b^2*c^2 - 13*a*b*c*d + 3*a^2*d^2)*x*sqrt(a + b*x^2))/(3*c*d^2*sqrt(c + d*x^2))) - ((b*c - a*d)*x*(a + b*x^2)^(3/2))/(c*d*sqrt(c + d*x^2)) + (b*(4*b*c - 3*a*d)*x*sqrt(a + b*x^2)*sqrt(c + d*x^2))/(3*c*d^2) + ((8*b^2*c^2 - 13*a*b*c*d + 3*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*sqrt(c)*d^(5/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (2*b*sqrt(c)*(2*b*c - 3*a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*d^(5/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(3/2)/(c + d*x^2)^(3/2), x, 5, -(((b*c - a*d)*x*sqrt(a + b*x^2))/(c*d*sqrt(c + d*x^2))) + ((2*b*c - a*d)*x*sqrt(a + b*x^2))/(c*d*sqrt(c + d*x^2)) - ((2*b*c - a*d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(c)*d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (b*sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(d^(3/2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [(a + b*x^2)^(1/2)/(c + d*x^2)^(3/2), x, 1, (sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(c)*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(1/2)*(c + d*x^2)^(3/2)), x, 6, -((sqrt(d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(c)*(b*c - a*d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))) + (b*sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*(b*c - a*d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(3/2)*(c + d*x^2)^(3/2)), x, 4, (b*x)/(a*(b*c - a*d)*sqrt(a + b*x^2)*sqrt(c + d*x^2)) + (sqrt(d)*(b*c + a*d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(c)*(b*c - a*d)^2*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (2*b*sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*(b*c - a*d)^2*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/((a + b*x^2)^(5/2)*(c + d*x^2)^(3/2)), x, 5, (b*x)/(3*a*(b*c - a*d)*(a + b*x^2)^(3/2)*sqrt(c + d*x^2)) + (2*b*(b*c - 3*a*d)*x)/(3*a^2*(b*c - a*d)^2*sqrt(a + b*x^2)*sqrt(c + d*x^2)) + (sqrt(d)*(2*b^2*c^2 - 7*a*b*c*d - 3*a^2*d^2)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*a^2*sqrt(c)*(b*c - a*d)^3*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (b*sqrt(c)*sqrt(d)*(b*c - 9*a*d)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(3*a^2*(b*c - a*d)^3*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/(sqrt(a + b*x^2)*sqrt(c + d*x^2)), x, 1, (sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [1/(sqrt(a - b*x^2)*sqrt(c + d*x^2)), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(1 + (d*x^2)/c)*Elliptic.F(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*sqrt(a - b*x^2)*sqrt(c + d*x^2))]
@test_int [1/(sqrt(a + b*x^2)*sqrt(c - d*x^2)), x, 3, (sqrt(c)*sqrt(1 + (b*x^2)/a)*sqrt(1 - (d*x^2)/c)*Elliptic.F(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(sqrt(d)*sqrt(a + b*x^2)*sqrt(c - d*x^2))]
@test_int [1/(sqrt(a - b*x^2)*sqrt(c - d*x^2)), x, 3, (sqrt(c)*sqrt(1 - (b*x^2)/a)*sqrt(1 - (d*x^2)/c)*Elliptic.F(asin((sqrt(d)*x)/sqrt(c)), (b*c)/(a*d)))/(sqrt(d)*sqrt(a - b*x^2)*sqrt(c - d*x^2))]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 + 5*x^2)), x, 1, Elliptic.F(asin(x), -(5/2))/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 + 4*x^2)), x, 1, Elliptic.F(asin(x), -2)/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 + 3*x^2)), x, 1, Elliptic.F(asin(x), -(3/2))/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 + 2*x^2)), x, 2, Elliptic.F(asin(x), -1)/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 + 1*x^2)), x, 1, Elliptic.F(asin(x), -(1/2))/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 - 1*x^2)), x, 1, Elliptic.F(asin(x), 1/2)/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 - 2*x^2)), x, 2, atanh(x)/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 - 3*x^2)), x, 1, Elliptic.F(asin(x), 3/2)/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 - 4*x^2)), x, 1, Elliptic.F(asin(x), 2)/sqrt(2)]
@test_int [1/(sqrt(1 - x^2)*sqrt(2 - 5*x^2)), x, 1, Elliptic.F(asin(x), 5/2)/sqrt(2)]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 + 5*x^2)), x, 1, (sqrt(2 + 5*x^2)*Elliptic.F(atan(x), -(3/2)))/(sqrt(2)*sqrt(1 + x^2)*sqrt((2 + 5*x^2)/(1 + x^2)))]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 + 4*x^2)), x, 1, (sqrt(1 + 2*x^2)*Elliptic.F(atan(x), -1))/(sqrt(2)*sqrt(1 + x^2)*sqrt((1 + 2*x^2)/(1 + x^2)))]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 + 3*x^2)), x, 1, (sqrt(2 + 3*x^2)*Elliptic.F(atan(x), -(1/2)))/(sqrt(2)*sqrt(1 + x^2)*sqrt((2 + 3*x^2)/(1 + x^2)))]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 + 2*x^2)), x, 2, atan(x)/sqrt(2)]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 + 1*x^2)), x, 1, (sqrt(2 + x^2)*Elliptic.F(atan(x), 1/2))/(sqrt(2)*sqrt(1 + x^2)*sqrt((2 + x^2)/(1 + x^2)))]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 - 1*x^2)), x, 1, Elliptic.F(asin(x/sqrt(2)), -2)]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 - 2*x^2)), x, 2, Elliptic.F(asin(x), -1)/sqrt(2)]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 - 3*x^2)), x, 1, Elliptic.F(asin(sqrt(3/2)*x), -(2/3))/sqrt(3)]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 - 4*x^2)), x, 1, (1/2)*Elliptic.F(asin(sqrt(2)*x), -(1/2))]
@test_int [1/(sqrt(1 + x^2)*sqrt(2 - 5*x^2)), x, 1, Elliptic.F(asin(sqrt(5/2)*x), -(2/5))/sqrt(5)]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 + 5*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), -(5/2)))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 + 4*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), -2))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 + 3*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), -(3/2)))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 + 2*x^2)), x, 2, (1/2)*Elliptic.F(asin((sqrt(2)*x)/sqrt(-1 + x^2)), 1/2)]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 + 1*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), -(1/2)))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 - 1*x^2)), x, 1, -Elliptic.F(acos(x/sqrt(2)), 2)]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 - 2*x^2)), x, 2, -((sqrt(-1 + x^2)*atanh(x))/(sqrt(2)*sqrt(1 - x^2)))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 - 3*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), 3/2))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 - 4*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), 2))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(2 - 5*x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), 5/2))/(sqrt(2)*sqrt(-1 + x^2))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 + 5*x^2)), x, 1, (sqrt(2 + 5*x^2)*Elliptic.F(atan(x), -(3/2)))/(sqrt(2)*sqrt(-1 - x^2)*sqrt((2 + 5*x^2)/(1 + x^2)))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 + 4*x^2)), x, 1, (sqrt(1 + 2*x^2)*Elliptic.F(atan(x), -1))/(sqrt(2)*sqrt(-1 - x^2)*sqrt((1 + 2*x^2)/(1 + x^2)))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 + 3*x^2)), x, 1, (sqrt(2 + 3*x^2)*Elliptic.F(atan(x), -(1/2)))/(sqrt(2)*sqrt(-1 - x^2)*sqrt((2 + 3*x^2)/(1 + x^2)))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 + 2*x^2)), x, 2, (sqrt(1 + x^2)*atan(x))/(sqrt(2)*sqrt(-1 - x^2))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 + 1*x^2)), x, 1, (sqrt(2 + x^2)*Elliptic.F(atan(x), 1/2))/(sqrt(2)*sqrt(-1 - x^2)*sqrt((2 + x^2)/(1 + x^2)))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 - 1*x^2)), x, 2, (sqrt(1 + x^2)*Elliptic.F(asin(x/sqrt(2)), -2))/sqrt(-1 - x^2)]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 - 2*x^2)), x, 2, -((sqrt(1 - 1/x^4)*x^2*Elliptic.F(acsc(x), -1))/(sqrt(2 - 2*x^2)*sqrt(-1 - x^2))), (sqrt(-1 + x^2)*sqrt(1 + x^2)*Elliptic.F(asin((sqrt(2)*x)/sqrt(-1 + x^2)), 1/2))/(2*sqrt(-1 - x^2)*sqrt(1 - x^2))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 - 3*x^2)), x, 2, (sqrt(1 + x^2)*Elliptic.F(asin(sqrt(3/2)*x), -(2/3)))/(sqrt(3)*sqrt(-1 - x^2))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 - 4*x^2)), x, 2, (sqrt(1 + x^2)*Elliptic.F(asin(sqrt(2)*x), -(1/2)))/(2*sqrt(-1 - x^2))]
@test_int [1/(sqrt(-1 - x^2)*sqrt(2 - 5*x^2)), x, 2, (sqrt(1 + x^2)*Elliptic.F(asin(sqrt(5/2)*x), -(2/5)))/(sqrt(5)*sqrt(-1 - x^2))]
@test_int [sqrt(a + b*x^2)/sqrt(c - d*x^2), x, 3, (sqrt(c)*sqrt(a + b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(sqrt(d)*sqrt(1 + (b*x^2)/a)*sqrt(c - d*x^2))]
@test_int [sqrt(-a - b*x^2)/sqrt(c - d*x^2), x, 3, (sqrt(c)*sqrt(-a - b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(sqrt(d)*sqrt(1 + (b*x^2)/a)*sqrt(c - d*x^2))]
@test_int [sqrt(a + b*x^2)/sqrt(-c + d*x^2), x, 3, (sqrt(c)*sqrt(a + b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(sqrt(d)*sqrt(1 + (b*x^2)/a)*sqrt(-c + d*x^2))]
@test_int [sqrt(-a - b*x^2)/sqrt(-c + d*x^2), x, 3, (sqrt(c)*sqrt(-a - b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(sqrt(d)*sqrt(1 + (b*x^2)/a)*sqrt(-c + d*x^2))]
@test_int [sqrt(a - b*x^2)/sqrt(c - d*x^2), x, 3, (sqrt(c)*sqrt(a - b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), (b*c)/(a*d)))/(sqrt(d)*sqrt(1 - (b*x^2)/a)*sqrt(c - d*x^2))]
@test_int [sqrt(-a + b*x^2)/sqrt(c - d*x^2), x, 3, (sqrt(c)*sqrt(-a + b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), (b*c)/(a*d)))/(sqrt(d)*sqrt(1 - (b*x^2)/a)*sqrt(c - d*x^2))]
@test_int [sqrt(a - b*x^2)/sqrt(-c + d*x^2), x, 3, (sqrt(c)*sqrt(a - b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), (b*c)/(a*d)))/(sqrt(d)*sqrt(1 - (b*x^2)/a)*sqrt(-c + d*x^2))]
@test_int [sqrt(-a + b*x^2)/sqrt(-c + d*x^2), x, 3, (sqrt(c)*sqrt(-a + b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), (b*c)/(a*d)))/(sqrt(d)*sqrt(1 - (b*x^2)/a)*sqrt(-c + d*x^2))]
@test_int [sqrt(a + b*x^2)/sqrt(c + d*x^2), x, 4, (x*sqrt(a + b*x^2))/sqrt(c + d*x^2) - (sqrt(c)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(-a - b*x^2)/sqrt(c + d*x^2), x, 4, (x*sqrt(-a - b*x^2))/sqrt(c + d*x^2) - (sqrt(c)*sqrt(-a - b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (sqrt(c)*sqrt(-a - b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(a + b*x^2)/sqrt(-c - d*x^2), x, 4, (x*sqrt(a + b*x^2))/sqrt(-c - d*x^2) - (sqrt(c)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))) + (sqrt(c)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2))))]
@test_int [sqrt(-a - b*x^2)/sqrt(-c - d*x^2), x, 4, (x*sqrt(-a - b*x^2))/sqrt(-c - d*x^2) - (sqrt(c)*sqrt(-a - b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))) + (sqrt(c)*sqrt(-a - b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(sqrt(d)*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2))))]
@test_int [sqrt(a - b*x^2)/sqrt(c + d*x^2), x, 7, -((sqrt(a)*sqrt(b)*sqrt(1 - (b*x^2)/a)*sqrt(c + d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(d*sqrt(a - b*x^2)*sqrt(1 + (d*x^2)/c))) + (sqrt(a)*(b*c + a*d)*sqrt(1 - (b*x^2)/a)*sqrt(1 + (d*x^2)/c)*Elliptic.F(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*d*sqrt(a - b*x^2)*sqrt(c + d*x^2))]
@test_int [sqrt(-a + b*x^2)/sqrt(c + d*x^2), x, 7, (sqrt(a)*sqrt(b)*sqrt(1 - (b*x^2)/a)*sqrt(c + d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(d*sqrt(-a + b*x^2)*sqrt(1 + (d*x^2)/c)) - (sqrt(a)*(b*c + a*d)*sqrt(1 - (b*x^2)/a)*sqrt(1 + (d*x^2)/c)*Elliptic.F(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*d*sqrt(-a + b*x^2)*sqrt(c + d*x^2))]
@test_int [sqrt(a - b*x^2)/sqrt(-c - d*x^2), x, 7, (sqrt(a)*sqrt(b)*sqrt(1 - (b*x^2)/a)*sqrt(-c - d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(d*sqrt(a - b*x^2)*sqrt(1 + (d*x^2)/c)) + (sqrt(a)*(b*c + a*d)*sqrt(1 - (b*x^2)/a)*sqrt(1 + (d*x^2)/c)*Elliptic.F(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*d*sqrt(a - b*x^2)*sqrt(-c - d*x^2))]
@test_int [sqrt(-a + b*x^2)/sqrt(-c - d*x^2), x, 7, -((sqrt(a)*sqrt(b)*sqrt(1 - (b*x^2)/a)*sqrt(-c - d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(d*sqrt(-a + b*x^2)*sqrt(1 + (d*x^2)/c))) - (sqrt(a)*(b*c + a*d)*sqrt(1 - (b*x^2)/a)*sqrt(1 + (d*x^2)/c)*Elliptic.F(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*d*sqrt(-a + b*x^2)*sqrt(-c - d*x^2))]
@test_int [sqrt(c + d*x^2)/sqrt(a - b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(c + d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*sqrt(a - b*x^2)*sqrt(1 + (d*x^2)/c))]
@test_int [sqrt(-c - d*x^2)/sqrt(a - b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(-c - d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*sqrt(a - b*x^2)*sqrt(1 + (d*x^2)/c))]
@test_int [sqrt(c + d*x^2)/sqrt(-a + b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(c + d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*sqrt(-a + b*x^2)*sqrt(1 + (d*x^2)/c))]
@test_int [sqrt(-c - d*x^2)/sqrt(-a + b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(-c - d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), -((a*d)/(b*c))))/(sqrt(b)*sqrt(-a + b*x^2)*sqrt(1 + (d*x^2)/c))]
@test_int [sqrt(c - d*x^2)/sqrt(a - b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(c - d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), (a*d)/(b*c)))/(sqrt(b)*sqrt(a - b*x^2)*sqrt(1 - (d*x^2)/c))]
@test_int [sqrt(-c + d*x^2)/sqrt(a - b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(-c + d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), (a*d)/(b*c)))/(sqrt(b)*sqrt(a - b*x^2)*sqrt(1 - (d*x^2)/c))]
@test_int [sqrt(c - d*x^2)/sqrt(-a + b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(c - d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), (a*d)/(b*c)))/(sqrt(b)*sqrt(-a + b*x^2)*sqrt(1 - (d*x^2)/c))]
@test_int [sqrt(-c + d*x^2)/sqrt(-a + b*x^2), x, 3, (sqrt(a)*sqrt(1 - (b*x^2)/a)*sqrt(-c + d*x^2)*Elliptic.E(asin((sqrt(b)*x)/sqrt(a)), (a*d)/(b*c)))/(sqrt(b)*sqrt(-a + b*x^2)*sqrt(1 - (d*x^2)/c))]
@test_int [sqrt(c + d*x^2)/sqrt(a + b*x^2), x, 4, (d*x*sqrt(a + b*x^2))/(b*sqrt(c + d*x^2)) - (sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(b*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) + (c^(3/2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(-c - d*x^2)/sqrt(a + b*x^2), x, 4, -((d*x*sqrt(a + b*x^2))/(b*sqrt(-c - d*x^2))) + (sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(b*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))) - (c^(3/2)*sqrt(a + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2))))]
@test_int [sqrt(c + d*x^2)/sqrt(-a - b*x^2), x, 4, -((d*x*sqrt(-a - b*x^2))/(b*sqrt(c + d*x^2))) + (sqrt(c)*sqrt(d)*sqrt(-a - b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(b*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2)) - (c^(3/2)*sqrt(-a - b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))*sqrt(c + d*x^2))]
@test_int [sqrt(-c - d*x^2)/sqrt(-a - b*x^2), x, 4, (d*x*sqrt(-a - b*x^2))/(b*sqrt(-c - d*x^2)) - (sqrt(c)*sqrt(d)*sqrt(-a - b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(b*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2)))) + (c^(3/2)*sqrt(-a - b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(c)), 1 - (b*c)/(a*d)))/(a*sqrt(d)*sqrt(-c - d*x^2)*sqrt((c*(a + b*x^2))/(a*(c + d*x^2))))]
@test_int [sqrt(c - d*x^2)/sqrt(a + b*x^2), x, 7, -((sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(1 + (b*x^2)/a)*sqrt(c - d*x^2))) + (sqrt(c)*(b*c + a*d)*sqrt(1 + (b*x^2)/a)*sqrt(1 - (d*x^2)/c)*Elliptic.F(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(d)*sqrt(a + b*x^2)*sqrt(c - d*x^2))]
@test_int [sqrt(-c + d*x^2)/sqrt(a + b*x^2), x, 7, (sqrt(c)*sqrt(d)*sqrt(a + b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(1 + (b*x^2)/a)*sqrt(-c + d*x^2)) - (sqrt(c)*(b*c + a*d)*sqrt(1 + (b*x^2)/a)*sqrt(1 - (d*x^2)/c)*Elliptic.F(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(d)*sqrt(a + b*x^2)*sqrt(-c + d*x^2))]
@test_int [sqrt(c - d*x^2)/sqrt(-a - b*x^2), x, 7, (sqrt(c)*sqrt(d)*sqrt(-a - b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(1 + (b*x^2)/a)*sqrt(c - d*x^2)) + (sqrt(c)*(b*c + a*d)*sqrt(1 + (b*x^2)/a)*sqrt(1 - (d*x^2)/c)*Elliptic.F(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(d)*sqrt(-a - b*x^2)*sqrt(c - d*x^2))]
@test_int [sqrt(-c + d*x^2)/sqrt(-a - b*x^2), x, 7, -((sqrt(c)*sqrt(d)*sqrt(-a - b*x^2)*sqrt(1 - (d*x^2)/c)*Elliptic.E(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(1 + (b*x^2)/a)*sqrt(-c + d*x^2))) - (sqrt(c)*(b*c + a*d)*sqrt(1 + (b*x^2)/a)*sqrt(1 - (d*x^2)/c)*Elliptic.F(asin((sqrt(d)*x)/sqrt(c)), -((b*c)/(a*d))))/(b*sqrt(d)*sqrt(-a - b*x^2)*sqrt(-c + d*x^2))]
@test_int [1/(sqrt(2 + b*x^2)*sqrt(3 + d*x^2)), x, 1, (sqrt(2 + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(sqrt(2)*sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2))]
@test_int [1/(sqrt(4 - x^2)*sqrt(c + d*x^2)), x, 2, (sqrt(1 + (d*x^2)/c)*Elliptic.F(asin(x/2), -((4*d)/c)))/sqrt(c + d*x^2)]
@test_int [1/(sqrt(4 + x^2)*sqrt(c + d*x^2)), x, 1, (sqrt(c + d*x^2)*Elliptic.F(atan(x/2), 1 - (4*d)/c))/(c*sqrt(4 + x^2)*sqrt((c + d*x^2)/(c*(4 + x^2))))]
@test_int [1/(sqrt(1 - x^2)*sqrt(-1 + 2*x^2)), x, 1, -Elliptic.F(acos(x), 2)]
@test_int [sqrt(1 - c^2*x^2)/sqrt(1 + c^2*x^2), x, 4, -(Elliptic.E(asin(c*x), -1)/c) + (2*Elliptic.F(asin(c*x), -1))/c]
@test_int [sqrt(2 + b*x^2)/sqrt(3 + d*x^2), x, 4, (x*sqrt(2 + b*x^2))/sqrt(3 + d*x^2) - (sqrt(2)*sqrt(2 + b*x^2)*Elliptic.E(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2)) + (sqrt(2)*sqrt(2 + b*x^2)*Elliptic.F(atan((sqrt(d)*x)/sqrt(3)), 1 - (3*b)/(2*d)))/(sqrt(d)*sqrt((2 + b*x^2)/(3 + d*x^2))*sqrt(3 + d*x^2))]
@test_int [sqrt(-1 + 3*x^2)/sqrt(2 - 3*x^2), x, 1, -(Elliptic.E(acos(sqrt(3/2)*x), 2)/sqrt(3))]
@test_int [sqrt(1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))/sqrt(1 - (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))), x, 1, (sqrt(b + sqrt(b^2 - 4*a*c))*Elliptic.E(asin((sqrt(2)*sqrt(c)*x)/sqrt(b + sqrt(b^2 - 4*a*c))), -((b + sqrt(b^2 - 4*a*c))/(b - sqrt(b^2 - 4*a*c)))))/(sqrt(2)*sqrt(c))]
@test_int [sqrt(1 - (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))/sqrt(1 - (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))), x, 1, (sqrt(b + sqrt(b^2 - 4*a*c))*Elliptic.E(asin((sqrt(2)*sqrt(c)*x)/sqrt(b + sqrt(b^2 - 4*a*c))), (b + sqrt(b^2 - 4*a*c))/(b - sqrt(b^2 - 4*a*c))))/(sqrt(2)*sqrt(c))]
@test_int [sqrt(1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))/sqrt(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))), x, 4, (x*sqrt(1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c))))/sqrt(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))) - (sqrt(b + sqrt(b^2 - 4*a*c))*sqrt(1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))*Elliptic.E(atan((sqrt(2)*sqrt(c)*x)/sqrt(b + sqrt(b^2 - 4*a*c))), -((2*sqrt(b^2 - 4*a*c))/(b - sqrt(b^2 - 4*a*c)))))/(sqrt(2)*sqrt(c)*sqrt((1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))/(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))))*sqrt(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c)))) + (sqrt(b + sqrt(b^2 - 4*a*c))*sqrt(1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))*Elliptic.F(atan((sqrt(2)*sqrt(c)*x)/sqrt(b + sqrt(b^2 - 4*a*c))), -((2*sqrt(b^2 - 4*a*c))/(b - sqrt(b^2 - 4*a*c)))))/(sqrt(2)*sqrt(c)*sqrt((1 + (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))/(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))))*sqrt(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))))]
@test_int [sqrt(1 - (2*c*x^2)/(b - sqrt(b^2 - 4*a*c)))/sqrt(1 + (2*c*x^2)/(b + sqrt(b^2 - 4*a*c))), x, 3, -(((b + sqrt(b^2 - 4*a*c))*Elliptic.E(asin((sqrt(2)*sqrt(c)*x)/sqrt(b - sqrt(b^2 - 4*a*c))), -((b - sqrt(b^2 - 4*a*c))/(b + sqrt(b^2 - 4*a*c)))))/(sqrt(2)*sqrt(c)*sqrt(b - sqrt(b^2 - 4*a*c)))) + (sqrt(2)*b*Elliptic.F(asin((sqrt(2)*sqrt(c)*x)/sqrt(b - sqrt(b^2 - 4*a*c))), -((b - sqrt(b^2 - 4*a*c))/(b + sqrt(b^2 - 4*a*c)))))/(sqrt(c)*sqrt(b - sqrt(b^2 - 4*a*c)))]
@test_int [(1 - 2*x^2)^m/sqrt(1 - x^2), x, -1, -((2^(-2 - m)*sqrt(x^2)*(2 - 4*x^2)^(1 + m)*HypergeometricFunctions._₂F₁(1/2, (1 + m)/2, (3 + m)/2, (1 - 2*x^2)^2))/((1 + m)*x))]
@test_int [1/(sqrt(-1 + x^2)*sqrt(7 - 4*sqrt(3) + x^2)), x, 2, (sqrt(1 - x^2)*Elliptic.F(asin(x), -7 - 4*sqrt(3)))/(sqrt(7 - 4*sqrt(3))*sqrt(-1 + x^2))]
@test_int [1/(sqrt(3 - 3*sqrt(3) + 2*sqrt(3)*x^2)*sqrt(3 + (-3 + sqrt(3))*x^2)), x, 1, (-(1/6))*sqrt(3 + sqrt(3))*Elliptic.F(acos(sqrt((1/3)*(3 - sqrt(3)))*x), (1/2)*(1 + sqrt(3)))]
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/4)*(c+d*x^2)^q=#
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/4)*(c+d*x^2)^q*when*b*c-2*a*d=0=#
#= ::Subsubsection::Closed:: =#
#=a>0=#
@test_int [1/((2 + 3*x^2)^(1/4)*(4 + 3*x^2)), x, 1, -(atan((2*2^(3/4) + 2*2^(1/4)*sqrt(2 + 3*x^2))/(2*sqrt(3)*x*(2 + 3*x^2)^(1/4)))/(2*2^(3/4)*sqrt(3))) - atanh((2*2^(3/4) - 2*2^(1/4)*sqrt(2 + 3*x^2))/(2*sqrt(3)*x*(2 + 3*x^2)^(1/4)))/(2*2^(3/4)*sqrt(3))]
@test_int [1/((2 - 3*x^2)^(1/4)*(4 - 3*x^2)), x, 1, atan((2 - sqrt(2)*sqrt(2 - 3*x^2))/(2^(1/4)*sqrt(3)*x*(2 - 3*x^2)^(1/4)))/(2*2^(3/4)*sqrt(3)) + atanh((2 + sqrt(2)*sqrt(2 - 3*x^2))/(2^(1/4)*sqrt(3)*x*(2 - 3*x^2)^(1/4)))/(2*2^(3/4)*sqrt(3))]
@test_int [1/((2 + b*x^2)^(1/4)*(4 + b*x^2)), x, 1, -(atan((2*2^(3/4) + 2*2^(1/4)*sqrt(2 + b*x^2))/(2*sqrt(b)*x*(2 + b*x^2)^(1/4)))/(2*2^(3/4)*sqrt(b))) - atanh((2*2^(3/4) - 2*2^(1/4)*sqrt(2 + b*x^2))/(2*sqrt(b)*x*(2 + b*x^2)^(1/4)))/(2*2^(3/4)*sqrt(b))]
@test_int [1/((2 - b*x^2)^(1/4)*(4 - b*x^2)), x, 1, atan((2 - sqrt(2)*sqrt(2 - b*x^2))/(2^(1/4)*sqrt(b)*x*(2 - b*x^2)^(1/4)))/(2*2^(3/4)*sqrt(b)) + atanh((2 + sqrt(2)*sqrt(2 - b*x^2))/(2^(1/4)*sqrt(b)*x*(2 - b*x^2)^(1/4)))/(2*2^(3/4)*sqrt(b))]
@test_int [1/((a + 3*x^2)^(1/4)*(2*a + 3*x^2)), x, 1, -(atan((a^(3/4)*(1 + sqrt(a + 3*x^2)/sqrt(a)))/(sqrt(3)*x*(a + 3*x^2)^(1/4)))/(2*sqrt(3)*a^(3/4))) - atanh((a^(3/4)*(1 - sqrt(a + 3*x^2)/sqrt(a)))/(sqrt(3)*x*(a + 3*x^2)^(1/4)))/(2*sqrt(3)*a^(3/4))]
@test_int [1/((a - 3*x^2)^(1/4)*(2*a - 3*x^2)), x, 1, atan((a^(3/4)*(1 - sqrt(a - 3*x^2)/sqrt(a)))/(sqrt(3)*x*(a - 3*x^2)^(1/4)))/(2*sqrt(3)*a^(3/4)) + atanh((a^(3/4)*(1 + sqrt(a - 3*x^2)/sqrt(a)))/(sqrt(3)*x*(a - 3*x^2)^(1/4)))/(2*sqrt(3)*a^(3/4))]
@test_int [1/((a + b*x^2)^(1/4)*(2*a + b*x^2)), x, 1, -(atan((a^(3/4)*(1 + sqrt(a + b*x^2)/sqrt(a)))/(sqrt(b)*x*(a + b*x^2)^(1/4)))/(2*a^(3/4)*sqrt(b))) - atanh((a^(3/4)*(1 - sqrt(a + b*x^2)/sqrt(a)))/(sqrt(b)*x*(a + b*x^2)^(1/4)))/(2*a^(3/4)*sqrt(b))]
@test_int [1/((a - b*x^2)^(1/4)*(2*a - b*x^2)), x, 1, atan((a^(3/4)*(1 - sqrt(a - b*x^2)/sqrt(a)))/(sqrt(b)*x*(a - b*x^2)^(1/4)))/(2*a^(3/4)*sqrt(b)) + atanh((a^(3/4)*(1 + sqrt(a - b*x^2)/sqrt(a)))/(sqrt(b)*x*(a - b*x^2)^(1/4)))/(2*a^(3/4)*sqrt(b))]
#= ::Subsubsection::Closed:: =#
#=a<0=#
@test_int [1/((-2 + 3*x^2)*(-1 + 3*x^2)^(1/4)), x, 1, -(atan((sqrt(3/2)*x)/(-1 + 3*x^2)^(1/4))/(2*sqrt(6))) - atanh((sqrt(3/2)*x)/(-1 + 3*x^2)^(1/4))/(2*sqrt(6))]
@test_int [1/((-2 - 3*x^2)*(-1 - 3*x^2)^(1/4)), x, 1, -(atan((sqrt(3/2)*x)/(-1 - 3*x^2)^(1/4))/(2*sqrt(6))) - atanh((sqrt(3/2)*x)/(-1 - 3*x^2)^(1/4))/(2*sqrt(6))]
@test_int [1/((-2 + b*x^2)*(-1 + b*x^2)^(1/4)), x, 1, -(atan((sqrt(b)*x)/(sqrt(2)*(-1 + b*x^2)^(1/4)))/(2*sqrt(2)*sqrt(b))) - atanh((sqrt(b)*x)/(sqrt(2)*(-1 + b*x^2)^(1/4)))/(2*sqrt(2)*sqrt(b))]
@test_int [1/((-2 - b*x^2)*(-1 - b*x^2)^(1/4)), x, 1, -(atan((sqrt(b)*x)/(sqrt(2)*(-1 - b*x^2)^(1/4)))/(2*sqrt(2)*sqrt(b))) - atanh((sqrt(b)*x)/(sqrt(2)*(-1 - b*x^2)^(1/4)))/(2*sqrt(2)*sqrt(b))]
@test_int [1/((-a + 3*x^2)^(1/4)*(-2*a + 3*x^2)), x, 1, -(atan((sqrt(3/2)*x)/(a^(1/4)*(-a + 3*x^2)^(1/4)))/(2*sqrt(6)*a^(3/4))) - atanh((sqrt(3/2)*x)/(a^(1/4)*(-a + 3*x^2)^(1/4)))/(2*sqrt(6)*a^(3/4))]
@test_int [1/((-a - 3*x^2)^(1/4)*(-2*a - 3*x^2)), x, 1, -(atan((sqrt(3/2)*x)/(a^(1/4)*(-a - 3*x^2)^(1/4)))/(2*sqrt(6)*a^(3/4))) - atanh((sqrt(3/2)*x)/(a^(1/4)*(-a - 3*x^2)^(1/4)))/(2*sqrt(6)*a^(3/4))]
@test_int [1/((-a + b*x^2)^(1/4)*(-2*a + b*x^2)), x, 1, -(atan((sqrt(b)*x)/(sqrt(2)*a^(1/4)*(-a + b*x^2)^(1/4)))/(2*sqrt(2)*a^(3/4)*sqrt(b))) - atanh((sqrt(b)*x)/(sqrt(2)*a^(1/4)*(-a + b*x^2)^(1/4)))/(2*sqrt(2)*a^(3/4)*sqrt(b))]
@test_int [1/((-a - b*x^2)^(1/4)*(-2*a - b*x^2)), x, 1, -(atan((sqrt(b)*x)/(sqrt(2)*a^(1/4)*(-a - b*x^2)^(1/4)))/(2*sqrt(2)*a^(3/4)*sqrt(b))) - atanh((sqrt(b)*x)/(sqrt(2)*a^(1/4)*(-a - b*x^2)^(1/4)))/(2*sqrt(2)*a^(3/4)*sqrt(b))]
@test_int [1/((2 - x^2)*(x^2 - 1)^(1/4)), x, 1, atan(x/(sqrt(2)*(-1 + x^2)^(1/4)))/(2*sqrt(2)) + atanh(x/(sqrt(2)*(-1 + x^2)^(1/4)))/(2*sqrt(2))]
#= ::Subsection::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^(p/4)*(c+d*x^2)^q=#
#= ::Subsubsection:: =#
#=q>0=#
#= ::Subsubsection::Closed:: =#
#=q<0=#
@test_int [(a + b*x^2)^(7/4)/(c + d*x^2), x, 13, (6*a*b*x)/(5*d*(a + b*x^2)^(1/4)) - (2*b*(b*c - a*d)*x)/(d^2*(a + b*x^2)^(1/4)) + (2*b*x*(a + b*x^2)^(3/4))/(5*d) - (6*a^(3/2)*sqrt(b)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(5*d*(a + b*x^2)^(1/4)) + (2*sqrt(a)*sqrt(b)*(b*c - a*d)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(d^2*(a + b*x^2)^(1/4)) + (a^(1/4)*((-b)*c + a*d)^(3/2)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d^(5/2)*x) - (a^(1/4)*((-b)*c + a*d)^(3/2)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d^(5/2)*x)]
@test_int [(a + b*x^2)^(5/4)/(c + d*x^2), x, 12, (2*b*x*(a + b*x^2)^(1/4))/(3*d) + (2*a^(3/2)*sqrt(b)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(3*d*(a + b*x^2)^(3/4)) - (2*sqrt(a)*sqrt(b)*(b*c - a*d)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(d^2*(a + b*x^2)^(3/4)) + (a^(1/4)*(b*c - a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d^2*x) + (a^(1/4)*(b*c - a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d^2*x)]
@test_int [(a + b*x^2)^(3/4)/(c + d*x^2), x, 8, (2*b*x)/(d*(a + b*x^2)^(1/4)) - (2*sqrt(a)*sqrt(b)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(d*(a + b*x^2)^(1/4)) + (a^(1/4)*sqrt((-b)*c + a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d^(3/2)*x) - (a^(1/4)*sqrt((-b)*c + a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d^(3/2)*x)]
@test_int [(a + b*x^2)^(1/4)/(c + d*x^2), x, 8, (2*sqrt(a)*sqrt(b)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(d*(a + b*x^2)^(3/4)) - (a^(1/4)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d*x) - (a^(1/4)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(d*x)]
@test_int [1/((a + b*x^2)^(1/4)*(c + d*x^2)), x, 4, (a^(1/4)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(sqrt(d)*sqrt((-b)*c + a*d)*x) - (a^(1/4)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(sqrt(d)*sqrt((-b)*c + a*d)*x)]
@test_int [1/((a + b*x^2)^(3/4)*(c + d*x^2)), x, 5, (a^(1/4)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/((b*c - a*d)*x) + (a^(1/4)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/((b*c - a*d)*x)]
@test_int [1/((a + b*x^2)^(5/4)*(c + d*x^2)), x, 7, (2*sqrt(b)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(sqrt(a)*(b*c - a*d)*(a + b*x^2)^(1/4)) + (a^(1/4)*sqrt(d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(((-b)*c + a*d)^(3/2)*x) - (a^(1/4)*sqrt(d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(((-b)*c + a*d)^(3/2)*x)]
@test_int [1/((a + b*x^2)^(7/4)*(c + d*x^2)), x, 9, (2*b*x)/(3*a*(b*c - a*d)*(a + b*x^2)^(3/4)) + (2*sqrt(b)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(3*sqrt(a)*(b*c - a*d)*(a + b*x^2)^(3/4)) - (a^(1/4)*d*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/((b*c - a*d)^2*x) - (a^(1/4)*d*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/((b*c - a*d)^2*x)]
@test_int [1/((a + b*x^2)^(9/4)*(c + d*x^2)), x, 10, (2*b*x)/(5*a*(b*c - a*d)*(a + b*x^2)^(5/4)) + (2*sqrt(b)*(3*b*c - 8*a*d)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(5*a^(3/2)*(b*c - a*d)^2*(a + b*x^2)^(1/4)) + (a^(1/4)*d^(3/2)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(((-b)*c + a*d)^(5/2)*x) - (a^(1/4)*d^(3/2)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(((-b)*c + a*d)^(5/2)*x)]
@test_int [1/((a + b*x^2)^(11/4)*(c + d*x^2)), x, 10, (2*b*x)/(7*a*(b*c - a*d)*(a + b*x^2)^(7/4)) + (2*b*(5*b*c - 12*a*d)*x)/(21*a^2*(b*c - a*d)^2*(a + b*x^2)^(3/4)) + (2*sqrt(b)*(5*b*c - 12*a*d)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(21*a^(3/2)*(b*c - a*d)^2*(a + b*x^2)^(3/4)) + (a^(1/4)*d^2*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/((b*c - a*d)^3*x) + (a^(1/4)*d^2*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/((b*c - a*d)^3*x)]
@test_int [(a + b*x^2)^(7/4)/(c + d*x^2)^2, x, 9, (b*(5*b*c - a*d)*x)/(2*c*d^2*(a + b*x^2)^(1/4)) - ((b*c - a*d)*x*(a + b*x^2)^(3/4))/(2*c*d*(c + d*x^2)) - (sqrt(a)*sqrt(b)*(5*b*c - a*d)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*c*d^2*(a + b*x^2)^(1/4)) + (a^(1/4)*sqrt((-b)*c + a*d)*(5*b*c + 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d^(5/2)*x) - (a^(1/4)*sqrt((-b)*c + a*d)*(5*b*c + 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d^(5/2)*x)]
@test_int [(a + b*x^2)^(5/4)/(c + d*x^2)^2, x, 9, -(((b*c - a*d)*x*(a + b*x^2)^(1/4))/(2*c*d*(c + d*x^2))) + (sqrt(a)*sqrt(b)*(3*b*c + a*d)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*c*d^2*(a + b*x^2)^(3/4)) - (a^(1/4)*(3*b*c + 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d^2*x) - (a^(1/4)*(3*b*c + 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d^2*x)]
@test_int [(a + b*x^2)^(3/4)/(c + d*x^2)^2, x, 9, -((b*x)/(2*c*d*(a + b*x^2)^(1/4))) + (x*(a + b*x^2)^(3/4))/(2*c*(c + d*x^2)) + (sqrt(a)*sqrt(b)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*c*d*(a + b*x^2)^(1/4)) + (a^(1/4)*(b*c + 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d^(3/2)*sqrt((-b)*c + a*d)*x) - (a^(1/4)*(b*c + 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d^(3/2)*sqrt((-b)*c + a*d)*x)]
@test_int [(a + b*x^2)^(1/4)/(c + d*x^2)^2, x, 9, (x*(a + b*x^2)^(1/4))/(2*c*(c + d*x^2)) + (sqrt(a)*sqrt(b)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*c*d*(a + b*x^2)^(3/4)) - (a^(1/4)*(b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d*(b*c - a*d)*x) - (a^(1/4)*(b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*d*(b*c - a*d)*x)]
@test_int [1/((a + b*x^2)^(1/4)*(c + d*x^2)^2), x, 9, (b*x)/(2*c*(b*c - a*d)*(a + b*x^2)^(1/4)) - (d*x*(a + b*x^2)^(3/4))/(2*c*(b*c - a*d)*(c + d*x^2)) - (sqrt(a)*sqrt(b)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*c*(b*c - a*d)*(a + b*x^2)^(1/4)) - (a^(1/4)*(3*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*sqrt(d)*((-b)*c + a*d)^(3/2)*x) + (a^(1/4)*(3*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*sqrt(d)*((-b)*c + a*d)^(3/2)*x)]
@test_int [1/((a + b*x^2)^(3/4)*(c + d*x^2)^2), x, 9, -((d*x*(a + b*x^2)^(1/4))/(2*c*(b*c - a*d)*(c + d*x^2))) - (sqrt(a)*sqrt(b)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*c*(b*c - a*d)*(a + b*x^2)^(3/4)) + (a^(1/4)*(5*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*(b*c - a*d)^2*x) + (a^(1/4)*(5*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*(b*c - a*d)^2*x)]
@test_int [1/((a + b*x^2)^(5/4)*(c + d*x^2)^2), x, 10, -((d*x)/(2*c*(b*c - a*d)*(a + b*x^2)^(1/4)*(c + d*x^2))) + (sqrt(b)*(4*b*c + a*d)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(2*sqrt(a)*c*(b*c - a*d)^2*(a + b*x^2)^(1/4)) - (a^(1/4)*sqrt(d)*(7*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*((-b)*c + a*d)^(5/2)*x) + (a^(1/4)*sqrt(d)*(7*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*((-b)*c + a*d)^(5/2)*x)]
@test_int [1/((a + b*x^2)^(7/4)*(c + d*x^2)^2), x, 10, (b*(4*b*c + 3*a*d)*x)/(6*a*c*(b*c - a*d)^2*(a + b*x^2)^(3/4)) - (d*x)/(2*c*(b*c - a*d)*(a + b*x^2)^(3/4)*(c + d*x^2)) + (sqrt(b)*(4*b*c + 3*a*d)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(6*sqrt(a)*c*(b*c - a*d)^2*(a + b*x^2)^(3/4)) - (a^(1/4)*d*(9*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*(b*c - a*d)^3*x) - (a^(1/4)*d*(9*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*(b*c - a*d)^3*x)]
@test_int [1/((a + b*x^2)^(9/4)*(c + d*x^2)^2), x, 11, (b*(4*b*c + 5*a*d)*x)/(10*a*c*(b*c - a*d)^2*(a + b*x^2)^(5/4)) - (d*x)/(2*c*(b*c - a*d)*(a + b*x^2)^(5/4)*(c + d*x^2)) + (sqrt(b)*(12*b^2*c^2 - 52*a*b*c*d - 5*a^2*d^2)*(1 + (b*x^2)/a)^(1/4)*Elliptic.E((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(10*a^(3/2)*c*(b*c - a*d)^3*(a + b*x^2)^(1/4)) - (a^(1/4)*d^(3/2)*(11*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*((-b)*c + a*d)^(7/2)*x) + (a^(1/4)*d^(3/2)*(11*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*((-b)*c + a*d)^(7/2)*x)]
@test_int [1/((a + b*x^2)^(11/4)*(c + d*x^2)^2), x, 11, (b*(4*b*c + 7*a*d)*x)/(14*a*c*(b*c - a*d)^2*(a + b*x^2)^(7/4)) + (b*(20*b^2*c^2 - 76*a*b*c*d - 21*a^2*d^2)*x)/(42*a^2*c*(b*c - a*d)^3*(a + b*x^2)^(3/4)) - (d*x)/(2*c*(b*c - a*d)*(a + b*x^2)^(7/4)*(c + d*x^2)) + (sqrt(b)*(20*b^2*c^2 - 76*a*b*c*d - 21*a^2*d^2)*(1 + (b*x^2)/a)^(3/4)*Elliptic.F((1/2)*atan((sqrt(b)*x)/sqrt(a)), 2))/(42*a^(3/2)*c*(b*c - a*d)^3*(a + b*x^2)^(3/4)) + (a^(1/4)*d^2*(13*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi(-((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d)), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*(b*c - a*d)^4*x) + (a^(1/4)*d^2*(13*b*c - 2*a*d)*sqrt(-((b*x^2)/a))*Elliptic.Pi((sqrt(a)*sqrt(d))/sqrt((-b)*c + a*d), asin((a + b*x^2)^(1/4)/a^(1/4)), -1))/(4*c*(b*c - a*d)^4*x)]
#= ::Section::Closed:: =#
#=Integrands*of*the*form*(a+b*x^2)^p*(c+d*x^2)^q*with*p*symbolic=#
@test_int [(a + b*x^2)^p*(c + d*x^2)^q, x, 3, (x*(a + b*x^2)^p*(c + d*x^2)^q*AppellF1(1/2, -p, -q, 3/2, -((b*x^2)/a), -((d*x^2)/c)))/((1 + (b*x^2)/a)^p*(1 + (d*x^2)/c)^q)]
@test_int [(a + b*x^2)^p*(c + d*x^2)^3, x, 5, If(13>=8, (d*(15*a^2*d^2 - 8*a*b*c*d*(6 + p) + b^2*c^2*(57 + 28*p + 4*p^2))*x*(a + b*x^2)^(1 + p))/(b^3*(3 + 2*p)*(5 + 2*p)*(7 + 2*p)) - (d*(5*a*d - b*c*(11 + 2*p))*x*(a + b*x^2)^(1 + p)*(c + d*x^2))/(b^2*(5 + 2*p)*(7 + 2*p)) + (d*x*(a + b*x^2)^(1 + p)*(c + d*x^2)^2)/(b*(7 + 2*p)) - ((15*a^3*d^3 - 9*a^2*b*c*d^2*(7 + 2*p) + 3*a*b^2*c^2*d*(35 + 24*p + 4*p^2) - b^3*c^3*(105 + 142*p + 60*p^2 + 8*p^3))*x*(a + b*x^2)^p*HypergeometricFunctions._₂F₁(1/2, -p, 3/2, -((b*x^2)/a)))/((1 + (b*x^2)/a)^p*(b^3*(3 + 2*p)*(5 + 2*p)*(7 + 2*p))), (d*(15*a^2*d^2 - 8*a*b*c*d*(6 + p) + b^2*c^2*(57 + 28*p + 4*p^2))*x*(a + b*x^2)^(1 + p))/(b^3*(105 + 142*p + 60*p^2 + 8*p^3)) - (d*(5*a*d - b*c*(11 + 2*p))*x*(a + b*x^2)^(1 + p)*(c + d*x^2))/(b^2*(35 + 24*p + 4*p^2)) + (d*x*(a + b*x^2)^(1 + p)*(c + d*x^2)^2)/(b*(7 + 2*p)) - ((15*a^3*d^3 - 9*a^2*b*c*d^2*(7 + 2*p) + 3*a*b^2*c^2*d*(35 + 24*p + 4*p^2) - b^3*c^3*(105 + 142*p + 60*p^2 + 8*p^3))*x*(a + b*x^2)^p*HypergeometricFunctions._₂F₁(1/2, -p, 3/2, -((b*x^2)/a)))/(1 + (b*x^2)/a)^p/(b^3*(105 + 142*p + 60*p^2 + 8*p^3)))]
@test_int [(a + b*x^2)^p*(c + d*x^2)^2, x, 4, If(13>=8, -((d*(3*a*d - b*c*(7 + 2*p))*x*(a + b*x^2)^(1 + p))/(b^2*(3 + 2*p)*(5 + 2*p))) + (d*x*(a + b*x^2)^(1 + p)*(c + d*x^2))/(b*(5 + 2*p)) + ((3*a^2*d^2 - 2*a*b*c*d*(5 + 2*p) + b^2*c^2*(15 + 16*p + 4*p^2))*x*(a + b*x^2)^p*HypergeometricFunctions._₂F₁(1/2, -p, 3/2, -((b*x^2)/a)))/((1 + (b*x^2)/a)^p*(b^2*(3 + 2*p)*(5 + 2*p))), -((d*(3*a*d - b*c*(7 + 2*p))*x*(a + b*x^2)^(1 + p))/(b^2*(15 + 16*p + 4*p^2))) + (d*x*(a + b*x^2)^(1 + p)*(c + d*x^2))/(b*(5 + 2*p)) + ((3*a^2*d^2 - 2*a*b*c*d*(5 + 2*p) + b^2*c^2*(15 + 16*p + 4*p^2))*x*(a + b*x^2)^p*HypergeometricFunctions._₂F₁(1/2, -p, 3/2, -((b*x^2)/a)))/((1 + (b*x^2)/a)^p*(b^2*(15 + 16*p + 4*p^2))))]
@test_int [(a + b*x^2)^p*(c + d*x^2)^1, x, 3, (d*x*(a + b*x^2)^(1 + p))/(b*(3 + 2*p)) - ((a*d - b*c*(3 + 2*p))*x*(a + b*x^2)^p*HypergeometricFunctions._₂F₁(1/2, -p, 3/2, -((b*x^2)/a)))/((1 + (b*x^2)/a)^p*(b*(3 + 2*p)))]
@test_int [(a + b*x^2)^p*(c + d*x^2)^0, x, 2, (x*(a + b*x^2)^p*HypergeometricFunctions._₂F₁(1/2, -p, 3/2, -((b*x^2)/a)))/(1 + (b*x^2)/a)^p]
@test_int [(a + b*x^2)^p/(c + d*x^2)^1, x, 2, (x*(a + b*x^2)^p*AppellF1(1/2, -p, 1, 3/2, -((b*x^2)/a), -((d*x^2)/c)))/((1 + (b*x^2)/a)^p*c)]
@test_int [(a + b*x^2)^p/(c + d*x^2)^2, x, 2, (x*(a + b*x^2)^p*AppellF1(1/2, -p, 2, 3/2, -((b*x^2)/a), -((d*x^2)/c)))/((1 + (b*x^2)/a)^p*c^2)]
@test_int [(a + b*x^2)^p/(c + d*x^2)^3, x, 2, (x*(a + b*x^2)^p*AppellF1(1/2, -p, 3, 3/2, -((b*x^2)/a), -((d*x^2)/c)))/((1 + (b*x^2)/a)^p*c^3)]
@test_int [(a + b*x^2)^(-1 - (b*c)/(2*b*c - 2*a*d))*(c + d*x^2)^(-1 + (a*d)/(2*b*c - 2*a*d)), x, 1, (x*(c + d*x^2)^((a*d)/(2*b*c - 2*a*d)))/((a + b*x^2)^((b*c)/(2*b*c - 2*a*d))*(a*c))]
end
|
{"hexsha": "40bb70d08704467b2fddc7705dd36180cc356d95", "size": 126528, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/1 Algebraic functions/1.1 Binomial products/1.1.2 Quadratic/1.1.2.3 (a+b x^2)^p (c+d x^2)^q.jl", "max_stars_repo_name": "gronniger/RubiSymbolics.jl", "max_stars_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/1 Algebraic functions/1.1 Binomial products/1.1.2 Quadratic/1.1.2.3 (a+b x^2)^p (c+d x^2)^q.jl", "max_issues_repo_name": "gronniger/RubiSymbolics.jl", "max_issues_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/1 Algebraic functions/1.1 Binomial products/1.1.2 Quadratic/1.1.2.3 (a+b x^2)^p (c+d x^2)^q.jl", "max_forks_repo_name": "gronniger/RubiSymbolics.jl", "max_forks_repo_head_hexsha": "dd117985dea699be7ddf14c3e6bdf4f407bb8da6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 211.9396984925, "max_line_length": 1448, "alphanum_fraction": 0.4163189176, "num_tokens": 71072}
|
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, save_npz, load_npz
l = [[0, 1, 2],
     [3, 0, 4],
     [0, 0, 0]]
csr = csr_matrix(l)
csc = csc_matrix(l)
save_npz('data/temp/csr.npz', csr)
save_npz('data/temp/csc.npz', csc)
csr_ = load_npz('data/temp/csr.npz')
print(csr_.toarray())
# [[0 1 2]
# [3 0 4]
# [0 0 0]]
print(type(csr))
# <class 'scipy.sparse.csr.csr_matrix'>
csc_ = load_npz('data/temp/csc.npz')
print(csc_.toarray())
# [[0 1 2]
# [3 0 4]
# [0 0 0]]
print(type(csc))
# <class 'scipy.sparse.csc.csc_matrix'>
lil = lil_matrix(l)
# save_npz('data/temp/lil.npz', lil)
# NotImplementedError: Save is not implemented for sparse matrix of format lil.
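# A possible workaround (an assumption, not shown in the original snippet):
# convert the LIL matrix to a supported format before saving, e.g.
# save_npz('data/temp/lil_as_csr.npz', lil.tocsr())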
npz = np.load('data/temp/csr.npz')
print(npz.files)
# ['indices', 'indptr', 'format', 'shape', 'data']
print(npz['data'])
# [1 2 3 4]
print(npz['indices'])
# [1 2 0 2]
print(npz['indptr'])
# [0 2 4 4]
print(npz['format'])
# b'csr'
print(npz['shape'])
# [3 3]
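
# The stored arrays are exactly the CSR constructor arguments, so the matrix
# can also be rebuilt by hand (a minimal sketch using the values loaded above):
csr_rebuilt = csr_matrix((npz['data'], npz['indices'], npz['indptr']),
                         shape=tuple(npz['shape']))
print(csr_rebuilt.toarray())
# [[0 1 2]
# [3 0 4]
# [0 0 0]]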
|
{"hexsha": "43cf33a2f2c819305357334e15d9f0fe8d75a5d4", "size": 969, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebook/scipy_sparse_save_load_npz.py", "max_stars_repo_name": "vhn0912/python-snippets", "max_stars_repo_head_hexsha": "80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 174, "max_stars_repo_stars_event_min_datetime": "2018-05-30T21:14:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T07:59:37.000Z", "max_issues_repo_path": "notebook/scipy_sparse_save_load_npz.py", "max_issues_repo_name": "vhn0912/python-snippets", "max_issues_repo_head_hexsha": "80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-08-10T03:22:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-12T20:31:17.000Z", "max_forks_repo_path": "notebook/scipy_sparse_save_load_npz.py", "max_forks_repo_name": "vhn0912/python-snippets", "max_forks_repo_head_hexsha": "80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 53, "max_forks_repo_forks_event_min_datetime": "2018-04-27T05:26:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:59:37.000Z", "avg_line_length": 17.3035714286, "max_line_length": 79, "alphanum_fraction": 0.6274509804, "include": true, "reason": "import numpy,from scipy", "num_tokens": 372}
|
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons
from matplotlib.colors import ListedColormap, BoundaryNorm
import functools
from math import floor
import numpy as np
from src.PathFindingAlgorithms import BFS, Dijkstra, Astar
class PathGUI():
    def __init__(self, Map, pathFinderType='BFS'):
        """
        Constructor to initialize the GUI
        arguments:
            Map: 2D numpy array of a map
            pathFinderType: name of the path finding algorithm ('BFS', 'Dij'; anything else selects A*)
        parameters:
            path: list to store the path returned by the path finding algorithm
            clicked: True while a mouse button is held down
            inAxis: True while the mouse is inside the plotting area
        """
        self.path = []
        self.clicked = False
        self.inAxis = False

        """
        Visualization Settings
        """
        self.fig, self.ax = plt.subplots(figsize=(10, 5), dpi=200)
        self.ax.tick_params(axis='both', bottom=False, left=False, labelbottom=False, labelleft=False)
        self.colors = {'blank': (0, 'white'), 'Wall': (1, '#312F2F'), 'Start': (2, '#1C5D99'), 'End': (3, '#178C23'), 'Path': (4, 'grey'), 'Mud': (5, '#A39171')}
        self.type = 'Start'
        cmap = ListedColormap([self.colors[i][1] for i in self.colors])
        bounds = BoundaryNorm([0, 1, 2, 3, 4, 5, 6], cmap.N)
        cmap.set_under(color='w', alpha=0)
        self.fig.subplots_adjust(left=0.2)
        self.bkg = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        self.radio_axis = self.fig.add_axes([0.05, 0.4, 0.1, 0.2])
        self.radio = RadioButtons(self.radio_axis, ('Start', 'End', 'Wall', 'Mud'))
        self.radio.on_clicked(self.set_color)
        self.fig.canvas.mpl_connect("motion_notify_event", self.on_move)
        self.fig.canvas.mpl_connect("button_press_event", self.on_click)
        self.fig.canvas.mpl_connect("button_release_event", self.on_release)
        self.fig.canvas.mpl_connect("axes_enter_event", self.on_enter)
        self.fig.canvas.mpl_connect("axes_leave_event", self.on_leave)

        """
        Initial Map Settings
        parameters:
            pathFinder: path finding algorithm -> BFS is the default
            start: location of starting node -> tuple (row, col)
            goal: location of goal node -> tuple (row, col)
            maxRow, maxCol: largest valid row/column indices -> int (maxRow is used to clamp clicks at the top edge of the map)
        """
        self.pathFinderType = pathFinderType
        self.map = np.flip(np.array(Map), axis=0)
        self.maxCol = len(self.map[0]) - 1  # last column index (width - 1)
        self.maxRow = len(self.map) - 1     # last row index (height - 1)
        self.start = (0, 0)
        self.goal = (self.maxRow, self.maxCol)
        self.map[self.start] = 2
        self.map[self.goal] = 3
        self.maze = self.ax.pcolormesh(self.map, edgecolors='black', norm=bounds, cmap=cmap)
        self.pathLine, = self.ax.plot([], [], color='red')
        self.scatter = self.ax.scatter([], [], c=[], cmap='viridis')
        self.ax.axis('scaled')

    def on_move(self, event):
        # while a mouse button is held down inside the axes, keep painting cells
        if self.clicked and self.inAxis:
            self.wallHandle(event)

    def wallHandle(self, event):
        # convert the click position into (row, col) cell coordinates
        tmp = (floor(event.ydata), floor(event.xdata))
        if tmp[0] > self.maxRow:
            # clamp clicks that land just above the top edge of the map
            tmp = (self.maxRow, tmp[1])
        if event.button == 1:
            # left button paints the currently selected cell type
            if self.type == 'Start':
                if not ((tmp == self.goal) or (self.map[tmp] == 1) or (self.map[tmp] == 2)):
                    self.map[self.start] = 0
                    self.start = tmp
                    self.map[self.start] = self.colors['Start'][0]
            elif self.type == 'End':
                if not ((tmp == self.start) or (self.map[tmp] == 1)):
                    self.map[self.goal] = 0
                    self.goal = tmp
                    self.map[self.goal] = self.colors['End'][0]
            elif self.type == 'Wall':
                if not (tmp == self.start or tmp == self.goal):
                    self.map[tmp] = self.colors['Wall'][0]
            else:
                if not (tmp == self.start or tmp == self.goal):
                    self.map[tmp] = self.colors['Mud'][0]
        else:
            # any other button erases the cell (start and goal cannot be erased)
            if not (tmp == self.start or tmp == self.goal):
                self.map[tmp] = 0
        self.update()

    def update(self):
        if self.path:
            x, y = zip(*self.path)
            x = [i + 0.5 for i in x]  # shift to cell centers
            y = [i + 0.5 for i in y]
            self.pathLine.set_data(y, x)
            points = np.array([(i[1] + 0.5, i[0] + 0.5) for i in self.pathFinder.closedList])
            # y = [i[1] + 0.5 for i in self.pathFinder.closedList]
            costs = np.array([self.pathFinder.closedList[i][1] for i in self.pathFinder.closedList])
            # print(costs)
            self.scatter.set_offsets(points)
            self.scatter.set_array(costs)
            # self.visitedNodes.set_data(y, x)
        self.maze.set_array(self.map.flatten())
        # restore background
        self.fig.canvas.restore_region(self.bkg)
        # redraw just the points
        self.ax.draw_artist(self.maze)
        self.ax.draw_artist(self.pathLine)
        self.ax.draw_artist(self.scatter)
        # fill in the axes rectangle
        self.fig.canvas.blit(self.ax.bbox)
        # self.fig.canvas.draw()

    def on_click(self, event):
        if self.inAxis:
            self.clicked = True
            self.wallHandle(event)

    def on_release(self, event):
        self.clicked = False
        if self.inAxis:
            # for node in self.path:
            #     if not self.map[node] == 1:
            #         self.map[node] = 0
            # re-solve the map with the selected algorithm on every mouse release
            if self.pathFinderType == 'BFS':
                self.pathFinder = BFS(self.map, self.start, self.goal)
            elif self.pathFinderType == 'Dij':
                self.pathFinder = Dijkstra(self.map, self.start, self.goal)
            else:
                self.pathFinder = Astar(self.map, self.start, self.goal)
            self.path = self.pathFinder.solve()
            if not self.path:
                print('No Path Found!')
            self.update()

    def on_enter(self, event):
        if event.inaxes == self.ax:
            self.inAxis = True

    def on_leave(self, event):
        self.inAxis = False

    def set_color(self, event):
        self.type = event
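
# A minimal usage sketch (assumptions: this module is importable as src.GUI,
# and src.PathFindingAlgorithms provides BFS, Dijkstra and Astar as used above):
#
#     import numpy as np
#     from src.GUI import PathGUI
#
#     grid = np.zeros((20, 40))                    # 0 = blank cell
#     gui = PathGUI(grid, pathFinderType='Astar')  # any name other than 'BFS'/'Dij' selects A*
#     plt.show()  # paint walls/mud, drag start and end, release the mouse to solve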
|
{"hexsha": "54c609fd70c2b19f6792ee5258fbe4db750ae52f", "size": 6343, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/GUI.py", "max_stars_repo_name": "Lilweavs/PathFinding", "max_stars_repo_head_hexsha": "56266ab6958473449852b9b47784daded797430d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GUI.py", "max_issues_repo_name": "Lilweavs/PathFinding", "max_issues_repo_head_hexsha": "56266ab6958473449852b9b47784daded797430d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GUI.py", "max_forks_repo_name": "Lilweavs/PathFinding", "max_forks_repo_head_hexsha": "56266ab6958473449852b9b47784daded797430d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9414634146, "max_line_length": 161, "alphanum_fraction": 0.548636292, "include": true, "reason": "import numpy", "num_tokens": 1530}
|
# Import all the useful libraries
import numpy as np
import pandas as pd
import fancyimpute
from sklearn import model_selection
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import AdaBoostClassifier # PROBABILITY
from sklearn.tree import DecisionTreeClassifier # PROBABILITY
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier # PROBABILITY
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier # PROBABILITY
from sklearn.linear_model import LogisticRegression # PROBABILITY
from sklearn.naive_bayes import GaussianNB # PROBABILITY
from sklearn.ensemble import ExtraTreesClassifier # PROBABILITY
from sklearn.ensemble import BaggingClassifier # PROBABILITY
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
# MISSING PARTS
# 1) send the distribution (mean and std) of the data if requested (for example, how the two classes are distributed over the age of the population, or any other feature)
# 2) send other useful data? (if available: feature importance, decision_path)
# ...
# training data -> expected to contain all the listed features (IN ORDER -> as in the data we have); missing values are allowed
class LocalModel:
# local model functions
# train
# predict
# initialize the local model with the training data
def __init__(self, data, target_name, model_name="dt4", random_state=12345678, imputation_strategy='mice',
balance_strategy='SMOTE'):
# we train the model with all the available data
        self.target_name = target_name  ## the name of the target column
self.target = None ## it is the target vector
self.data = data ## it is the complete dataset -> will be modified
self.original_data = data ## store a copy of the original data -> never modified
self.X = None ## it is the data except the target
self.features = None ## available features
# for cross-validation
self.cv_x = None # data -> in principle equal to self.X
self.cv_y = None # target -> in principle equal to self.target
self.random_state = random_state # random state -> fixed for testing
self.selected_model_name = 'dt4' # name of the model -> default fixed
self.selected_model = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15,
min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features=None,
random_state=self.random_state, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False) ## default model
self.models = [] ## list of all the available models
self.chosen_model(model_name) # select the chosen model -> otherwise use the default one
self.fixDataset(imputation_strategy=imputation_strategy,
                        balance_strategy=balance_strategy)  ## fix the data set before training -> clean the data (remove unused columns, convert categorical attributes into numerical ones), recover missing values (impute them with a chosen strategy), and balance the data set
# initiate the models
def models_definition(self, random_state):
        ## here we can tune the parameters of the models
self.models.append(("ada1",
AdaBoostClassifier(DecisionTreeClassifier(max_depth=1, random_state=self.random_state),
algorithm="SAMME", n_estimators=200)))
self.models.append(("ada2",
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3, random_state=self.random_state),
algorithm="SAMME", n_estimators=200)))
self.models.append(("ada3",
AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, random_state=self.random_state),
algorithm="SAMME", n_estimators=100)))
self.models.append(("ada4",
AdaBoostClassifier(DecisionTreeClassifier(max_depth=10, random_state=self.random_state),
algorithm="SAMME", n_estimators=300)))
self.models.append(("ada5",
AdaBoostClassifier(DecisionTreeClassifier(max_depth=20, random_state=self.random_state),
algorithm="SAMME", n_estimators=100)))
self.models.append(("ada6", AdaBoostClassifier(
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=2,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False,
random_state=self.random_state, verbose=0, warm_start=False))))
self.models.append(("ada7", AdaBoostClassifier(
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=5,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False,
random_state=self.random_state, verbose=0, warm_start=False))))
self.models.append(("ada8", AdaBoostClassifier(
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=10,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1, oob_score=False,
random_state=self.random_state, verbose=0, warm_start=False))))
# self.model.append(RadiusNeighborsClassifier(radius=10.0, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski'))
self.models.append(("ridge1",
RidgeClassifier(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None,
tol=0.001, class_weight=None, solver='auto',
random_state=self.random_state)))
paramsGB1 = {'n_estimators': 120, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01,
'min_samples_leaf': 1, 'random_state': self.random_state}
paramsGB2 = {'n_estimators': 120, 'max_depth': 6, 'subsample': 0.5, 'learning_rate': 0.05,
'min_samples_leaf': 1, 'random_state': self.random_state}
paramsGB3 = {'n_estimators': 60, 'max_depth': 15, 'subsample': 0.5, 'learning_rate': 0.01,
'min_samples_leaf': 1, 'random_state': self.random_state}
paramsGB4 = {'n_estimators': 320, 'max_depth': 10, 'subsample': 0.5, 'learning_rate': 0.005,
'min_samples_leaf': 1, 'random_state': self.random_state}
self.models.append(("gb1", GradientBoostingClassifier(**paramsGB1)))
self.models.append(("gb2", GradientBoostingClassifier(**paramsGB2)))
self.models.append(("gb3", GradientBoostingClassifier(**paramsGB3)))
self.models.append(("gb4", GradientBoostingClassifier(**paramsGB4)))
self.models.append(("dt1", DecisionTreeClassifier(random_state=self.random_state)))
self.models.append(("dt2",
DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=3, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None,
random_state=self.random_state, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False)))
self.models.append(("dt3",
DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=7, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None,
random_state=self.random_state, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False)))
self.models.append(("dt4",
DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None,
random_state=self.random_state, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False)))
self.models.append(("dt5", DecisionTreeClassifier(criterion='entropy', splitter='best', max_depth=None,
min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features=None,
random_state=self.random_state, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False)))
self.models.append(("rf1",
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=2,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False)))
self.models.append(("rf2",
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=5,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=20, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False)))
self.models.append(("rf3",
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=10,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=50, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False)))
self.models.append(("ld1",
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)))
self.models.append(("lr1", LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True,
intercept_scaling=1, class_weight=None,
random_state=self.random_state, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1)))
self.models.append(("knn1",
KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2,
metric='minkowski', metric_params=None, n_jobs=1)))
self.models.append(("knn2",
KNeighborsClassifier(n_neighbors=10, weights='uniform', algorithm='auto', leaf_size=30, p=2,
metric='minkowski', metric_params=None, n_jobs=1)))
self.models.append(("knn3",
KNeighborsClassifier(n_neighbors=15, weights='uniform', algorithm='auto', leaf_size=30, p=2,
metric='minkowski', metric_params=None, n_jobs=1)))
self.models.append(("knn4",
KNeighborsClassifier(n_neighbors=20, weights='distance', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1)))
self.models.append(("knn5",
KNeighborsClassifier(n_neighbors=50, weights='distance', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1)))
self.models.append(("nb1", GaussianNB()))
self.models.append(("et1", ExtraTreesClassifier(n_estimators=50, random_state=self.random_state)))
self.models.append(("et2", ExtraTreesClassifier(n_estimators=100, random_state=self.random_state)))
self.models.append(("et3", ExtraTreesClassifier(n_estimators=200, random_state=self.random_state)))
self.models.append(("bag1",
BaggingClassifier(base_estimator=None, n_estimators=5, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag2",
BaggingClassifier(base_estimator=None, n_estimators=10, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag3",
BaggingClassifier(base_estimator=None, n_estimators=20, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag4",
BaggingClassifier(base_estimator=None, n_estimators=50, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag5",
BaggingClassifier(base_estimator=None, n_estimators=100, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag6",
BaggingClassifier(base_estimator=None, n_estimators=150, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag7",
BaggingClassifier(base_estimator=None, n_estimators=200, max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False, oob_score=False,
warm_start=False, n_jobs=1, random_state=self.random_state, verbose=0)))
self.models.append(("bag8", BaggingClassifier(
base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=2,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False), n_estimators=200, max_samples=1.0,
max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1,
random_state=self.random_state, verbose=0)))
self.models.append(("bag9", BaggingClassifier(
base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=5,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False), n_estimators=200, max_samples=1.0,
max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1,
random_state=self.random_state, verbose=0)))
self.models.append(("bag10", BaggingClassifier(
base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=10,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False), n_estimators=200, max_samples=1.0,
max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1,
random_state=self.random_state, verbose=0)))
self.models.append(("bag11", BaggingClassifier(
base_estimator=RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=self.random_state, verbose=0,
warm_start=False), n_estimators=200, max_samples=1.0,
max_features=1.0, bootstrap=True, bootstrap_features=False, oob_score=False, warm_start=False, n_jobs=1,
random_state=self.random_state, verbose=0)))
## add other models ...
def chosen_model(self, name):
# initialize the available models
self.models_definition(self.random_state)
found = 0
for (n, i) in self.models: # n = name , i = model
if n == name and found == 0:
found = 1
self.selected_model = i
self.selected_model_name = name
if found == 0:
# feel free to modify the model.. if another is better
self.selected_model = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=15,
min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features=None,
random_state=self.random_state, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
class_weight=None, presort=False)
self.selected_model_name = "dt4"
return
## to choose the best model using cross validation
    ## normally cross-validate just the chosen model; if all_models = 1 -> cross-validate all the models
def crossValidation(self, all_models=0, k_fold=10, random_state=12345678):
# cross validation
if all_models == 1:
print("begin cross validation for all models")
evaluation = []
counter = 1
numberOfModels = len(self.models)
# best = ("BEST", 0, 0)
for (name, i) in self.models:
print(round(counter / numberOfModels, 3), " is complete \t")
e = model_selection.cross_val_score(i, self.cv_x, self.cv_y,
cv=StratifiedKFold(n_splits=k_fold, random_state=random_state,
shuffle=True))
avg = round(np.average(e), 4) * 100
std = round(np.std(e), 4) * 100
evaluation.append((name, avg, std))
counter = counter + 1
evaluation.sort(key=lambda tup: tup[1], reverse=True)
df_cv = pd.DataFrame(evaluation)
print("end cross validation")
return df_cv
else:
e = model_selection.cross_val_score(self.selected_model, self.cv_x, self.cv_y,
cv=StratifiedKFold(n_splits=k_fold, random_state=random_state,
shuffle=True))
t = pd.DataFrame([(self.selected_model_name, round(np.average(e), 4) * 100, round(np.std(e), 4) * 100)])
return t
def showData(self, lines=5, original_data=0):
if original_data == 1:
print(self.original_data.head(lines))
else:
print(self.data.head(lines))
# remove unused features, convert categorical attributes to numerical ones
def cleanData(self):
print("START CLEANING")
        # re-start from the original data
self.data = self.original_data
if 'Soggetti' in self.data.columns:
self.data = self.data.drop('Soggetti', axis=1)
if 'PCneg' in self.data.columns:
self.data = self.data.drop('PCneg', axis=1)
if 'IPG' in self.data.columns:
self.data = self.data.drop('IPG', axis=1)
if 'sbjBeatConsidered' in self.data.columns:
self.data = self.data.drop('sbjBeatConsidered', axis=1)
if 'numRRaveraged' in self.data.columns:
self.data = self.data.drop('numRRaveraged', axis=1)
# convert categorical variables into numerical
if 'patsex' in self.data.columns and (
"männlich" in self.data["patsex"].values or "weiblich" in self.data["patsex"].values):
self.data['patsex'] = self.data['patsex'].map({'männlich': 1, 'weiblich': 0})
if 'AFclass' in self.data.columns and (
"persistierend (>7 Tage, EKV)" in self.data["AFclass"].values or "paroxysmal" in self.data[
"AFclass"].values):
self.data["AFclass"] = self.data["AFclass"].map({'persistierend (>7 Tage, EKV)': 1, 'paroxysmal': 0})
# extract features
self.features = self.data.columns[self.data.columns != self.target_name]
self.X = self.data[self.features]
self.target = self.data[self.target_name]
print("END CLEANING")
# clean the test data -> first drop unused data -> make it "compliant" to the features of the dataset
def cleanDataTest(self, test_x, features="self_features"):
print("START TEST CLEANING")
print("TEST_X : ", test_x.shape)
print(test_x)
# convert categorical variables into numerical
if 'patsex' in test_x.columns and (
"männlich" in test_x["patsex"].values or "weiblich" in test_x["patsex"].values):
test_x['patsex'] = test_x['patsex'].map({'männlich': 1, 'weiblich': 0})
if str(features) == "self_features":
list_of_features = self.features
else:
list_of_features = features
# drop all the columns that are not present in the training dataset
for i in test_x.columns:
if i not in list_of_features:
test_x = test_x.drop(i, axis=1)
# add columns that are not present in the test set
for i in list_of_features:
if i not in test_x.columns:
test_x[i] = np.nan
## REORDER the features
test_x = test_x[list_of_features]
print("END TEST CLEANING")
print("DATA shape : ", self.data.shape)
return test_x
## data -> it is the dataset we want to 'recover'
def imputeData(self, dataframe, imputation_strategy='knn', features="self_features"):
try:
if imputation_strategy == 'knn':
x_complete_a = fancyimpute.KNN(15).complete(dataframe)
## feel free to add other imputation methods
# ...
else: ## default case -> MICE impute method
mice = fancyimpute.MICE(n_imputations=100, impute_type='col', n_nearest_columns=5)
x_complete_a = mice.complete(dataframe)
        except Exception:
x_complete_a = dataframe
print("x_incomplete shape : ", x_complete_a.shape)
if str(features) == "self_features":
f = self.features
else:
f = features
print("FEATURESS : ", f.size, f)
return pd.DataFrame(x_complete_a, columns=f)
def recoverMissing(self, data='trainData', imputation_strategy='mice'):
print("START RecoverMissing VALUES")
if str(data) == 'trainData':
x_incomplete = self.data[self.features]
else:
x_incomplete = data[self.features]
# print (x_incomplete)
        # create a united dataset -> assume it is possible: if we clean first, then it is
if str(data) != 'trainData':
united_df = pd.concat([x_incomplete, self.X])
united_complete = self.imputeData(united_df, features=x_incomplete.columns)
x_complete = united_complete.iloc[:x_incomplete.shape[0], :x_incomplete.shape[1]]
# print ("united_complete shape : ",united_complete.shape )
else:
x_complete = self.imputeData(x_incomplete)
'''
try:
if imputation_strategy == 'knn':
x_complete_a = fancyimpute.KNN(15).complete(x_incomplete)
## feel free to add other imputation methods
# ...
else : ## default case -> MICE impute method
mice = fancyimpute.MICE(n_imputations=100, impute_type='col', n_nearest_columns=5)
x_complete_a = mice.complete(x_incomplete)
except:
x_complete_a = x_incomplete
x_complete = pd.DataFrame(x_complete_a, columns = self.features)
'''
if str(data) == 'trainData':
self.X = x_complete
return x_complete
def balanceDataSet(self, data="trainData", target_name="AFclass", balance_strategy='SMOTE'):
if str(data) == "trainData":
X = self.X
            y = self.data[self.target_name].values
target_name = self.target_name
else:
X = data[data.columns[data.columns != target_name]]
            y = data[target_name].values
y_new = pd.DataFrame(y)
y_new = y_new.rename(columns={y_new.columns[0]: target_name})
Data_complete = pd.concat([X, y_new], axis=1)
if balance_strategy == 'ADASYN':
try:
print("Try ADASYN")
X_resampled, y_resampled = ADASYN().fit_sample(X, y_new)
except:
print("ADASYN FAILED -> used SMOTE")
X_resampled, y_resampled = SMOTE().fit_sample(X, y_new)
## feel free to add other balancing strategies
# ...
else: # default SMOTE
X_resampled, y_resampled = SMOTE().fit_sample(X, y_new)
X_final = pd.DataFrame(X_resampled, columns=self.features)
Y_final = pd.DataFrame(y_resampled)
Y_final = Y_final.rename(columns={Y_final.columns[0]: self.target_name})
Data_final = pd.concat([X_final, Y_final], axis=1)
if str(data) == "trainData":
self.X = X_final
self.target = Y_final
self.cv_x = X_final
self.cv_y = Y_final
self.data = Data_final
return Data_final
# clean the data, recover missing values, balance the dataset
def fixDataset(self, imputation_strategy='mice', balance_strategy='SMOTE'):
print("begin fixing dataset")
self.cleanData()
self.recoverMissing(imputation_strategy=imputation_strategy)
self.balanceDataSet(balance_strategy=balance_strategy)
print("end fixing dataset")
# train the selected model
def train(self):
        ## use all the available data -> we assume we know the best model; otherwise use the crossValidation function to choose one
print("begin training")
self.selected_model.fit(self.X, self.target)
print("end training")
    # predict using the trained model; test is a DataFrame of samples
    # return the prediction for all rows in test, and all the other useful data (according to the selected_model used to predict)
def predict(self, test):
original_x = test
train = test.copy()
x_test = self.cleanDataTest(test_x=train)
x_test = self.recoverMissing(data=x_test)
result = x_test.copy()
prediction = self.selected_model.predict(x_test)
result['prediction'] = prediction
# decision_path = None
# features_importance = None
if callable(getattr(self.selected_model, "predict_proba")):
predict_proba_df = pd.DataFrame(self.selected_model.predict_proba(x_test),
columns=self.selected_model.classes_)
result['predict_proba_zero'] = predict_proba_df[predict_proba_df.columns[0]]
result['predict_proba_uno'] = predict_proba_df[predict_proba_df.columns[1]]
if callable(getattr(self.selected_model, "predict_log_proba")):
predict_log_proba_df = pd.DataFrame(self.selected_model.predict_log_proba(x_test),
columns=self.selected_model.classes_)
result['predict_log_proba_zero'] = predict_log_proba_df[predict_log_proba_df.columns[0]]
result['predict_log_proba_uno'] = predict_log_proba_df[predict_log_proba_df.columns[1]]
return pd.DataFrame(result)
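if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the file name 'patients.csv',
    # the target column 'AFclass', and the model key 'rf3' are assumptions,
    # not part of this module.
    df = pd.read_csv("patients.csv")
    model = LocalModel(df, target_name="AFclass", model_name="rf3")
    print(model.crossValidation())      # cross-validate the selected model
    model.train()                       # fit on the full, fixed dataset
    print(model.predict(df.sample(5)))  # per-row predictions (+ probabilities)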
|
{"hexsha": "f9469d662c8a394418e1281849c9338717fd6652", "size": 33289, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/LocalModel.py", "max_stars_repo_name": "hades208002/mdp-project", "max_stars_repo_head_hexsha": "c242a8d00412cc3772d298986977f6acc47002ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/LocalModel.py", "max_issues_repo_name": "hades208002/mdp-project", "max_issues_repo_head_hexsha": "c242a8d00412cc3772d298986977f6acc47002ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/LocalModel.py", "max_forks_repo_name": "hades208002/mdp-project", "max_forks_repo_head_hexsha": "c242a8d00412cc3772d298986977f6acc47002ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.1669829222, "max_line_length": 268, "alphanum_fraction": 0.5736129052, "include": true, "reason": "import numpy", "num_tokens": 6656}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 08:15:32 2018
Computes the global extrema metrics for regridded and reference target data
@author: jeguerra
"""
import numpy as np
def computeGlobalExtremaMetrics(varS2T, varST):
    Lden = np.max(abs(varST)) - np.min(abs(varST))  # reference dynamic range
    L_min = min(np.amin(varS2T) - np.amin(varST), 0.0) / Lden  # < 0 indicates failure
    L_max = max(np.amax(varS2T) - np.amax(varST), 0.0) / Lden  # > 0 indicates failure
    return L_min, L_max
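# In symbols, with Lden = max|v_st| - min|v_st| the reference dynamic range:
# L_min = min(min(v_s2t) - min(v_st), 0) / Lden (< 0 flags an undershoot)
# L_max = max(max(v_s2t) - max(v_st), 0) / Lden (> 0 flags an overshoot)
if __name__ == '__main__':
    # Illustrative self-check on synthetic data (not part of the original
    # file): scaling the reference by 1.5 pushes both extrema outside the
    # reference range, so L_min < 0 and L_max > 0 both flag failures.
    ref = np.linspace(-1.0, 1.0, 100)
    regridded = 1.5 * ref
    print(computeGlobalExtremaMetrics(regridded, ref))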
|
{"hexsha": "d0819aef474400c99d08b41c868c60d12b9ce2e8", "size": 530, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/computeGlobalExtremaMetrics.py", "max_stars_repo_name": "CANGA/MIRA", "max_stars_repo_head_hexsha": "2f1214d34b884790fa8660b5208cd12495800f92", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-23T20:28:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T15:09:49.000Z", "max_issues_repo_path": "src/computeGlobalExtremaMetrics.py", "max_issues_repo_name": "CANGA/Remapping-Intercomparison", "max_issues_repo_head_hexsha": "2f1214d34b884790fa8660b5208cd12495800f92", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-03-18T17:08:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-15T21:09:25.000Z", "max_forks_repo_path": "src/computeGlobalExtremaMetrics.py", "max_forks_repo_name": "CANGA/Remapping-Intercomparison", "max_forks_repo_head_hexsha": "2f1214d34b884790fa8660b5208cd12495800f92", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0434782609, "max_line_length": 75, "alphanum_fraction": 0.6452830189, "include": true, "reason": "import numpy", "num_tokens": 177}
|
import numpy as np
import pandas as pd
from config.config import DATA_PATH
from src.data.preprocess_data import preprocess_data
from src.features.vectorize import split_dataset, fit_vectorizer, transform_vectorizer
from sklearn.metrics import f1_score, classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
def multinomial_nv_clf(X_train_vec, X_test_vec, y_train, y_test):
clf = OneVsRestClassifier(MultinomialNB())
clf.fit(X_train_vec, y_train)
y_pred = clf.predict(X_test_vec)
score = f1_score(y_test, y_pred, average="macro", labels=np.unique(y_pred))
clf_rep = classification_report(y_test, y_pred)
return clf, score, clf_rep
if __name__ == "__main__":
df = pd.read_csv(DATA_PATH, header=None)
df = preprocess_data(df)
X_train, X_test, y_train, y_test = split_dataset(df)
vect = fit_vectorizer(X_train)
X_train_vec = transform_vectorizer(vectorizer=vect, data=X_train)
X_test_vec = transform_vectorizer(vectorizer=vect, data=X_test)
clf, score, clf_rep = multinomial_nv_clf(X_train_vec, X_test_vec, y_train, y_test)
input = ["I love apples"]
vec = transform_vectorizer(vectorizer=vect, data=input)
pred = clf.predict_proba(vec)
print(list(enumerate(pred.ravel().tolist())))
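    # Illustrative extension (an assumption, not in the original script):
    # pair each probability with its label; OneVsRestClassifier exposes the
    # fitted label order as clf.classes_.
    for label, proba in zip(clf.classes_, pred.ravel().tolist()):
        print(f"{label}: {proba:.4f}")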
|
{"hexsha": "75d24f29f694fb9a2e8e15a8122ef8bbb48f33c0", "size": 1349, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/baseline.py", "max_stars_repo_name": "silencedsre/Fusemachines-AI-Training", "max_stars_repo_head_hexsha": "46ae5be0895fee66ef8a37f1da40a6d3a4bfde5a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/baseline.py", "max_issues_repo_name": "silencedsre/Fusemachines-AI-Training", "max_issues_repo_head_hexsha": "46ae5be0895fee66ef8a37f1da40a6d3a4bfde5a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/baseline.py", "max_forks_repo_name": "silencedsre/Fusemachines-AI-Training", "max_forks_repo_head_hexsha": "46ae5be0895fee66ef8a37f1da40a6d3a4bfde5a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4722222222, "max_line_length": 86, "alphanum_fraction": 0.7709414381, "include": true, "reason": "import numpy", "num_tokens": 338}
|
from skimage import io as sio
from skimage import color  # imported explicitly; `import skimage` alone may not load submodules
import skimage
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def reverse_jet(img):
    # Recover the scalar field behind a jet-colormapped RGB image by nearest-
    # neighbour lookup: for every pixel, find the jet LUT entry whose RGB
    # value is closest in squared Euclidean distance and return its index.
    img = skimage.img_as_float(sio.imread(img))  # assumes a 3-channel RGB image
    jet = plt.cm.jet
    jet._init()                      # populate the colormap's private lookup table
    lut = jet._lut[..., :3]          # LUT entries with the alpha channel dropped
    z = img - lut[:, None, None, :]  # (N, H, W, 3) differences to each LUT entry
    z *= z
    d = z.sum(axis=-1)               # squared distance to each LUT entry
    out = d.argmin(axis=0)           # index of the closest entry per pixel
    return out
if __name__ == '__main__':
url = 'im2.tif'
img = plt.imread(url)
out = reverse_jet(url)
f, (ax0, ax1, ax2) = plt.subplots(1, 3)
ax0.imshow(img)
    ax1.imshow(skimage.color.rgb2gray(img), cmap=plt.cm.gray)
ax2.imshow(out, cmap=plt.cm.gray)
ax0.set_title("original")
ax1.set_title("grayscale")
ax2.set_title("reversed jet")
for ax in [ax0, ax1, ax2]:
ax.axis('off')
plt.show()
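    # Round-trip sanity sketch (illustrative; 'jet_demo.png' is a temporary
    # file created here, not part of the original script): colormap a known
    # ramp with jet, reverse it, and compare with the expected LUT indices.
    # Colormap lookup plus 8-bit quantization should keep the error within a
    # couple of LUT steps.
    ramp = np.linspace(0.0, 1.0, 64).reshape(8, 8)
    sio.imsave('jet_demo.png', (plt.cm.jet(ramp)[..., :3] * 255).astype(np.uint8))
    recovered = reverse_jet('jet_demo.png')
    expected = np.round(ramp * (plt.cm.jet.N - 1)).astype(int)
    print("max index error:", np.abs(recovered - expected).max())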
|
{"hexsha": "39f6f1fe5bc9be97730b53ab69c6a6fe24660b0d", "size": 796, "ext": "py", "lang": "Python", "max_stars_repo_path": "reverseJet/reverseJet.py", "max_stars_repo_name": "ajpfahnl/labUtils", "max_stars_repo_head_hexsha": "11130f08100809fb9820f6771cb0666e4908a116", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-15T03:29:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-14T10:45:30.000Z", "max_issues_repo_path": "reverseJet/reverseJet.py", "max_issues_repo_name": "ajpfahnl/labUtils", "max_issues_repo_head_hexsha": "11130f08100809fb9820f6771cb0666e4908a116", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reverseJet/reverseJet.py", "max_forks_repo_name": "ajpfahnl/labUtils", "max_forks_repo_head_hexsha": "11130f08100809fb9820f6771cb0666e4908a116", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-04T05:58:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-04T05:58:00.000Z", "avg_line_length": 20.4102564103, "max_line_length": 61, "alphanum_fraction": 0.6118090452, "include": true, "reason": "import numpy", "num_tokens": 242}
|
[STATEMENT]
lemma step_4_push_big_size_ok_1: "\<lbrakk>
invar (States dir big small);
4 \<le> remaining_steps (States dir big small);
(step^^4) (States dir (Big.push x big) small) = States dir' big' small';
remaining_steps (States dir big small) + 1 \<le> 4 * size small
\<rbrakk> \<Longrightarrow> remaining_steps (States dir' big' small') + 1 \<le> 4 * size small'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>invar (States dir big small); 4 \<le> remaining_steps (States dir big small); (step ^^ 4) (States dir (Big.push x big) small) = States dir' big' small'; remaining_steps (States dir big small) + 1 \<le> 4 * size small\<rbrakk> \<Longrightarrow> remaining_steps (States dir' big' small') + 1 \<le> 4 * size small'
[PROOF STEP]
by (smt (verit, ccfv_SIG) Nat.le_diff_conv2 add_leD2 invar_push_big le_add1 le_add_diff_inverse2 remaining_steps_n_steps_sub remaining_steps_push_big step_n_size_small)
|
{"llama_tokens": 351, "file": "Real_Time_Deque_States_Proof", "length": 1}
|
using Documenter, LearnBase, LossFunctions
istravis = "TRAVIS" ∈ keys(ENV)
makedocs(
format = Documenter.HTML(assets=["assets/style.css","assets/favicon.ico"], prettyurls=istravis),
sitename = "LossFunctions.jl",
authors = "Christof Stocker, Tom Breloff, Alex Williams",
pages = [
hide("Home" => "index.md"),
"Introduction" => [
"introduction/gettingstarted.md",
"introduction/motivation.md",
],
"User's Guide" => [
"user/interface.md",
"user/aggregate.md",
],
"Available Losses" => [
"losses/distance.md",
"losses/margin.md",
"losses/other.md",
],
"Advances Topics" => [
"advanced/extend.md",
"advanced/developer.md",
],
hide("Indices" => "indices.md"),
"acknowledgements.md",
"LICENSE.md",
],
)
deploydocs(repo="github.com/JuliaML/LossFunctions.jl.git")
|
{"hexsha": "c3033084d7f6f172ad19ea6512b779f207585f5a", "size": 983, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "Evizero/MLModels.jl", "max_stars_repo_head_hexsha": "521ff95e5514329b7c98260df99cfde25d08d3b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 103, "max_stars_repo_stars_event_min_datetime": "2016-11-03T06:52:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T16:45:19.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "JuliaML/LossFunctions.jl", "max_issues_repo_head_hexsha": "cd5f5a493ced706825489de5e5cae60492b8fc2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 91, "max_issues_repo_issues_event_min_datetime": "2016-10-17T21:39:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-03T00:38:47.000Z", "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "Evizero/MLModels.jl", "max_forks_repo_head_hexsha": "521ff95e5514329b7c98260df99cfde25d08d3b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2016-10-17T11:10:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T02:21:29.000Z", "avg_line_length": 28.0857142857, "max_line_length": 100, "alphanum_fraction": 0.5422177009, "num_tokens": 238}
|
import numpy as np
import nest
import matplotlib.pyplot as plt
import pylab as pl
###################################################################################
# Parameters
###################################################################################
# General simulation parameters
dt = 0.1 # simulation resolution (ms)
MSP_update_interval = 100 # update interval for MSP (ms)
# Parameters for asynchronous irregular firing
g = 12.0 # ratio between maximum amplitude of EPSP and EPSP
eta = 1.5 # ratio between external rate and external frequency needed for the mean input to reach threshold in absence of feedback
eps = 0.1 # connection probability for static connections (all but EE)
order = 100 # order of network size
NE = 4*order # number of excitatory neurons
NI = 1*order # number of inhibitory neurons
N = NE+NI # total number of neurons
CE = int(eps*NE) # number of incoming excitatory synapses per inhibitory neuron
CI = int(eps*NI) # number of incoming inhibitory synapses per neuron
# Growth
growth_time = 20000. # growth time (ms)
cicles = 10 # cycles for recording during growth
growth_step = growth_time/cicles
# Stimulation
stimulation_time = 20000. # stimulation time (ms)
stimulation_cicles = 10 # cycles for recording during stimulation
stimulation_strength = 1.1 # modulation of external input firing rate during stimulation
stimulated_fraction = 0.1 # fraction of excitatory neurons stimulated
stimulated_pop = int(stimulated_fraction*NE)
stimulation_end = growth_time + stimulation_time
stimulation_step = stimulation_time / stimulation_cicles
# Post stimulation
post_stimulation_time = 20000. # time post stimulation (ms)
post_stimulation_cicles = 10 # cycles for recording post stimulation
post_stimulation_end = stimulation_end + post_stimulation_time
post_stimulation_step = post_stimulation_time / post_stimulation_cicles
# Decay
decay_time = 40000. # time after post stimulation period (ms)
decay_cicles = 10 # cycles for recording after post stimulation period
decay_end = post_stimulation_end + decay_time
decay_step = decay_time / decay_cicles
# Parameters of the integrate and fire neuron
neuron_model = "iaf_psc_delta"
CMem = 250.0 # membrane capacitance (pF)
tauMem = 20.0 # membrane time constant (ms)
theta = 20.0 # spike threshold (mV)
t_ref = 2. # refractory period (ms)
E_L = 0. # resting membrane potential (mV)
V_reset = 10. # reset potential of the membrane (mV)
V_m = 0. # initial membrane potential (mV)
tau_Ca = 1000. # time constant for calcium trace (ms)
beta_Ca = 1./tau_Ca # increment on calcium trace per spike (1/ms)
J = 0.1 # postsynaptic amplitude in mV
delay = 1. # synaptic delay (ms)
neuron_params = {
"C_m" : CMem,
"tau_m" : tauMem,
"t_ref" : t_ref,
"E_L" : E_L,
"V_reset" : V_reset,
"V_m" : V_m,
"beta_Ca" : beta_Ca,
"tau_Ca" : tau_Ca,
"V_th" : theta
}
# External input rate
nu_th = theta/(J*CE*tauMem)
nu_ex = eta*nu_th
rate = 1000.0*nu_ex*CE
# Parameter for structural plasticity
growth_curve = "linear" # type of growth curve for synaptic elements
z0 = 1. # initial number of synaptic elements
slope = -0.5 # slope of growth curve for synaptic elements
synapse_model = "static_synapse" # plastic EE synapse type
###################################################################################
# Estimation of target rate for 10% connection probability
###################################################################################
simtime = 10000.
rectime = 8000.
nest.ResetKernel()
nest.SetDefaults(neuron_model, neuron_params)
pop_exc = nest.Create(neuron_model, NE)
pop_inh = nest.Create(neuron_model, NI)
poisson_generator = nest.Create('poisson_generator',params={'rate':rate})
spike_detector = nest.Create('spike_detector',params={'start':simtime-rectime})
nest.Connect(pop_exc, pop_exc+pop_inh,{'rule': 'fixed_indegree','indegree': CE},syn_spec={"weight":J, "delay":delay})
nest.Connect(pop_inh, pop_exc+pop_inh,{'rule': 'fixed_indegree','indegree': CI},syn_spec={"weight":-g*J, "delay":delay})
nest.Connect(poisson_generator, pop_exc+pop_inh,'all_to_all',syn_spec={"weight":J, "delay":delay})
nest.Connect(pop_exc, spike_detector,'all_to_all')
nest.Simulate(simtime)
target_rate = nest.GetStatus(spike_detector,'n_events')[0]/rectime/NE # target rate of excitatory neurons (/ms)
###################################################################################
# Simulation setup
###################################################################################
# Set Kernel
nest.ResetKernel()
nest.EnableStructuralPlasticity()
nest.SetKernelStatus({"resolution" : dt,
"print_time" : True,
"structural_plasticity_update_interval" : int(MSP_update_interval/dt) # update interval for MSP in time steps
})
# Set model defaults
nest.SetDefaults(neuron_model, neuron_params)
nest.CopyModel(neuron_model, 'excitatory')
nest.CopyModel(neuron_model, 'inhibitory')
nest.CopyModel("static_synapse","device",{"weight":J, "delay":delay})
nest.CopyModel("static_synapse","inhibitory_synapse",{"weight":-g*J, "delay":delay})
nest.CopyModel("static_synapse","EI_synapse",{"weight":J, "delay":delay})
nest.CopyModel(synapse_model, 'msp_excitatory')
nest.SetDefaults('msp_excitatory',{'weight': J,'delay': delay})
# Assign synaptic elements with growth curve to excitatory neuron model
gc_den = {'growth_curve': growth_curve, 'z': z0, 'growth_rate': -slope*target_rate, 'eps': target_rate, 'continuous': False}
gc_axon = {'growth_curve': growth_curve, 'z': z0, 'growth_rate': -slope*target_rate, 'eps': target_rate, 'continuous': False}
nest.SetDefaults('excitatory', 'synaptic_elements', {'Axon_exc': gc_axon, 'Den_exc': gc_den})
# Use SetKernelStatus to activate the plastic synapses
nest.SetKernelStatus({
'structural_plasticity_synapses': {
'syn1': {
'model': 'msp_excitatory',
'post_synaptic_element': 'Den_exc',
'pre_synaptic_element': 'Axon_exc',
}
},
'autapses': False,
})
# Create nodes
pop_exc = nest.Create('excitatory', NE)
pop_inh = nest.Create('inhibitory', NI)
poisson_generator_ex = nest.Create('poisson_generator',2)
poisson_generator_inh = nest.Create('poisson_generator')
spike_detector = nest.Create("spike_detector")
nest.SetStatus(poisson_generator_ex, {"rate": rate})
nest.SetStatus(poisson_generator_inh, {"rate": rate})
nest.SetStatus(spike_detector,{"withtime": True, "withgid": True})
# Connect nodes
nest.Connect(pop_exc, pop_inh,{'rule': 'fixed_indegree','indegree': CE},'EI_synapse')
nest.Connect(pop_inh, pop_exc+pop_inh,{'rule': 'fixed_indegree','indegree': CI},'inhibitory_synapse')
nest.Connect([poisson_generator_ex[0]], pop_exc[:stimulated_pop],'all_to_all', model="device")
nest.Connect([poisson_generator_ex[1]], pop_exc[stimulated_pop:],'all_to_all', model="device")
nest.Connect(poisson_generator_inh, pop_inh,'all_to_all',model="device")
nest.Connect(pop_exc+pop_inh, spike_detector,'all_to_all',model="device")
def simulate_cicle(growth_steps,global_index):
    # Advance the simulation in equal steps; after each step record the mean
    # EE connectivity (split by stimulated / non-stimulated subpopulations)
    # and the population firing rates, then reset the spike detector.
    step = np.diff(growth_steps)[0]
for simulation_time in growth_steps:
nest.Simulate(step)
local_connections = nest.GetConnections(pop_exc, pop_exc)
sources = np.array(nest.GetStatus(local_connections,'source'))
targets = np.array(nest.GetStatus(local_connections,'target'))
matrix = np.zeros((NE,NE))
for ii in np.arange(sources.shape[0]):
matrix[targets[ii]-1,sources[ii]-1] += 1
connectivity[0,0,global_index] = np.mean(matrix[:stimulated_pop,:stimulated_pop])
connectivity[0,1,global_index] = np.mean(matrix[:stimulated_pop,stimulated_pop:])
connectivity[1,0,global_index] = np.mean(matrix[stimulated_pop:,:stimulated_pop])
connectivity[1,1,global_index] = np.mean(matrix[stimulated_pop:,stimulated_pop:])
events = nest.GetStatus(spike_detector,'events')[0]
times = events['times']
senders = events['senders']
spike_count = np.histogram(senders,bins=np.array([0,stimulated_pop,NE,N]))[0]
firing_rate[:,global_index] = spike_count/np.array([stimulated_pop,NE-stimulated_pop,NI]).astype(float)/step*1000.
nest.SetStatus(spike_detector,'n_events',0)
global_index += 1
return matrix,global_index
# Create time steps and initialize recording arrays
growth_steps = np.arange(growth_step,growth_time+1,growth_step)
stimulation_steps = np.arange(growth_time+stimulation_step, stimulation_end+1, stimulation_step)
post_stimulation_steps = np.arange(stimulation_end+post_stimulation_step, post_stimulation_end+1, post_stimulation_step)
decay_steps = np.arange(post_stimulation_end+decay_step, decay_end+1, decay_step)
all_steps = np.concatenate(([0],growth_steps,stimulation_steps,post_stimulation_steps,decay_steps))
connectivity = np.zeros((2,2,all_steps.shape[0]))
firing_rate = np.zeros((3,all_steps.shape[0]))
global_index = 1
###################################################################################
# Simulate
###################################################################################
# Grow network
matrix_before,global_index = simulate_cicle(growth_steps,global_index)
# Stimulate
nest.SetStatus([poisson_generator_ex[0]], {"rate": rate*stimulation_strength})
matrix,global_index = simulate_cicle(stimulation_steps,global_index)
# Post stimulation
nest.SetStatus([poisson_generator_ex[0]], {"rate": rate})
matrix_post,global_index = simulate_cicle(post_stimulation_steps,global_index)
# Decay
matrix,global_index = simulate_cicle(decay_steps,global_index)
###################################################################################
# Plotting
###################################################################################
all_steps /= 1000.
fig = plt.figure(figsize=(10,7))
matrix_before = matrix_before[:int(NE/2),:int(NE/2)] # plot matrix only for half of the excitatory population
matrix_post = matrix_post[:int(NE/2),:int(NE/2)] # plot matrix only for half of the excitatory population
max1 = np.max(matrix_before)
max2 = np.max(matrix_post)
max_syn = max(max1,max2)+1
cmap = pl.cm.get_cmap('CMRmap_r',max_syn)
ax = fig.add_subplot(2,2,1)
cax = ax.imshow(matrix_before,cmap=cmap,interpolation="nearest")
cax.set_clim(-0.5,max_syn-0.5)
ax.set_xlabel("Pre")
ax.set_ylabel("Post")
ax.set_title("Time = %.f s" %(growth_steps[-1]/1000.))
ax = fig.add_subplot(2,2,2)
cax = ax.imshow(matrix_post,cmap=cmap,interpolation="nearest")
cbar = fig.colorbar(cax,ticks=np.arange(0,max_syn,1))
cbar.set_label("Number synapses")
cax.set_clim(-0.5,max_syn-0.5)
ax.set_title("Time = %.f s" %(post_stimulation_steps[-1]/1000.))
ax = fig.add_subplot(2,2,3)
ax.plot(all_steps,connectivity[0,0,:],'g',label=r'S$\to$S')
ax.plot(all_steps,connectivity[0,1,:],'orange',label=r'E$\to$S')
ax.plot(all_steps,connectivity[1,0,:],'gray',label=r'S$\to$E')
ax.plot(all_steps,connectivity[1,1,:],'b',label=r'E$\to$E')
ax.legend(loc=2)
ax.set_ylabel("Connection probability")
ax.set_xlabel("Time (s)")
ax = fig.add_subplot(2,2,4)
ax.plot(all_steps,firing_rate[0,:],'g',label='S')
ax.plot(all_steps,firing_rate[1,:],'b',label='E')
ax.plot(all_steps,firing_rate[2,:],'r',label='I')
ax.legend(loc=4)
ax.set_ylabel("Population rate (Hz)")
ax.set_xlabel("Time (s)")
fig.savefig('figure.pdf',format='pdf')
|
{"hexsha": "9eb6ddce0b01cf85243f781c8a4a27c063dae299", "size": 12389, "ext": "py", "lang": "Python", "max_stars_repo_path": "associtive_properties.py", "max_stars_repo_name": "juliavg/assembly-structural-plasticity", "max_stars_repo_head_hexsha": "d00fc61fc6fea625f1f133efbc5847e8c0cc0341", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "associtive_properties.py", "max_issues_repo_name": "juliavg/assembly-structural-plasticity", "max_issues_repo_head_hexsha": "d00fc61fc6fea625f1f133efbc5847e8c0cc0341", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "associtive_properties.py", "max_forks_repo_name": "juliavg/assembly-structural-plasticity", "max_forks_repo_head_hexsha": "d00fc61fc6fea625f1f133efbc5847e8c0cc0341", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.725631769, "max_line_length": 148, "alphanum_fraction": 0.6298329163, "include": true, "reason": "import numpy", "num_tokens": 3025}
|
[STATEMENT]
lemma transpose_mat_plus: assumes wf: "mat nr nc m1" "mat nr nc m2"
shows "transpose nr (mat_plusI pl m1 m2) = mat_plusI pl (transpose nr m1) (transpose nr m2)" (is "?l = ?r")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2)
[PROOF STEP]
proof (rule mat_eqI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mat ?nr ?nc (Matrix_Legacy.transpose nr (mat_plusI pl m1 m2))
2. mat ?nr ?nc (mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2))
3. \<And>i j. \<lbrakk>i < ?nc; j < ?nr\<rbrakk> \<Longrightarrow> Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
[PROOF STEP]
fix i j
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. mat ?nr ?nc (Matrix_Legacy.transpose nr (mat_plusI pl m1 m2))
2. mat ?nr ?nc (mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2))
3. \<And>i j. \<lbrakk>i < ?nc; j < ?nr\<rbrakk> \<Longrightarrow> Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
[PROOF STEP]
assume i: "i < nr" and j: "j < nc"
[PROOF STATE]
proof (state)
this:
i < nr
j < nc
goal (3 subgoals):
1. mat ?nr ?nc (Matrix_Legacy.transpose nr (mat_plusI pl m1 m2))
2. mat ?nr ?nc (mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2))
3. \<And>i j. \<lbrakk>i < ?nc; j < ?nr\<rbrakk> \<Longrightarrow> Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
[PROOF STEP]
note [simp] = transpose_index[OF _ this] mat_plus_index[OF _ _ j i] mat_plus_index[OF _ _ this]
[PROOF STATE]
proof (state)
this:
mat nr nc ?m \<Longrightarrow> Matrix_Legacy.transpose nr ?m ! i ! j = ?m ! j ! i
\<lbrakk>mat nr nc ?m1.0; mat nr nc ?m2.0\<rbrakk> \<Longrightarrow> mat_plusI ?pl ?m1.0 ?m2.0 ! j ! i = ?pl (?m1.0 ! j ! i) (?m2.0 ! j ! i)
\<lbrakk>mat nc nr ?m1.0; mat nc nr ?m2.0\<rbrakk> \<Longrightarrow> mat_plusI ?pl ?m1.0 ?m2.0 ! i ! j = ?pl (?m1.0 ! i ! j) (?m2.0 ! i ! j)
goal (3 subgoals):
1. mat ?nr ?nc (Matrix_Legacy.transpose nr (mat_plusI pl m1 m2))
2. mat ?nr ?nc (mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2))
3. \<And>i j. \<lbrakk>i < ?nc; j < ?nr\<rbrakk> \<Longrightarrow> Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
[PROOF STEP]
show "?l ! i ! j = ?r ! i ! j"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
[PROOF STEP]
using wf
[PROOF STATE]
proof (prove)
using this:
mat nr nc m1
mat nr nc m2
goal (1 subgoal):
1. Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Matrix_Legacy.transpose nr (mat_plusI pl m1 m2) ! i ! j = mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2) ! i ! j
goal (2 subgoals):
1. mat nc nr (Matrix_Legacy.transpose nr (mat_plusI pl m1 m2))
2. mat nc nr (mat_plusI pl (Matrix_Legacy.transpose nr m1) (Matrix_Legacy.transpose nr m2))
[PROOF STEP]
qed (auto intro: wf)
|
{"llama_tokens": 1558, "file": "Matrix_Matrix_Legacy", "length": 8}
|
from numba import jit
import numpy as np
from numpy import pi
import matplotlib.pyplot as plt
import time
# General Constants
MASS_E = 9.10938291e-31 # electron mass
MASS_P = 1.67262178e-27 # proton mass
STEF_BOLT = 5.670373e-8 # Stefan-Boltzmann constant
K_BOLT = 1.381e-23 # Boltzmann constant
C = 299792458 # speed of light
G = 6.673e-11 # Gravitation constant
HBAR = 1.054571817e-34
# Astronomy constants
RAD_CONST = (4*STEF_BOLT)/C # radiation constant
X = 0.73 # Hydrogen mass fraction
Y = 0.25 # Helium mass fraction
Z = 0.02 # Metals mass fraction
GAMMA = 5/3 # adiabatic constant
M_SUN = 1.989e30 # Kg
R_SUN = 6.955e8 # meters
L_SUN = 3.827e26 # watts
ION_GAS_APPROX = (2 * X + 0.75 * Y + 0.5 * Z) ** -1 # mean molecular weight of fully ionized gas: mu = (2X + 0.75Y + 0.5Z)^-1
KAPPA = 1
K = 1.38e-23
pi = np.pi
mu = (2.0 * X + 0.75 * Y + 0.5 * Z) ** (-1.0)
r0 = 0.001 # m
S = 1.0 # error tolerance
# Pressures
@jit(nopython=True)
def P(density, temp):
# Pressure degenerate
# Eqn 5 in the project_description
P_deg = (pow((3.0 * pi ** 2.0), (2/3)) * pow(HBAR, 2) * pow(density / MASS_P, 5/3)) / (5.0 * MASS_E)
# Pressure Ideal Gas
P_ig = (K * temp * density) / (mu * MASS_P)
# Pressure Radiative
P_rad = (1.0 / 3.0) * RAD_CONST * (temp ** 4.0)
return P_deg + P_ig + P_rad
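# Sanity check (illustrative numbers, not from the project description): at
# roughly solar-core conditions, rho ~ 1.6e5 kg/m^3 and T ~ 1.5e7 K, the
# ideal-gas term dominates (~3e16 Pa) over degeneracy (~5e15 Pa) and
# radiation (~1e13 Pa), so P(1.6e5, 1.5e7) should come out near 4e16 Pa.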
# Pressure differentials
@jit(nopython=True)
def dPdp(density, temp):
# See note above
# Degenerate
dPdp_deg = (pow((3.0 * pi ** 2.0), (2/3)) * pow(HBAR, 2) * ((density / MASS_P) ** (2.0 / 3.0))) / (
3.0 * MASS_P * MASS_E)
# Ideal Gas
dPdp_ig = (K * temp) / (mu * MASS_P)
return dPdp_deg + dPdp_ig
@jit(nopython=True)
def dPdT(density, temp):
# See note above
dPdT_ig = (density * K) / (mu * MASS_P)
dPdT_rad = (4.0 / 3.0) * RAD_CONST * (temp ** 3.0)
return dPdT_ig + dPdT_rad
# Energy generation
@jit(nopython=True)
def epsilon(density, temp):
# Value for epsilon, used often below
epp = (1.07e-7) * (density / 1.0e5) * (X ** 2.0) * ((temp / 1.0e6) ** 4.0)
ecno = (8.24e-26) * (density / 1.0e5) * 0.03 * (X ** 2.0) * ((temp / 1.0e6) ** 19.9)
return epp + ecno
# Opacity
@jit(nopython=True)
def Kappa(density, temp):
Kes = 0.02 * (1.0 + X)
Kff = 1.0e24 * (Z + 0.0001) * ((density / 1.0e3) ** 0.7) * temp ** (-3.5)
Khminus = 2.5e-32 * (Z / 0.02) * ((density / 1.0e3) ** 0.5) * (temp ** 9.0)
return ((1.0 / Khminus) + (1.0 / max(Kes, Kff))) ** (-1.0)
# Stellar Structure ODEs
@jit(nopython=True)
def dpdr(radius, mass, density, temp, lum):
    # First equation in the set of 5 equations in the project description file (density gradient from hydrostatic equilibrium)
return -((G * mass * density / (radius ** 2.0)) +
dPdT(density, temp) * dTdr(radius, mass, density, temp, lum)) / (dPdp(density, temp))
@jit(nopython=True)
def dTdr(radius, mass, density, temp, lum):
# second equation in the set of 5 equations in the project description file
dTdr_rad = (3.0 * Kappa(density, temp) * density * lum) / (
16.0 * pi * RAD_CONST * C * (temp ** 3.0) * (radius ** 2.0))
dTdr_conv = (1.0 - (1.0 / GAMMA)) * (temp / P(density, temp)) * (
(G * mass * density) / (radius ** 2.0))
return - min(dTdr_rad, dTdr_conv)
@jit(nopython=True)
def dMdr(radius, density):
    # Middle (third) equation in the set of 5 equations in the project description file (mass continuity)
return 4.0 * pi * (radius ** 2.0) * density
@jit(nopython=True)
def dLdr(radius, density, temp):
    # Second-last equation in the set of 5 equations in the project description file (energy generation)
return dMdr(radius, density) * epsilon(density, temp)
@jit(nopython=True)
def dtaudr(density, temp):
# Last equation in the set of 5 equations in the project description file
return Kappa(density, temp) * density
@jit(nopython=True)
def dPdr(radius, mass, density):
return -(G * mass * density / (radius ** 2.0))
# delta(tau) for optical depth limit
@jit(nopython=True)
def dtau(radius, mass, density, temp, lum):
return (Kappa(density, temp) * (density ** 2.0)) / (abs(dpdr(radius, mass, density, temp, lum)))
#### NOTE ###
# We need to add the luminosity equation from our section (5.6, eqn 19) as a function here and use it
@jit(nopython=True)
def func(dep_var, radius):
"""
This is the function that bridges the math above to the class and stuff below.
Given dep_var, a list of depenadant variables, it runs the below functions that
compile to find T, M, L, tau, and rho.
:param dep_var: list of floats - dependant variables for current sequence
:param radius: float - current radius
:return:
"""
# Just extracts the array in dep_var to individual variable names
density, temp, mass, lum, tau = dep_var
# These functions below call the math functions above and star the chain of math calcualtions.
# If we assume the class stuff below is correct (and the runge kutta method works), then that
# leaves the math above to be wrong, which is called and accessed here. Thus the functions
# dpdr, dTdr, dMdr, dLdr, dtaudr should be inspected (assuming runge kutta works)
rho = dpdr(radius, mass, density, temp, lum)
T = dTdr(radius, mass, density, temp, lum)
M = dMdr(radius, density)
L = dLdr(radius, density, temp)
tau = dtaudr(density, temp)
    # returns the derivative values used by the next Runge-Kutta stage
return np.array([rho, T, M, L, tau])
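# Illustrative single call (a sketch; the central values are assumptions):
# >>> rho_c, temp_c = 7e4, 8e6
# >>> m0 = (4.0 / 3.0) * pi * r0**3 * rho_c
# >>> l0 = m0 * epsilon(rho_c, temp_c)
# >>> func(np.array([rho_c, temp_c, m0, l0, Kappa(rho_c, temp_c) * rho_c]), r0)
# returns the five radial derivatives [drho/dr, dT/dr, dM/dr, dL/dr, dtau/dr]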
"""
Below is the class used to find the temp, rho, lum, radius, etc. for a star. This is done every time the SingleStar
class is called.
"""
class SingleStar:
def __init__(self, dr, rho_c, temp_c, plotmode):
self.dr = dr
self.Rstar = 0.0
self.r = [r0]
self.d = [rho_c]
self.t = [temp_c]
self.m = [(4.0 / 3.0) * pi * (r0 ** 3.0) * rho_c]
self.l = [self.m[0] * epsilon(rho_c, temp_c)]
self.tau = [Kappa(rho_c, temp_c) * rho_c]
# other variables
self.k = [Kappa(rho_c, temp_c)]
self.p = [P(rho_c, temp_c)]
self.dLdr_list = [0.0]
print(f"the gamma is {GAMMA}")
self.dlogPdlogT = [(temp_c / P(rho_c, temp_c)) *
(dPdr(r0, self.m[-1], rho_c) /
dTdr(r0, self.m[-1], rho_c, temp_c, self.l[-1]))]
self.drray = [dr]
self.dtauarray = []
self.CreateStar()
self.RadStar()
self.Plots(plotmode)
###########______Define all Equations______###########
def rk4(self, y, r, h):
        '''
        Classic fourth-order Runge-Kutta step: evaluates func (defined above)
        at the current dependent-variable values and radius, and advances the
        solution from r to r + h.
        y: current values of the dependent variables
        r: radius, the independent variable
        h: step size
        '''
k1 = h * func(y, r)
k2 = h * func(y + k1/2, r + h/2)
k3 = h * func(y + k2/2, r + h/2)
k4 = h * func(y + k3, r + h)
return y + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0, r + h
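    # Hedged illustration (not part of the original file): the same fourth-order
    # weighting written as a standalone step for a generic derivative f, useful
    # for sanity-checking rk4 against a known ODE.
    #     def rk4_step(f, y, r, h):
    #         k1 = h * f(y, r)
    #         k2 = h * f(y + k1 / 2, r + h / 2)
    #         k3 = h * f(y + k2 / 2, r + h / 2)
    #         k4 = h * f(y + k3, r + h)
    #         return y + (k1 + 2 * k2 + 2 * k3 + k4) / 6, r + h
    # For dy/dr = y with y(0) = 1, repeated rk4_step calls reproduce exp(r)
    # with O(h**5) local error per step.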
def OpticalDepthLimit(self, new_rk4, r_new):
"""
This just tests the optical depth to make sure that d_tau is not very small,
or that the mass hasn't blown up (1e3 Mass sun).
While those conditions have not been met, returns False
:param new_rk4: list of floats - rho, temp, mass, luminosity, tau of current radius
:param r_new: float - new radius
:return: bool
"""
        # extracts the list into its values
cur_d, cur_t, cur_m, cur_l, cur_tau = new_rk4
d_tau = dtau(r_new, cur_m, cur_d, cur_t, cur_l)
self.dtauarray.append(d_tau) # unimportant list
        return d_tau < 0.001 or self.m[-1] > 1e3 * M_SUN
def RadStar(self):
self.Rstar = np.argmin(abs(self.tau[-1] - np.array(self.tau) - (2.0 / 3.0)))
return self.Rstar
def do_stuff(self, new_rk4, r_new):
"""
        :param new_rk4: list of floats - rho, temp, mass, luminosity, tau of current radius
:param r_new: float - new radius
:return:
"""
        # new_rk4 is a small array that has the density, temp, mass, lum, and tau. So here we just append those to
        # the corresponding lists
cur_d, cur_t, cur_m, cur_l, cur_tau = new_rk4
self.d.append(cur_d)
self.t.append(cur_t)
self.m.append(cur_m)
self.l.append(cur_l)
self.tau.append(cur_tau)
self.r.append(r_new)
# These next lines append to the dLdr list, kappa list, pressure list, and dlogPdlogT lists.
self.dLdr_list.append(dLdr(r_new, cur_d, cur_t))
self.k.append(Kappa(cur_d, cur_t))
self.p.append(P(cur_d, cur_t))
self.dlogPdlogT.append((cur_t / self.p[-1]) * (
dPdr(r_new, cur_m, cur_d) / dTdr(r_new, cur_m, cur_d, cur_t, cur_l)))
def CreateStar(self):
        # these three lines run the Runge-Kutta method (self.rk4) once before entering a loop to keep running it.
rk4_var = np.array([self.d[-1], self.t[-1], self.m[-1], self.l[-1], self.tau[-1]], float)
new_rk4, r_new = self.rk4(rk4_var, self.r[-1], self.dr)
self.do_stuff(new_rk4, r_new)
        # Loop that keeps running the Runge-Kutta method until OpticalDepthLimit returns True, then stops.
while not self.OpticalDepthLimit(new_rk4, r_new):
new_rk4, r_new = self.rk4(new_rk4, self.r[-1], self.dr)
self.do_stuff(new_rk4, r_new)
# Finds the new dr (increment in radius) by checking if the temp is < 5e4
if self.t[-1] < 5e4:
self.dr = (0.00001 * r_new) + 1000
else:
self.dr = (0.001 * r_new) + 1000
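            # Hedged note (not in the original): below 5e4 K the integration is
            # near the surface, where density and opacity change fastest, so the
            # radial step above is ~100x smaller than the interior step.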
def Plots(self, plotmode):
# convert to arrays for normalizations
r = np.array(self.r)
rho = np.array(self.d)
temp = np.array(self.t)
mass = np.array(self.m)
lum = np.array(self.l)
tau = np.array(self.tau)
pressure = np.array(self.p)
kappa = np.array(self.k)
dLdr = np.array(self.dLdr_list)
star_radius = self.Rstar
if plotmode:
# # plot the data
# plt.figure(1)
# plt.grid()
x_axis = r / self.r[-1]
#
# plt.plot(x_axis, rho / self.d[0], label='rho')
# plt.plot(x_axis, temp / self.t[0], label='temp')
# plt.plot(x_axis, mass / self.m[-1], label='Mass')
# plt.plot(x_axis, lum / self.l[-1], label='Lum')
# plt.legend(loc='best', bbox_to_anchor=(0.8, 0.66), prop={'size': 11})
# # plt.title("Rho", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$\\rho/\\rho_{c}$, $T/T_{c}$, $M/M.$, $L/L.$")
# plt.savefig(f'Multi-Lined Plot.png', dpi=1000)
# plt.show()
# plt.clf()
# self.plotdata = (x_axis, rho / self.d[0], temp / self.t[0], mass / self.m[-1], lum / self.l[-1])
# print(self.d[0], "is the rho")
# print(self.t[0], "is the temp")
# print(self.t[-1], "is the surface temp")
# print(self.m[-1], "is the mass")
# print(self.l[-1], "is the lum")
# print(self.r[-1], "is the radius")
# plt.figure(2)
# plt.grid()
# plt.plot(x_axis, temp / self.t[0], label='temp')
# plt.title("Temperature", fontsize=25)
# plt.ylabel("$T/T_c$")
# plt.xlabel("r/R.")
# plt.savefig(f"Temp.png", dpi=1000)
# plt.clf()
#
# plt.figure(3)
# plt.grid()
# plt.plot(x_axis, mass / self.m[-1], label='Mass')
# plt.title("Mass", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$M/M.$")
# plt.savefig(f"Mass.png", dpi=1000)
# plt.clf()
#
# plt.figure(4)
# plt.grid()
# plt.plot(x_axis, lum / self.l[-1], label='Lum')
# plt.title("Luminosity", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$L/L.$")
# plt.savefig(f"Luminosity.png", dpi=1000)
# plt.clf()
#
# plt.figure(9)
# plt.grid()
# plt.plot(r / self.r[star_radius], tau / self.tau[-1], label='Tau')
# plt.title("Tau", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$\\tau/\\tau_{c}$")
# plt.savefig(f'Tau.png', dpi=1000)
# plt.clf()
#
# plt.figure(5)
# plt.grid()
# plt.plot(r / self.r[star_radius], dLdr / (self.l[-1] / self.r[star_radius]), label='dL/dR')
# plt.title("dL/dR", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$dL/dr$ $(L./R.)$")
# plt.savefig(f'dLdR.png', dpi=1000)
# plt.clf()
#
# plt.figure(6)
# plt.grid()
# plt.legend(loc='best', bbox_to_anchor=(0.8, 0.66), prop={'size': 11})
# plt.plot(r / self.r[star_radius], pressure / self.p[0], label='Pressure')
# plt.title("Pressure", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$P/P_c$")
# plt.savefig(f'Pressure.png', dpi=1000)
# plt.clf()
#
# plt.figure(7)
# plt.grid()
# plt.plot(r / self.r[star_radius], np.log10(kappa), label='Opacity')
# plt.title("Opacity", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("$\\log_{10}(\\kappa)$ ($cm^{2}/g$)")
# plt.savefig(f'Opacity.png', dpi=1000)
# plt.clf()
#
# plt.figure(8)
# axes = plt.gca()
# axes.set_ylim(0, 10)
# axes.set_xlim(0, 1)
# plt.grid()
# plt.plot(r / self.r[star_radius], self.dlogPdlogT, label='dlogP/dlogT')
# plt.title("dlogP/dlogT", fontsize=25)
# plt.xlabel("r/R.")
# plt.ylabel("dlogP/dlogT")
# plt.savefig(f'dlogP-dlogT.png', dpi=1000)
# plt.clf()
# if plotmode:
# # plot the data
# plt.figure(1)
# plt.grid()
# x_axis = r / self.r[-1]
# plt.plot(x_axis, rho / self.d[0], label='rho')
# plt.plot(x_axis, temp / self.t[0], label='temp')
# plt.plot(x_axis, mass / self.m[-1], label='Mass')
# plt.plot(x_axis, lum / self.l[-1], label='Lum')
# plt.legend(loc='best', bbox_to_anchor=(0.8, 0.66), prop={'size': 11})
# plt.title("Rho", fontsize=25)
# plt.savefig('Fig1.png')
# plt.show()
# #
# plt.figure(2)
# plt.grid()
# plt.plot(x_axis, temp/self.t[0], label='temp')
# plt.title("Temp", fontsize=25)
# plt.show()
#
# plt.figure(3)
# plt.grid()
# plt.plot(x_axis, mass/self.m[-1], label='Mass')
# plt.title("Mass", fontsize=25)
# plt.show()
#
# plt.figure(4)
# plt.grid()
# plt.plot(x_axis, lum/self.l[-1], label='Lum')
# plt.title("Lum", fontsize=25)
# plt.show()
#
#
# plt.figure(9)
# plt.grid()
# plt.plot(r / self.r[star_radius], tau / self.tau[-1], label='Tau')
# plt.title("Tau", fontsize=25)
# plt.savefig('Tau.png', dpi=1000)
# plt.show()
#
# plt.figure(5)
# plt.grid()
# plt.plot(r / self.r[star_radius], dLdr / (self.l[-1] / self.r[star_radius]), label='dL/dR')
# plt.title("dLdR", fontsize=25)
# plt.savefig('dLdR.png', dpi=1000)
# plt.show()
#
# plt.figure(6)
# plt.grid()
# plt.plot(r / self.r[star_radius], pressure / self.p[0], label='Pressure')
# plt.title("Pressure", fontsize=25)
# plt.savefig('Pressure.png', dpi=1000)
# plt.legend(loc='best', bbox_to_anchor=(0.8, 0.66), prop={'size': 11})
# plt.show()
#
# plt.figure(7)
# plt.grid()
# plt.plot(r / self.r[star_radius], np.log10(kappa), label='Opacity')
# plt.title("Opacity", fontsize=25)
# plt.savefig('Opacity.png', dpi=1000)
# plt.show()
#
# plt.figure(8)
# axes = plt.gca()
# # axes.set_xlim(0,11)
# axes.set_ylim(0, 10)
# plt.grid()
# plt.plot(r / self.r[star_radius], self.dlogPdlogT, label='dlogP/dlogT')
# plt.title("dlogP/dlogT", fontsize=25)
# plt.savefig('dlogP-dlogT.png', dpi=1000)
# plt.show()
#
# class FixDensity:
#
# def __init__(self, h, temp_c):
# self.h = h
# self.central_temp = temp_c
#
# self.starA = SingleStar(self.h, 3e3, temp_c, False)
# self.starB = SingleStar(self.h, 5e4, temp_c, False)
# self.starC = SingleStar(self.h, (0.3e3 + 500.0e3) / 2.0, temp_c, False)
#
# self.BestStar = self.bisection(self.starA, self.starB, self.starC, 0.02)
#
#
# def f(self, trialstar):
#
# RstarIndex = trialstar.Rstar
# Lum1 = trialstar.l[RstarIndex]
# Lum2 = (4.0 * pi * STEF_BOLT * (trialstar.r[RstarIndex] ** 2.0) * (trialstar.t[RstarIndex] ** 4.0))
#
# return (Lum1 - Lum2) / np.sqrt(Lum1 * Lum2)
#
# def bisection(self, starA, starB, starC, tol):
#
# while (starB.d[0] - starA.d[0]) > tol:
#
# if self.f(starA) * self.f(starC) < 0:
# starB = starC
#
# else:
# starA = starC
#
# starCrho = (starA.d[0] + starB.d[0]) / 2.0
#
# starC = SingleStar(self.h, starCrho, self.central_temp, False)
#
# starCrho = max(starA.d[0], starB.d[0])
# star_C = SingleStar(self.h, starCrho, self.central_temp, True)
# return star_C
#
#
#
def main():
start_class = time.time()
SingleStar(1000.0, 0.5e3, 1.5e7, 0)
# tempCs = np.linspace(10 ** 6.6, 10 ** 7.4, 16)
# star = FixDensity(1000.0, 2.11e7).BestStar
# print("star mass: ", star.m[-1])
# print("star radius: ", star.r[-1])
print(f"Star took: {time.time() - start_class} seconds")
# dr, rho_c, temp_c,
if __name__ == '__main__':
main()
|
{"hexsha": "398fa1ea70be0d5b50a64db1b5e7dc01ff2857b7", "size": 18716, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Star_Creator.py", "max_stars_repo_name": "iisharankov/Phys375-Stellar-Warming", "max_stars_repo_head_hexsha": "2cf8e81f11701ec4d04752cbdf27805ec187ace7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Star_Creator.py", "max_issues_repo_name": "iisharankov/Phys375-Stellar-Warming", "max_issues_repo_head_hexsha": "2cf8e81f11701ec4d04752cbdf27805ec187ace7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Star_Creator.py", "max_forks_repo_name": "iisharankov/Phys375-Stellar-Warming", "max_forks_repo_head_hexsha": "2cf8e81f11701ec4d04752cbdf27805ec187ace7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9179104478, "max_line_length": 115, "alphanum_fraction": 0.5338747596, "include": true, "reason": "import numpy,from numpy,from numba", "num_tokens": 5832}
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in a variable called 'path'
data = np.genfromtxt(path,delimiter=',',skip_header=1)
print(data)
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
census = np.concatenate((data,new_record),axis=0)
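# Hedged note (not in the original): new_record is wrapped in an outer list so
# it has shape (1, 8); concatenating along axis=0 appends it as one new row.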
print(census)
#Code starts here
# --------------
#Code starts here
age = census[:,0]
print(age)
max_age = max(age)
print(max_age)
min_age = min(age)
print(min_age)
age_mean = np.mean(age)
print(age_mean)
age_std = np.std(age)
print(age_std)
# --------------
#Code starts here
race_0 = census[census[:,2]==0]
print(race_0)
race_1 = census[census[:,2]==1]
print(race_1)
race_2 = census[census[:,2]==2]
print(race_2)
race_3 = census[census[:,2]==3]
print(race_3)
race_4 = census[census[:,2]==4]
print(race_4)
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
print('Race_0: ', len_0)
print('Race_1: ', len_1)
print('Race_2: ', len_2)
print('Race_3: ', len_3)
print('Race_4: ', len_4)
race_list=[len_0, len_1,len_2, len_3, len_4]
minority_race=race_list.index(min(race_list))
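# Hedged alternative (not in the original): the same per-race counts in one
# call, assuming column 2 holds integer race codes 0-4
race_counts = np.bincount(census[:, 2].astype(int), minlength=5)
minority_race_alt = int(np.argmin(race_counts))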
# --------------
#Code starts here
senior_citizens = census[census[:,0]>60]
working_hours_sum=senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high = census[census[:,1]>10]
print(high)
low = census[census[:,1]<=10]
print(low)
avg_pay_high=high[:,7].mean()
print(avg_pay_high)
avg_pay_low=low[:,7].mean()
print(avg_pay_low)
|
{"hexsha": "17aea76707478ac2e9cdc5a5aa26c7d15fe1b658", "size": 1682, "ext": "py", "lang": "Python", "max_stars_repo_path": "Numpy/code.py", "max_stars_repo_name": "niketkaria/ga-learner-dsmp-repo", "max_stars_repo_head_hexsha": "e46e9f902907178712c6756dbb1435daf43d65d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Numpy/code.py", "max_issues_repo_name": "niketkaria/ga-learner-dsmp-repo", "max_issues_repo_head_hexsha": "e46e9f902907178712c6756dbb1435daf43d65d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Numpy/code.py", "max_forks_repo_name": "niketkaria/ga-learner-dsmp-repo", "max_forks_repo_head_hexsha": "e46e9f902907178712c6756dbb1435daf43d65d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.6888888889, "max_line_length": 61, "alphanum_fraction": 0.6492271106, "include": true, "reason": "import numpy", "num_tokens": 543}
|
#import pygame
from PIL import Image
import numpy as np
from raycast import Raycaster
from skimage.draw import line_aa
#from pygame.constants import K_w, K_a, K_d, K_s, K_SPACE
import sys
import pdb
OBJECT_MAP = {
'open': 0,
'wall': 1,
'food': 2,
'poison': 3,
'agent': 4,
'floor': 5,
'sky': 6
}
COLOR_MAP = {
OBJECT_MAP['wall']: (93., 192., 221.), # pretty blue color
OBJECT_MAP['food']: (255., 0., 0.),
OBJECT_MAP['poison']: (0., 255., 0.),
OBJECT_MAP['floor']: (255., 255., 255.),
OBJECT_MAP['sky']: (0., 0., 0.),
OBJECT_MAP['agent']: (0., 0., 255.),
OBJECT_MAP['open']: None
}
REWARD_MAP = {
OBJECT_MAP['wall']: -0.1,
OBJECT_MAP['food']: 1.0,
OBJECT_MAP['poison']: -1.0
}
ITEM_AGE_LIMIT = 1000
np.set_printoptions(4, suppress=True)
COLOR_KEY = (0, 0, 0)
TEX_WIDTH = 32
TEX_HEIGHT = 32
CONVERT = (np.array([0.2989, 0.5870, 0.1140])).astype(np.float32)
class Agent(object):
def __init__(self,
pos,
dir,
plane,
max_move=.25,
max_rot=(np.pi / 16),
max_acc=0.3):
self.pos = np.copy(pos)
self.dir = np.copy(dir)
self.plane = np.copy(plane)
self.type = OBJECT_MAP['agent']
self.max_move = max_move
self.max_rot = max_rot
self.max_acc = max_acc
self.vel = np.array([0., 0.])
self.action_set = {
0: (max_move, 0.),
1: (0., -max_rot),
2: (0., max_rot),
3: (-max_move, 0.),
4: (0., 0.)
}
self.key_map = {
'forward': 0,
'left': 1,
'right': 2,
'backward': 3,
'NOOP': 4
}
#self.event_map = {K_w: 0, K_a: 1, K_d: 2, K_s: 3, K_SPACE: 4}
class World(object):
def __init__(self,
map_size,
width=48,
height=48,
resolution=1,
reslidar=180,
rand_walls=False,
num_items=10,
move_items=False,
color=False,
human_view=True):
self.map_size = map_size
self.width = width
self.height = height
self.resolution = resolution
self.reslidar = reslidar
self.run_lidar = False
if reslidar > 0:
self.run_lidar = True
self.random_walls = rand_walls
self.num_items = num_items
self.move_items = move_items
self.color = color
self.human_view = human_view
self.init_pos = np.array([1., 1.])
self.init_dir = np.array([1., 0.])
self.init_angle = 0.
self.init_plane = np.array([0.0, 0.66])
self.agent = Agent(self.init_pos, self.init_dir, self.init_plane)
self.imnum = 0
rw = self.width
if self.human_view:
rw = self.width / 2
self.raycaster = Raycaster(rw, self.height, self.resolution, reslidar,
COLOR_MAP, OBJECT_MAP)
self.lidar = np.zeros(reslidar * 2)
self.do_step = True
self.sprites = None
self.minisprites = None
cscale = .75
        bgcolor = np.array(
            [93. * cscale, 192. * cscale, 221. * cscale],
            dtype=np.float32) / 255.  # assumed typo fix: '.255' -> '255.' to normalize to [0, 1]
self.load_images()
if (self.color):
self.gscreen = np.zeros(
(self.width, self.height, 3), dtype=np.float32)
self.background = np.zeros_like(self.gscreen)
#self.background[:, :, 0] = bgcolor[0] # just solid color
#self.background[:, :, 1] = bgcolor[1]
#self.background[:, :, 2] = bgcolor[2]
if self.human_view:
self.background[:int(self.width / 2), :, :] = np.copy(self.bg1[
0])
self.background[int(self.width / 2):, :, :] = np.copy(self.bg2[
0])
else:
self.background[:] = np.copy(self.bg1[0])
else:
self.gscreen = np.zeros(
(self.width, self.height), dtype=np.float32)
self.background = np.zeros_like(self.gscreen)
#self.background[:] = np.dot(bgcolor, CONVERT) #solid color
if self.human_view:
self.background[:int(self.width / 2), :] = np.copy(self.bg1[2])
self.background[int(self.width / 2):, :] = np.copy(self.bg2[2])
else:
self.background[:] = np.copy(self.bg1[2])
self.wbackground = np.copy(self.background)
def load_images(self):
files = ["graphics/sprites/dolphin.png", "graphics/sprites/sub.png"]
if self.sprites is None:
self.sprites = {
OBJECT_MAP['poison']: loadimage(
files[0],
TEX_WIDTH,
TEX_HEIGHT,
color=self.color,
color_key=(0, 0, 0)),
OBJECT_MAP['food']: loadimage(
files[1],
TEX_WIDTH,
TEX_HEIGHT,
color=self.color,
color_key=(95, 183, 255))
}
        mdim = int(self.width / 2 / self.map_size[0])  # cast so PIL resize gets integer dimensions
self.minisprites = {
OBJECT_MAP['poison']: loadimage(
files[0],
mdim,
mdim,
color=self.color,
color_key=(0, 0, 0)),
OBJECT_MAP['food']: loadimage(
files[1],
mdim,
mdim,
color=self.color,
color_key=(95, 183, 255)),
OBJECT_MAP['agent']: agent_sprite((0, 0, 0), self.color,
mdim / 2)
}
self.bg1 = loadbackground(self, 'graphics/backgrounds/ocean-floor.png')
self.bg2 = loadbackground(self, 'graphics/backgrounds/ocean-floor-2.jpg')
def make_bgwall(self):
self.wbackground = np.copy(self.background)
        xpos = self.width // 2
        block_dim = int(xpos / self.map_size[0])  # integer dims so the slices below work under Python 3
        block = np.zeros((block_dim, block_dim, 3), np.float32)
        block[:, :, 0] = 93. / 255. * .75
        block[:, :, 1] = 192. / 255. * .75
        block[:, :, 2] = 211. / 255. * .75
if not self.color:
block = block.dot(CONVERT)
for row in range(self.world_map.shape[0]):
xs = xpos + (row * block_dim)
xe = xs + block_dim
for col in range(self.world_map.shape[1]):
ys = col * block_dim
ye = ys + block_dim
if self.world_map[row, col] == OBJECT_MAP['wall']:
if self.color:
self.wbackground[xs:xe, ys:ye, :] = np.copy(block)
else:
self.wbackground[xs:xe, ys:ye] = np.copy(block)
def gen_basic_walls(self):
self.world_map[5, 5:11] = OBJECT_MAP['wall']
self.world_map[20, 5:11] = OBJECT_MAP['wall']
self.world_map[5:21, 11] = OBJECT_MAP['wall']
self.world_map[5, 14:20] = OBJECT_MAP['wall']
self.world_map[20, 14:20] = OBJECT_MAP['wall']
self.world_map[5:21, 20] = OBJECT_MAP['wall']
def gen_random_walls(self):
# generates 4-9 random L-shaped walls
numPillars = np.random.randint(4, 10)
# pillarPos = np.random.randint(25, size=(2,numPillars))
map_x = self.map_size[0]
map_y = self.map_size[1]
pillarPosx = np.random.randint(map_x, size=numPillars)
pillarPosy = np.random.randint(map_y, size=numPillars)
pillarPos = [pillarPosx, pillarPosy]
for i in range(numPillars):
x = pillarPos[0][i]
y = pillarPos[1][i]
xlen = np.random.randint(10)
xdir = np.random.randint(2)
if xdir == 0:
self.world_map[x:x + xlen, y] = OBJECT_MAP['wall']
else:
self.world_map[x - xlen:x, y] = OBJECT_MAP['wall']
ylen = np.random.randint(10)
ydir = np.random.randint(2)
if ydir == 0:
self.world_map[x, y:y + ylen] = OBJECT_MAP['wall']
else:
self.world_map[x, y - ylen:y] = OBJECT_MAP['wall']
def take_action(self, action):
act = self.agent.action_set[action]
self.step(act)
return self.reward
def act(self, action):
self.items[:, 3] += 1
agent = self.agent
oldpos = agent.pos
newpos, newdir, newplane, newvel = self.move(agent, action)
reward = 0.
if self.validmove(oldpos, newpos):
map_pos = newpos.astype(int)
atXY = self.world_map[map_pos[0], map_pos[1]]
grab = self.items[:, 4] < 0.25
self.items[grab, 3] = ITEM_AGE_LIMIT
for r in self.items[grab, 2]:
reward += REWARD_MAP[r]
self.agent.pos = np.copy(newpos)
self.agent.dir = np.copy(newdir)
self.agent.plane = np.copy(newplane)
self.agent.vel = np.copy(newvel)
oldmap_pos = oldpos.astype(int)
self.world_map[oldmap_pos[0], oldmap_pos[1]] = OBJECT_MAP['open']
else:
reward = -1.
self.reward = reward
def cleanup_items(self, agent_pos):
old = self.items[:, 3] >= ITEM_AGE_LIMIT
old = np.logical_and(old, np.random.rand(old.shape[0]) < 0.1)
loc = self.items[old, :2].astype(int)
self.world_map[(loc[:, 0], loc[:, 1])] = OBJECT_MAP['open']
num = np.sum(old)
if num > 0:
open_spots = np.where(self.world_map == OBJECT_MAP['open'])
sel = np.random.choice(
range(open_spots[0].shape[0]), size=num, replace=False)
self.items[old, 3] = 0
x, y = open_spots[0][sel], open_spots[1][sel]
self.items[old, 0] = x + 0.5
self.items[old, 1] = y + 0.5
self.world_map[x, y] = self.items[old, 2].astype(int)
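    # Hedged note (not in the original): each expired item has a 10% chance per
    # step of being respawned at a random open cell, so the total item count
    # stays constant over an episode.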
def getGameState(self):
return self.lidar
def getScore(self):
return self.score
def game_over(self):
return False
def init(self):
# define map and put up border wall
world_map = np.zeros(self.map_size, dtype=np.int32)
world_map[:, 0] = OBJECT_MAP['wall']
world_map[:, -1] = OBJECT_MAP['wall']
world_map[0, :] = OBJECT_MAP['wall']
world_map[-1, :] = OBJECT_MAP['wall']
self.world_map = world_map
if self.random_walls:
self.gen_random_walls()
else:
self.gen_basic_walls()
if self.human_view:
self.make_bgwall()
open_spots = np.where(self.world_map == OBJECT_MAP['open'])
sel = np.random.choice(
range(open_spots[0].shape[0]), size=self.num_items, replace=False)
poison = np.random.rand(self.num_items) < 0.5
itype = np.ones(self.num_items, dtype=np.int32) * OBJECT_MAP['food']
itype[poison] = OBJECT_MAP['poison']
# directions each item will move
# 0: +x
# 1: -x
# 2: +y
# 3: -y
idirs = np.random.choice(4, self.num_items)
#items has shape num_itemsx6 with cols (x, y, type, age, distance, direction)
items = np.zeros((self.num_items, 6))
items[:, 0] = open_spots[0][sel] + 0.5
items[:, 1] = open_spots[1][sel] + 0.5
items[:, 2] = itype
items[:, 5] = idirs
self.world_map[items[:, 0].astype(int), items[:, 1].astype(
int)] = items[:, 2].astype(int)
self.items = items
self.score = 0.
self.reward = 0.
self.raycaster.map_ = self.world_map
def reset(self):
self.init()
open_spots = np.where(self.world_map == OBJECT_MAP['open'])
i = np.random.randint(0, open_spots[0].shape[0])
init_pos = np.array([open_spots[0][i], open_spots[1][i]])
self.agent.pos = init_pos
self.agent.dir = self.init_dir
self.agent.plane = self.init_plane
def step(self, action):
self.gscreen[:] = np.copy(self.wbackground)
self.act(action)
self.cleanup_items(self.agent.pos)
self.distitems()
self.score += self.reward
# get variables for drawing walls (first person view)
c, t, b, col, dist = self.raycaster.draw(self.agent, self.world_map)
if self.run_lidar:
self.lidar = self.raycaster.lidar(self.agent, self.world_map)
self.lidar = 1. - (np.clip(self.lidar, 0., 30.) / 30.)
if not self.color:
gray = np.dot(col[..., :], CONVERT) / 255.
else:
col = col.astype(np.float32) / float(255.)
for i in range(len(c)):
vert = np.arange(t[i], b[i], 1).astype(int)
if self.color:
self.gscreen[c[i], vert, :] = col[c[i]]
else:
self.gscreen[c[i], vert] = gray[c[i]]
if self.human_view:
self.draw_topdown()
# try moving items
if self.move_items:
self.moveitems()
not_old = self.items[self.items[:, 3] < ITEM_AGE_LIMIT, :]
drawX, drawY, trans, sdim, ssx = \
self.raycaster.draw_sprites(self.agent, not_old[:, :2])
xwidth = self.width
if self.human_view:
xwidth = self.width / 2
for i in range(not_old.shape[0]):
if (trans[i, 1] > 0.01):
stype = int(not_old[i, 2])
x = np.arange(drawX[i, 0], drawX[i, 1], 1).astype(int)
x = x[x > 0]
x = x[x < xwidth]
x = x[(trans[i, 1] < dist[x]).reshape(-1)]
if x.shape[0] > 0:
txX = 256 * (x - (-sdim[i, 0] / 2 + ssx[i]))
texX = (txX * TEX_WIDTH / sdim[i, 0]) / 256
y = np.arange(drawY[i, 0], drawY[i, 1], 1).astype(int)
d = y * 256 - self.height * 128 + sdim[i, 1] * 128
texY = ((d * TEX_HEIGHT) / sdim[i, 1]) / 256
gsprite = self.sprites[stype][1]
drw = self.sprites[stype][0]
if self.color:
for j in range(x.shape[0]):
sel = drw[texX[j], texY, 0]
dy = y[sel]
ty = texY[sel]
self.gscreen[x[j], dy, 0] = gsprite[texX[j], ty, 0]
self.gscreen[x[j], dy, 1] = gsprite[texX[j], ty, 1]
self.gscreen[x[j], dy, 2] = gsprite[texX[j], ty, 2]
else:
for j in range(x.shape[0]):
sel = drw[texX[j], texY]
dy = y[sel]
ty = texY[sel]
self.gscreen[x[j], dy] = gsprite[texX[j], ty]
def validmove(self, pos, newpos):
if (newpos[0] > 0 and \
newpos[1] > 0) and \
(newpos[0] < (self.world_map.shape[0]-1) and \
newpos[1] < (self.world_map.shape[1]-1)):
at = self.world_map[int(newpos[0]), int(newpos[1])]
if at == OBJECT_MAP['wall']:
#print("invallid move at wall")
return False
else:
#print("valid move")
return True
else:
#print("invalid move, out of zone")
return False
def move(self, agent, action):
pos = np.copy(agent.pos)
dir = np.copy(agent.dir)
plane = np.copy(agent.plane)
try:
xturn = np.cos(action[1])
yturn = np.sin(action[1])
except TypeError:
pdb.set_trace()
dirx = dir[0] * xturn - dir[1] * yturn
diry = dir[0] * yturn + dir[1] * xturn
planex = plane[0] * xturn - plane[1] * yturn
planey = plane[0] * yturn + plane[1] * xturn
dir[0] = dirx
dir[1] = diry
plane[0] = planex
plane[1] = planey
vel = dir * action[0]
nvel = np.copy(vel)
step = 0.95
count = 0
while not self.validmove(agent.pos, pos + nvel):
if count > 500:
self.reset()
#print("got stuck in loop reseting")
return self.agent.pos, self.agent.dir, self.agent.plane, np.array(
[0., 0])
nvel = vel * step
step -= 0.05
count += 1
npos = pos + nvel
mapp = npos.astype(int)
if self.world_map[mapp[0] + 1, mapp[1]] == OBJECT_MAP['wall']:
nvel[0] = 0.
elif self.world_map[mapp[0], mapp[1] + 1] == OBJECT_MAP['wall']:
nvel[1] = 0.
elif self.world_map[mapp[0] + 1, mapp[1] + 1] == OBJECT_MAP['wall']:
nvel = np.array([0., 0.])
else:
pass
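        # Hedged note (not in the original): the checks above give a simple wall
        # slide -- if the adjacent cell in x (or y) is a wall, that velocity
        # component is zeroed so the agent glides along the surface instead of
        # stopping dead.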
return npos, dir, plane, nvel
def distitems(self):
pos = self.agent.pos
ipx = self.items[:, 0]
ipy = self.items[:, 1]
old = self.items[:, 3] >= ITEM_AGE_LIMIT
self.items[:, 4] = (ipx - pos[0])**2 + (ipy - pos[1])**2
self.items[old, 4] = 1e5
self.items = self.items[self.items[:, 4].argsort()[::-1], :]
def moveitems(self):
if self.imnum % 3 != 0:
self.imnum += 1
return
self.imnum += 1
direction_map_x = {0: 1, 1: -1, 2: 0, 3: 0}
direction_map_y = {0: 0, 1: 0, 2: 1, 3: -1}
for item_it in self.items:
itdir_tmp = item_it[5]
ittyp_tmp = item_it[2]
oldpos = [item_it[0], item_it[1]]
newposX = item_it[0] + direction_map_x[itdir_tmp]
newposY = item_it[1] + direction_map_y[itdir_tmp]
newpos = [newposX, newposY]
            if not self.valid_itemmove(oldpos, newpos):
                item_it[5] = np.random.randint(4)
            else:
                # positions carry a 0.5 offset, so cast to int for map indexing
                self.world_map[int(oldpos[0]), int(oldpos[1])] = OBJECT_MAP['open']
                self.world_map[int(newpos[0]), int(newpos[1])] = ittyp_tmp
                item_it[0] = newpos[0]
                item_it[1] = newpos[1]
def draw_topdown(self):
# start drawing the top-down view
        xpos = self.width // 2
        ypos = 0
        block_dim = int(xpos / self.map_size[0])
for row in self.world_map:
for col in row:
if col == OBJECT_MAP['food'] or col == OBJECT_MAP['poison']:
self.draw_mini(xpos, ypos, col)
ypos = ypos + block_dim
xpos = xpos + block_dim
ypos = 0
xpos = self.agent.pos[0] * block_dim + self.width / 2.
ypos = self.agent.pos[1] * block_dim
if self.run_lidar:
self.draw_lidar()
self.draw_mini(xpos, ypos, OBJECT_MAP['agent'])
def draw_lidar(self):
        xpos = self.width // 2
        block_dim = int(xpos / self.map_size[0])
        dir = self.agent.dir.reshape(1, 2) / 10.
angle = np.arctan2(dir[0, 1], dir[0, 0])
rangles = np.linspace(angle - np.pi, angle + np.pi, num=self.reslidar)
rdirs = np.array([np.cos(rangles), np.sin(rangles)], dtype=np.float32).T
lidar = ((1. - self.lidar) * 30.).astype(int) * block_dim
xpos = self.agent.pos[0] * block_dim
ypos = self.agent.pos[1] * block_dim
wd, ht = self.minisprites[OBJECT_MAP['agent']][1].shape[:2]
xpos += wd / 2 - 1
ypos += ht / 2 - 1
        pt = ((rdirs * lidar[:, np.newaxis]) + [xpos, ypos]).astype(int)
for i in range(pt.shape[0]):
rr, cc, val = line_aa(int(xpos), int(ypos), pt[i, 0], pt[i, 1])
rr = np.clip(rr, 0, 511)
cc = np.clip(cc, 0, 511)
row = rr + int(self.width / 2)
self.gscreen[row, cc, 0] = val
self.gscreen[row, cc, 1] = val
self.gscreen[row, cc, 2] = val
    def draw_mini(self, x, y, obj):
        x, y = int(x), int(y)  # cast so the slice bounds below are integers
        sprite = self.minisprites[obj]
        drw = sprite[0]
        img = sprite[1]
(wd, ht) = drw.shape[:2]
if self.color:
self.gscreen[x:x + wd, y:y + ht, :][drw] = img[drw]
else:
self.gscreen[x:x + wd, y:y + ht][drw] = img[drw]
def valid_itemmove(self, oldpos, newpos):
if (newpos[0] > 0 and \
newpos[1] > 0) and \
(newpos[0] < (self.world_map.shape[0]-1) and \
newpos[1] < (self.world_map.shape[1]-1)):
at = self.world_map[int(newpos[0]), int(newpos[1])]
if at == OBJECT_MAP['wall']:
#print("invallid move at wall")
return False
elif at == OBJECT_MAP['food'] or \
at == OBJECT_MAP['poison']:
return False
# invalid to move into another actor
else:
#print("valid move")
return True
else:
#print("invalid move, out of zone")
return False
def getDrawableC(surf, ck):
sarr = np.array(surf)
drw = np.ones((sarr.shape[0], sarr.shape[1], 3), dtype=bool)
if ck is None:
return drw
for i in range(sarr.shape[0]):
for j in range(sarr.shape[1]):
if (sarr[i, j, 0] == ck[0] and sarr[i, j, 1] == ck[1] and
sarr[i, j, 2] == ck[2]):
drw[i, j, :] = False
return drw
def getDrawable(surf, ck):
sarr = np.array(surf)
drw = np.ones((sarr.shape[0], sarr.shape[1]), dtype=bool)
if ck is None:
return drw
for i in range(sarr.shape[0]):
for j in range(sarr.shape[1]):
if (sarr[i, j, 0] == ck[0] and sarr[i, j, 1] == ck[1] and
sarr[i, j, 2] == ck[2]):
drw[i, j] = False
return drw
def getGraySurf(surf):
sarr = np.array(surf, np.float32)
gray = np.dot(sarr[..., :3], CONVERT).astype(np.float32) / 255.
return gray
def agent_sprite(col, color, radius):
    radius = int(radius)  # cast: callers may pass a float half-dimension
    drw = np.zeros((radius * 2, radius * 2), dtype=bool)
for i in range(radius * 2):
for j in range(radius * 2):
d = np.sqrt((i - radius)**2 + (j - radius)**2)
if d <= radius:
drw[i, j] = True
img = np.zeros((radius * 2, radius * 2), dtype=np.float32)
img[drw] = np.dot(col, CONVERT)
if color:
img = np.zeros((radius * 2, radius * 2, 3), dtype=np.float32)
        dr2 = np.zeros((radius * 2, radius * 2, 3), dtype=bool)
for i in range(3):
dr2[:, :, i] = np.copy(drw)
img[:, :, i][drw] = col[i]
drw = np.copy(dr2)
return [drw, img]
def loadimage(file, width, height, color=True, color_key=None):
image = Image.open(file)
scaled = image.resize((width, height))
if color:
drw = getDrawableC(scaled, color_key)
drw = np.transpose(drw, axes=[1, 0, 2])
img = np.array(scaled, np.float32) / 255.
img = np.transpose(img, axes=[1, 0, 2])[:, :, :3]
else:
drw = getDrawable(scaled, color_key).T
img = getGraySurf(scaled).T
return [drw, img]
def loadbackground(world, file, color_key=None):
bg = Image.open(file)
if world.human_view:
bdim = int(world.width / 2)
bg = bg.resize((bdim, world.height))
else:
bg = bg.resize((world.width, world.height))
drw = getDrawable(bg, color_key).T
gray = getGraySurf(bg).T
cbg = np.array(bg, dtype=np.float32) / 255.
cbg = np.transpose(cbg, axes=[1, 0, 2])[:, :, :3]
return [cbg, drw, gray]
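# Hedged usage sketch (not part of the original file); the names match the
# definitions above, but Raycaster and the graphics assets must be available:
#     world = World(map_size=(25, 25), width=96, height=48,
#                   color=True, human_view=False)
#     world.init()
#     world.take_action(world.agent.key_map['NOOP'])   # one NOOP step
#     frame = world.gscreen    # rendered frame, float values in [0, 1]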
|
{"hexsha": "9526e756d0e2947eacfba88f9fca61081d035034", "size": 23766, "ext": "py", "lang": "Python", "max_stars_repo_path": "world.py", "max_stars_repo_name": "ScottJordan/waterworld", "max_stars_repo_head_hexsha": "5ad2e670939880944fc0a209c5eef153705afce6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-02T09:18:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-02T09:18:40.000Z", "max_issues_repo_path": "world.py", "max_issues_repo_name": "ScottJordan/waterworld", "max_issues_repo_head_hexsha": "5ad2e670939880944fc0a209c5eef153705afce6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "world.py", "max_forks_repo_name": "ScottJordan/waterworld", "max_forks_repo_head_hexsha": "5ad2e670939880944fc0a209c5eef153705afce6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3135215453, "max_line_length": 85, "alphanum_fraction": 0.4962551544, "include": true, "reason": "import numpy", "num_tokens": 6636}
|
import numpy as np
import scipy.stats as st
def theta_init(K):
return(np.ones(K)/K)
def beta_ber_init(x,K):
n,_ = x.shape
return(np.ones((K,n))*0.5)
def beta_mean_sampleinit(x,K):
n,_ = np.shape(x)
i_sample = np.random.choice(list(range(n)),size=K, replace=False)
x_sample = x[i_sample,:]
return(x_sample)
def beta_sigma2_init(x,K):
x_var = np.var(x)
return(np.ones(K)*x_var/(K**2)*(st.uniform.rvs(0.5,1,size=K)))
def z_init(n,K):
pre_z = st.multinomial.rvs(n=1,p=np.ones(K)/K,size=n)
    z_sample = np.argmax(pre_z, axis=1)  # index of the sampled component in each one-hot row
return(z_sample)
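# Hedged usage sketch (not part of the original file): drawing initial values
# for a K-component mixture model on synthetic data.
if __name__ == '__main__':
    x = np.random.randn(100, 2)
    K = 3
    theta = theta_init(K)                # uniform mixing weights, shape (K,)
    mu = beta_mean_sampleinit(x, K)      # K distinct rows sampled from x
    sigma2 = beta_sigma2_init(x, K)      # jittered per-component variances
    z = z_init(x.shape[0], K)            # one random component label per row
    print(theta, mu.shape, sigma2, z[:10])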
|
{"hexsha": "b4fbdc59a62478617b0019ab4015009495527073", "size": 622, "ext": "py", "lang": "Python", "max_stars_repo_path": "HW1/gibbs_sampling/initialization.py", "max_stars_repo_name": "DonghanHe/6701-Foundations-of-Graphical-Models", "max_stars_repo_head_hexsha": "3e9dfd7f85e4bbe4848dc1f4159e193e8193fe4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-08T03:50:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-08T03:50:19.000Z", "max_issues_repo_path": "HW1/gibbs_sampling/initialization.py", "max_issues_repo_name": "DonghanHe/6701-Foundations-of-Graphical-Models", "max_issues_repo_head_hexsha": "3e9dfd7f85e4bbe4848dc1f4159e193e8193fe4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HW1/gibbs_sampling/initialization.py", "max_forks_repo_name": "DonghanHe/6701-Foundations-of-Graphical-Models", "max_forks_repo_head_hexsha": "3e9dfd7f85e4bbe4848dc1f4159e193e8193fe4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1, "max_line_length": 75, "alphanum_fraction": 0.6446945338, "include": true, "reason": "import numpy,import scipy", "num_tokens": 197}
|
header {* Optimizations for Code Float *}
theory Optimize_Float
imports
"../ODE_Auxiliarities"
Optimize_Integer
begin
lemma compute_bitlen[code]: "bitlen a = (if a > 0 then log2 a + 1 else 0)"
by (simp add: bitlen_def log2_def)
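(* Hedged worked example (not in the original): with log2 the integer floor
   logarithm from Optimize_Integer, a = 5 gives log2 5 = 2 since 4 <= 5 < 8,
   so bitlen 5 = 3, matching the three binary digits of 5 = 0b101. *)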
lemma compute_real_of_float[code]:
"real_of_float (Float m e) = (if e \<ge> 0 then m * 2 ^ nat e else m / power_int 2 (-e))"
unfolding power_int_def[symmetric, of 2 e]
by (simp add: Float.compute_real_of_float power_int_def)
lemma compute_float_down[code]:
"float_down p (Float m e) =
(if p + e < 0 then Float (m div power_int 2 (-(p + e))) (-p) else Float m e)"
by (simp add: Float.compute_float_down power_int_def)
lemma compute_float_up[code]:
"float_up p (Float m e) =
(let P = power_int 2 (-(p + e)); r = m mod P in
if p + e < 0 then Float (m div P + (if r = 0 then 0 else 1)) (-p) else Float m e)"
by (simp add: Float.compute_float_up power_int_def)
lemma compute_lapprox_posrat[code]:
fixes prec::nat and x y::nat
shows "lapprox_posrat prec x y =
(let
l = rat_precision prec x y;
d = if 0 \<le> l then int x * power_int 2 l div y else int x div power_int 2 (- l) div y
in normfloat (Float d (- l)))"
by (auto simp add: Float.compute_lapprox_posrat power_int_def Let_def zdiv_int int_power int_mult)
lemma compute_rapprox_posrat[code]:
fixes prec x y
defines "l \<equiv> rat_precision prec x y"
shows "rapprox_posrat prec x y = (let
l = l ;
X = if 0 \<le> l then (int x * power_int 2 l, int y) else (int x, int y * power_int 2 (-l)) ;
d = fst X div snd X ;
m = fst X mod snd X
in normfloat (Float (d + (if m = 0 \<or> y = 0 then 0 else 1)) (- l)))"
by (auto simp add: l_def Float.compute_rapprox_posrat power_int_def Let_def zdiv_int int_power int_mult)
lemma compute_float_truncate_down[code]:
"float_round_down prec (Float m e) = (let d = bitlen (abs m) - int prec in
if 0 < d then let P = power_int 2 d ; n = m div P in Float n (e + d)
else Float m e)"
by (simp add: Float.compute_float_round_down power_int_def cong: if_cong)
lemma compute_float_truncate_up[code]:
"float_round_up prec (Float m e) = (let d = (bitlen (abs m) - int prec) in
if 0 < d then let P = power_int 2 d ; n = m div P ; r = m mod P
in Float (n + (if r = 0 then 0 else 1)) (e + d)
else Float m e)"
by (simp add: Float.compute_float_round_up power_int_def cong: if_cong)
lemma compute_int_floor_fl[code]:
"int_floor_fl (Float m e) = (if 0 \<le> e then m * power_int 2 e else m div (power_int 2 (-e)))"
by (simp add: Float.compute_int_floor_fl power_int_def)
lemma compute_floor_fl[code]:
"floor_fl (Float m e) = (if 0 \<le> e then Float m e else Float (m div (power_int 2 ((-e)))) 0)"
by (simp add: Float.compute_floor_fl power_int_def)
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Ordinary_Differential_Equations/Numerics/Optimize_Float.thy"}
|
//---------------------------------------------------------------------------//
//!
//! \file tstUniformDistribution.cpp
//! \author Alex Robinson
//! \brief Uniform distribution unit tests.
//!
//---------------------------------------------------------------------------//
// Std Lib Includes
#include <iostream>
// Boost Includes
#include <boost/units/systems/si.hpp>
#include <boost/units/systems/cgs.hpp>
#include <boost/units/io.hpp>
// Trilinos Includes
#include <Teuchos_UnitTestHarness.hpp>
#include <Teuchos_RCP.hpp>
#include <Teuchos_Array.hpp>
#include <Teuchos_ParameterList.hpp>
#include <Teuchos_XMLParameterListCoreHelpers.hpp>
#include <Teuchos_VerboseObject.hpp>
// FRENSIE Includes
#include "Utility_UnitTestHarnessExtensions.hpp"
#include "Utility_OneDDistribution.hpp"
#include "Utility_UniformDistribution.hpp"
#include "Utility_RandomNumberGenerator.hpp"
#include "Utility_PhysicalConstants.hpp"
#include "Utility_UnitTraits.hpp"
#include "Utility_QuantityTraits.hpp"
#include "Utility_ElectronVoltUnit.hpp"
using boost::units::quantity;
using namespace Utility::Units;
namespace si = boost::units::si;
namespace cgs = boost::units::cgs;
//---------------------------------------------------------------------------//
// Testing Variables
//---------------------------------------------------------------------------//
Teuchos::RCP<Teuchos::ParameterList> test_dists_list;
Teuchos::RCP<Utility::TabularOneDDistribution> tab_distribution(
new Utility::UniformDistribution( -1.0, 1.0, 2.0 ) );
Teuchos::RCP<Utility::OneDDistribution> distribution = tab_distribution;
Teuchos::RCP<Utility::UnitAwareTabularOneDDistribution<si::energy,si::amount> >
unit_aware_tab_distribution;
Teuchos::RCP<Utility::UnitAwareOneDDistribution<si::energy,si::amount> >
unit_aware_distribution;
//---------------------------------------------------------------------------//
// Tests.
//---------------------------------------------------------------------------//
// Check that the distribution can be evaluated
TEUCHOS_UNIT_TEST( UniformDistribution, evaluate )
{
TEST_EQUALITY_CONST( distribution->evaluate( -2.0 ), 0.0 );
TEST_EQUALITY_CONST( distribution->evaluate( -1.0 ), 2.0 );
TEST_EQUALITY_CONST( distribution->evaluate( 0.0 ), 2.0 );
TEST_EQUALITY_CONST( distribution->evaluate( 1.0 ), 2.0 );
  TEST_EQUALITY_CONST( distribution->evaluate( 2.0 ), 0.0 );
}
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be evaluated
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, evaluate )
{
TEST_EQUALITY_CONST( unit_aware_distribution->evaluate( -1.0*si::joule ),
0.0*si::mole );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluate( 0.0*si::joule ),
1.0*si::mole );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluate( 0.5*si::joule ),
1.0*si::mole );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluate( 1.0*si::joule ),
1.0*si::mole );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluate( 2.0*si::joule ),
0.0*si::mole );
}
//---------------------------------------------------------------------------//
// Check that the PDF can be evaluated
TEUCHOS_UNIT_TEST( UniformDistribution, evaluatePDF )
{
TEST_EQUALITY_CONST( distribution->evaluatePDF( -2.0 ), 0.0 );
TEST_EQUALITY_CONST( distribution->evaluatePDF( -1.0 ), 0.5 );
TEST_EQUALITY_CONST( distribution->evaluatePDF( 0.0 ), 0.5 );
TEST_EQUALITY_CONST( distribution->evaluatePDF( 1.0 ), 0.5 );
TEST_EQUALITY_CONST( distribution->evaluatePDF( 2.0 ), 0.0 );
}
//---------------------------------------------------------------------------//
// Check that the unit aware PDF can be evaluated
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, evaluatePDF )
{
TEST_EQUALITY_CONST( unit_aware_distribution->evaluatePDF( -1.0*si::joule ),
0.0/si::joule );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluatePDF( 0.0*si::joule ),
1.0/si::joule );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluatePDF( 0.5*si::joule ),
1.0/si::joule );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluatePDF( 1.0*si::joule ),
1.0/si::joule );
TEST_EQUALITY_CONST( unit_aware_distribution->evaluatePDF( 2.0*si::joule ),
0.0/si::joule );
}
//---------------------------------------------------------------------------//
// Check that the CDF can be evaluated
TEUCHOS_UNIT_TEST( UniformDistribution, evaluateCDF )
{
TEST_EQUALITY_CONST( tab_distribution->evaluateCDF( -2.0 ), 0.0 );
TEST_EQUALITY_CONST( tab_distribution->evaluateCDF( -1.0 ), 0.0 );
TEST_EQUALITY_CONST( tab_distribution->evaluateCDF( 0.0 ), 0.5 );
TEST_EQUALITY_CONST( tab_distribution->evaluateCDF( 1.0 ), 1.0 );
TEST_EQUALITY_CONST( tab_distribution->evaluateCDF( 2.0 ), 1.0 );
}
//---------------------------------------------------------------------------//
// Check that the unit aware CDF can be evaluated
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, evaluateCDF )
{
TEST_EQUALITY_CONST(unit_aware_tab_distribution->evaluateCDF(-1.0*si::joule),
0.0 );
TEST_EQUALITY_CONST(unit_aware_tab_distribution->evaluateCDF(0.0*si::joule),
0.0 );
TEST_EQUALITY_CONST(unit_aware_tab_distribution->evaluateCDF(0.5*si::joule),
0.5 );
TEST_EQUALITY_CONST(unit_aware_tab_distribution->evaluateCDF(1.0*si::joule),
1.0 );
TEST_EQUALITY_CONST(unit_aware_tab_distribution->evaluateCDF(2.0*si::joule),
1.0 );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
TEUCHOS_UNIT_TEST( UniformDistribution, sample )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
double sample = distribution->sample();
TEST_EQUALITY_CONST( sample, -1.0 );
sample = distribution->sample();
TEST_EQUALITY_CONST( sample, 0.0 );
sample = distribution->sample();
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
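// Hedged note (not in the original): the fake stream drives inverse-CDF
// sampling, x = a + u*(b - a); with a = -1 and b = 1 the values 0.0, 0.5 and
// 1 - 1e-15 map to -1.0, 0.0 and ~1.0, matching the assertions above.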
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be sampled
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sample )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
quantity<si::energy> sample = unit_aware_distribution->sample();
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
sample = unit_aware_distribution->sample();
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
sample = unit_aware_distribution->sample();
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled w/o an instance
TEUCHOS_UNIT_TEST( UniformDistribution, sample_static )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
double sample = Utility::UniformDistribution::sample( -1.0, 1.0 );
TEST_EQUALITY_CONST( sample, -1.0 );
sample = Utility::UniformDistribution::sample( -1.0, 1.0 );
TEST_EQUALITY_CONST( sample, 0.0 );
sample = Utility::UniformDistribution::sample( -1.0, 1.0 );
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled w/o an instance
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sample_static )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
quantity<si::energy> sample =
Utility::UnitAwareUniformDistribution<si::energy>::sample(
0.0*si::joule, 1.0*si::joule );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
sample =
Utility::UnitAwareUniformDistribution<si::energy>::sample(
0.0*si::joule, 1.0*si::joule );
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
sample =
Utility::UnitAwareUniformDistribution<si::energy>::sample(
0.0*si::joule, 1.0*si::joule );
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
TEUCHOS_UNIT_TEST( UniformDistribution, sampleAndRecordTrials )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
unsigned trials = 0;
double sample = distribution->sampleAndRecordTrials( trials );
TEST_EQUALITY_CONST( sample, -1.0 );
TEST_EQUALITY_CONST( 1.0/trials, 1.0 );
sample = distribution->sampleAndRecordTrials( trials );
TEST_EQUALITY_CONST( sample, 0.0 );
TEST_EQUALITY_CONST( 2.0/trials, 1.0 );
sample = distribution->sampleAndRecordTrials( trials );
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
TEST_EQUALITY_CONST( 3.0/trials, 1.0 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be sampled
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sampleAndRecordTrials )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
unsigned trials = 0;
quantity<si::energy> sample =
unit_aware_distribution->sampleAndRecordTrials( trials );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
TEST_EQUALITY_CONST( 1.0/trials, 1.0 );
sample = unit_aware_distribution->sampleAndRecordTrials( trials );
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
TEST_EQUALITY_CONST( 2.0/trials, 1.0 );
sample = unit_aware_distribution->sampleAndRecordTrials( trials );
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
TEST_EQUALITY_CONST( 3.0/trials, 1.0 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled w/o an instance
TEUCHOS_UNIT_TEST( UniformDistribution, sampleAndRecordTrials_static )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
unsigned trials = 0;
double sample = Utility::UniformDistribution::sampleAndRecordTrials(
-1.0, 1.0, trials );
TEST_EQUALITY_CONST( sample, -1.0 );
TEST_EQUALITY_CONST( 1.0/trials, 1.0 );
sample = Utility::UniformDistribution::sampleAndRecordTrials(
-1.0, 1.0, trials );
TEST_EQUALITY_CONST( sample, 0.0 );
TEST_EQUALITY_CONST( 2.0/trials, 1.0 );
sample = Utility::UniformDistribution::sampleAndRecordTrials(
-1.0, 1.0, trials );
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
TEST_EQUALITY_CONST( 3.0/trials, 1.0 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled w/o an instance
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sampleAndRecordTrials_static )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
unsigned trials = 0;
quantity<si::energy> sample = Utility::UnitAwareUniformDistribution<si::energy,si::amount>::sampleAndRecordTrials(
0.0*si::joule, 1.0*si::joule, trials );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
TEST_EQUALITY_CONST( 1.0/trials, 1.0 );
sample =
Utility::UnitAwareUniformDistribution<si::energy,si::amount>::sampleAndRecordTrials(
0.0*si::joule, 1.0*si::joule, trials );
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
TEST_EQUALITY_CONST( 2.0/trials, 1.0 );
sample =
Utility::UnitAwareUniformDistribution<si::energy,si::amount>::sampleAndRecordTrials(
0.0*si::joule, 1.0*si::joule, trials );
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
TEST_EQUALITY_CONST( 3.0/trials, 1.0 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
TEUCHOS_UNIT_TEST( UniformDistribution, sampleAndRecordBinIndex )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
unsigned bin_index;
double sample = tab_distribution->sampleAndRecordBinIndex( bin_index );
TEST_EQUALITY_CONST( sample, -1.0 );
TEST_EQUALITY_CONST( bin_index, 0.0 );
sample = tab_distribution->sampleAndRecordBinIndex( bin_index );
TEST_EQUALITY_CONST( sample, 0.0 );
TEST_EQUALITY_CONST( bin_index, 0.0 );
sample = tab_distribution->sampleAndRecordBinIndex( bin_index );
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
TEST_EQUALITY_CONST( bin_index, 0.0 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sampleAndRecordBinIndex )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
unsigned bin_index;
quantity<si::energy> sample =
unit_aware_tab_distribution->sampleAndRecordBinIndex( bin_index );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
TEST_EQUALITY_CONST( bin_index, 0.0 );
sample = unit_aware_tab_distribution->sampleAndRecordBinIndex( bin_index );
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
TEST_EQUALITY_CONST( bin_index, 0.0 );
sample = unit_aware_tab_distribution->sampleAndRecordBinIndex( bin_index );
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
TEST_EQUALITY_CONST( bin_index, 0.0 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
TEUCHOS_UNIT_TEST( UniformDistribution, sampleWithRandomNumber )
{
double sample = tab_distribution->sampleWithRandomNumber( 0.0 );
TEST_EQUALITY_CONST( sample, -1.0 );
sample = tab_distribution->sampleWithRandomNumber( 0.5 );
TEST_EQUALITY_CONST( sample, 0.0 );
sample = tab_distribution->sampleWithRandomNumber( 1.0 );
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
}
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be sampled
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sampleWithRandomNumber )
{
quantity<si::energy> sample =
unit_aware_tab_distribution->sampleWithRandomNumber( 0.0 );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
sample = unit_aware_tab_distribution->sampleWithRandomNumber( 0.5 );
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
sample = unit_aware_tab_distribution->sampleWithRandomNumber( 1.0 );
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled w/o an instance
TEUCHOS_UNIT_TEST( UniformDistribution, sampleWithRandomNumber_static )
{
double sample = Utility::UniformDistribution::sampleWithRandomNumber(
-1.0, 1.0, 0.0 );
TEST_EQUALITY_CONST( sample, -1.0 );
sample = Utility::UniformDistribution::sampleWithRandomNumber(
-1.0, 1.0, 0.5 );
TEST_EQUALITY_CONST( sample, 0.0 );
sample = Utility::UniformDistribution::sampleWithRandomNumber(
-1.0, 1.0, 1.0 );
TEST_FLOATING_EQUALITY( sample, 1.0, 1e-14 );
}
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be sampled w/o an instance
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution,
sampleWithRandomNumber_static )
{
quantity<si::energy> sample = Utility::UnitAwareUniformDistribution<si::energy,si::amount>::sampleWithRandomNumber(
0.0*si::joule, 1.0*si::joule, 0.0 );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
sample = Utility::UnitAwareUniformDistribution<si::energy,si::amount>::sampleWithRandomNumber(
0.0*si::joule, 1.0*si::joule, 0.5 );
TEST_EQUALITY_CONST( sample, 0.5*si::joule );
sample = Utility::UnitAwareUniformDistribution<si::energy,si::amount>::sampleWithRandomNumber(
0.0*si::joule, 1.0*si::joule, 1.0 );
UTILITY_TEST_FLOATING_EQUALITY( sample, 1.0*si::joule, 1e-14 );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
TEUCHOS_UNIT_TEST( UniformDistribution, sampleInSubrange )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
double sample = tab_distribution->sampleInSubrange( 0.0 );
TEST_EQUALITY_CONST( sample, -1.0 );
sample = tab_distribution->sampleInSubrange( 0.0 );
TEST_EQUALITY_CONST( sample, -0.5 );
sample = tab_distribution->sampleInSubrange( 0.0 );
UTILITY_TEST_FLOATING_EQUALITY( sample, 0.0, 1e-14 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be sampled
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, sampleInSubrange )
{
std::vector<double> fake_stream( 3 );
fake_stream[0] = 0.0;
fake_stream[1] = 0.5;
fake_stream[2] = 1.0 - 1e-15;
Utility::RandomNumberGenerator::setFakeStream( fake_stream );
quantity<si::energy> sample =
unit_aware_tab_distribution->sampleInSubrange( 0.5*si::joule );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
sample = unit_aware_tab_distribution->sampleInSubrange( 0.5*si::joule );
TEST_EQUALITY_CONST( sample, 0.25*si::joule );
sample = unit_aware_tab_distribution->sampleInSubrange( 0.5*si::joule );
UTILITY_TEST_FLOATING_EQUALITY( sample, 0.5*si::joule, 1e-14 );
Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
TEUCHOS_UNIT_TEST( UniformDistribution, sampleWithRandomNumberInSubrange )
{
double sample = tab_distribution->sampleWithRandomNumberInSubrange( 0.0, 0.0 );
TEST_EQUALITY_CONST( sample, -1.0 );
sample = tab_distribution->sampleWithRandomNumberInSubrange( 0.5, 0.0 );
TEST_EQUALITY_CONST( sample, -0.5 );
sample = tab_distribution->sampleWithRandomNumberInSubrange( 1.0, 0.0 );
UTILITY_TEST_FLOATING_EQUALITY( sample, 0.0, 1e-14 );
}
//---------------------------------------------------------------------------//
// Check that the unit aware distribution can be sampled
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution,
sampleWithRandomNumberInSubrange )
{
quantity<si::energy> sample =
unit_aware_tab_distribution->sampleWithRandomNumberInSubrange(
0.0, 0.5*si::joule );
TEST_EQUALITY_CONST( sample, 0.0*si::joule );
sample = unit_aware_tab_distribution->sampleWithRandomNumberInSubrange(
0.5, 0.5*si::joule );
TEST_EQUALITY_CONST( sample, 0.25*si::joule );
sample = unit_aware_tab_distribution->sampleWithRandomNumberInSubrange(
1.0, 0.5*si::joule );
UTILITY_TEST_FLOATING_EQUALITY( sample, 0.5*si::joule, 1e-14 );
}
//---------------------------------------------------------------------------//
// Check that the upper bound of the distribution independent variable can be
// returned
TEUCHOS_UNIT_TEST( UniformDistribution, getUpperBoundOfIndepVar )
{
TEST_EQUALITY_CONST( distribution->getUpperBoundOfIndepVar(), 1.0 );
}
//---------------------------------------------------------------------------//
// Check that the upper bound of the unit-aware distribution independent
// variable can be returned
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, getUpperBoundOfIndepVar )
{
TEST_EQUALITY_CONST( unit_aware_distribution->getUpperBoundOfIndepVar(),
1.0*si::joule );
}
//---------------------------------------------------------------------------//
// Check that the lower bound of the distribution independent variable can be
// returned
TEUCHOS_UNIT_TEST( UniformDistribution, getLowerBoundOfIndepVar )
{
TEST_EQUALITY_CONST( distribution->getLowerBoundOfIndepVar(), -1.0 );
}
//---------------------------------------------------------------------------//
// Check that the lower bound of the unit-aware distribution independent
// variable can be returned
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, getLowerBoundOfIndepVar )
{
TEST_EQUALITY_CONST( unit_aware_distribution->getLowerBoundOfIndepVar(),
0.0*si::joule );
}
//---------------------------------------------------------------------------//
// Check that the distribution type can be returned
TEUCHOS_UNIT_TEST( UniformDistribution, getDistributionType )
{
TEST_EQUALITY_CONST( distribution->getDistributionType(),
Utility::UNIFORM_DISTRIBUTION );
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution type can be returned
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, getDistributionType )
{
TEST_EQUALITY_CONST( unit_aware_distribution->getDistributionType(),
Utility::UNIFORM_DISTRIBUTION );
}
//---------------------------------------------------------------------------//
// Check if the distribution is tabular
TEUCHOS_UNIT_TEST( UniformDistribution, isTabular )
{
TEST_ASSERT( distribution->isTabular() );
}
//---------------------------------------------------------------------------//
// Check if the unit-aware distribution is tabular
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, isTabular )
{
TEST_ASSERT( unit_aware_distribution->isTabular() );
}
//---------------------------------------------------------------------------//
// Check if the distribution is continuous
TEUCHOS_UNIT_TEST( UniformDistribution, isContinuous )
{
TEST_ASSERT( distribution->isContinuous() );
}
//---------------------------------------------------------------------------//
// Check if the unit-aware distribution is continuous
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, isContinuous )
{
TEST_ASSERT( unit_aware_distribution->isContinuous() );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be written to an xml file
TEUCHOS_UNIT_TEST( UniformDistribution, toParameterList )
{
Teuchos::RCP<Utility::UniformDistribution> true_distribution =
Teuchos::rcp_dynamic_cast<Utility::UniformDistribution>( distribution );
Teuchos::ParameterList parameter_list;
parameter_list.set<Utility::UniformDistribution>( "test distribution",
*true_distribution );
Teuchos::writeParameterListToXmlFile( parameter_list,
"uniform_dist_test_list.xml" );
Teuchos::RCP<Teuchos::ParameterList> read_parameter_list =
Teuchos::getParametersFromXmlFile( "uniform_dist_test_list.xml" );
TEST_EQUALITY( parameter_list, *read_parameter_list );
Teuchos::RCP<Utility::UniformDistribution>
copy_distribution( new Utility::UniformDistribution );
*copy_distribution =
read_parameter_list->get<Utility::UniformDistribution>(
"test distribution");
TEST_EQUALITY( *copy_distribution, *true_distribution );
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be written to an xml file
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, toParameterList )
{
typedef Utility::UnitAwareUniformDistribution<si::energy,si::amount> UnitAwareUniformDistribution;
Teuchos::RCP<UnitAwareUniformDistribution> true_distribution =
Teuchos::rcp_dynamic_cast<UnitAwareUniformDistribution>( unit_aware_distribution );
Teuchos::ParameterList parameter_list;
parameter_list.set<UnitAwareUniformDistribution>( "test distribution",
*true_distribution );
Teuchos::writeParameterListToXmlFile( parameter_list,
"unit_aware_uniform_dist_test_list.xml" );
Teuchos::RCP<Teuchos::ParameterList> read_parameter_list =
Teuchos::getParametersFromXmlFile( "unit_aware_uniform_dist_test_list.xml" );
TEST_EQUALITY( parameter_list, *read_parameter_list );
Teuchos::RCP<UnitAwareUniformDistribution>
copy_distribution( new UnitAwareUniformDistribution );
*copy_distribution =
read_parameter_list->get<UnitAwareUniformDistribution>(
"test distribution");
TEST_EQUALITY( *copy_distribution, *true_distribution );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be read from an xml file
TEUCHOS_UNIT_TEST( UniformDistribution, fromParameterList )
{
Utility::UniformDistribution xml_distribution =
test_dists_list->get<Utility::UniformDistribution>( "Uniform Distribution A" );
TEST_EQUALITY_CONST( xml_distribution.getLowerBoundOfIndepVar(), -1.0 );
TEST_EQUALITY_CONST( xml_distribution.getUpperBoundOfIndepVar(), 1.0 );
TEST_EQUALITY_CONST( xml_distribution.evaluate( 0.0 ), 2.0 );
xml_distribution =
test_dists_list->get<Utility::UniformDistribution>( "Uniform Distribution B" );
TEST_EQUALITY_CONST( xml_distribution.getLowerBoundOfIndepVar(), 0.0 );
TEST_EQUALITY_CONST( xml_distribution.getUpperBoundOfIndepVar(),
2*Utility::PhysicalConstants::pi );
TEST_EQUALITY_CONST( xml_distribution.evaluate( 1.0 ), 1.0 );
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be read from an xml file
TEUCHOS_UNIT_TEST( UnitAwareUniformDistribution, fromParameterList )
{
typedef Utility::UnitAwareUniformDistribution<si::energy,si::amount> UnitAwareUniformDistribution;
UnitAwareUniformDistribution xml_distribution =
test_dists_list->get<UnitAwareUniformDistribution>( "Unit-Aware Uniform Distribution A" );
TEST_EQUALITY_CONST( xml_distribution.getLowerBoundOfIndepVar(), 0.0*si::joule );
TEST_EQUALITY_CONST( xml_distribution.getUpperBoundOfIndepVar(), 10.0*si::joule);
TEST_EQUALITY_CONST( xml_distribution.evaluate( 5.0*si::joule ), 3.0*si::mole );
xml_distribution =
test_dists_list->get<UnitAwareUniformDistribution>( "Unit-Aware Uniform Distribution B" );
TEST_EQUALITY_CONST( xml_distribution.getLowerBoundOfIndepVar(), 0.0*si::joule );
TEST_EQUALITY_CONST( xml_distribution.getUpperBoundOfIndepVar(),
Utility::PhysicalConstants::pi*si::joule );
TEST_EQUALITY_CONST( xml_distribution.evaluate( 1.0*si::joule ), 1.0*si::mole );
}
//---------------------------------------------------------------------------//
// Check that a unit-aware distribution can be scaled
TEUCHOS_UNIT_TEST_TEMPLATE_4_DECL( UnitAwareUniformDistribution,
explicit_conversion,
IndepUnitA,
DepUnitA,
IndepUnitB,
DepUnitB )
{
typedef typename Utility::UnitTraits<IndepUnitA>::template GetQuantityType<double>::type IndepQuantityA;
typedef typename Utility::UnitTraits<typename Utility::UnitTraits<IndepUnitA>::InverseUnit>::template GetQuantityType<double>::type InverseIndepQuantityA;
typedef typename Utility::UnitTraits<IndepUnitB>::template GetQuantityType<double>::type IndepQuantityB;
typedef typename Utility::UnitTraits<typename Utility::UnitTraits<IndepUnitB>::InverseUnit>::template GetQuantityType<double>::type InverseIndepQuantityB;
typedef typename Utility::UnitTraits<DepUnitA>::template GetQuantityType<double>::type DepQuantityA;
typedef typename Utility::UnitTraits<DepUnitB>::template GetQuantityType<double>::type DepQuantityB;
// Copy from a unitless distribution to distribution type A (static method)
Utility::UnitAwareUniformDistribution<IndepUnitA,DepUnitA>
unit_aware_dist_a_copy = Utility::UnitAwareUniformDistribution<IndepUnitA,DepUnitA>::fromUnitlessDistribution( *Teuchos::rcp_dynamic_cast<Utility::UniformDistribution>( distribution ) );
// Copy from distribution type A to distribution type B (explicit cast)
Utility::UnitAwareUniformDistribution<IndepUnitB,DepUnitB>
unit_aware_dist_b_copy( unit_aware_dist_a_copy );
IndepQuantityA indep_quantity_a =
Utility::QuantityTraits<IndepQuantityA>::initializeQuantity( -1.0 );
InverseIndepQuantityA inv_indep_quantity_a =
Utility::QuantityTraits<InverseIndepQuantityA>::initializeQuantity( 0.5 );
DepQuantityA dep_quantity_a =
Utility::QuantityTraits<DepQuantityA>::initializeQuantity( 2.0 );
IndepQuantityB indep_quantity_b( indep_quantity_a );
InverseIndepQuantityB inv_indep_quantity_b( inv_indep_quantity_a );
DepQuantityB dep_quantity_b( dep_quantity_a );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_a_copy.evaluate( indep_quantity_a ),
dep_quantity_a,
1e-15 );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_a_copy.evaluatePDF( indep_quantity_a ),
inv_indep_quantity_a,
1e-15 );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_b_copy.evaluate( indep_quantity_b ),
dep_quantity_b,
1e-15 );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_b_copy.evaluatePDF( indep_quantity_b ),
inv_indep_quantity_b,
1e-15 );
Utility::setQuantity( indep_quantity_a, 1.0 );
Utility::setQuantity( inv_indep_quantity_a, 0.5 );
Utility::setQuantity( dep_quantity_a, 2.0 );
indep_quantity_b = IndepQuantityB( indep_quantity_a );
inv_indep_quantity_b = InverseIndepQuantityB( inv_indep_quantity_a );
dep_quantity_b = DepQuantityB( dep_quantity_a );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_a_copy.evaluate( indep_quantity_a ),
dep_quantity_a,
1e-15 );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_a_copy.evaluatePDF( indep_quantity_a ),
inv_indep_quantity_a,
1e-15 );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_b_copy.evaluate( indep_quantity_b ),
dep_quantity_b,
1e-15 );
UTILITY_TEST_FLOATING_EQUALITY(
unit_aware_dist_b_copy.evaluatePDF( indep_quantity_b ),
inv_indep_quantity_b,
1e-15 );
}
typedef si::energy si_energy;
typedef cgs::energy cgs_energy;
typedef si::length si_length;
typedef cgs::length cgs_length;
typedef si::mass si_mass;
typedef cgs::mass cgs_mass;
typedef si::amount si_amount;
typedef si::dimensionless si_dimensionless;
typedef cgs::dimensionless cgs_dimensionless;
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
si_energy,
si_amount,
cgs_energy,
si_amount );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
cgs_energy,
si_amount,
si_energy,
si_amount );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
si_length,
si_energy,
cgs_length,
cgs_energy );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
cgs_length,
cgs_energy,
si_length,
si_energy );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
si_length,
si_mass,
cgs_length,
cgs_mass );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
cgs_length,
cgs_mass,
si_length,
si_mass );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
si_length,
void,
cgs_length,
void );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
cgs_length,
void,
si_length,
void );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
void,
si_mass,
void,
cgs_mass );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
void,
cgs_mass,
void,
si_mass );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
si_length,
si_dimensionless,
cgs_length,
cgs_dimensionless );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
cgs_length,
cgs_dimensionless,
si_length,
si_dimensionless );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
si_dimensionless,
si_length,
cgs_dimensionless,
cgs_length );
TEUCHOS_UNIT_TEST_TEMPLATE_4_INSTANT( UnitAwareUniformDistribution,
explicit_conversion,
cgs_dimensionless,
cgs_length,
si_dimensionless,
si_length );
//---------------------------------------------------------------------------//
// Custom main function
//---------------------------------------------------------------------------//
int main( int argc, char** argv )
{
std::string test_dists_xml_file;
Teuchos::CommandLineProcessor& clp = Teuchos::UnitTestRepository::getCLP();
clp.setOption( "test_dists_xml_file",
&test_dists_xml_file,
"Test distributions xml file name" );
const Teuchos::RCP<Teuchos::FancyOStream> out =
Teuchos::VerboseObjectBase::getDefaultOStream();
Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return =
clp.parse(argc,argv);
if ( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) {
*out << "\nEnd Result: TEST FAILED" << std::endl;
return parse_return;
}
TEUCHOS_ADD_TYPE_CONVERTER( Utility::UniformDistribution );
typedef Utility::UnitAwareUniformDistribution<si::energy,si::amount> UnitAwareUniformDistribution;
TEUCHOS_ADD_TYPE_CONVERTER( UnitAwareUniformDistribution );
test_dists_list = Teuchos::getParametersFromXmlFile( test_dists_xml_file );
// Initialize the unit-aware distributions
unit_aware_tab_distribution.reset(
new Utility::UnitAwareUniformDistribution<si::energy,si::amount>(
quantity<si::energy>( 0.0*si::joule ),
quantity<si::energy>( 1.0*si::joule ),
quantity<si::amount>( 1.0*si::mole ) ) );
unit_aware_distribution = unit_aware_tab_distribution;
// Initialize the random number generator
Utility::RandomNumberGenerator::createStreams();
// Run the unit tests
Teuchos::GlobalMPISession mpiSession( &argc, &argv );
const bool success = Teuchos::UnitTestRepository::runUnitTests(*out);
if (success)
*out << "\nEnd Result: TEST PASSED" << std::endl;
else
*out << "\nEnd Result: TEST FAILED" << std::endl;
clp.printFinalTimerSummary(out.ptr());
return (success ? 0 : 1);
}
//---------------------------------------------------------------------------//
// end tstUniformDistribution.cpp
//---------------------------------------------------------------------------//
|
{"hexsha": "cb66497d146c80933182d4ea5e4b11a9bf0ae233", "size": 36370, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "packages/utility/distribution/test/tstUniformDistribution.cpp", "max_stars_repo_name": "lkersting/SCR-2123", "max_stars_repo_head_hexsha": "06ae3d92998664a520dc6a271809a5aeffe18f72", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "packages/utility/distribution/test/tstUniformDistribution.cpp", "max_issues_repo_name": "lkersting/SCR-2123", "max_issues_repo_head_hexsha": "06ae3d92998664a520dc6a271809a5aeffe18f72", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "packages/utility/distribution/test/tstUniformDistribution.cpp", "max_forks_repo_name": "lkersting/SCR-2123", "max_forks_repo_head_hexsha": "06ae3d92998664a520dc6a271809a5aeffe18f72", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1501532176, "max_line_length": 190, "alphanum_fraction": 0.6580423426, "num_tokens": 9184}
|
import random
import re
from multiprocessing import Pool
from collections import UserList, defaultdict
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import torch
from rdkit import rdBase
from rdkit import Chem
# https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
def set_torch_seed_to_all_gens(_):
seed = torch.initial_seed() % (2 ** 32 - 1)
random.seed(seed)
np.random.seed(seed)
class SpecialTokens:
bos = '<bos>'
eos = '<eos>'
pad = '<pad>'
unk = '<unk>'
class CharVocab:
@classmethod
def from_data(cls, data, *args, **kwargs):
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
chars = set()
for string in data:
chars.update(regex.findall(string))
return cls(chars, *args, **kwargs)
def __init__(self, chars, ss=SpecialTokens):
if (ss.bos in chars) or (ss.eos in chars) or \
(ss.pad in chars) or (ss.unk in chars):
raise ValueError('SpecialTokens in chars')
all_syms = sorted(list(chars)) + [ss.bos, ss.eos, ss.pad, ss.unk]
self.pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
self.regex = re.compile(self.pattern)
self.ss = ss
self.c2i = {c: i for i, c in enumerate(all_syms)}
self.i2c = {i: c for i, c in enumerate(all_syms)}
def __len__(self):
return len(self.c2i)
@property
def bos(self):
return self.c2i[self.ss.bos]
@property
def eos(self):
return self.c2i[self.ss.eos]
@property
def pad(self):
return self.c2i[self.ss.pad]
@property
def unk(self):
return self.c2i[self.ss.unk]
def char2id(self, char):
if char not in self.c2i:
return self.unk
return self.c2i[char]
def id2char(self, id):
if id not in self.i2c:
return self.ss.unk
return self.i2c[id]
def string2ids(self, string, add_bos=False, add_eos=False):
ids = [self.char2id(c) for c in self.regex.findall(string)]
if add_bos:
ids = [self.bos] + ids
if add_eos:
ids = ids + [self.eos]
return ids
def ids2string(self, ids, rem_bos=True, rem_eos=True):
if len(ids) == 0:
return ''
if rem_bos and ids[0] == self.bos:
ids = ids[1:]
if rem_eos and ids[-1] == self.eos:
ids = ids[:-1]
string = ''.join([self.id2char(id) for id in ids])
return string
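# Example (hypothetical data): a CharVocab built from SMILES strings
# round-trips a string through token ids.
#
#   vocab = CharVocab.from_data(['CCO', 'c1ccccc1'])
#   ids = vocab.string2ids('CCO', add_bos=True, add_eos=True)
#   assert vocab.ids2string(ids) == 'CCO'  # bos/eos stripped by default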
class OneHotVocab(CharVocab):
def __init__(self, *args, **kwargs):
super(OneHotVocab, self).__init__(*args, **kwargs)
self.vectors = torch.eye(len(self.c2i))
def mapper(n_jobs):
'''
Returns function for map call.
If n_jobs == 1, will use standard map
If n_jobs > 1, will use multiprocessing pool
If n_jobs is a pool object, will return its map function
'''
if n_jobs == 1:
def _mapper(*args, **kwargs):
return list(map(*args, **kwargs))
return _mapper
if isinstance(n_jobs, int):
pool = Pool(n_jobs)
def _mapper(*args, **kwargs):
try:
result = pool.map(*args, **kwargs)
finally:
pool.terminate()
return result
return _mapper
return n_jobs.map
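# Usage sketch for mapper (variable names hypothetical). Note that when an
# integer n_jobs > 1 is given, the pool is terminated in the finally clause of
# _mapper, so the returned function is effectively single-use.
#
#   smiles_to_mol = mapper(n_jobs=4)
#   mols = smiles_to_mol(get_mol, smiles_list)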
class Logger(UserList):
def __init__(self, data=None):
super().__init__()
self.sdata = defaultdict(list)
for step in (data or []):
self.append(step)
def __getitem__(self, key):
if isinstance(key, int):
return self.data[key]
if isinstance(key, slice):
return Logger(self.data[key])
ldata = self.sdata[key]
if isinstance(ldata[0], dict):
return Logger(ldata)
return ldata
def append(self, step_dict):
super().append(step_dict)
for k, v in step_dict.items():
self.sdata[k].append(v)
def save(self, path):
df = pd.DataFrame(list(self))
df.to_csv(path, index=False)
class LogPlotter:
def __init__(self, log):
self.log = log
def line(self, ax, name):
if isinstance(self.log[0][name], dict):
for k in self.log[0][name]:
ax.plot(self.log[name][k], label=k)
ax.legend()
else:
ax.plot(self.log[name])
ax.set_ylabel('value')
ax.set_xlabel('epoch')
ax.set_title(name)
def grid(self, names, size=7):
_, axs = plt.subplots(nrows=len(names) // 2, ncols=2,
figsize=(size * 2, size * (len(names) // 2)))
for ax, name in zip(axs.flatten(), names):
self.line(ax, name)
class CircularBuffer:
def __init__(self, size):
self.max_size = size
self.data = np.zeros(self.max_size)
self.size = 0
self.pointer = -1
def add(self, element):
self.size = min(self.size + 1, self.max_size)
self.pointer = (self.pointer + 1) % self.max_size
self.data[self.pointer] = element
return element
def last(self):
assert self.pointer != -1, "Can't get an element from an empty buffer!"
return self.data[self.pointer]
def mean(self):
if self.size > 0:
return self.data[:self.size].mean()
return 0.0
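# Usage sketch: CircularBuffer keeps a fixed-size running window, e.g. for
# smoothed training metrics.
#
#   buf = CircularBuffer(size=3)
#   for v in (1.0, 2.0, 3.0, 4.0):
#       buf.add(v)
#   buf.last()  # 4.0, the most recently added element
#   buf.mean()  # 3.0, the mean over the last `size` elements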
def disable_rdkit_log():
rdBase.DisableLog('rdApp.*')
def enable_rdkit_log():
rdBase.EnableLog('rdApp.*')
def get_mol(smiles_or_mol):
'''
Loads SMILES/molecule into RDKit's object
'''
if isinstance(smiles_or_mol, str):
if len(smiles_or_mol) == 0:
return None
mol = Chem.MolFromSmiles(smiles_or_mol)
if mol is None:
return None
try:
Chem.SanitizeMol(mol)
except ValueError:
return None
return mol
return smiles_or_mol
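# Example: get_mol('CCO') returns an RDKit Mol object, while get_mol('') and
# get_mol('not-a-smiles') both return None; an existing Mol is passed through.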
class StringDataset:
def __init__(self, vocab, data):
"""
Creates a convenient Dataset with SMILES tokenization
Arguments:
vocab: CharVocab instance for tokenization
data (list): SMILES strings for the dataset
"""
self.vocab = vocab
self.tokens = [vocab.string2ids(s) for s in data]
self.data = data
self.bos = vocab.bos
self.eos = vocab.eos
def __len__(self):
"""
        Computes the number of objects in the dataset
"""
return len(self.tokens)
def __getitem__(self, index):
"""
Prepares torch tensors with a given SMILES.
Arguments:
index (int): index of SMILES in the original dataset
Returns:
A tuple (with_bos, with_eos, smiles), where
* with_bos is a torch.long tensor of SMILES tokens with
BOS (beginning of a sentence) token
* with_eos is a torch.long tensor of SMILES tokens with
EOS (end of a sentence) token
* smiles is an original SMILES from the dataset
"""
tokens = self.tokens[index]
with_bos = torch.tensor([self.bos] + tokens, dtype=torch.long)
with_eos = torch.tensor(tokens + [self.eos], dtype=torch.long)
return with_bos, with_eos, self.data[index]
def default_collate(self, batch, return_data=False):
"""
Simple collate function for SMILES dataset. Joins a
batch of objects from StringDataset into a batch
Arguments:
batch: list of objects from StringDataset
return_data: if True, will return SMILES used in a batch
Returns:
with_bos, with_eos, lengths [, data] where
* with_bos: padded sequence with BOS in the beginning
* with_eos: padded sequence with EOS in the end
* lengths: array with SMILES lengths in the batch
* data: SMILES in the batch
Note: output batch is sorted with respect to SMILES lengths in
decreasing order, since this is a default format for torch
RNN implementations
"""
with_bos, with_eos, data = list(zip(*batch))
lengths = [len(x) for x in with_bos]
order = np.argsort(lengths)[::-1]
with_bos = [with_bos[i] for i in order]
with_eos = [with_eos[i] for i in order]
lengths = [lengths[i] for i in order]
with_bos = torch.nn.utils.rnn.pad_sequence(
with_bos, padding_value=self.vocab.pad
)
with_eos = torch.nn.utils.rnn.pad_sequence(
with_eos, padding_value=self.vocab.pad
)
if return_data:
data = np.array(data)[order]
return with_bos, with_eos, lengths, data
return with_bos, with_eos, lengths
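# Usage sketch (variable names hypothetical): wiring StringDataset into a
# torch DataLoader with its collate function.
#
#   vocab = CharVocab.from_data(smiles_list)
#   dataset = StringDataset(vocab, smiles_list)
#   loader = torch.utils.data.DataLoader(
#       dataset, batch_size=32, shuffle=True,
#       collate_fn=dataset.default_collate,
#       worker_init_fn=set_torch_seed_to_all_gens)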
def batch_to_device(batch, device):
return [
x.to(device) if isinstance(x, torch.Tensor) else x
for x in batch
]
|
{"hexsha": "a3fb30a0d2afd0850954413efb15b584e398c908", "size": 9163, "ext": "py", "lang": "Python", "max_stars_repo_path": "moses/utils.py", "max_stars_repo_name": "yair-schiff/moses", "max_stars_repo_head_hexsha": "563c364acf6091bf1781f0f98743589ce4eb4195", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "moses/utils.py", "max_issues_repo_name": "yair-schiff/moses", "max_issues_repo_head_hexsha": "563c364acf6091bf1781f0f98743589ce4eb4195", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "moses/utils.py", "max_forks_repo_name": "yair-schiff/moses", "max_forks_repo_head_hexsha": "563c364acf6091bf1781f0f98743589ce4eb4195", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0888888889, "max_line_length": 128, "alphanum_fraction": 0.5704463604, "include": true, "reason": "import numpy", "num_tokens": 2378}
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Exponential distribution"""
import numpy as np
from scipy import stats
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
"""
Test class: probability of Exponential distribution.
"""
def __init__(self):
super(Prob, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self, x_):
return self.e.prob(x_)
def test_pdf():
"""
Test pdf.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_pdf = expon_benchmark.pdf([-1.0, 0.0, 1.0]).astype(np.float32)
pdf = Prob()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = pdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
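# Note: scipy parameterizes the exponential by scale = 1/rate, so the msd
# rates [[1.0], [0.5]] above correspond to stats.expon scales [[1.0], [2.0]].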
class LogProb(nn.Cell):
"""
Test class: log probability of Exponential distribution.
"""
def __init__(self):
super(LogProb, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self, x_):
return self.e.log_prob(x_)
def test_log_likelihood():
"""
Test log_pdf.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_logpdf = expon_benchmark.logpdf([0.5, 1.0, 2.0]).astype(np.float32)
logprob = LogProb()
x_ = Tensor(np.array([0.5, 1.0, 2.0]).astype(np.float32), dtype=dtype.float32)
output = logprob(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
class KL(nn.Cell):
"""
Test class: kl_loss between Exponential distributions.
"""
def __init__(self):
super(KL, self).__init__()
self.e = msd.Exponential([1.5], dtype=dtype.float32)
def construct(self, x_):
return self.e.kl_loss('Exponential', x_)
def test_kl_loss():
"""
Test kl_loss.
"""
rate_a = 1.5
rate_b = np.array([0.5, 2.0]).astype(np.float32)
expect_kl_loss = np.log(rate_a) - np.log(rate_b) + rate_b / rate_a - 1.0
kl = KL()
output = kl(Tensor(rate_b, dtype=dtype.float32))
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_kl_loss) < tol).all()
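# The benchmark above is the closed form for exponentials:
# KL(Exp(a) || Exp(b)) = ln(a) - ln(b) + b/a - 1, where a and b are rates.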
class Basics(nn.Cell):
"""
Test class: mean/sd/mode of Exponential distribution.
"""
def __init__(self):
super(Basics, self).__init__()
self.e = msd.Exponential([0.5], dtype=dtype.float32)
def construct(self):
return self.e.mean(), self.e.sd(), self.e.mode()
def test_basics():
"""
    Test mean/standard deviation/mode.
"""
basics = Basics()
mean, sd, mode = basics()
expect_mean = 2.
expect_sd = 2.
expect_mode = 0.
tol = 1e-6
assert (np.abs(mean.asnumpy() - expect_mean) < tol).all()
assert (np.abs(sd.asnumpy() - expect_sd) < tol).all()
assert (np.abs(mode.asnumpy() - expect_mode) < tol).all()
class Sampling(nn.Cell):
"""
Test class: sample of Exponential distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], seed=seed, dtype=dtype.float32)
self.shape = shape
def construct(self, rate=None):
return self.e.sample(self.shape, rate)
def test_sample():
"""
Test sample.
"""
shape = (2, 3)
seed = 10
rate = Tensor([1.0, 2.0, 3.0], dtype=dtype.float32)
sample = Sampling(shape, seed=seed)
output = sample(rate)
assert output.shape == (2, 3, 3)
class CDF(nn.Cell):
"""
Test class: cdf of Exponential distribution.
"""
def __init__(self):
super(CDF, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self, x_):
return self.e.cdf(x_)
def test_cdf():
"""
Test cdf.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_cdf = expon_benchmark.cdf([-1.0, 0.0, 1.0]).astype(np.float32)
cdf = CDF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = cdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_cdf) < tol).all()
class LogCDF(nn.Cell):
"""
Test class: log_cdf of Exponential distribution.
"""
def __init__(self):
super(LogCDF, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self, x_):
return self.e.log_cdf(x_)
def test_log_cdf():
"""
Test log_cdf.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_logcdf = expon_benchmark.logcdf([0.5, 1.0, 2.5]).astype(np.float32)
logcdf = LogCDF()
x_ = Tensor(np.array([0.5, 1.0, 2.5]).astype(np.float32), dtype=dtype.float32)
output = logcdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logcdf) < tol).all()
class SF(nn.Cell):
"""
Test class: survival function of Exponential distribution.
"""
def __init__(self):
super(SF, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self, x_):
return self.e.survival_function(x_)
def test_survival():
"""
Test survival function.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_survival = expon_benchmark.sf([-1.0, 0.0, 1.0]).astype(np.float32)
survival = SF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = survival(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_survival) < tol).all()
class LogSF(nn.Cell):
"""
Test class: log survival function of Exponential distribution.
"""
def __init__(self):
super(LogSF, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self, x_):
return self.e.log_survival(x_)
def test_log_survival():
"""
Test log survival function.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_logsurvival = expon_benchmark.logsf([-1.0, 0.0, 1.0]).astype(np.float32)
logsurvival = LogSF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = logsurvival(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logsurvival) < tol).all()
class EntropyH(nn.Cell):
"""
Test class: entropy of Exponential distribution.
"""
def __init__(self):
super(EntropyH, self).__init__()
self.e = msd.Exponential([[1.0], [0.5]], dtype=dtype.float32)
def construct(self):
return self.e.entropy()
def test_entropy():
"""
Test entropy.
"""
expon_benchmark = stats.expon(scale=[[1.0], [2.0]])
expect_entropy = expon_benchmark.entropy().astype(np.float32)
entropy = EntropyH()
output = entropy()
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_entropy) < tol).all()
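# Closed form used by the benchmark: the differential entropy of an
# exponential with rate r is H = 1 - ln(r), equivalently 1 + ln(scale).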
class CrossEntropy(nn.Cell):
"""
    Test class: cross entropy between Exponential distributions.
"""
def __init__(self):
super(CrossEntropy, self).__init__()
self.e = msd.Exponential([1.0], dtype=dtype.float32)
def construct(self, x_):
entropy = self.e.entropy()
kl_loss = self.e.kl_loss('Exponential', x_)
h_sum_kl = entropy + kl_loss
cross_entropy = self.e.cross_entropy('Exponential', x_)
return h_sum_kl - cross_entropy
def test_cross_entropy():
"""
Test cross_entropy.
"""
cross_entropy = CrossEntropy()
rate = Tensor([0.5], dtype=dtype.float32)
diff = cross_entropy(rate)
tol = 1e-6
assert (np.abs(diff.asnumpy() - np.zeros(diff.shape)) < tol).all()
|
{"hexsha": "ba1689c6f913a7b1009363895f951405015b07ec", "size": 8480, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/st/probability/distribution/test_exponential.py", "max_stars_repo_name": "GuoSuiming/mindspore", "max_stars_repo_head_hexsha": "48afc4cfa53d970c0b20eedfb46e039db2a133d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3200, "max_stars_repo_stars_event_min_datetime": "2020-02-17T12:45:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:21:16.000Z", "max_issues_repo_path": "tests/st/probability/distribution/test_exponential.py", "max_issues_repo_name": "zimo-geek/mindspore", "max_issues_repo_head_hexsha": "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 176, "max_issues_repo_issues_event_min_datetime": "2020-02-12T02:52:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T22:15:55.000Z", "max_forks_repo_path": "tests/st/probability/distribution/test_exponential.py", "max_forks_repo_name": "zimo-geek/mindspore", "max_forks_repo_head_hexsha": "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 621, "max_forks_repo_forks_event_min_datetime": "2020-03-09T01:31:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T03:43:19.000Z", "avg_line_length": 30.1779359431, "max_line_length": 83, "alphanum_fraction": 0.6189858491, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2447}
|
# -*- coding: utf-8 -*-
#
## copyright (C) 2018
# The Icecube Collaboration
#
# $Id$
#
# @version $Revision$
# @date $LastChangedDate$
# @author Hershal Pandya <hershal@udel.edu> Last changed by: $LastChangedBy$
#
from icecube import phys_services, dataclasses, icetray
import numpy as np
import tables
from icecube.icetray.i3logging import log_fatal,log_warn
from llh_ratio_nd import get_slice_vector,log_likelihood_ratio
def signed_log(t):
return np.sign(t)*np.log10(np.absolute(t)+1)
def log_plus_one(t):
return np.log10(t+1)
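# Both transforms compress dynamic range for histogramming: signed_log is a
# symmetric log10 that preserves the sign of time residuals (and is 0 at 0),
# while log_plus_one keeps log10 well-defined at distance == 0.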
def check_distinct_regions_add_up_to_full(distinct_regions_binedges,binedges,decimals=2):
combine_edges=[]
for i in range(len(distinct_regions_binedges)):
for j in range(len(distinct_regions_binedges[i])):
if i==0:
combine_edges.append(distinct_regions_binedges[i][j])
else:
combine_edges[j]=np.unique(np.sort(np.concatenate((combine_edges[j],distinct_regions_binedges[i][j]))))
for i in range(len(binedges)):
are_equal=(np.round(binedges[i],decimals=decimals)==np.round(combine_edges[i],decimals=decimals)).all()
if not are_equal:
            print('DistinctRegionsBinEdges do not add up to binedges for this dimension')
            print(combine_edges[i], binedges[i])
raise Exception('Inconsistency found')
for i in range(len(distinct_regions_binedges)):
for j in range(len(distinct_regions_binedges[0])):
if i<len(distinct_regions_binedges)-1:
next_one=i+1
else:
next_one=0
intersection=np.intersect1d(np.round(distinct_regions_binedges[i][j],decimals=decimals),np.round(distinct_regions_binedges[next_one][j],decimals=decimals))
if len(intersection)>1 and len(intersection)!=len(binedges[j]):
                print('comparing "Distinct" regions %i and %i, dimension %i'%(i,next_one,j))
                print('These regions Intersect')
                print('binedges of region1',distinct_regions_binedges[i])
                print('binedges of region2',distinct_regions_binedges[next_one])
raise Exception('Inconsistency found')
return
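# In short: the check above verifies that the per-region bin edges tile the
# full (Q, T, R) grid exactly -- their union reproduces `binedges`, and two
# regions may share at most a boundary edge along a dimension (or the full
# axis, when they are kept distinct along another dimension).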
class IceTop_LLHRatio(icetray.I3ConditionalModule):
"""
Input takes I3VectorShieldHitRecords with following members:
distance
residual_time
charge
"""
def __init__(self,ctx):
icetray.I3ConditionalModule.__init__(self, ctx)
#common inputs
self.AddParameter('Hits_I3VectorShieldHitRecord',
'Shield applied to Pulses Using a reco',
None)
self.AddParameter('Unhits_I3VectorShieldHitRecord',
'Unhits from Shield and Charge/Time assigned false values',
None)
self.AddParameter('Excluded_I3VectorShieldHitRecord',
'Containing Dist of Excluded Tanks and Charge/time assigned false values',
None)
self.AddParameter('AngularReco_I3Particle',
'I3Particle from which cosZenith is to be drawn',
None)
self.AddParameter('EnergyReco_I3Particle',
'I3Particle from which logEnergy is to be drawn',
None)
self.AddParameter('LaputopParamsName',
'LaputopParams from which logS125 is to be drawn only accepted if EnergyReco_I3Particle not provided',
None)
self.AddParameter('RunMode','Options: GeneratePDF / CalcLLHR',None)
self.AddParameter('Output','Name of the output container','IceTopLLHR')
# inputs for RunMode CalcLLHR
self.AddParameter('OutputFileName','',None)
self.AddParameter('BinEdges5D','[logE_edges, cosZen_edges, logQ_edges, signed_logT_edges, logRplusone_edges]',[])
self.AddParameter('DistinctRegionsBinEdges3D',
'Disjoint Regions in Q, T, R PDF. e.g.Unhits/Excluded. [3dEdges1,3dEdges2,..]',
[])
# inputs for RunMode GeneratePDF
self.AddParameter('SigPDFInputFileName',
'Path to input file (Sig) made using GeneratePDF method in the previous run',None)
self.AddParameter('BkgPDFInputFileName',
'Path to input file (Bkg) made using GeneratePDF method in the previous run',None)
self.AddParameter('DecimalsForSanityCheck',
'Consistency checks will compare values rounded to these N decimals.Default:2',2)
self.AddParameter('SubtractEventFromPDF',
'subtract the event from the PDF if it was used for generating the PDF. Default:None',None)
return
def Configure(self):
self.HitsName = self.GetParameter('Hits_I3VectorShieldHitRecord')
self.UnhitsName = self.GetParameter('Unhits_I3VectorShieldHitRecord')
self.ExcludedName = self.GetParameter('Excluded_I3VectorShieldHitRecord')
self.AngularRecoName = self.GetParameter('AngularReco_I3Particle')
self.EnergyRecoName = self.GetParameter('EnergyReco_I3Particle')
self.LaputopParamsName = self.GetParameter('LaputopParamsName')
self.RunMode = self.GetParameter('RunMode')
self.Decimals= self.GetParameter('DecimalsForSanityCheck')
if self.RunMode=='GeneratePDF':
self.OutputName = self.GetParameter('OutputFileName')
self.binedges = self.GetParameter('BinEdges5D')
self.distinct_regions_binedges = self.GetParameter('DistinctRegionsBinEdges3D')
# make sure distinct regions binedges make sense
if len(self.distinct_regions_binedges)==0:
# give the whole region as a distinct single region
self.distinct_regions_binedges = [self.binedges[2:]]
else:
#check that each distinct region binedge is same shape as self.binedges i.e.
for i in self.distinct_regions_binedges:
if np.shape(i)!=np.shape(self.binedges[2:]):
                        print('shape of self.binedges[2:] :',np.shape(self.binedges[2:]))
                        print('shape of self.distinct_regions_binedges',np.shape(self.distinct_regions_binedges))
log_fatal('DistinctRegionBinEdges and BinEdges* not compatible')
#check that joining all distinct regions gives total binedges
check_distinct_regions_add_up_to_full(self.distinct_regions_binedges, self.binedges[2:],decimals=self.Decimals)
self.labels = ['logE', 'cosZ', 'logQ', 'signedlogT', 'logRplusOne']
#creates the self.hist
self._init_hist()
elif self.RunMode=='CalcLLHR':
self.SigPDFInputName = self.GetParameter('SigPDFInputFileName')
self.BkgPDFInputName = self.GetParameter('BkgPDFInputFileName')
# this one should create self.bkg_hist, self.sig_hist, self.binedges, self.labels, self.distinct_regions_binedges
self._load_PDF_from_file()
self.SubtractEventFromPDF= self.GetParameter('SubtractEventFromPDF')
self.objname = self.GetParameter('Output')
return
def Physics(self,frame):
if self.RunMode=='GeneratePDF':
self._GenPDFsPhysics(frame)
elif self.RunMode=='CalcLLHR':
self._CalcLLHRPhysics(frame)
else:
log_fatal('RunMode can only accept one these two inputs: GeneratePDF / CalcLLHR')
self.PushFrame(frame)
return
def Finish(self):
if self.RunMode=='GeneratePDF':
# generate the outputfile. save histogram.
f=tables.open_file(self.OutputName,'w')
f.create_carray('/', 'hist', obj=self.hist,filters=tables.Filters(complib='blosc:lz4hc', complevel=1))
for i in range(len(self.binedges)):
f.create_carray('/', 'binedges_%i'%i,
obj=self.binedges[i],
filters=tables.Filters(complib='blosc:lz4hc',
complevel=1))
for i in range(len(self.distinct_regions_binedges)):
for j in range(len(self.distinct_regions_binedges[0])):
f.create_carray('/', 'region_%i_binedges_%i'%(i,j),
obj=self.distinct_regions_binedges[i][j],
filters=tables.Filters(complib='blosc:lz4hc',
complevel=1))
f.create_carray('/', 'labels', obj=self.labels,filters=tables.Filters(complib='blosc:lz4hc', complevel=1))
f.create_carray('/', 'n_events', obj=[self.n_events],filters=tables.Filters(complib='blosc:lz4hc', complevel=1))
f.close()
return
def _load_PDF_from_file(self):
'''
this part is hard wired for 5 dimensional PDFs
'''
f=tables.open_file(self.SigPDFInputName,'r')
self.sig_hist = f.root.hist[:]
self.binedges = [ f.root.binedges_0[:], f.root.binedges_1[:], f.root.binedges_2[:], f.root.binedges_3[:] , f.root.binedges_4[:]]
self.distinct_regions_binedges = [ ]
for r in range(1):
region_binedges=[]
for i in range(3):
temp=eval('f.root.region_%i_binedges_%i[:]'%(r,i))
region_binedges.append(temp)
self.distinct_regions_binedges.append(region_binedges)
self.labels = f.root.labels[:]
f.close()
f=tables.open_file(self.BkgPDFInputName,'r')
self.bkg_hist = f.root.hist[:]
binedges = [ f.root.binedges_0[:], f.root.binedges_1[:], f.root.binedges_2[:], f.root.binedges_3[:] , f.root.binedges_4[:]]
labels = f.root.labels[:]
f.close()
if np.shape(self.sig_hist)!=np.shape(self.bkg_hist):
            print("sig hist, bkg hist shapes don't match")
            print('sig hist shape',np.shape(self.sig_hist))
            print('bkg hist shape',np.shape(self.bkg_hist))
raise Exception('Inconsistency found')
for i in range(len(binedges)):
are_equal=(np.round(binedges[i],decimals=self.Decimals)==np.round(self.binedges[i],decimals=self.Decimals)).all()
if not are_equal:
                print('sig binedges dim %i'%i, self.binedges[i])
                print('bkg binedges dim %i'%i, binedges[i])
raise Exception('Sig and Bkg binedges are not equal')
if (labels!=self.labels).any():
            print('labels for sig and bkg are not same')
            print('are you sure you are loading correct sig/bkg pdfs?')
return
def _init_hist(self):
histogram_shape= np.array([len(i)-1 for i in self.binedges])
self.hist=np.zeros(histogram_shape)
self.n_events=0
return
def _fill(self,sample):
h,edges=np.histogramdd(sample,self.binedges)
if np.shape(h)!=np.shape(self.hist):
log_fatal('initialized histogram and fill histogram dont match in shape')
self.hist+= h
self.n_events+=1
return
def _GenPDFsPhysics(self,frame):
in_array=self._create_in_array(frame)
self._fill(in_array)
return
def _CalcLLHRPhysics(self,frame):
d={}
d['llh_ratio']= 0.
d['n_extrapolations_sig_PDF'] = 0.
d['n_extrapolations_bkg_PDF'] = 0.
d['llh_sig'] = 0.
d['llh_bkg'] = 0.
d['isGood'] = 0.
# load event information
in_array = self._create_in_array(frame)
logE=in_array[0][0]
coszen=in_array[0][1]
# select Q, T, R dimensions, generate event histogram
in_array = (in_array.T[2:]).T
binedges = self.binedges[2:]
event_hist,temp = np.histogramdd(in_array, binedges)
# check if event logE and coszen lies within range of binedges
if logE>self.binedges[0][-1] or logE<self.binedges[0][0]:
frame.Put(self.objname,dataclasses.I3MapStringDouble(d))
return
if coszen>self.binedges[1][-1] or coszen<self.binedges[1][0]:
frame.Put(self.objname,dataclasses.I3MapStringDouble(d))
return
# find the logE and coszen bins select those bins in sig/bkg pdfs
logEbincenters = np.array((self.binedges[0][1:] + self.binedges[0][:-1] )/2.)
coszenbincenters = np.array((self.binedges[1][1:] + self.binedges[1][:-1] )/2.)
dE = np.absolute(logEbincenters - logE)
Ebin=np.where(np.amin(dE)==dE)[0][0]
dcZ = np.absolute(coszenbincenters - coszen)
cZbin = np.where(np.amin(dcZ)==dcZ)[0][0]
sig_hist = self.sig_hist[Ebin][cZbin]
bkg_hist = self.bkg_hist[Ebin][cZbin]
# subtract the event from the PDF if it was used for generating the PDF
if self.SubtractEventFromPDF:
if self.SubtractEventFromPDF=='Sig':
sig_hist = sig_hist - event_hist
if (sig_hist<0).any():
log_fatal('Event subtraction led to negative values')
if self.SubtractEventFromPDF=='Bkg':
bkg_hist = bkg_hist - event_hist
if (bkg_hist<0).any():
log_fatal('Event subtraction led to negative values')
# normalize histogram, obtain PDFs
sig_pdf = sig_hist/ np.sum(sig_hist)
bkg_pdf = bkg_hist/ np.sum(bkg_hist)
# calculate llh ratio for each region separately and add it up
# separate calculation is done to avoid one region influencing
# extrapolated values of empty pixels in the PDF in another region
llh_map_sig=np.zeros_like(sig_hist)
llh_map_bkg=np.zeros_like(bkg_hist)
d['isGood']=1.
for region_edges in self.distinct_regions_binedges:
# obtain slice vector for the region of the PDF
region_range = [ [i[0],i[-1]] for i in region_edges]
slice_vector= get_slice_vector(binedges,region_range)
temp = log_likelihood_ratio(heatmap1=sig_pdf[slice_vector],
heatmap2=bkg_pdf[slice_vector],
event_hist = event_hist[slice_vector])
d['llh_ratio'] += temp[0]
# all the rest are debugging variables. some will be stored in I3VectorMap.
# not storing any histograms as output. Just numbers.
d['n_extrapolations_sig_PDF'] += temp[1]
d['n_extrapolations_bkg_PDF'] += temp[2]
d['llh_sig'] += temp[5]
d['llh_bkg'] += temp[6]
extrapolated_sig_PDF = temp[3]
extrapolated_bkg_PDF = temp[4]
llh_map_sig[slice_vector]=temp[7]
llh_map_bkg[slice_vector]=temp[8]
frame.Put(self.objname,dataclasses.I3MapStringDouble(d))
return
def _create_in_array(self,frame):
if self.EnergyRecoName:
En = np.log10(frame[self.EnergyRecoName].energy)
elif self.LaputopParamsName:
En = np.log10(frame[self.LaputopParamsName].s125)
else:
            log_fatal('One of EnergyReco_I3Particle or LaputopParamsName needs to be given')
ze = np.cos(frame[self.AngularRecoName].dir.zenith)
hits = frame[self.HitsName]
unhits = frame[self.UnhitsName]
excluded = frame[self.ExcludedName]
#hits_t, hits_q, hits_r = np.array([[signed_log(hit.time_residual),np.log10(hit.charge), log_plus_one(hit.distance)] for hit in hits]).T
hits_t = signed_log(np.array([hit.time_residual for hit in hits]))
hits_q = np.log10(np.array([hit.charge for hit in hits]))
hits_r = log_plus_one(np.array([hit.distance for hit in hits]))
hits_E = np.ones_like(hits_r)*En
hits_z = np.ones_like(hits_r)*ze
#unhits_t, unhits_q, unhits_r = np.array([[signed_log(hit.time_residual),np.log10(hit.charge), log_plus_one(hit.distance)] for hit in unhits]).T
unhits_t = signed_log(np.array([hit.time_residual for hit in unhits]))
unhits_q = np.log10(np.array([hit.charge for hit in unhits]))
unhits_r = log_plus_one(np.array([hit.distance for hit in unhits]))
unhits_E = np.ones_like(unhits_r)*En
unhits_z = np.ones_like(unhits_r)*ze
#excluded_t, excluded_q, excluded_r = np.array([[signed_log(hit.time_residual),np.log10(hit.charge), log_plus_one(hit.distance)] for hit in excluded]).T
excluded_t = signed_log(np.array([hit.time_residual for hit in excluded]))
excluded_q = np.log10(np.array([hit.charge for hit in excluded]))
excluded_r = log_plus_one(np.array([hit.distance for hit in excluded]))
excluded_E = np.ones_like(excluded_r)*En
excluded_z = np.ones_like(excluded_r)*ze
# ready data for entry to 5D hist
t = np.concatenate( (hits_t, unhits_t, excluded_t) )
q = np.concatenate( (hits_q, unhits_q, excluded_q) )
r = np.concatenate( (hits_r, unhits_r, excluded_r) )
E = np.concatenate( (hits_E, unhits_E, excluded_E) )
z = np.concatenate( (hits_z, unhits_z, excluded_z) )
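        # sanity check: IceTop consists of 81 stations with 2 tanks each,
        # so a full event description always covers 162 tanks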
if len(t)!=162 or len(q)!=162 or len(r)!=162:
            print('N_t %s N_q %s N_r %s'%(len(t),len(q),len(r)))
log_fatal('Total Tanks in Event not 162')
if np.isnan(t).any() or np.isnan(q).any() or np.isnan(r).any():
            print('t',t)
            print('q',q)
            print('r',r)
log_warn('signed_time/logq/logr have nans')
in_array=np.vstack([E,z,q,t,r]).T
return in_array
|
{"hexsha": "b10d728b0abce06ab54ee884e51cda372fa59c93", "size": 17767, "ext": "py", "lang": "Python", "max_stars_repo_path": "icetray_version/releases/V00-00/i3module.py", "max_stars_repo_name": "hershalpandya/airshowerclassification_llhratio_test", "max_stars_repo_head_hexsha": "a2a2ce5234c8f455fe56c332ab4fcc65008e9409", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "icetray_version/releases/V00-00/i3module.py", "max_issues_repo_name": "hershalpandya/airshowerclassification_llhratio_test", "max_issues_repo_head_hexsha": "a2a2ce5234c8f455fe56c332ab4fcc65008e9409", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "icetray_version/releases/V00-00/i3module.py", "max_forks_repo_name": "hershalpandya/airshowerclassification_llhratio_test", "max_forks_repo_head_hexsha": "a2a2ce5234c8f455fe56c332ab4fcc65008e9409", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7531486146, "max_line_length": 167, "alphanum_fraction": 0.6149040356, "include": true, "reason": "import numpy", "num_tokens": 4354}
|
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE HeapTest
/*
* @author : Ionesio Junior
*/
#include <boost/test/unit_test.hpp>
#include "../BinaryHeap.hpp"
#include <cstdlib>
#include <iostream>
#include <time.h>
#include <vector>
struct setUp{
BinaryHeap<int> *heap = new BinaryHeap<int>(10);
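	// note: zero-length arrays such as 'empty' below are a GCC/Clang
	// extension rather than standard C++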
int empty[0] = {};
int even[10] = {9,8,7,6,5,4,3,2,1,0};
int odd[9] = {9,8,7,6,5,4,3,2,1};
int repeated[9] = {8,5,8,5,9,5,7,8,5};
int equal[5] = {5,5,5,5,5};
int repeatedOrdered[9] = {5,5,5,5,7,8,8,8,9};
int evenOrdered[10] = {0,1,2,3,4,5,6,7,8,9};
int oddOrdered[9] = {1,2,3,4,5,6,7,8,9};
};
BOOST_FIXTURE_TEST_SUITE(HeapTest,setUp)
BOOST_AUTO_TEST_CASE(testInit){
BOOST_CHECK(true == heap->isEmpty());
BOOST_CHECK(false == heap->isFull());
BOOST_CHECK(NULL == heap->rootElement());
BOOST_CHECK(0 == heap->size());
}
BOOST_AUTO_TEST_CASE(testIsEmpty){
BOOST_CHECK(true == heap->isEmpty());
heap->insert(10);
BOOST_CHECK(false == heap->isEmpty());
heap->extractRoot();
BOOST_CHECK(true == heap->isEmpty());
}
BOOST_AUTO_TEST_CASE(testSize){
BOOST_CHECK(0 == heap->size());
for(int i = 0 ; i < 5;i++){
heap->insert(i);
BOOST_CHECK(i + 1 == heap->size());
}
BOOST_CHECK(5 == heap->size());
for(int i = 10; i < 20 ;i = i + 2){
heap->insert(i);
}
BOOST_CHECK(10 == heap->size());
for(int i = 0 ; i < 5;i++){
heap->extractRoot();
}
BOOST_CHECK(5 == heap->size());
}
BOOST_AUTO_TEST_CASE(testInsert){
BOOST_CHECK(0 == heap->size());
//Test insert repeated elements
heap->insert(5);
BOOST_CHECK(1 == heap->size());
heap->insert(5);
BOOST_CHECK(2 == heap->size());
heap->extractRoot();
heap->extractRoot();
BOOST_CHECK(0 == heap->size());
//Test responsive array
for(int i = 0 ; i < 10;i++){
heap->insert(i);
BOOST_CHECK(i == *heap->rootElement());
}
BOOST_CHECK(true == heap->isFull());
heap->insert(10);
BOOST_CHECK(false == heap->isFull());
BOOST_CHECK(11 == heap->size());
}
BOOST_AUTO_TEST_CASE(testExtractRoot){
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
for(int i = 0; i < 100;i++){
heap->insert(i);
}
BOOST_CHECK(100 == heap->size());
BOOST_CHECK(false == heap->isEmpty());
BOOST_CHECK(99 == *heap->rootElement());
for(int i = 99; i >= 0;i--){
BOOST_CHECK(i == heap->extractRoot());
}
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
for(int i = 0 ; i < 10;i++){
if(i < 5){
heap->insert(10);
}else{
heap->insert(20);
}
}
BOOST_CHECK(10 == heap->size());
for(int i = 0 ; i < 10;i++){
if(i < 5){
BOOST_CHECK(20 == heap->extractRoot());
}else{
BOOST_CHECK(10 == heap->extractRoot());
}
}
}
BOOST_AUTO_TEST_CASE(testRootElement){
BOOST_CHECK(true == heap->isEmpty());
heap->insert(100);
for(int i = 0 ; i < 10;i++){
BOOST_CHECK(100 == *heap->rootElement());
}
BOOST_CHECK(1 == heap->size());
for(int i = 0 ; i < 10;i++){
heap->insert(i);
BOOST_CHECK(100 == *heap->rootElement());
}
for(int i = 0 ; i <= 10;i++){
BOOST_CHECK(*heap->rootElement() == heap->extractRoot());
}
}
BOOST_AUTO_TEST_CASE(testHeapSort){
//Test Even Array
Array<int> result = heap->heapSort(even,10);
for(int i = 0 ; i < result.size;i++){
BOOST_CHECK(evenOrdered[i] == result[i]);
}
//Test Odd Array
result = heap->heapSort(odd,9);
for(int i = 0 ; i < result.size;i++){
BOOST_CHECK(oddOrdered[i] == result[i]);
}
//Test Empty array
result = heap->heapSort(empty,0);
for(int i = 0; i < result.size;i++){
BOOST_CHECK(empty[i] == result[i]);
}
//Test Repeated Array
result = heap->heapSort(repeated,9);
for(int i = 0 ; i < result.size;i++){
BOOST_CHECK(repeatedOrdered[i] == result[i]);
}
//Test equal array
result = heap->heapSort(equal,5);
for(int i = 0 ; i < result.size;i++){
BOOST_CHECK(equal[i] == result[i]);
}
}
BOOST_AUTO_TEST_CASE(testBuildHeap){
//Test even array
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
heap->buildHeap(even,10);
BOOST_CHECK(10 == heap->size());
BOOST_CHECK(false == heap->isEmpty());
for(int i = 9 ; i >= 0;i--){
BOOST_CHECK(i == heap->extractRoot());
}
//Test odd array
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
heap->buildHeap(odd,9);
BOOST_CHECK(9 == heap->size());
BOOST_CHECK(false == heap->isEmpty());
for(int i = 9 ; i > 0;i--){
BOOST_CHECK(i == heap->extractRoot());
}
//Test empty array
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
heap->buildHeap(empty,0);
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
//Test equal array
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
heap->buildHeap(equal,5);
BOOST_CHECK(5 == heap->size());
BOOST_CHECK(false == heap->isEmpty());
for(int i = 5 ; i > 0;i--){
BOOST_CHECK(5 == heap->extractRoot());
}
//Test repeated array
int reverseRepeated[9] = {9,8,8,8,7,5,5,5,5};
BOOST_CHECK(0 == heap->size());
BOOST_CHECK(true == heap->isEmpty());
heap->buildHeap(repeated,9);
BOOST_CHECK(9 == heap->size());
BOOST_CHECK(false == heap->isEmpty());
for(int i = 0 ; i < 9;i++){
BOOST_CHECK(reverseRepeated[i] == heap->extractRoot());
}
}
BOOST_AUTO_TEST_CASE(testException){
BOOST_CHECK_THROW(heap->extractRoot(),HeapUnderflowException);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "6042cfb2741bb9d5300016d81413a233d3ef2c21", "size": 5551, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Data Structures/Heap/C++/Test/test.cpp", "max_stars_repo_name": "Julian-Mentasti/codezilla", "max_stars_repo_head_hexsha": "ca157d75628f68ab01d589267f26d17751a87e86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 147.0, "max_stars_repo_stars_event_min_datetime": "2018-02-27T03:26:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T18:25:33.000Z", "max_issues_repo_path": "Data Structures/Heap/C++/Test/test.cpp", "max_issues_repo_name": "Julian-Mentasti/codezilla", "max_issues_repo_head_hexsha": "ca157d75628f68ab01d589267f26d17751a87e86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 273.0, "max_issues_repo_issues_event_min_datetime": "2018-02-26T18:40:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-31T10:37:44.000Z", "max_forks_repo_path": "Data Structures/Heap/C++/Test/test.cpp", "max_forks_repo_name": "Julian-Mentasti/codezilla", "max_forks_repo_head_hexsha": "ca157d75628f68ab01d589267f26d17751a87e86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 234.0, "max_forks_repo_forks_event_min_datetime": "2018-02-27T03:27:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T08:44:22.000Z", "avg_line_length": 23.4219409283, "max_line_length": 63, "alphanum_fraction": 0.5979102864, "num_tokens": 1660}
|
Require Import Bedrock.Platform.Thread Bedrock.Platform.Arrays8 Bedrock.Platform.MoreArrays Bedrock.Platform.Buffers Bedrock.Platform.Io.
Local Hint Extern 1 (@eq W _ _) => words.
Module Type S.
Parameters globalSched globalSock : W.
Parameter inbuf_size : nat.
Axiom inbuf_size_lower : (inbuf_size >= 2)%nat.
Axiom inbuf_size_upper : (N_of_nat (inbuf_size * 4) < Npow2 32)%N.
Parameters port numWorkers : W.
End S.
Module Make(M : S).
Import M.
Module M'''.
Definition globalSched := M.globalSched.
Open Scope Sep_scope.
Definition globalInv (fs : files) : HProp := Ex fr, globalSock =*> fr * [| fr %in fs |].
End M'''.
Module T := Thread.Make(M''').
Import T M'''.
Export T M'''.
Module MyM.
Definition sched := sched.
Definition globalInv := globalInv.
End MyM.
Ltac unf := unfold MyM.sched, MyM.globalInv, M'''.globalSched, M'''.globalInv in *.
Module MyIo := Io.Make(MyM).
Definition hints : TacPackage.
prepare (materialize_buffer, buffer_split_tagged) buffer_join_tagged.
Defined.
Definition mainS := SPEC reserving 49
PREmain[_] globalSched =?> 1 * globalSock =?> 1 * mallocHeap 0.
Definition handlerS := SPEC reserving 99
Al fs, PREmain[_] sched fs * globalInv fs * mallocHeap 0.
Definition bsize := (inbuf_size * 4)%nat.
Definition m := bimport [[ "buffers"!"bmalloc" @ [bmallocS],
"scheduler"!"init"@ [T.Q''.initS], "scheduler"!"exit" @ [T.Q''.exitS],
"scheduler"!"spawn" @ [T.Q''.spawnS], "scheduler"!"listen" @ [T.Q''.listenS],
"scheduler"!"accept" @ [T.Q''.acceptS], "scheduler"!"close" @ [T.Q''.closeS],
"scheduler"!"read" @ [T.Q''.readS], "io"!"writeAll" @ [MyIo.writeAllS] ]]
bmodule "echo" {{
bfunctionNoRet "handler"("buf", "fr", "n") [handlerS]
"buf" <-- Call "buffers"!"bmalloc"(inbuf_size)
[Al fs, PREmain[V, R] R =?>8 bsize * sched fs * globalInv fs * mallocHeap 0];;
[Al fs, PREmain[V] V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0]
While (0 = 0) {
"fr" <-- Call "scheduler"!"accept"($[globalSock])
[Al fs, PREmain[V, R] [| R %in fs |] * V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0];;
"n" <-- Call "scheduler"!"read"("fr", "buf", bsize)
[Al fs, PREmain[V] [| V "fr" %in fs |] * V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0];;
[Al fs, PREmain[V] [| V "fr" %in fs |] * V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0]
While ("n" <> 0) {
If ("n" <= bsize) {
Call "io"!"writeAll"("fr", "buf", 0, "n")
[Al fs, PREmain[V] [| V "fr" %in fs |] * V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0]
} else {
Skip
};;
"n" <-- Call "scheduler"!"read"("fr", "buf", bsize)
[Al fs, PREmain[V] [| V "fr" %in fs |] * V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0]
};;
Call "scheduler"!"close"("fr")
[Al fs, PREmain[V] V "buf" =?>8 bsize * sched fs * globalInv fs * mallocHeap 0]
}
end with bfunctionNoRet "main"("fr", "x") [mainS]
Init
[Al fs, Al v, PREmain[_] sched fs * globalSock =*> v * mallocHeap 0];;
"fr" <- 0;;
[Al fs, PREmain[_] sched fs * globalSock =?> 1 * mallocHeap 0]
While ("fr" < numWorkers) {
Spawn("echo"!"handler", 100)
[Al fs, PREmain[_] sched fs * globalSock =?> 1 * mallocHeap 0];;
"fr" <- "fr" + 1
};;
"fr" <-- Call "scheduler"!"listen"(port)
[Al fs, Al v, PREmain[_, R] [| R %in fs |] * sched fs * globalSock =*> v * mallocHeap 0];;
globalSock *<- "fr";;
Exit 50
end
}}.
Lemma le_bsize : forall w : W,
w <= natToW bsize
-> (wordToNat w <= bsize)%nat.
intros; pre_nomega;
rewrite wordToNat_natToWord_idempotent in * by apply inbuf_size_upper; assumption.
Qed.
Local Hint Immediate le_bsize.
Lemma inbuf_size_small : (N.of_nat inbuf_size < Npow2 32)%N.
specialize inbuf_size_upper; generalize (Npow2 32); intros; nomega.
Qed.
Hint Rewrite Nat2N.inj_mul N2Nat.inj_mul : N.
Lemma le_inbuf_size : natToW 2 <= natToW inbuf_size.
pre_nomega; rewrite wordToNat_natToWord_idempotent by apply inbuf_size_small;
rewrite wordToNat_natToWord_idempotent by reflexivity; apply inbuf_size_lower.
Qed.
Local Hint Immediate le_inbuf_size.
Lemma roundTrip_inbuf_size : wordToNat (natToW inbuf_size) = inbuf_size.
rewrite wordToNat_natToWord_idempotent by apply inbuf_size_small; auto.
Qed.
Lemma roundTrip_bsize : wordToNat (natToW bsize) = bsize.
rewrite wordToNat_natToWord_idempotent by apply inbuf_size_upper; auto.
Qed.
Hint Rewrite roundTrip_inbuf_size roundTrip_bsize : sepFormula.
Theorem goodSize_bsize : goodSize bsize.
apply inbuf_size_upper.
Qed.
Local Hint Immediate goodSize_bsize.
Ltac t := try solve [ sep unf hints; auto ];
unf; unfold localsInvariantMain; post; evaluate hints; descend;
try match_locals; sep unf hints; auto.
Theorem ok : moduleOk m.
vcgen; abstract t.
Qed.
End Make.
|
{"author": "mit-plv", "repo": "bedrock", "sha": "e3ff3c2cba9976ac4351caaabb4bf7278bb0dcbd", "save_path": "github-repos/coq/mit-plv-bedrock", "path": "github-repos/coq/mit-plv-bedrock/bedrock-e3ff3c2cba9976ac4351caaabb4bf7278bb0dcbd/Bedrock/Platform/tests/EchoServer.v"}
|
[STATEMENT]
lemma register_unit_register[simp]: \<open>is_unit_register U \<Longrightarrow> register U\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_unit_register U \<Longrightarrow> register U
[PROOF STEP]
by (simp add: compatible_def complements_def is_unit_register_def)
|
{"llama_tokens": 95, "file": "Registers_Laws_Complement", "length": 1}
|
from typing import List, Dict, Any, Tuple, Union, Optional
from collections import namedtuple, deque
import torch
import numpy as np
import copy
from ding.torch_utils import Adam, to_device
from ding.rl_utils import v_1step_td_data, v_1step_td_error, get_train_sample
from ding.model import model_wrap
from ding.utils import POLICY_REGISTRY
from ding.utils.data import default_collate, default_decollate
from .base_policy import Policy
from .common_utils import default_preprocess_learn
@POLICY_REGISTRY.register('atoc')
class ATOCPolicy(Policy):
r"""
Overview:
Policy class of the ATOC algorithm.
Interface:
__init__, set_setting, __repr__, state_dict_handle
Property:
learn_mode, collect_mode, eval_mode
"""
config = dict(
# (str) RL policy register name (refer to function "POLICY_REGISTRY").
type='atoc',
# (bool) Whether to use cuda for network.
cuda=False,
# (bool) Whether to use the on-policy training pipeline (behaviour policy and training policy are the same)
on_policy=False,
# (bool) Whether to use priority (priority sampling, IS weight, priority update)
priority=False,
# (bool) Whether to use Importance Sampling weights to correct biased updates. If True, priority must be True.
priority_IS_weight=False,
model=dict(
# (bool) Whether to use the communication module in ATOC; if not, the model degenerates to multi-agent DDPG
communication=True,
# (int) The size of each agent's thought (communication) vector
thought_size=8,
# (int) The number of agents in each communication group
agent_per_group=2,
),
),
learn=dict(
# (bool) Whether to use multi gpu
multi_gpu=False,
# (int) Collect n_sample data, update model n_iteration time
update_per_collect=5,
# (int) The number of data for a train iteration
batch_size=64,
# (float) Gradient-descent step size of actor
learning_rate_actor=0.001,
# (float) Gradient-descent step size of critic
learning_rate_critic=0.001,
# ==============================================================
# The following configs are algorithm-specific
# ==============================================================
# (float) Target network update weight, theta * new_w + (1 - theta) * old_w, defaults in [0, 0.1]
target_theta=0.005,
# (float) Discount factor for future reward, defaults in [0, 1]
discount_factor=0.99,
# (bool) Whether to use the communication module in ATOC; if not, the model degenerates to multi-agent DDPG
communication=True,
# (int) How often the actor is updated: once every ``actor_update_freq`` critic updates
actor_update_freq=1,
# (bool) Whether to add noise to the target policy's action output during learning (target policy smoothing)
noise=True,
# (float) The std of noise distribution for target policy smooth
noise_sigma=0.15,
# (float, float) The minimum and maximum value of noise
noise_range=dict(
min=-0.5,
max=0.5,
),
# (bool) Whether to use reward batch norm in the total batch
reward_batch_norm=False,
ignore_done=False,
),
collect=dict(
# (int) Collect n_sample data, update model n_iteration time
# n_sample=64,
# (int) Unroll length of a train iteration(gradient update step)
unroll_len=1,
# ==============================================================
# The following configs are algorithm-specific
# ==============================================================
# (float) The std of noise distribution for exploration
noise_sigma=0.4,
),
eval=dict(),
other=dict(
replay_buffer=dict(
# (int) The max size of replay buffer
replay_buffer_size=100000,
# (int) The max reuse count of each datum; once its use count exceeds this value, the datum is removed
max_use=10,
),
),
)
def _init_learn(self) -> None:
r"""
Overview:
Learn mode init method. Called by ``self.__init__``.
Init actor and critic optimizers, algorithm config, main and target models.
"""
self._priority = self._cfg.priority
self._priority_IS_weight = self._cfg.priority_IS_weight
assert not self._priority and not self._priority_IS_weight
# algorithm config
self._communication = self._cfg.learn.communication
self._gamma = self._cfg.learn.discount_factor
self._actor_update_freq = self._cfg.learn.actor_update_freq
# actor and critic optimizer
self._optimizer_actor = Adam(
self._model.actor.parameters(),
lr=self._cfg.learn.learning_rate_actor,
)
self._optimizer_critic = Adam(
self._model.critic.parameters(),
lr=self._cfg.learn.learning_rate_critic,
)
if self._communication:
self._optimizer_actor_attention = Adam(
self._model.actor.attention.parameters(),
lr=self._cfg.learn.learning_rate_actor,
)
self._reward_batch_norm = self._cfg.learn.reward_batch_norm
# main and target models
self._target_model = copy.deepcopy(self._model)
self._target_model = model_wrap(
self._target_model,
wrapper_name='target',
update_type='momentum',
update_kwargs={'theta': self._cfg.learn.target_theta}
)
if self._cfg.learn.noise:
self._target_model = model_wrap(
self._target_model,
wrapper_name='action_noise',
noise_type='gauss',
noise_kwargs={
'mu': 0.0,
'sigma': self._cfg.learn.noise_sigma
},
noise_range=self._cfg.learn.noise_range
)
self._learn_model = model_wrap(self._model, wrapper_name='base')
self._learn_model.reset()
self._target_model.reset()
self._forward_learn_cnt = 0 # count iterations
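# Note (added for clarity): the 'target' wrapper above applies a soft
# (momentum) update, target_w <- theta * learn_w + (1 - theta) * target_w,
# with theta = self._cfg.learn.target_theta; the update is triggered by
# self._target_model.update(...) at the end of _forward_learn.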
def _forward_learn(self, data: dict) -> Dict[str, Any]:
r"""
Overview:
Forward and backward function of learn mode.
Arguments:
- data (:obj:`dict`): Dict type data, including at least ['obs', 'action', 'reward', 'next_obs']
Returns:
- info_dict (:obj:`Dict[str, Any]`): Including at least actor and critic lr, different losses.
"""
loss_dict = {}
data = default_preprocess_learn(data, ignore_done=self._cfg.learn.ignore_done, use_nstep=False)
if self._cuda:
data = to_device(data, self._device)
# ====================
# critic learn forward
# ====================
self._learn_model.train()
self._target_model.train()
next_obs = data['next_obs']
reward = data['reward']
if self._reward_batch_norm:
reward = (reward - reward.mean()) / (reward.std() + 1e-8)
# current q value
q_value = self._learn_model.forward(data, mode='compute_critic')['q_value']
# target q value. SARSA: first predict next action, then calculate next q value
with torch.no_grad():
next_action = self._target_model.forward(next_obs, mode='compute_actor')['action']
next_data = {'obs': next_obs, 'action': next_action}
target_q_value = self._target_model.forward(next_data, mode='compute_critic')['q_value']
td_data = v_1step_td_data(q_value.mean(-1), target_q_value.mean(-1), reward, data['done'], data['weight'])
critic_loss, td_error_per_sample = v_1step_td_error(td_data, self._gamma)
loss_dict['critic_loss'] = critic_loss
# ================
# critic update
# ================
self._optimizer_critic.zero_grad()
critic_loss.backward()
self._optimizer_critic.step()
# ===============================
# actor learn forward and update
# ===============================
# actor updates every ``self._actor_update_freq`` iters
if (self._forward_learn_cnt + 1) % self._actor_update_freq == 0:
if self._communication:
output = self._learn_model.forward(data['obs'], mode='compute_actor', get_delta_q=False)
output['delta_q'] = data['delta_q']
attention_loss = self._learn_model.forward(output, mode='optimize_actor_attention')['loss']
loss_dict['attention_loss'] = attention_loss
self._optimizer_actor_attention.zero_grad()
attention_loss.backward()
self._optimizer_actor_attention.step()
output = self._learn_model.forward(data['obs'], mode='compute_actor', get_delta_q=False)
critic_input = {'obs': data['obs'], 'action': output['action']}
actor_loss = -self._learn_model.forward(critic_input, mode='compute_critic')['q_value'].mean()
loss_dict['actor_loss'] = actor_loss
# actor update
self._optimizer_actor.zero_grad()
actor_loss.backward()
self._optimizer_actor.step()
# =============
# after update
# =============
loss_dict['total_loss'] = sum(loss_dict.values())
self._forward_learn_cnt += 1
self._target_model.update(self._learn_model.state_dict())
return {
'cur_lr_actor': self._optimizer_actor.defaults['lr'],
'cur_lr_critic': self._optimizer_critic.defaults['lr'],
'priority': td_error_per_sample.abs().tolist(),
'q_value': q_value.mean().item(),
**loss_dict,
}
def _state_dict_learn(self) -> Dict[str, Any]:
return {
'model': self._learn_model.state_dict(),
'optimizer_actor': self._optimizer_actor.state_dict(),
'optimizer_critic': self._optimizer_critic.state_dict(),
'optimize_actor_attention': self._optimizer_actor_attention.state_dict(),
}
def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:
self._learn_model.load_state_dict(state_dict['model'])
self._optimizer_actor.load_state_dict(state_dict['optimizer_actor'])
self._optimizer_critic.load_state_dict(state_dict['optimizer_critic'])
self._optimizer_actor_attention.load_state_dict(state_dict['optimize_actor_attention'])
def _init_collect(self) -> None:
r"""
Overview:
Collect mode init method. Called by ``self.__init__``.
Init traj and unroll length, collect model.
"""
self._unroll_len = self._cfg.collect.unroll_len
# collect model
self._collect_model = model_wrap(
self._model,
wrapper_name='action_noise',
noise_type='gauss',
noise_kwargs={
'mu': 0.0,
'sigma': self._cfg.collect.noise_sigma
},
noise_range=None, # no noise clip in actor
)
self._collect_model.reset()
def _forward_collect(self, data: dict) -> dict:
r"""
Overview:
Forward function of collect mode.
Arguments:
- data (:obj:`dict`): Dict type data, including at least ['obs'].
Returns:
- output (:obj:`dict`): Dict type data, including at least inferred action according to input obs.
"""
data_id = list(data.keys())
data = default_collate(list(data.values()))
if self._cuda:
data = to_device(data, self._device)
self._collect_model.eval()
with torch.no_grad():
output = self._collect_model.forward(data, mode='compute_actor', get_delta_q=True)
if self._cuda:
output = to_device(output, 'cpu')
output = default_decollate(output)
return {i: d for i, d in zip(data_id, output)}
def _process_transition(self, obs: Any, model_output: dict, timestep: namedtuple) -> Dict[str, Any]:
r"""
Overview:
Generate dict type transition data from inputs.
Arguments:
- obs (:obj:`Any`): Env observation
- model_output (:obj:`dict`): Output of collect model, including at least ['action']
- timestep (:obj:`namedtuple`): Output after env step, including at least ['obs', 'reward', 'done'] \
(here 'obs' indicates obs after env step, i.e. next_obs).
Return:
- transition (:obj:`Dict[str, Any]`): Dict type transition data.
"""
if self._communication:
transition = {
'obs': obs,
'next_obs': timestep.obs,
'action': model_output['action'],
'delta_q': model_output['delta_q'],
'reward': timestep.reward,
'done': timestep.done,
}
else:
transition = {
'obs': obs,
'next_obs': timestep.obs,
'action': model_output['action'],
'reward': timestep.reward,
'done': timestep.done,
}
return transition
def _get_train_sample(self, data: deque) -> Union[None, List[Any]]:
if self._communication:
delta_q_batch = [d['delta_q'] for d in data]
delta_min = torch.stack(delta_q_batch).min()
delta_max = torch.stack(delta_q_batch).max()
for i in range(len(data)):
data[i]['delta_q'] = (data[i]['delta_q'] - delta_min) / (delta_max - delta_min + 1e-8)
return get_train_sample(data, self._unroll_len)
def _init_eval(self) -> None:
r"""
Overview:
Evaluate mode init method. Called by ``self.__init__``.
Init eval model. Unlike learn and collect model, eval model does not need noise.
"""
self._eval_model = model_wrap(self._model, wrapper_name='base')
self._eval_model.reset()
def _forward_eval(self, data: dict) -> dict:
r"""
Overview:
Forward function of collect mode, similar to ``self._forward_collect``.
Arguments:
- data (:obj:`dict`): Dict type data, including at least ['obs'].
Returns:
- output (:obj:`dict`): Dict type data, including at least inferred action according to input obs.
"""
data_id = list(data.keys())
data = default_collate(list(data.values()))
if self._cuda:
data = to_device(data, self._device)
self._eval_model.eval()
with torch.no_grad():
output = self._eval_model.forward(data, mode='compute_actor')
if self._cuda:
output = to_device(output, 'cpu')
output = default_decollate(output)
return {i: d for i, d in zip(data_id, output)}
def default_model(self) -> Tuple[str, List[str]]:
return 'atoc', ['ding.model.template.atoc']
def _monitor_vars_learn(self) -> List[str]:
r"""
Overview:
Return the names of the variables to be monitored.
Returns:
- vars (:obj:`List[str]`): Variables' name list.
"""
return [
'cur_lr_actor',
'cur_lr_critic',
'critic_loss',
'actor_loss',
'attention_loss',
'total_loss',
'q_value',
]
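# --- Illustrative sketch (added; not part of the original policy) ---
# The delta_q min-max normalization in ``_get_train_sample`` behaves like:
#   delta_q_batch = [d['delta_q'] for d in data]
#   lo, hi = torch.stack(delta_q_batch).min(), torch.stack(delta_q_batch).max()
#   d['delta_q'] = (d['delta_q'] - lo) / (hi - lo + 1e-8)   # each in [0, 1]
# e.g. the arbitrary values (0.2, 0.5, 1.1) map to (0.0, ~0.333, 1.0).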
|
{"hexsha": "e08f23900694a5cf011eab8c8c94a9f5911fa342", "size": 15813, "ext": "py", "lang": "Python", "max_stars_repo_path": "ding/policy/atoc.py", "max_stars_repo_name": "uuid0000/DI-engine", "max_stars_repo_head_hexsha": "cc2713fa01e5288bae21cfeb595729d665e092d1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-13T02:56:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-13T02:56:34.000Z", "max_issues_repo_path": "ding/policy/atoc.py", "max_issues_repo_name": "uuid0000/DI-engine", "max_issues_repo_head_hexsha": "cc2713fa01e5288bae21cfeb595729d665e092d1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ding/policy/atoc.py", "max_forks_repo_name": "uuid0000/DI-engine", "max_forks_repo_head_hexsha": "cc2713fa01e5288bae21cfeb595729d665e092d1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0558510638, "max_line_length": 114, "alphanum_fraction": 0.5738316575, "include": true, "reason": "import numpy", "num_tokens": 3343}
|
"""Class to train a PyTorch model"""
import os
import json
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from rssnet.utils.functions import normalize, define_loss, get_transformations
from rssnet.utils.tensorboard_visualizer import TensorboardVisualizer
from rssnet.loaders.dataloaders import MultiFrameCarradaDataset
from rssnet.learners.tester import Tester
class Model(nn.Module):
"""Class to train a pytorch model
PARAMETERS
----------
net: Pytorch model
data: dict
Contains the config file for training, the dict with necessary paths
and the dataloaders
"""
def __init__(self, net, data):
super().__init__()
self.net = net
self.cfg = data['cfg']
self.paths = data['paths']
self.dataloaders = data['dataloaders']
self.signal_type = self.cfg['signal_type']
self.process_signal = self.cfg['process_signal']
self.annot_type = self.cfg['annot_type']
self.w_size = self.cfg['w_size']
self.h_size = self.cfg['h_size']
self.batch_size = self.cfg['batch_size']
self.nb_epochs = self.cfg['nb_epochs']
self.lr = self.cfg['lr']
self.lr_step = self.cfg['lr_step']
self.loss_step = self.cfg['loss_step']
self.val_step = self.cfg['val_step']
self.viz_step = self.cfg['viz_step']
self.torch_seed = self.cfg['torch_seed']
self.numpy_seed = self.cfg['numpy_seed']
self.nb_classes = self.cfg['nb_classes']
self.device = self.cfg['device']
self.custom_loss = self.cfg['custom_loss']
self.comments = self.cfg['comments']
self.n_input_ch = self.cfg['nb_input_channels']
self.transform_names = self.cfg['transformations'].split(',')
self.norm_type = self.cfg['norm_type']
self.writer = SummaryWriter(self.paths['writer'])
self.visualizer = TensorboardVisualizer(self.writer)
self.tester = Tester(self.cfg, self.visualizer)
self.results = dict()
def train(self):
"""Method to train the model."""
self.writer.add_text('Comments', self.comments)
train_loader, val_loader, test_loader = self.dataloaders
transformations = get_transformations(self.transform_names,
sizes=(self.w_size, self.h_size))
self._set_seeds()
self.net.apply(self._init_weights)
running_losses = list()
criterion = define_loss(self.signal_type, self.custom_loss, self.device)
optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
scheduler = ExponentialLR(optimizer, gamma=0.9)
iteration = 0
best_val_prec = 0
self.net.to(self.device)
for epoch in range(self.nb_epochs):
if epoch % self.lr_step == 0 and epoch != 0:
scheduler.step()
for _, sequence_data in enumerate(train_loader):
seq_name, seq = sequence_data
path_to_frames = os.path.join(self.paths['carrada'], seq_name[0])
frame_dataloader = DataLoader(MultiFrameCarradaDataset(seq,
self.annot_type,
self.signal_type,
path_to_frames,
self.process_signal,
self.n_input_ch,
transformations),
shuffle=False,
batch_size=self.batch_size,
num_workers=4)
for _, frame in enumerate(frame_dataloader):
data = frame['matrix'].to(self.device).float()
mask = frame['mask'].to(self.device).float()
data = normalize(data, self.signal_type, self.paths['carrada'],
norm_type=self.norm_type)
optimizer.zero_grad()
outputs = self.net(data).to(self.device)
mask = F.interpolate(mask, (self.w_size, self.h_size))
loss = criterion(outputs, torch.argmax(mask, axis=1))
loss.backward()
optimizer.step()
running_losses.append(loss.data.cpu().numpy()[()])
if iteration % self.loss_step == 0:
train_loss = np.mean(running_losses)
print('[Epoch {}/{}, iter {}]: '
'train loss {}'.format(epoch+1,
self.nb_epochs,
iteration,
train_loss))
self.visualizer.update_train_loss(train_loss, iteration)
running_losses = list()
self.visualizer.update_learning_rate(scheduler.get_lr()[0], iteration)
if iteration % self.val_step == 0 and iteration > 0:
if iteration % self.viz_step == 0 and iteration > 0:
val_metrics = self.tester.predict(self.net, val_loader, iteration)
else:
val_metrics = self.tester.predict(self.net, val_loader)
self.visualizer.update_val_metrics(val_metrics, iteration)
print('[Epoch {}/{}] Validation loss: {}'.format(epoch+1,
self.nb_epochs,
val_metrics['loss']))
print('[Epoch {}/{}] Validation Pixel Prec: {}'.format(epoch+1,
self.nb_epochs,
val_metrics['prec']))
print('[Epoch {}/{}] Validation Pixel Prec by class: '
'{}'.format(epoch+1,
self.nb_epochs,
val_metrics['prec_by_class']))
if val_metrics['prec'] > best_val_prec and iteration > 0:
best_val_prec = val_metrics['prec']
test_metrics = self.tester.predict(self.net, test_loader)
print('[Epoch {}/{}] Test loss: {}'.format(epoch+1,
self.nb_epochs,
test_metrics['loss']))
print('[Epoch {}/{}] Test Pixel Prec: {}'.format(epoch+1,
self.nb_epochs,
test_metrics['prec']))
print('[Epoch {}/{}] Test Pixel Prec by class: '
'{}'.format(epoch+1,
self.nb_epochs,
test_metrics['prec_by_class']))
self.results['train_loss'] = train_loss.item()
self.results['val_metrics'] = val_metrics
self.results['test_metrics'] = test_metrics
self._save_results()
self.net.train() # Train mode after evaluation process
iteration += 1
self.writer.close()
def _init_weights(self, m):
"""Method to initialize weights"""
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.)
elif isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
elif isinstance(m, nn.BatchNorm2d):
nn.init.uniform_(m.weight, 0., 1.)
nn.init.constant_(m.bias, 0.)
def _save_results(self):
"""Method to save trained model and results"""
with open(os.path.join(self.paths['results'], 'results.json'), "w") as fp:
json.dump(self.results, fp)
torch.save(self.net.state_dict(),
os.path.join(self.paths['results'],
'model.pt'))
def _set_seeds(self):
"""Method to fix different seeds"""
torch.cuda.manual_seed_all(self.torch_seed)
torch.manual_seed(self.torch_seed)
np.random.seed(self.numpy_seed)
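# --- Hypothetical usage sketch (added; names and config contents are
# assumed, not taken from this file) ---
# data = {'cfg': cfg, 'paths': paths, 'dataloaders': (train_ldr, val_ldr, test_ldr)}
# model = Model(net=my_segmentation_net, data=data)
# model.train()  # runs the loop above: weight init, per-epoch LR decay,
#                # periodic validation, and checkpointing on best val prec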
|
{"hexsha": "68db89e9d4252a18a5d77cca4241a9fdcbfc8dda", "size": 9255, "ext": "py", "lang": "Python", "max_stars_repo_path": "rssnet/learners/model.py", "max_stars_repo_name": "ArthurOuaknine/RSS-Net", "max_stars_repo_head_hexsha": "95e5f3df9f0fe4af7d37ff28f786141e48f14937", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-12T14:57:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T13:38:00.000Z", "max_issues_repo_path": "rssnet/learners/model.py", "max_issues_repo_name": "ArthurOuaknine/RSS-Net", "max_issues_repo_head_hexsha": "95e5f3df9f0fe4af7d37ff28f786141e48f14937", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rssnet/learners/model.py", "max_forks_repo_name": "ArthurOuaknine/RSS-Net", "max_forks_repo_head_hexsha": "95e5f3df9f0fe4af7d37ff28f786141e48f14937", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.8516483516, "max_line_length": 100, "alphanum_fraction": 0.4841707185, "include": true, "reason": "import numpy", "num_tokens": 1590}
|
import os
import pandas as pd
import torch
import torch.nn as nn
from PIL import Image
from torch.utils.data import Dataset
from torchvision import models, transforms
from tqdm import tqdm
import numpy as np
import pickle
data_dir = 'images'
metadata_fn = "metadata.json"
features_dir = "features"
features_file = os.path.join(features_dir, "pytorch_rn50.pkl")
featurize_images = True
device = torch.device("cuda:0")
os.makedirs(data_dir, exist_ok=True)
os.makedirs(features_dir, exist_ok=True)
if featurize_images:
class ArtDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, metadata_json, image_dir, transform):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.metadata = pd.read_json(metadata_json, lines=True)
self.image_dir = image_dir
self.transform = transform
def __len__(self):
return len(self.metadata)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
metadata = self.metadata.iloc[idx]
with open(os.path.join(self.image_dir, metadata["id"] + ".jpg"), "rb") as f:
image = Image.open(f).convert("RGB")
return self.transform(image), metadata["id"]
data_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
dataset = ArtDataset(metadata_fn, data_dir, data_transform)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=4)
dataset_size = len(dataset)
# Build a ResNet-50 feature extractor (the final fc layer is cut off below)
model = models.resnet50(pretrained=True)
model.eval()
model.to(device)
cut_model = nn.Sequential(*list(model.children())[:-1])
all_outputs = []
all_ids = []
for i, (inputs, ids) in enumerate(tqdm(data_loader)):
inputs = inputs.to(device)
outputs = torch.squeeze(cut_model(inputs)).detach().cpu().numpy()
all_outputs.append(outputs)
all_ids.append(list(ids))
all_outputs = np.concatenate(all_outputs, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
with open(features_file, "wb+") as f:
pickle.dump((all_outputs, all_ids), f)
with open(features_file, "rb") as f:
with torch.no_grad():
(all_outputs, all_ids) = pickle.load(f)
all_urls = np.array(pd.read_json(metadata_fn, lines=True).loc[:, "Thumbnail_Url"])
features = torch.from_numpy(all_outputs).float().to("cpu:0")
features = features / torch.sqrt(torch.sum(features ** 2, dim=1, keepdim=True))
features = features.to(device)
indices = torch.arange(0, features.shape[0]).to(device)
print("loaded features")
metadata = pd.read_json(metadata_fn, lines=True)
culture_arr = np.array(metadata["Culture"])
cultures = metadata.groupby("Culture").count()["id"].sort_values(ascending=False).index.to_list()
media_arr = np.array(metadata["Classification"])
media = metadata.groupby("Classification").count()["id"].sort_values(ascending=False).index.to_list()
ids = np.array(metadata["id"])
masks = {"culture": {}, "medium": {}}
for culture in cultures:
masks["culture"][culture] = torch.from_numpy(culture_arr == culture).to(device)
for medium in media:
masks["medium"][medium] = torch.from_numpy(media_arr == medium).to(device)
all_matches = []
for i, row in tqdm(metadata.iterrows()):
feature = features[i]
matches = {"culture": {}, "medium": {}}
all_dists = torch.sum(features * feature, dim=1).to(device)
for culture in cultures:
selected_indicies = indicies[masks["culture"][culture]]
k = min(10, selected_indicies.shape[0])
dists, inds = torch.topk(all_dists[selected_indicies], k, sorted=True)
matches["culture"][culture] = ids[selected_indicies[inds].cpu().numpy()]
for medium in media:
selected_indicies = indicies[masks["medium"][medium]]
k = min(10, selected_indicies.shape[0])
dists, inds = torch.topk(all_dists[selected_indicies], k, sorted=True)
matches["medium"][medium] = ids[selected_indicies[inds].cpu().numpy()]
all_matches.append(matches)
metadata["matches"] = all_matches
metadata.to_json("results/metadata_enriched.json")
print("here")
|
{"hexsha": "c8b87d68a0ad1180811e4607928f9318d3f103ab", "size": 4848, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_prep/featurize_and_match.py", "max_stars_repo_name": "microsoft/art", "max_stars_repo_head_hexsha": "3ea0616a449cc1c03ee49f1777ce6b331c520b57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-07-29T17:02:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:43:55.000Z", "max_issues_repo_path": "data_prep/featurize_and_match.py", "max_issues_repo_name": "microsoft/art", "max_issues_repo_head_hexsha": "3ea0616a449cc1c03ee49f1777ce6b331c520b57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-08-13T20:11:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-23T21:58:52.000Z", "max_forks_repo_path": "data_prep/featurize_and_match.py", "max_forks_repo_name": "microsoft/art", "max_forks_repo_head_hexsha": "3ea0616a449cc1c03ee49f1777ce6b331c520b57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2020-07-30T17:13:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-10T08:15:06.000Z", "avg_line_length": 37.5813953488, "max_line_length": 109, "alphanum_fraction": 0.6243811881, "include": true, "reason": "import numpy", "num_tokens": 1081}
|
% Written by:
% * Joachim Trescher
% * Hans de Vreught
\section{Types}
In \Booster\ a {\em type} is specified by an expression in
\Language{\NT{Type}} and represents a set of values. \Booster\ uses
structural type equivalence: two types are the same if their
definitions become the same when expanded; i.e.\ when all constant
expressions are replaced by their values and all type names are
replaced by their definitions.
Every expression has a statically determined type, which contains
every value that the expression can produce. The type of a designator
is the type of the variable it produces.
{\em Assignability} and {\em type compatibility} are defined in terms
of a syntactically specified inclusion relation on types. In the
following we discuss type constructors and type inclusion rules in more
detail.
\subsection*{Intrinsic Types}
There are six intrinsic types which are denoted by the following
predeclared identifiers.
\begin{tabular}{ll}
\hlf
\T{BOOLEAN} & the enumeration $\{$ {\sf true, false} $\}$\\
\T{NATURAL} & all natural values represented by the implementation\\
\T{INTEGER} & all integers represented by the implementation\\
\T{REAL} & all real values represented by the implementation\\
\T{COMPLEX} & all complex values represented by the implementation\\
\T{STRING} & the elements of \Language{\TC{StringLiteral}}\\
\hlf
\end{tabular}
\subsection*{Shape and view types}
A shape type specifies an indexed collection of component variables,
called the {\em elements} of the shape. A shape type declaration has
the syntactical form:
\begin{frag}
TYPE S = SHAPE \{ E \} OF BaseType;
\end{frag}
\noindent where {\tt E} is a constant expression of type natural and
{\tt BaseType} is a type specification.
The indices are the natural values in the range {\tt [0..E-1]}, where
the value of {\tt E} is determined at compile time and is called the
length of the shape. The elements of a shape all have the same size
and the same type, called the {\em base type}.
An expression of the syntactical form
\begin{frag}
SHAPE \{<E$_{\tt 0}$> \verb'#' <E$_{\tt 1}$> \verb'#' \ldots \verb'#'
<E$_{\tt n}$>\} OF <T>;
\end{frag}
\noindent is understood as an abbreviation of
\begin{frag}
SHAPE \{<E$_{\tt 0}$>\} OF\\
\> SHAPE \{<E$_{\tt 1}$>\} OF\\
\>\> \dots\\
\>\>\> SHAPE \{<E$_{\tt n}$>\} OF <T>;
\end{frag}
\noindent The form of a shape is the sequence of its lengths in each
dimension. More precisely, the form of a shape is its length followed
by the form of its base type. The length of the form of a shape is
called its {\em rank}.
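For instance, under the (purely illustrative) declaration
\begin{frag}
TYPE Matrix = SHAPE \{3 \verb'#' 4\} OF REAL;
\end{frag}
\noindent the form of {\tt Matrix} is $\langle 3, 4 \rangle$ and its rank
is $2$.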
In the declaration of formal parameters of functions and procedures,
\Booster\ allows the length of a dimension to be specified by a
\texttt{CONST <Id>} declaration. The corresponding
dimension of such a parameter is arbitrary but does not change during
the execution of a procedure call. The {\em generic} constant
\texttt{<Id>} may be used as a constant in the body of the
corresponding function or procedure. Each function or procedure call
initializes the generic constant \texttt{<Id>} with the length of the
corresponding dimension of the actual parameter.
A view type is a ``virtual'' shape type whose rank and (intrinsic)
base type are determined by a view type specification. Unlike shapes,
a view type specification may specify the length of one or more
dimensions to be arbitrary (and possibly changing dynamically) using
the operator \T{*}. Informally, views may be considered as a bounded set
of references that point to elements (whose type is an intrinsic
type) of a single variable, thus ``viewing'' the elements of this
variable through a different indexing scheme. However, the declaration
of a view will not cause the allocation of computer memory. Therefore,
during the compilation process views will be eliminated by replacing
accesses to them by calls to the appropriate access function.
A view type declaration has the syntactical form
\begin{frag}
TYPE V = VIEW \{<E$_{\tt 0}$> \verb'#' <E$_{\tt 1}$> \verb'#'
\ldots \verb'#' <E$_{\tt n}$>\} OF IntrinsicType;
\end{frag}
\noindent where the form of \textsf{V} is defined by the expressions
{\tt <E$_{\tt 0}$>} \ldots {\tt <E$_{\tt n}$>}. And {\tt
IntrinsicType} is an intrinsic type.
\subsection*{Defined-by and Uses relations}
The type declarations visible in a scope $S$ define two relations on
type identifiers. The {\em defined-by} relation ``$\succ$'' is used to
track down type equivalences, and is defined as follows:
A type identifier $Id_2$ is defined by a type identifier $Id_1$, or
$Id_2 \succ Id_1$, iff
\begin{itemize}
\item there is a type declaration of the form {\tt <Id$_{\tt 2}$> =
<Id$_{\tt 1}$>} visible in $S$, or
\item there exists a type identifier $Id_3$ in $S$ such that,
\(
Id_2 \succ Id_3 \wedge Id_3 \succ Id_1
\)
\end{itemize}
The {\em uses} relation ``$\gg$'' is used to check which type is used
in the type declaration of another type identifier. A type identifier
$Id_1$ uses a type identifier $Id_2$, or $Id_1 \gg Id_2$, iff
\begin{itemize}
\item in $S$ a type declaration of the following form is visible
\begin{frag}
<Id$_{\tt 1}$> =
SHAPE <CardList> OF <Id$_{\tt 2}$>;
\end{frag}
\item there exists a type identifier $Id_3$ such that,
\(
Id_1 \gg Id_3 \wedge Id_3 \gg Id_2
\)
\end{itemize}
\noindent The identifiers of a given scope and name space must be
well-defined. Therefore, we require the ordering induced by the union
of the relations $\succ$ and $\gg$ to be a strict partial ordering
(i.e.\ there are no cycles in the defined-by and uses relations). The
minimal elements of this induced ordering are intrinsic types (i.e.\
all \Booster\ types are defined in terms of intrinsic types and type
constructors). Moreover, there is no identifier $Id$ such that \(
\mbox{\em IntrinsicType} \gg Id \; \vee \; \mbox{\em IntrinsicType}
\succ Id \) for any intrinsic type (i.e.\ intrinsic types may not be
redefined).
We define the set of all types {\em used} by a variable declaration as
the set of type identifiers {\small \tt <Id'>} such that
\begin{itemize}
\item the variable declaration has the form {\tt "<IdList> : SHAPE
<CardList> OF <Id'>;" }
\item the variable declaration has the form { \tt "<IdList> : <Id'>;" }
\item there exists an {\small \tt <Id>} such that {\small \tt <Id>}
$\gg$ {\small \tt <Id'>} and one of the above conditions holds for
{\small \tt <Id>}.
\end{itemize}
\subsection*{Relations on types}
\noindent In order to define some relations on types we first
introduce two functions on \Booster\ types. The function {\em form}
computes the form of a member of a type and represents it as a
sequence. In the result of the function \form\ a definite length is
represented by a natural number, the generic constants in the
declaration of a formal parameter are represented by a {\em generic
variable}, and an indefinite dimension of a view type is represented
by a {\em free variable}. The base type function \bt\ yields the base
type of a type identifier by first expanding a type (i.e.\ recursively
replacing all identifiers by their definition) and then returning the
base type of the expanded type. Thus the result of an application of
the function \bt\ is always an intrinsic type. More formally:
Let \genvar\ and \freevar\ be infinite sets of generic and free
variables, respectively, such that $\freevar \cap \genvar =
\emptyset$. Let $\Forms ~ \widehat{=} ~ \mbox{\bf seq}(\mbox{\tt
NATURAL} \cup \genvar \cup \freevar)$, and let the operator $\concat$
denote the concatenation of sequences. In the following definitions
the identifiers $F$ and $G$ denote a fresh free or generic variable,
respecitvely, $C$ and $C'$ denote an element of
$\Language{\NT{CardinalityList}}$, $c$ denotes an element of
$\Language{\NT{Cardinality}}$, $E$ denotes an element of
$\Language{\NT{Expressions}}$, and {\em Id} denotes an element of
$\Language{\TC{Identifier}}$. Given the set \TypeDefSet\ we define:
\[
\begin{array}{rcl}
\multicolumn{3}{c}{
\form: \Language{\NT{Type}} \rightarrow \Forms}\\
\hlf
\form(T) & \widehat{=} &
\left\{\begin{array}{ll}
\langle \rangle
& \mbox{if } T = \T{STRING} \vee\\
& \mbox{if } T = \T{BOOLEAN} \vee\\
& \mbox{if } T = \T{NATURAL} \vee\\
& \mbox{if } T = \T{INTEGER} \vee\\
& \mbox{if } T = \T{REAL} \vee\\
& \mbox{if } T = \T{COMPLEX}\\
\form_c(C)\concat\form(T')
& \mbox{if } T = \T{SHAPE}\ \T{\{}\ C\ \T{\}}\ \T{OF}\ T' \\
\form_c(C)\concat\form(T')
& \mbox{if } T = \T{VIEW}\ \T{\{}\ C\ \T{\}}\ \T{OF}\ T' \\
\form(T')
& \mbox{if } T' \in \Language{\NT{Type}}\ \mbox{for some } (T,
T') \in \TypeDefSet \\
\end{array}
\right.\\
\end{array}
\]
\noindent
The cardinality function $\form_{c}: \Language{\NT{CardinalityList}}
\rightarrow \Forms$, auxiliary to \form, is defined as follows:
\[
\begin{array}{rcl}
\form_{c}(C) & \widehat{=} & \left\{
\begin{array}{ll}
\langle v(E)\rangle
& \mbox{if } C = Id\ \T{:}\ E \\
& \mbox{or } C = E \\
\langle F \rangle
& \mbox{if } C = Id\ \T{*}\\
\langle G \rangle
& \mbox{if } C = \T{CONST}\ Id \\
\form_c(c)\concat\form_c(C')
& \mbox{if } C = c\ \T{\#}\ C' \\
\end{array}
\right.\\
\end{array}
\]
\noindent where the function $v$ evaluates to a natural value if its
argument is a constant expression, or otherwise returns a fresh generic
variable from \genvar:
\[
\begin{array}{rcl}
\multicolumn{3}{c}{
v: \Language{\NT{Expression}} \rightarrow \mbox{\tt NATURAL} \cup
\genvar}\\
\hlf
v(E) & \widehat{=} & \left\{
\begin{array}{ll}
n
& \mbox{if $E$ is a constant expression that evaluates to } n\\
G
& \mbox{for a fresh generic variable $G \in \genvar$ otherwise}
\end{array}
\right.
\end{array}
\]
\noindent We now define the base type function \bt:
\[
\begin{array}{rcl}
\multicolumn{3}{c}{
\bt: \Language{\NT{Types}} \rightarrow \Language{\NT{Types}}}\\
\hlf
\bt(T) = \left\{\begin{array}{ll}
\bt(T') & \mbox{if } T = \T{SHAPE}\ C\ \T{OF}\ T'\\
T' & \mbox{if } T = \T{VIEW}\ C\ \T{OF}\ T'\\
\bt(T') & \mbox{if } (T,T') \in \TypeDefSet \mbox{ for some } T'\\
T & \mbox{otherwise}
\end{array}
\right.
\end{array}
\]
\noindent Given the following reflexive and transitive {\em inclusion}
relation on intrinsic types
\begin{quote}
\(
\T{COMPLEX} \sqsupseteq \T{REAL} \sqsupseteq
\T{INTEGER} \sqsupseteq \T{NATURAL}
\)
\end{quote}
\noindent we say two types $T_1$ and $T_2$ are {\em compatible}
$(\approx)$ if the base type of $T_1$ includes the base type of $T_2$
and $\form(T_1)$ and $\form(T_2)$ can be made equal by an appropriate
substitution of the occurring free and generic variables. A type $T_1$
{\em includes} $(\sqsupseteq)$ a type $T_2$ if $T_1$ is a
(multi-dimensional) shape of some type $T$ and $T$ is compatible with
$T_2$. In the following we define the type compatibility and type
inclusion relations formally.
\[
\begin{array}{rcl}
T_1 \approx T_2 & \widehat{=}
& \bt(T_1) \sqsupseteq \bt(T_2) \wedge\\
& & \mathrm{Let}\ \form(T_1) = \langle e_1,\ldots,e_m\rangle;
\form(T_2) = \langle d_1,\ldots,d_n\rangle\ \mathrm{then} \\
& & \tab n = m \wedge \forall i \in \{1,\ldots,n\} \bullet (e_i = d_i \vee\\
& & \tab \tab \{e_i, d_i\} \cap (\freevar \cup \genvar) \neq \emptyset)\\
\hlf
T_1 \sqsupseteq T_2 & \widehat{=}
& \bt(T_1) \sqsupseteq \bt(T_2) \wedge\\
& & \mathrm{Let}\ \form(T_1) = \langle e_1,\ldots,e_m\rangle;
\form(T_2) = \langle d_1,\ldots,d_n\rangle\ \mathrm{then} \\
& & \tab \exists i \in \{1,\ldots,m\} \bullet \langle e_i,\ldots,e_m\rangle
= \langle d_1,\ldots,d_n\rangle
\end{array}
\]
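\noindent As an illustration of these definitions, consider the types
{\tt SHAPE \{2 \# 3\} OF REAL} and {\tt SHAPE \{3\} OF INTEGER}, with
forms $\langle 2, 3 \rangle$ and $\langle 3 \rangle$ and base types
\T{REAL} and \T{INTEGER}, respectively. Since $\T{REAL} \sqsupseteq
\T{INTEGER}$ and, for $i = 2$, $\langle e_2 \rangle = \langle 3 \rangle$,
the first type includes the second; the two types are not compatible,
however, since their forms have different lengths.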
|
{"hexsha": "ace54780e8b2fa6bf44fff22d3bd6a64c0e6ce3c", "size": 11536, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "compiler/frontends/toyfront/doc/Types.tex", "max_stars_repo_name": "CvR42/timber-compiler", "max_stars_repo_head_hexsha": "fcd35f78378dff111902052916f76adb037f2cf1", "max_stars_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-04-21T00:43:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-14T03:22:08.000Z", "max_issues_repo_path": "compiler/frontends/toyfront/doc/Types.tex", "max_issues_repo_name": "CvR42/timber-compiler", "max_issues_repo_head_hexsha": "fcd35f78378dff111902052916f76adb037f2cf1", "max_issues_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compiler/frontends/toyfront/doc/Types.tex", "max_forks_repo_name": "CvR42/timber-compiler", "max_forks_repo_head_hexsha": "fcd35f78378dff111902052916f76adb037f2cf1", "max_forks_repo_licenses": ["Naumen", "Condor-1.1", "MS-PL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2782874618, "max_line_length": 115, "alphanum_fraction": 0.6963418863, "num_tokens": 3626}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
old_version = False
class GaussianConditional(nn.Module):
def __init__(self, dim, clip=False):
super(GaussianConditional, self).__init__()
self.dim = dim
self.sp = nn.Softplus()
self.clip = clip
def forward(self, input):
mu = input.narrow(1, 0, self.dim)
log_sigma = input.narrow(1, self.dim, self.dim)
if self.clip:
output = Variable(input.data.new(input.size(0), self.dim).normal_()) * torch.exp(
torch.min(log_sigma, Variable(log_sigma.data.new(log_sigma.size()).fill_(10.0)))) + mu
else:
output = Variable(input.data.new(input.size(0), self.dim).normal_()) * torch.exp(log_sigma) + mu
return output
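# Note (added): the forward above is the reparameterization trick,
# sample = mu + exp(log_sigma) * eps with eps ~ N(0, I), where the second
# half of ``input`` along dim 1 holds log_sigma.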
class GaussianNoise(nn.Module):
def __init__(self, sigma):
super(GaussianNoise, self).__init__()
self.sigma = sigma
def forward(self, input):
if self.training:
noise = Variable(input.data.new(input.size()).normal_(std=self.sigma))
return input + noise
else:
return input
class Expression(nn.Module):
def __init__(self, func):
super(Expression, self).__init__()
self.func = func
def forward(self, input):
return self.func(input)
class WN_Linear(nn.Linear):
def __init__(self, in_features, out_features, bias=True, train_scale=False, init_stdv=1.0):
super(WN_Linear, self).__init__(in_features, out_features, bias=bias)
if train_scale:
self.weight_scale = Parameter(torch.ones(self.out_features))
else:
self.register_buffer('weight_scale', torch.Tensor(out_features))
self.train_scale = train_scale
self.init_mode = False
self.init_stdv = init_stdv
self._reset_parameters()
def _reset_parameters(self):
self.weight.data.normal_(0, std=0.05)
if self.bias is not None:
self.bias.data.zero_()
if self.train_scale:
self.weight_scale.data.fill_(1.)
else:
self.weight_scale.fill_(1.)
def forward(self, input):
if self.train_scale:
weight_scale = self.weight_scale
else:
weight_scale = Variable(self.weight_scale)
# normalize weight matrix and linear projection
norm_weight = self.weight * (
weight_scale.unsqueeze(1) / torch.sqrt((self.weight ** 2).sum(1, keepdim=True) + 1e-6)).expand_as(self.weight)  # keepdim keeps the per-row norm broadcastable
activation = F.linear(input, norm_weight)
if self.init_mode == True:
mean_act = activation.mean(0).squeeze(0)
activation = activation - mean_act.expand_as(activation)
inv_stdv = self.init_stdv / torch.sqrt((activation ** 2).mean(0) + 1e-6).squeeze(0)
activation = activation * inv_stdv.expand_as(activation)
if self.train_scale:
self.weight_scale.data = self.weight_scale.data * inv_stdv.data
else:
self.weight_scale = self.weight_scale * inv_stdv.data
self.bias.data = - mean_act.data * inv_stdv.data
else:
if self.bias is not None:
activation = activation + self.bias.expand_as(activation)
return activation
def assert_nan(x):
assert not np.isnan(x.data.cpu().numpy().sum())
class WN_Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
train_scale=False, init_stdv=1.0):
super(WN_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
if train_scale:
self.weight_scale = Parameter(torch.Tensor(out_channels))
else:
self.register_buffer('weight_scale', torch.Tensor(out_channels))
self.train_scale = train_scale
self.init_mode = False
self.init_stdv = init_stdv
self._reset_parameters()
def _reset_parameters(self):
self.weight.data.normal_(std=0.05)
if self.bias is not None:
self.bias.data.zero_()
if self.train_scale:
self.weight_scale.data.fill_(1.)
else:
self.weight_scale.fill_(1.)
def forward(self, input):
if self.train_scale:
weight_scale = self.weight_scale
else:
weight_scale = Variable(self.weight_scale)
# normalize weight matrix and linear projection [out x in x h x w]
# for each output dimension, normalize through (in, h, w) = (1, 2, 3) dims
norm_weight = self.weight * (weight_scale[:,None,None,None] /
torch.sqrt((self.weight ** 2).sum(3).sum(2).sum(1) + 1e-6).reshape([-1, 1, 1, 1])).expand_as(self.weight)
if old_version:
bias = self.bias
else:
bias = None
activation = F.conv2d(input, norm_weight, bias=bias,
stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups)
if self.init_mode == True:
mean_act = activation.mean(3).mean(2).mean(0).squeeze()
activation = activation - mean_act[None, :, None, None].expand_as(activation)
inv_stdv = self.init_stdv / torch.sqrt((activation ** 2).mean(3).mean(2).mean(0) + 1e-8).squeeze()
activation = activation * inv_stdv[None, :, None, None].expand_as(activation)
if self.train_scale:
self.weight_scale.data = self.weight_scale.data * inv_stdv.data
else:
self.weight_scale = self.weight_scale * inv_stdv.data
self.bias.data = - mean_act.data * inv_stdv.data
else:
if self.bias is not None:
activation = activation + self.bias[None, :, None, None].expand_as(activation)
return activation
class WN_ConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1,
bias=True, train_scale=False, init_stdv=1.0):
super(WN_ConvTranspose2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding,
output_padding, groups, bias)
if train_scale:
self.weight_scale = Parameter(torch.Tensor(out_channels))
else:
self.register_buffer('weight_scale', torch.Tensor(out_channels))
self.train_scale = train_scale
self.init_mode = False
self.init_stdv = init_stdv
self._reset_parameters()
def _reset_parameters(self):
self.weight.data.normal_(std=0.05)
if self.bias is not None:
self.bias.data.zero_()
if self.train_scale:
self.weight_scale.data.fill_(1.)
else:
self.weight_scale.fill_(1.)
def forward(self, input, output_size=None):
if self.train_scale:
weight_scale = self.weight_scale
else:
weight_scale = Variable(self.weight_scale)
# normalize weight matrix and linear projection [in x out x h x w]
# for each output dimension, normalize through (in, h, w) = (0, 2, 3) dims
norm_weight = self.weight * (weight_scale[None, :, None, None] / torch.sqrt(
(self.weight ** 2).sum(3).sum(2).sum(0) + 1e-6).reshape([1, -1, 1, 1])).expand_as(self.weight)  # norm is per output channel (dim 1 of the transposed-conv weight)
output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
if old_version:
bias = self.bias
else:
bias = None
activation = F.conv_transpose2d(input, norm_weight, bias=bias,
stride=self.stride, padding=self.padding,
output_padding=output_padding, groups=self.groups)
if self.init_mode == True:
mean_act = activation.mean(3).mean(2).mean(0).squeeze()
activation = activation - mean_act[None, :, None, None].expand_as(activation)
inv_stdv = self.init_stdv / torch.sqrt((activation ** 2).mean(3).mean(2).mean(0) + 1e-8).squeeze()
activation = activation * inv_stdv[None, :, None, None].expand_as(activation)
if self.train_scale:
self.weight_scale.data = self.weight_scale.data * inv_stdv.data
else:
self.weight_scale = self.weight_scale * inv_stdv.data
self.bias.data = - mean_act.data * inv_stdv.data
else:
if self.bias is not None:
activation = activation + self.bias[None, :, None, None].expand_as(activation)
return activation
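# --- Illustrative sketch (added; shapes and values are arbitrary) ---
# Demonstrates the data-dependent initialization pass of WN_Linear: with
# ``init_mode`` enabled, a single forward pass rescales ``weight_scale`` and
# the bias so that outputs have roughly zero mean and ``init_stdv`` std.
if __name__ == '__main__':
    layer = WN_Linear(8, 4, train_scale=True)
    x = torch.randn(64, 8)
    layer.init_mode = True
    _ = layer(x)  # initialization pass: updates weight_scale and bias in-place
    layer.init_mode = False
    y = layer(x)
    print(y.mean().item(), y.std().item())  # approximately 0.0 and 1.0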
|
{"hexsha": "581096bea9835dbde03cc6b3b1784ce161be3456", "size": 8910, "ext": "py", "lang": "Python", "max_stars_repo_path": "pixelcnn_model.py", "max_stars_repo_name": "zcwang0702/ssl_bad_gan", "max_stars_repo_head_hexsha": "a3d25b852ef5f17ea788b7141f0be0ac4d9fd631", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pixelcnn_model.py", "max_issues_repo_name": "zcwang0702/ssl_bad_gan", "max_issues_repo_head_hexsha": "a3d25b852ef5f17ea788b7141f0be0ac4d9fd631", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pixelcnn_model.py", "max_forks_repo_name": "zcwang0702/ssl_bad_gan", "max_forks_repo_head_hexsha": "a3d25b852ef5f17ea788b7141f0be0ac4d9fd631", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0769230769, "max_line_length": 142, "alphanum_fraction": 0.6071829405, "include": true, "reason": "import numpy", "num_tokens": 2024}
|
import numpy as np
import pandas as pd
import os
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def get_csv():
if os.path.exists("Update2.csv"):
df = pd.read_csv("Update2.csv")
return df
def scrub_csv(data):
# The first three columns are the features; "Prediction" is the target.
features = list(data.columns[:3])
X = data[features]
Y = data["Prediction"]
return X, Y, features
def prediction(F, T, N):
clf = tree.DecisionTreeClassifier()
F_train, F_test, T_train, T_test = train_test_split(F, T, test_size = .2)
clf.fit(F_train, T_train)
predictions = clf.predict(F_test)
print(accuracy_score(T_test, predictions))
class_names_tree = clf.classes_
temp = class_names_tree.tolist()
for x,y in enumerate(temp):
temp[x] = str(y)
print(type(temp[x]))
tree.export_graphviz(clf, out_file='tree.dot', feature_names=N, class_names=temp, filled=True, rounded=True)
os.system('dot -Tpng tree.dot -o tree.png')
if __name__ == "__main__":
df = get_csv()
features, targets, names = scrub_csv(df)
prediction(features, targets, names)
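# Note (added): this script assumes "Update2.csv" provides three feature
# columns followed by a "Prediction" target column, and that Graphviz's
# `dot` executable is on PATH so tree.dot can be rendered to tree.png.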
|
{"hexsha": "a9e778a85ae453647e6e08666caf272a49f5adca", "size": 1337, "ext": "py", "lang": "Python", "max_stars_repo_path": "MachineLearning/CSV_Prediction.py", "max_stars_repo_name": "SKravitsky/MachineLearningServer", "max_stars_repo_head_hexsha": "170081fe1ea53be7394e35ce208bd665ae002b73", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MachineLearning/CSV_Prediction.py", "max_issues_repo_name": "SKravitsky/MachineLearningServer", "max_issues_repo_head_hexsha": "170081fe1ea53be7394e35ce208bd665ae002b73", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MachineLearning/CSV_Prediction.py", "max_forks_repo_name": "SKravitsky/MachineLearningServer", "max_forks_repo_head_hexsha": "170081fe1ea53be7394e35ce208bd665ae002b73", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7592592593, "max_line_length": 112, "alphanum_fraction": 0.6641735228, "include": true, "reason": "import numpy", "num_tokens": 336}
|
# author: Arun Ponnusamy
# website: https://www.arunponnusamy.com
# import necessary packages
from keras.preprocessing.image import img_to_array
from keras.models import load_model
from keras.utils import get_file
import numpy as np
import argparse
import cv2
import os
import cvlib as cv
# handle command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = ap.parse_args()
# download pre-trained model file (one-time download)
dwnld_link = "https://s3.ap-south-1.amazonaws.com/arunponnusamy/pre-trained-weights/gender_detection.model"
model_path = get_file("gender_detection.model", dwnld_link,
cache_subdir="pre-trained", cache_dir=os.getcwd())
# load model
model = load_model(model_path)
# read input image
image = cv2.imread(args.image)
if image is None:
print("Could not read input image")
exit()
# detect faces in the image
face, confidence = cv.detect_face(image)
classes = ['man','woman']
# loop through detected faces
for idx, f in enumerate(face):
# get corner points of face rectangle
(startX, startY) = f[0], f[1]
(endX, endY) = f[2], f[3]
# draw rectangle over face
cv2.rectangle(image, (startX,startY), (endX,endY), (0,255,0), 2)
# crop the detected face region
face_crop = np.copy(image[startY:endY,startX:endX])
# preprocessing for gender detection model
face_crop = cv2.resize(face_crop, (96,96))
face_crop = face_crop.astype("float") / 255.0
face_crop = img_to_array(face_crop)
face_crop = np.expand_dims(face_crop, axis=0)
# apply gender detection on face
conf = model.predict(face_crop)[0]
print(conf)
print(classes)
# get label with max accuracy
idx = np.argmax(conf)
label = classes[idx]
label = "{}: {:.2f}%".format(label, conf[idx] * 100)
Y = startY - 10 if startY - 10 > 10 else startY + 10
# write label and confidence above face rectangle
cv2.putText(image, label, (startX, Y), cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 255, 0), 2)
# display output
cv2.imshow("gender detection", image)
# press any key to close window
cv2.waitKey()
# save output
cv2.imwrite("gender_detection.jpg", image)
# release resources
cv2.destroyAllWindows()
|
{"hexsha": "a1f5f114841e45c0cfa17595c5f5e8f0aa2ef007", "size": 2389, "ext": "py", "lang": "Python", "max_stars_repo_path": "detect_gender.py", "max_stars_repo_name": "bekturaitbaev/GenderDetection", "max_stars_repo_head_hexsha": "c7a449879daf8eabc38486c7afbe02f7e667c33a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-07T19:16:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-07T19:16:38.000Z", "max_issues_repo_path": "detect_gender.py", "max_issues_repo_name": "bekturaitbaev/GenderDetection", "max_issues_repo_head_hexsha": "c7a449879daf8eabc38486c7afbe02f7e667c33a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detect_gender.py", "max_forks_repo_name": "bekturaitbaev/GenderDetection", "max_forks_repo_head_hexsha": "c7a449879daf8eabc38486c7afbe02f7e667c33a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-26T15:27:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-05T13:36:07.000Z", "avg_line_length": 26.5444444444, "max_line_length": 107, "alphanum_fraction": 0.687316869, "include": true, "reason": "import numpy", "num_tokens": 621}
|
\problemname{Arithmetic Decoding}
Arithmetic coding is a method to represent a message as a real number
$x$ such that $0 \leq x < 1$. We will assume that the
message consists only of uppercase `A's and `B's. The two letters
have associated probabilities $p_A$ and $p_B = 1 - p_A$ such that
$0 < p_A < 1$.
The current interval $[a,b)$ is initially set to $[0,1)$ and we will
update this interval one letter at a time. To encode
a letter, the current interval is divided into two subintervals
as follows. Let $c = a + p_A(b-a)$. If the next letter is `A',
$[a,c)$ becomes the current interval. Otherwise, the current interval
is now $[c,b)$. This process is repeated for each letter in the
message. If $[k,\ell)$ is the final interval, the encoded message is
chosen to be $k$.
For example, if the original message is ``ABAB'' and $p_A = p_B = 0.5$,
the sequence of intervals encountered in the algorithm is
\[ [0,1) \xrightarrow{A} [0, 0.5) \xrightarrow{B} [0.25, 0.5)
\xrightarrow{A} [0.25, 0.375) \xrightarrow{B} [0.3125, 0.375). \]
The encoded message is therefore 0.3125, or 0.0101 in binary.
Given the length of the message, the probabilities, and the encoded
message, determine the original message.
\section*{Input}
The first line contains the integer $N$ ($1 \leq N \leq 15$), which
is the length of the original message. The
second line contains the integer $D$ ($1 \leq D \leq 7$),
which indicates that $p_A = \frac{D}{8}$.
The third line contains the binary representation of the encoded message.
It is guaranteed that the binary representation of the encoded message
starts with ``0.'' and contains at most $3N+2$ characters.
It is guaranteed that the encoded message came from an initial message
of length $N$ consisting only of `A' and `B' using this value of $p_A$.
\section*{Output}
Display the original message.
|
{"hexsha": "91a1b6ed7053ccc1df631fc84988e51b8d8193f4", "size": 1846, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "problems/arithmeticdecoding/problem_statement/problem.tex", "max_stars_repo_name": "icpc/na-rocky-mountain-2020-public", "max_stars_repo_head_hexsha": "d77cd0dfd9bd707f34497977251c4cc583647fef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-11T21:49:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-19T22:31:57.000Z", "max_issues_repo_path": "problems/arithmeticdecoding/problem_statement/problem.tex", "max_issues_repo_name": "icpc/na-rocky-mountain-2020-public", "max_issues_repo_head_hexsha": "d77cd0dfd9bd707f34497977251c4cc583647fef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problems/arithmeticdecoding/problem_statement/problem.tex", "max_forks_repo_name": "icpc/na-rocky-mountain-2020-public", "max_forks_repo_head_hexsha": "d77cd0dfd9bd707f34497977251c4cc583647fef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-03-11T18:15:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-24T00:15:32.000Z", "avg_line_length": 42.9302325581, "max_line_length": 73, "alphanum_fraction": 0.7237269772, "num_tokens": 545}
|
# Copyright 2016, Iain Dunning, Joey Huchette, Miles Lubin, and contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#############################################################################
# JuMP
# An algebraic modelling language for Julia
# See http://github.com/JuliaOpt/JuMP.jl
#############################################################################
# Hock-Schittkowski Nonlinear Test Suite - HS112
# This file is JuMP implementation of the model described in
# W. Hock, K. Schittkowski, Test Examples for Nonlinear Programming
# Codes, Lecture Notes in Economics and Mathematical Systems,
# Springer, No. 187, 1981
# More information, including original model description, at
# http://www.ai7.uni-bayreuth.de/downloads.htm
#
# This problem has an objective with the logarithm of a fraction where both
# the numerator and denominator contain variables. The constraints are linear.
#############################################################################
using JuMP
using Base.Test
let
c = [-6.089, -17.164, -34.054, -5.914, -24.721, -14.986, -24.100, -10.708, -26.662, -22.179]
m = Model()
@variable(m, x[1:10] >= 1e-6, start = 0.1)
@NLobjective(m, Min, sum{x[j]*(c[j] + log(x[j]/sum{x[k],k=1:10})), j=1:10})
@NLconstraint(m, x[1] + 2*x[2] + 2*x[3] + x[6] + x[10] == 2)
@NLconstraint(m, x[4] + 2*x[5] + x[6] + x[7] == 1)
@NLconstraint(m, x[3] + x[7] + x[8] + 2*x[9] + x[10] == 1)
solve(m)
@test_approx_eq_eps getobjectivevalue(m) -47.76109026 1e-5
end
|
{"hexsha": "2ee1b54da18460d4eae9d1a113b3e6d42f4636fa", "size": 1646, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "public/.julia/v0.5/JuMP/test/hockschittkowski/hs112.jl", "max_stars_repo_name": "Giarcr0b/MVO_Tool", "max_stars_repo_head_hexsha": "8f3348b8b56968febca8307acea3ebe1817fccae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-12T03:24:25.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-12T03:24:25.000Z", "max_issues_repo_path": "public/.julia/v0.5/JuMP/test/hockschittkowski/hs112.jl", "max_issues_repo_name": "Giarcr0b/MVO_Tool", "max_issues_repo_head_hexsha": "8f3348b8b56968febca8307acea3ebe1817fccae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "public/.julia/v0.5/JuMP/test/hockschittkowski/hs112.jl", "max_forks_repo_name": "Giarcr0b/MVO_Tool", "max_forks_repo_head_hexsha": "8f3348b8b56968febca8307acea3ebe1817fccae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2790697674, "max_line_length": 92, "alphanum_fraction": 0.5886998785, "num_tokens": 504}
|
\chapter*{Abbreviations}
\begin{acronym}[Kurzform]
\acro{ECU}{Electrical Control Unit}
\acro{VM}{Virtual Machine}
\acro{SoC}{System on a Chip}
\acro{NIST}{National Institute for Standards and Technology}
\acro{IaaS}{Infrastructure-as-a-Service}
\acro{PaaS}{Platform-as-a-Service}
\acro{SaaS}{Software-as-a-Service}
\acro{OS}{Operating System}
\acro{CPU}{Central Processing Unit}
\acro{KVM}{Kernel-based Virtual Machine}
\acro{API}{Application Programming Interface}
\acro{RPC}{Remote Procedure Call}
\acro{SDK}{Software Development Kit}
\acro{vCPU}{Virtual CPU}
\acro{SSH}{Secure Shell}
\acro{EPS}{Events per Second}
\acro{VNC}{Virtual Network Computing}
\acro{ATF}{ARM Trusted Firmware}
\acroplural{ECU}[ECUs]{Electrical Control Units}
\acroplural{VM}[VMs]{Virtual Machines}
\acroplural{SoC}[SoCs]{Systems-on-a-Chip}
\acroplural{OS}[OSs]{Operating Systems}
\acroplural{CPU}[CPUs]{Central Processing Units}
\acroplural{API}[APIs]{Application Programming Interfaces}
\acroplural{RPC}[RPCs]{Remote Procedure Calls}
\acroplural{vCPU}[vCPUs]{Virtual CPUs}
\end{acronym}
|
{"hexsha": "2435946ee6d0ff6e1e51811277284d2c4e8196d5", "size": 1184, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/glossary.tex", "max_stars_repo_name": "MaxMalinowski/BachelorThesis", "max_stars_repo_head_hexsha": "91a5286a3d1cd32e628b85efcc2194a013cc7233", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/glossary.tex", "max_issues_repo_name": "MaxMalinowski/BachelorThesis", "max_issues_repo_head_hexsha": "91a5286a3d1cd32e628b85efcc2194a013cc7233", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/glossary.tex", "max_forks_repo_name": "MaxMalinowski/BachelorThesis", "max_forks_repo_head_hexsha": "91a5286a3d1cd32e628b85efcc2194a013cc7233", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0, "max_line_length": 64, "alphanum_fraction": 0.6993243243, "num_tokens": 343}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 11:59:55 2020
@author: José Carlos Aradillas jaradillas@us.es
"""
import numpy as np
import matplotlib.pyplot as plt
from labcomdig import detectaSBF, simbolobit
def load_signal(filename):
try:
Xt = np.load(filename)
except FileNotFoundError:
        raise Exception('Could not find the signal at the receiver input in file '+filename)
return Xt
def procesa_datos_recibidos(Bn, nombre_destino = './img_recibida.jpg', cabecera_block=32, pixel_block=8):
cabecera_size = 3*cabecera_block
cabecera = Bn[:cabecera_size]
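    # each of the 3 header fields (e.g. image height, width and channels) is a
    # cabecera_block-bit big-endian integer; the matrix product below turns each
    # group of header bits back into one integer dimension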
dimensiones = cabecera.reshape([-1,cabecera_block]) @ 2**np.flipud(np.arange(cabecera_block))
    data_size = pixel_block*np.prod(dimensiones)
data_bits = Bn[cabecera_size:(cabecera_size+data_size)]
try:
data_plain_rx = data_bits.reshape([-1,pixel_block]) @ 2**np.flipud(np.arange(pixel_block))
data_rx = data_plain_rx.reshape(dimensiones)
    except ValueError:
        raise Exception('You have not solved the exercise correctly; the signal was not demodulated properly')
plt.figure()
plt.imshow(data_rx)
plt.imsave(nombre_destino, data_rx.astype(np.uint8))
def detecta_y_procesa(r, alfabeto, fichero_destino):
if r is None:
        raise Exception('You must obtain the observation vector r from R(t)')
if alfabeto is None:
        raise Exception('You must set "alfabeto" to the vector of possible constellation values')
sn = detectaSBF(r, alfabeto)
Bn = simbolobit(sn, alfabeto)
procesa_datos_recibidos(Bn, fichero_destino)
|
{"hexsha": "b321f8c96c5cff3e0fb6605122444da9fc1c8084", "size": 1638, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "josarajar/labcomdig-extra", "max_stars_repo_head_hexsha": "8c81be58d1edd6a4d88598a88bc4e83c22de6b24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "josarajar/labcomdig-extra", "max_issues_repo_head_hexsha": "8c81be58d1edd6a4d88598a88bc4e83c22de6b24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "josarajar/labcomdig-extra", "max_forks_repo_head_hexsha": "8c81be58d1edd6a4d88598a88bc4e83c22de6b24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2272727273, "max_line_length": 111, "alphanum_fraction": 0.7112332112, "include": true, "reason": "import numpy", "num_tokens": 452}
|
from numba import njit, generated_jit, vectorize, types
import numpy as np
from . import settings
from .constants import _MAXCOSH
@njit('float64(float64)', cache=settings.CACHE)
def _dsinpi(x):
"""Compute sin(pi*x) for real arguments."""
s = 1.0
if x < 0.0:
x = -x
s = -1.0
r = np.fmod(x, 2.0)
if r < 0.5:
return s*np.sin(np.pi*r)
elif r > 1.5:
return s*np.sin(np.pi*(r - 2.0))
else:
return -s*np.sin(np.pi*(r - 1.0))
@njit('float64(float64)', cache=settings.CACHE)
def _dcospi(x):
"""Compute cos(pi*x) for real arguments."""
if x < 0.0:
x = -x
r = np.fmod(x, 2.0)
if r == 0.5:
# Don't want to return -0.0
return 0.0
elif r < 1.0:
return -np.sin(np.pi*(r - 0.5))
else:
return np.sin(np.pi*(r - 1.5))
@njit('complex128(complex128)', cache=settings.CACHE)
def _csinpi(z):
"""Compute sin(pi*z) for complex arguments."""
x = z.real
piy = np.pi*z.imag
abspiy = abs(piy)
sinpix = _dsinpi(x)
cospix = _dcospi(x)
if abspiy <= _MAXCOSH:
        return complex(sinpix*np.cosh(piy), cospix*np.sinh(piy))
    # Have to be careful--sinh/cosh could overflow while cos/sin are
    # small. At values this large
#
# cosh(y) ~ exp(y)/2
# sinh(y) ~ sgn(y)*exp(y)/2
#
# so we can compute exp(y/2), scale by the right factor of sin/cos
# and then multiply by exp(y/2) to avoid overflow.
exphpiy = np.exp(abspiy/2)
if exphpiy == np.inf:
if sinpix == 0:
# Preserve the sign of zero
coshfac = sinpix
else:
coshfac = np.copysign(np.inf, sinpix)
if cospix == 0:
sinhfac = cospix
else:
sinhfac = np.sign(piy)*np.copysign(np.inf, cospix)
        return complex(coshfac, sinhfac)
coshfac = 0.5*sinpix*exphpiy
sinhfac = 0.5*cospix*exphpiy
    return complex(coshfac*exphpiy, sinhfac*exphpiy)
@njit('complex128(complex128)', cache=settings.CACHE)
def _ccospi(z):
"""Compute cos(pi*z) for complex arguments."""
x = z.real
piy = np.pi*z.imag
abspiy = abs(piy)
sinpix = _dsinpi(x)
cospix = _dcospi(x)
if abspiy <= _MAXCOSH:
        return complex(cospix*np.cosh(piy), -sinpix*np.sinh(piy))
# See csinpi(z) for an idea of what's going on here
exphpiy = np.exp(abspiy/2)
if exphpiy == np.inf:
if cospix == 0:
coshfac = cospix
else:
coshfac = np.copysign(np.inf, cospix)
if sinpix == 0:
sinhfac = sinpix
else:
sinhfac = -np.sign(piy)*np.copysign(np.inf, sinpix)
        return complex(coshfac, sinhfac)
coshfac = 0.5*cospix*exphpiy
sinhfac = -0.5*sinpix*exphpiy
    return complex(coshfac*exphpiy, sinhfac*exphpiy)
@generated_jit(nopython=True, cache=settings.CACHE)
def _sinpi(a):
if a == types.float64:
return lambda a: _dsinpi(a)
elif a == types.complex128:
return lambda a: _csinpi(a)
else:
raise NotImplementedError
@vectorize(
['float64(float64)', 'complex128(complex128)'],
nopython=True,
cache=settings.CACHE,
)
def sinpi(x):
r"""Compute :math:`\sin(\pi x)`.
Parameters
----------
x : array-like
Points on the real line or complex plane
out : ndarray, optional
Output array for the values of `sinpi` at `x`
Returns
-------
ndarray
Values of `sinpi` at `x`
"""
return _sinpi(x)
@generated_jit(nopython=True, cache=settings.CACHE)
def _cospi(a):
if a == types.float64:
return lambda a: _dcospi(a)
elif a == types.complex128:
return lambda a: _ccospi(a)
else:
raise NotImplementedError
@vectorize(
['float64(float64)', 'complex128(complex128)'],
nopython=True,
cache=settings.CACHE,
)
def cospi(x):
r"""Compute :math:`\cos(\pi x)`.
Parameters
----------
x : array-like
Points on the real line or complex plane
out : ndarray, optional
Output array for the values of `cospi` at `x`
Returns
-------
ndarray
Values of `cospi` at `x`
"""
return _cospi(x)
|
{"hexsha": "de6a4b18526b52a76b4abc4d56bd2846f81496c6", "size": 4215, "ext": "py", "lang": "Python", "max_stars_repo_path": "spycial/trig.py", "max_stars_repo_name": "person142/spycial", "max_stars_repo_head_hexsha": "d017048c8c09ee0714f438cb75c2e221e068baee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-04-04T21:53:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-10T17:16:40.000Z", "max_issues_repo_path": "spycial/trig.py", "max_issues_repo_name": "person142/special", "max_issues_repo_head_hexsha": "d017048c8c09ee0714f438cb75c2e221e068baee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spycial/trig.py", "max_forks_repo_name": "person142/special", "max_forks_repo_head_hexsha": "d017048c8c09ee0714f438cb75c2e221e068baee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-14T15:09:21.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-14T15:09:21.000Z", "avg_line_length": 24.0857142857, "max_line_length": 70, "alphanum_fraction": 0.5784104389, "include": true, "reason": "import numpy,from numba", "num_tokens": 1345}
|
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* (C) Copyright 2007 Anthony Williams
* (C) Copyright 2011-2012 Vicente J. Botet Escriba
* (C) Copyright 2013 Andrey Semashev
*/
#ifndef BOOST_SYNC_LOCKS_LOCK_OPTIONS_HPP_INCLUDED_
#define BOOST_SYNC_LOCKS_LOCK_OPTIONS_HPP_INCLUDED_
#include <boost/sync/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#include <boost/sync/detail/header.hpp>
namespace boost {
namespace sync {
struct defer_lock_t
{
};
struct try_to_lock_t
{
};
struct adopt_lock_t
{
};
BOOST_CONSTEXPR_OR_CONST defer_lock_t defer_lock = {};
BOOST_CONSTEXPR_OR_CONST try_to_lock_t try_to_lock = {};
BOOST_CONSTEXPR_OR_CONST adopt_lock_t adopt_lock = {};
} // namespace sync
} // namespace boost
#include <boost/sync/detail/footer.hpp>
#endif // BOOST_SYNC_LOCKS_LOCK_OPTIONS_HPP_INCLUDED_
|
{"hexsha": "4b8f8fb780204ce0352b0eda312a31f8dff1cf24", "size": 965, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/sync/locks/lock_options.hpp", "max_stars_repo_name": "ballisticwhisper/boost", "max_stars_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-01-02T14:24:56.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-02T14:25:17.000Z", "max_issues_repo_path": "boost/sync/locks/lock_options.hpp", "max_issues_repo_name": "ballisticwhisper/boost", "max_issues_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-01-13T23:45:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-03T08:13:26.000Z", "max_forks_repo_path": "boost/sync/locks/lock_options.hpp", "max_forks_repo_name": "ballisticwhisper/boost", "max_forks_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-04-04T10:55:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-23T18:52:06.000Z", "avg_line_length": 20.5319148936, "max_line_length": 61, "alphanum_fraction": 0.7689119171, "num_tokens": 239}
|
import json
from PIL import Image
import numpy as np
import copy
import os
class armorstandgeo:
    def __init__(self, name, alpha=0.8, offsets=[9,0,0], size=[64, 64, 64], ref_pack="Vanilla_Resource_Pack"):
self.ref_resource_pack = ref_pack
        ## we load all of these items containing the mapping of blocks to some property that is either hidden, implied or just not clear
with open("{}/blocks.json".format(self.ref_resource_pack)) as f:
            ## defines the blocks; from the NBT name it tells us sides vs textures
self.blocks_def = json.load(f)
with open("{}/textures/terrain_texture.json".format(self.ref_resource_pack)) as f:
            ## maps texture names to texture files.
self.terrain_texture = json.load(f)
with open("lookups/block_rotation.json") as f:
            ## custom lookup table I wrote to help with rotations; error messages dump if something has undefined rotations
self.block_rotations = json.load(f)
with open("lookups/variants.json") as f:
            ## custom lookup table mapping the assumed array location in the terrain texture to the relevant blocks, i.e. log2 index 2 implies a specific wood type not captured anywhere
self.block_variants = json.load(f)
with open("lookups/hacks.json") as f:
self.hacks = json.load(f)
self.name = name.replace(" ","_").lower()
self.stand = {}
self.offsets = offsets
self.alpha=alpha
self.texture_list = []
self.geometry = {}
self.stand_init()
self.uv_map = {}
self.blocks = {}
self.size = []
self.bones = []
self.errors={}
self.uv_array = None
        ## The stuff below is a horrible kludge that should get cleaned up. +1 karma to whoever has a better plan for this.
        # this is how I determine if something should be thin. It is ugly, but kinda works
self.lower_objects = self.hacks["slab_like"]
self.slab_like_objects = self.hacks["trapdoor_like"]
        ## these blocks are either not needed or cause issues. Grass is banned because terrain_texture.json has a biome map in it. If someone wants to fix that, we can un-ban it
self.excluded = ["air", "grass", "structure_block"]
def export(self, pack_folder):
        ## This exporter just packs up the armor stand json files and dumps them where they should go, as well as exporting the UV file
self.add_blocks_to_bones()
self.geometry["description"]["texture_height"] = len(
self.uv_map.keys())
self.stand["minecraft:geometry"] = [self.geometry] ## this is insuring the geometries are imported, there is an implied refference other places.
path_to_geo = "{}/models/entity/armor_stand.ghost_blocks_{}.geo.json".format(
pack_folder,self.name)
os.makedirs(os.path.dirname(path_to_geo), exist_ok=True)
with open(path_to_geo, "w+") as json_file:
json.dump(self.stand, json_file, indent=2)
texture_name = "{}/textures/entity/ghost_blocks_{}.png".format(
pack_folder,self.name)
os.makedirs(os.path.dirname(texture_name), exist_ok=True)
self.save_uv(texture_name)
def make_layer(self, y):
        # sets up a layer for us to reference in the animation controller later. Layers are moved during the poses
layer_name = "layer_{}".format(y)
self.geometry["bones"].append(
{"name": layer_name, "pivot": [-8, 0, 8], "parent": "ghost_blocks"})
def make_block(self, x, y, z, block_name, rot=None, top=False, trap_open=False, parent=None,variant=None):
        # make_block handles all the block processing. This function needs cleanup and probably should be broken into helper functions for legibility.
if block_name not in self.excluded:
slab = ("slab" in block_name and "double" not in block_name) or block_name in self.slab_like_objects
wall = "wall" in block_name
torch = "torch" in block_name
lantern = "lantern" in block_name
stair = "stair" in block_name
hopper = "hopper" in block_name
trapdoor = "trapdoor" in block_name or block_name in self.lower_objects
uv = self.block_name_to_uv(block_name,variant=variant)
non_block=False
if rot is not None and not stair and not hopper:
if "trapdoor" in block_name:
rot_name = "trapdoor"
else:
rot_name = block_name
if rot_name in self.block_rotations.keys():
piv = self.block_rotations[rot_name][str(int(rot))]
else:
piv = [0, 0, 0]
print("no rotation for {} found".format(block_name))
else:
piv = [0, 0, 0]
pivot_point = None
            # the section below is the hardcoded block geometries. This likely should be broken into helper functions; it would make the code easier to maintain.
if slab:
size = [1, .5, 1]
uv["east"]["uv_size"][1] = 0.5
uv["west"]["uv_size"][1] = 0.5
uv["north"]["uv_size"][1] = 0.5
uv["south"]["uv_size"][1] = 0.5
if top:
origin = [-1*(x + self.offsets[0]), y + 0.5 + self.offsets[1], z + self.offsets[2]]
else:
origin = [-1*(x + self.offsets[0]), y + self.offsets[1], z + self.offsets[2]]
elif trapdoor:
if top and not trap_open:
origin = [-1*(x + self.offsets[0]), y + 14/16 + self.offsets[1], z + self.offsets[2]]
else:
origin = [-1*(x + self.offsets[0]), y + self.offsets[1], z + self.offsets[2]]
if trap_open:
pivot_point=[-1*(x + self.offsets[0])+.5, y + 0.5 + self.offsets[1], z + 0.5 + self.offsets[2]]
size = [1, 1, 2/16]
else:
size = [1, 2/16, 1]
elif wall:
size = [.5, 1, .5]
origin = [-1 * (x + self.offsets[0]) + 0.25, y + self.offsets[1], z + 0.25 + self.offsets[2]]
uv["east"]["uv_size"] = [0.5, 1]
uv["west"]["uv_size"] = [0.5, 1]
uv["north"]["uv_size"] = [0.5, 1]
uv["south"]["uv_size"] = [0.5, 1]
elif torch:
size = [3/16, 10/16, 3/16]
origin = [-1 * (x + self.offsets[0]) + (16-2)/32, y + self.offsets[1], z + (16-2)/32 + self.offsets[2]]
uv["east"]["uv"] = [7/16, uv["east"]["uv"][1]+6/16]
uv["east"]["uv_size"] = [2/16, 10/16]
uv["west"]["uv"] = [7/16, uv["west"]["uv"][1]+6/16]
uv["west"]["uv_size"] = [2/16, 10/16]
uv["north"]["uv"] = [7/16, uv["north"]["uv"][1]+6/16]
uv["north"]["uv_size"] = [2/16, 10/16]
uv["south"]["uv"] = [7/16, uv["south"]["uv"][1]+6/16]
uv["south"]["uv_size"] = [2/16, 10/16]
uv["up"]["uv"] = [7/16, uv["up"]["uv"][1]+6/16]
uv["up"]["uv_size"] = [2/16, 2/16]
uv["down"]["uv"] = [7/16, uv["down"]["uv"][1]+14/16]
uv["down"]["uv_size"] = [2/16,2/16]
elif lantern:
size = [6/16, 7/16, 6/16]
origin = [-1*(x + self.offsets[0]) + (16-6)/32, y + (16-7)/32 + self.offsets[1], z + (16-6)/32 + self.offsets[2]]
uv["east"]["uv"] = [0, uv["east"]["uv"][1]+2/16]
uv["east"]["uv_size"] = [6/16, 7/16]
uv["west"]["uv"] = [0, uv["west"]["uv"][1]+2/16]
uv["west"]["uv_size"] = [6/16, 7/16]
uv["north"]["uv"] = [0, uv["north"]["uv"][1]+2/16]
uv["north"]["uv_size"] = [6/16, 7/16]
uv["south"]["uv"] = [0, uv["south"]["uv"][1]+2/16]
uv["south"]["uv_size"] = [6/16, 7/16]
uv["up"]["uv"] = [0, uv["up"]["uv"][1]+9/16]
uv["up"]["uv_size"] = [6/16, 6/16]
uv["down"]["uv"] = [0, uv["down"]["uv"][1]+9/16]
uv["down"]["uv_size"] = [6/16, 6/16]
elif stair:
self.stair(x, y, z, block_name,uv, rot, top)
non_block=True
elif hopper:
self.make_hopper(x, y, z, block_name,uv, rot)
non_block=True
else:
origin = [-1*(x + self.offsets[0]), y + self.offsets[1], z + self.offsets[2]]
size = [1, 1, 1]
            ## the code below assumes 1 cube; the helper functions for hoppers and stairs handle proper blocks.
            ## Probably should just move all this to helper functions for each block geo
if not non_block:
block_name = "block_{}_{}_{}".format(x, y, z)
self.blocks[block_name] = {}
self.blocks[block_name]["name"] = block_name
self.blocks[block_name]["parent"] = "layer_{}".format(y)
self.blocks[block_name]["pivot"] = [0, 0, 0]
self.blocks[block_name]["cubes"] = []
if pivot_point is not None:
self.blocks[block_name]["cubes"].append(
{"origin": origin, "size": size, "rotation": piv, "uv": uv, "inflate": -0.03,"pivot":pivot_point})
else:
self.blocks[block_name]["cubes"].append(
{"origin": origin, "size": size, "rotation": piv, "uv": uv, "inflate": -0.03})
def make_hopper(self, x, y, z, block_name,uv, rot=None):
        ## helper function for hoppers. It is a bit ugly, but it works
block_name = "block_{}_{}_{}".format(x, y, z)
block1 = {}
block1["origin"] = [-1*(x + self.offsets[0]), y + 9/16 + self.offsets[1], z + self.offsets[2]]
block1["size"] = [1, 7/16, 1]
block1["inflate"] = -0.03
block1uv=dict(uv)
block1uv["east"]["uv_size"] = [1, 6/16]
block1uv["west"]["uv_size"] = [1, 6/16]
block1uv["north"]["uv_size"] = [1, 6/16]
block1uv["south"]["uv_size"] = [1, 6/16]
block1["uv"]=block1uv
block2={}
block2["origin"] = [-1*(x + self.offsets[0]) + 0.25, y+4/16 + self.offsets[1], z + 0.25 + self.offsets[2]]
block2["size"] = [.5, 6/16, 0.5]
block2["inflate"] = -0.03
block2uv=dict(uv)
block2uv["east"]["uv_size"] = [0.5, 6/16]
block2uv["west"]["uv_size"] = [0.5, 6/16]
block2uv["north"]["uv_size"] = [0.5, 6/16]
block2uv["south"]["uv_size"] = [0.5, 6/16]
block2["uv"]=block2uv
block3={}
block3["size"] = [4/16, 4/16, 4/16]
block3["inflate"] = -0.03
block3uv=dict(uv)
block3uv["east"]["uv_size"] = [4/16, 4/16]
block3uv["west"]["uv_size"] = [4/16, 4/16]
block3uv["north"]["uv_size"] = [4/16, 4/16]
block3uv["south"]["uv_size"] = [4/16, 4/16]
block3["uv"]=block2uv
if rot == 0:
block3["origin"] = [-1*(x + self.offsets[0]) + 6/16, y+1/16 + self.offsets[1], z + 6/16 + self.offsets[2]]
elif rot == 5:
block3["origin"] = [-1*(x + self.offsets[0]) + 1/16 , y + 5/16 + self.offsets[1], z + 6/16 + self.offsets[2]]
elif rot == 2:
block3["origin"] = [-1*(x + self.offsets[0]) + 6/16, y + 5/16 + self.offsets[1], z + 1/16 + self.offsets[2]]
elif rot == 3:
block3["origin"] = [-1*(x + self.offsets[0]) + 6/16, y + 5/16 + self.offsets[1], z + 1 - 6/16 + self.offsets[2]]
elif rot == 4:
block3["origin"] = [-1*(x + self.offsets[0]) + 1 - 6/16, y + 5/16 + self.offsets[1], z + 6/16 + self.offsets[2]]
self.blocks[block_name] = {}
self.blocks[block_name]["name"] = block_name
self.blocks[block_name]["parent"] = "layer_{}".format(y)
self.blocks[block_name]["pivot"] = [0, 0, 0]
self.blocks[block_name]["cubes"] = [block1]
self.blocks[block_name]["cubes"].append(block2)
self.blocks[block_name]["cubes"].append(block3)
def stair(self, x, y, z, block_name,uv, rot=None,top=None):
        ## helper function for stair creation. Currently does not support corner stairs.
block_name = "block_{}_{}_{}".format(x, y, z)
block1 = {}
offset = 0
if top:
offset = 7/16
block1["origin"] = [-1*(x + self.offsets[0]), y + offset + self.offsets[1], z + self.offsets[2]]
block1["size"] = [1, 0.5, 1]
block1["inflate"] = -0.03
block1uv=dict(uv)
block1uv["east"]["uv_size"] = [1, 0.5]
block1uv["west"]["uv_size"] = [1, 0.5]
block1uv["north"]["uv_size"] = [1, 0.5]
block1uv["south"]["uv_size"] = [1, 0.5]
block1["uv"]=block1uv
if rot == 0:
rotation = [0, 90, 0]
elif rot == 1:
rotation = [0, -90, 0]
elif rot == 2:
rotation = [0, 180, 0]
elif rot == 3:
rotation = [0, 0, 0]
block2={}
block2["origin"] = [-1*(x + self.offsets[0]), y + 7/16 - offset + self.offsets[1], z + self.offsets[2]]
block2["size"] = [1, 0.5, 0.5]
block2["rotation"]=rotation
block2["pivot"]=[-1*(x + self.offsets[0]) + 0.5, y + 0.5 - offset + self.offsets[1], z + 0.5 + self.offsets[2]]
block2["inflate"] = -0.03
block2uv=dict(uv)
block2uv["east"]["uv_size"] = [1, 1]
block2uv["west"]["uv_size"] = [1, 1]
block2uv["north"]["uv_size"] = [1, 1]
block2uv["south"]["uv_size"] = [1, 1]
block2uv["up"]["uv_size"] = [1, 1]
block2uv["down"]["uv_size"] = [1, 1]
block2["uv"]=block2uv
self.blocks[block_name] = {}
self.blocks[block_name]["name"] = block_name
self.blocks[block_name]["parent"] = "layer_{}".format(y)
self.blocks[block_name]["pivot"] = [0.5, 0.5, 0.5]
self.blocks[block_name]["cubes"] = [block1]
self.blocks[block_name]["cubes"].append(block2)
def save_uv(self, name):
# saves the texture file where you tell it to
im = Image.fromarray(self.uv_array)
im.save(name)
def stand_init(self):
# helper function to initialize the dictionary that will be exported as the json object
self.stand["format_version"] = "1.12.0"
self.geometry["description"] = {
"identifier": "geometry.armor_stand.ghost_blocks_{}".format(self.name)}
self.geometry["description"]["texture_width"] = 1
self.geometry["description"]["visible_bounds_offset"] = [
0.0, 1.5, 0.0]
# Changed render distance of the block geometry
self.geometry["description"]["visible_bounds_width"] = 5120
# Changed render distance of the block geometry
self.geometry["description"]["visible_bounds_height"] = 5120
self.geometry["bones"] = []
self.stand["minecraft:geometry"] = [self.geometry]
self.geometry["bones"] = [
{"name": "ghost_blocks",
"pivot": [-8, 0, 8]}]
def extend_uv_image(self, new_image_filename):
        # helper function that appends a new 16x16 texture tile to the bottom of the uv array
image = Image.open(new_image_filename)
impt = np.array(image)
shape=list(impt.shape)
if shape[0]>16:
shape[0]=16
impt=impt[0:16,:,:]
if shape[1]>16:
shape[1]=16
impt=impt[:,0:16,:]
image_array = np.ones([16, 16, 4],np.uint8)*255
image_array[0:shape[0], 0:shape[1], 0:impt.shape[2]] = impt
image_array[:, :, 3] = image_array[:, :, 3] * self.alpha
        if self.uv_array is None:
self.uv_array = image_array
else:
startshape = list(self.uv_array.shape)
endshape = startshape.copy()
endshape[0] += image_array.shape[0]
temp_new = np.zeros(endshape, np.uint8)
temp_new[0:startshape[0], :, :] = self.uv_array
temp_new[startshape[0]:, :, :] = image_array
self.uv_array = temp_new
def block_name_to_uv(self, block_name, variant = ""):
        # helper function that maps the section of the uv file to the side of the block
temp_uv = {}
        if block_name not in self.excluded: # if you don't want a block to be rendered, exclude the UV
texture_files = self.get_block_texture_paths(block_name, variant = variant)
if block_name == "sticky_piston":
texture_files["up"] = "textures/blocks/piston_top_sticky"
if block_name == "piston":
texture_files["up"] = "textures/blocks/piston_top_normal"
for key in texture_files.keys():
if texture_files[key] not in self.uv_map.keys():
self.extend_uv_image(
"{}/{}.png".format(self.ref_resource_pack, texture_files[key]))
self.uv_map[texture_files[key]] = len(self.uv_map.keys())
temp_uv[key] = {
"uv": [0, self.uv_map[texture_files[key]]], "uv_size": [1, 1]}
return temp_uv
def add_blocks_to_bones(self):
        # helper function for adding all of the blocks; this is called during the writing step
for key in self.blocks.keys():
self.geometry["bones"].append(self.blocks[key])
def get_block_texture_paths(self, blockName, variant = ""):
# helper function for getting the texture locations from the vanilla files.
textureLayout = self.blocks_def[blockName]["textures"]
texturedata = self.terrain_texture["texture_data"]
textures = {}
if type(textureLayout) is dict:
if "side" in textureLayout.keys():
textures["east"] = textureLayout["side"]
textures["west"] = textureLayout["side"]
textures["north"] = textureLayout["side"]
textures["south"] = textureLayout["side"]
if "east" in textureLayout.keys():
textures["east"] = textureLayout["east"]
if "west" in textureLayout.keys():
textures["west"] = textureLayout["west"]
if "north" in textureLayout.keys():
textures["north"] = textureLayout["north"]
if "south" in textureLayout.keys():
textures["south"] = textureLayout["south"]
if "down" in textureLayout.keys():
textures["down"] = textureLayout["down"]
if "up" in textureLayout.keys():
textures["up"] = textureLayout["up"]
elif type(textureLayout) is str:
textures["east"] = textureLayout
textures["west"] = textureLayout
textures["north"] = textureLayout
textures["south"] = textureLayout
textures["up"] = textureLayout
textures["down"] = textureLayout
for key in textures.keys():
if type(texturedata[textures[key]]["textures"]) is str:
textures[key] = texturedata[textures[key]]["textures"]
elif type(texturedata[textures[key]]["textures"]) is list:
index=0
if variant[0] in self.block_variants.keys():
index=self.block_variants[variant[0]][variant[1] ]
textures[key] = texturedata[textures[key]]["textures"][index]
return textures
|
{"hexsha": "cddf6168b552bc1ad6ff7632ad322fdd3c01e7a4", "size": 19849, "ext": "py", "lang": "Python", "max_stars_repo_path": "armor_stand_geo_class.py", "max_stars_repo_name": "AL-Hareth/Structura", "max_stars_repo_head_hexsha": "56b5ea24960fb908f16ef97e82f402fcaf3690f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-01T10:34:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-26T09:21:10.000Z", "max_issues_repo_path": "armor_stand_geo_class.py", "max_issues_repo_name": "AL-Hareth/Structura", "max_issues_repo_head_hexsha": "56b5ea24960fb908f16ef97e82f402fcaf3690f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "armor_stand_geo_class.py", "max_forks_repo_name": "AL-Hareth/Structura", "max_forks_repo_head_hexsha": "56b5ea24960fb908f16ef97e82f402fcaf3690f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6225, "max_line_length": 181, "alphanum_fraction": 0.5299007507, "include": true, "reason": "import numpy", "num_tokens": 5412}
|
```python
%matplotlib inline
```
<style type="text/css">
.reveal h1, .reveal h2 {
font-family:"League Gothic"
}
</style>
Hello everyone! My name is Amit Saha and today I am here to talk to you about "Doing Math with Python".
Thank you for coming to my talk - I know you could have chosen the other talk, so it's good to know that my talk's topic interests you.
A bit about me - I am a software engineer at Freelancer.com in Sydney, Australia. I am a fairly regular writer for Linux Voice and other Linux magazines. And last but not least, I am the author of the book "Doing Math with Python" (not coincidentally titled the same as this talk - haha) published by No Starch Press in 2015.
There is a link to my blog, GitHub, twitter, etc. so, if you want to learn more about my work or get in touch, those are the means to do so!
Okay, so all that aside - let's start with the talk!
# <center> My first lab </center>
### <center> Kirstie Whitaker </center>
#### <center> June 15th 2017 </center>
#### <center> Cape Town, SA </center>
## About me
- 2016/17 Mozilla Fellow for Science
- Research Fellow at the Alan Turing Institute for Data Science, London
- Research Associate in the Department of Psychiatry at the University of Cambridge
### Contact
- Twitter: [@kirstie_j](http://twitter.com/kirstie_j)
- Email: [kw401@cam.ac.uk](mailto:kw401@cam.ac.uk)
- GitHub: [KirstieJane](http://github.com/KirstieJane)
<a href="http://hakim.se" data-preview-link>Hakim El Hattab</a>
Hello!
So, what am I selling to you today? Not my book (I am, but in a subtle way). I am presenting an idea, a hypothesis or even making a statement - Python can lead to a more enriching learning and teaching experience in the classroom.
Let me explain where I am coming from. When I think back to when I was learning to program and learning all the other subjects in standards 7-10 - and I think it's true today as well - programming and other subjects such as Math and Science are taught in a disconnected fashion. Programming seems to be all about finding the sum of a series or generating Fibonacci numbers. Make no mistake, these exercises are what build up the programming logic. Some students get really excited about being able to do these, but a lot of them don't. It's a lot like how not everyone gets interested in solving puzzles - I don't; I never took to them.
I think I know of a way we could excite more students! Show them how you can write programs to do your homework, or experiment without having to go the science lab or setup elaborate experimental setups. This is my goal for today - in the following slides and notebooks, I will hypothesise on a way of connecting Python programming and other subjects. That will show that programming is a way to get real work done, not something to learn for the sake of it.
We need some tools to help us on our quest. The Python community has some giant shoulders we can stand upon - Python 3, SymPy and matplotlib.
### This talk - a proposal, a hypothesis, a statement
What? *Python can lead to a more enriching learning and teaching experience in the classroom*
How? *Next slides*
### Tools (or, Giant shoulders we will stand on)
*Python 3*, *SymPy*, *matplotlib*
*Individual logos are copyright of the respective projects. [Source](http://www.orphancaremovement.org/standing-on-the-shoulder-of-giants/) of the "giant shoulders" image.
Whose calculator looks like this?
Who uses Python as a calculator? A show of hands, please!
I do! Specifically, I use Python 3, because 1/2 = 0 in Python 2 messes up my monthly expenditure calculations.
Besides the usual addition and subtraction, we have of course the math module and, more recently, the statistics module, which make Python a worthy scientific calculator.
But then, there's more! You are not limited to the functions from those libraries; you can write your own custom functions and make them available whenever you start your Python interpreter. How?
Use PYTHONSTARTUP!
### Python - a scientific calculator
Whose calculator looks like this?
```python
>>> (131 + 21.5 + 100.2 + 88.7 + 99.5 + 100.5 + 200.5)/4
185.475
```
*Python 3 is my favorite calculator (not Python 2 because 1/2 = 0)*
Beyond basic operations:
- `fabs()`, `abs()`, `sin()`, `cos()`, `gcd()`, `log()` and more (See [math](https://docs.python.org/3/library/math.html))
- Descriptive statistics (See [statistics](https://docs.python.org/3/library/statistics.html#module-statistics))
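For instance, a few descriptive statistics are one import away. A minimal sketch using only the standard library (the numbers are made-up sample data):
```python
import statistics
scores = [7, 8.5, 9, 6.5, 8]  # made-up sample data
statistics.mean(scores)       # 7.8
statistics.median(scores)     # 8
statistics.stdev(scores)      # sample standard deviation
```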
### Python - a scientific calculator
- Develop your own functions: unit conversion, finding correlation, .., anything really
- Use PYTHONSTARTUP to extend the battery of readily available mathematical functions
```python
$ PYTHONSTARTUP=~/work/dmwp/pycon-us-2016/startup_math.py idle3 -s
```
### Unit conversion functions
```python
>>> unit_conversion()
1. Kilometers to Miles
2. Miles to Kilometers
3. Kilograms to Pounds
4. Pounds to Kilograms
5. Celsius to Fahrenheit
6. Fahrenheit to Celsius
Which conversion would you like to do? 6
Enter temperature in fahrenheit: 98
Temperature in celsius: 36.66666666666667
>>>
```
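The menu above is driven by ordinary functions. A minimal sketch of one such conversion - the exact code in startup_math.py may differ:
```python
def fahrenheit_to_celsius(f):
    """Convert a temperature in degrees Fahrenheit to degrees Celsius."""
    return (f - 32) * 5 / 9
fahrenheit_to_celsius(98)  # 36.66666666666667
```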
### Finding linear correlation
```python
>>>
>>> x = [1, 2, 3, 4]
>>> y = [2, 4, 6.1, 7.9]
>>> find_corr_x_y(x, y)
0.9995411791453812
```
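find_corr_x_y is not a built-in either - it computes the Pearson correlation coefficient. A self-contained sketch (the version shipped in startup_math.py may differ in details):
```python
def find_corr_x_y(x, y):
    """Pearson correlation coefficient between two equal-length lists."""
    if len(x) != len(y):
        raise ValueError('Both lists must have the same number of elements')
    n = len(x)
    # numerator: n*sum(xy) - sum(x)*sum(y)
    numerator = n * sum(xi * yi for xi, yi in zip(x, y)) - sum(x) * sum(y)
    # denominator: sqrt(n*sum(x^2) - sum(x)^2) * sqrt(n*sum(y^2) - sum(y)^2)
    den_x = n * sum(xi ** 2 for xi in x) - sum(x) ** 2
    den_y = n * sum(yi ** 2 for yi in y) - sum(y) ** 2
    return numerator / (den_x ** 0.5 * den_y ** 0.5)
```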
So, that was Python and its standard libraries. When you bring in third party libraries to the mix, Python becomes a seriously fancy calculator.
Who has heard about SymPy?
You can give it algebraic expressions to a function and a graph will be created for you.
You can give an equation and out comes the solutions for that equation.
We can even solve calculus problems.
### Python - a really fancy calculator
SymPy - a pure Python symbolic math library
*from sympy import awesomeness* - don't try that :)
```python
# Create graphs from algebraic expressions
from sympy import Symbol, plot
x = Symbol('x')
p = plot(2*x**2 + 2*x + 2)
```
```python
# Solve equations
from sympy import solve, Symbol
x = Symbol('x')
solve(2*x + 1)
```
[-1/2]
```python
# Limits
from sympy import Symbol, Limit, sin
x = Symbol('x')
Limit(sin(x)/x, x, 0).doit()
```
1
```python
# Derivative
from sympy import Symbol, Derivative, sin, init_printing
x = Symbol('x')
init_printing()
Derivative(sin(x)**(2*x+1), x).doit()
```
```python
# Indefinite integral
from sympy import Symbol, Integral, sqrt, sin, init_printing
x = Symbol('x')
init_printing()
Integral(sqrt(x)).doit()
```
```python
# Definite integral
from sympy import Symbol, Integral, sqrt
x = Symbol('x')
Integral(sqrt(x), (x, 0, 2)).doit()
```
I will pause for a moment now. In the first two slides, we have seen how Python can be a super awesome calculator. What does that buy us? We have now been able to show that you can make computer programs literally do your homework. Write a program to do your work once and you will never have to make those lengthy calculations yourselves. Can we use Python to do more?
Let's continue.
<center><h1>Can we do more than write smart calculators?</h1></center>
Python can be more than a super powerful calculator. We can use it to enhance the learning experience of other subjects. Next, I have three examples including a demo. First up, a video of a projectile motion. This program uses matplotlib's animation API to create a basic animation of a projectile motion - a topic commonly introduced in introductory Physics. The program which is linked asks for the angle of projection and the speed, and then draws the trajectory of the projectile. Just by running the program multiple times, we can see how the trajectory changes. We don't have to go outside and start throwing balls!
Next, we will put Jupyter Notebook's interactive widgets to good effect by drawing a Barnsley Fern. Let's see how the demo goes.
Next, with the help of basemap, we can draw places on a world map like we would draw points on graph paper.
I know I would be excited if someone had shown me all these cool things when I was learning these subjects!
### Python - Making other subjects more lively
- matplotlib
- basemap
- Interactive Jupyter Notebooks
#### Bringing Science to life
*Animation of a Projectile motion* [(Python Source)](https://github.com/doingmathwithpython/pycon-us-2016/blob/master/py-files/projectile_animation.py)
```python
from IPython.display import YouTubeVideo
YouTubeVideo("8uWRVh58KdQ")
```
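The animation itself needs matplotlib's FuncAnimation machinery, but the underlying maths is just two kinematics formulas. A stripped-down sketch of the trajectory computation - the linked program adds the plotting and animation on top, and the names here are illustrative:
```python
import math
def trajectory(u, theta_deg, g=9.8, steps=50):
    """Return (x, y) points for a projectile launched at speed u (m/s)
    at an angle of theta_deg degrees above the horizontal."""
    theta = math.radians(theta_deg)
    t_flight = 2 * u * math.sin(theta) / g  # total time of flight
    points = []
    for i in range(steps + 1):
        t = t_flight * i / steps
        x = u * math.cos(theta) * t
        y = u * math.sin(theta) * t - 0.5 * g * t * t
        points.append((x, y))
    return points
```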
#### Exploring Fractals in Nature
*Interactively drawing a Barnsley Fern* [(Notebook)](https://github.com/doingmathwithpython/pycon-us-2016/blob/master/notebooks/Interactive%20Barnsley%20Fern.ipynb)
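The fern is drawn by iterating four affine transformations, each chosen at random with a fixed probability. A minimal non-interactive sketch using the standard Barnsley coefficients (the linked notebook adds ipywidgets sliders on top of this):
```python
import random
def barnsley_fern(n=20000):
    """Generate n points of the Barnsley fern as (x, y) tuples."""
    x, y = 0.0, 0.0
    points = []
    for _ in range(n):
        r = random.random()
        if r < 0.01:    # stem
            x, y = 0.0, 0.16 * y
        elif r < 0.86:  # successively smaller leaflets
            x, y = 0.85 * x + 0.04 * y, -0.04 * x + 0.85 * y + 1.6
        elif r < 0.93:  # largest left-hand leaflet
            x, y = 0.2 * x - 0.26 * y, 0.23 * x + 0.22 * y + 1.6
        else:           # largest right-hand leaflet
            x, y = -0.15 * x + 0.28 * y, 0.26 * x + 0.24 * y + 0.44
        points.append((x, y))
    return points
```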
#### The world is your graph paper
*Showing places on a digital map* [(Notebook)](https://github.com/doingmathwithpython/pycon-us-2016/blob/master/notebooks/Maps%20using%20Basemap%20-%20demo.ipynb)
Next, I would like to talk about my book "Doing Math with Python". My idea was attractive enough to get it published by No Starch Press, which makes me hope that I am probably onto something.
Has anybody read my book? What do you think of it? You have read it and still came to my talk? I am feeling better :)
The book covers all of the topics I have discussed today in my talk. In addition, it covers sets, probability and random numbers, and descriptive statistics.
It's being translated into several non-English languages.
The reviews/feedback so far have been really positive. I don't have any first-hand involvement in teaching, so I really appreciate people sharing their viewpoints with me.
### Book: Doing Math With Python
Overview
- All of what I have discussed so far
- In addition: Descriptive statistics, Sets and Probability, Random numbers
Published by [No Starch Press](https://www.nostarch.com/doingmathwithpython) in August, 2015.
*Upcoming/In-progress translations*: Simplified Chinese, Japanese, French and Korean.
#### Comments
> Saha does an excellent job providing a clear link between Python and upper-level math concepts, and demonstrates how Python can be transformed into a mathematical stage. This book deserves a spot on every geometry teacher’s bookshelf.
[School Library Journal](http://www.slj.com/2016/05/collection-development/read-watch-alikes/coding-lets-begin/#_)
> Outstanding guide to using Python to do maths. Working back through my undergrad maths using Python.
> Saha does an excellent job providing a clear link between Python and upper-level math concepts, and demonstrates how Python can be transformed into a mathematical stage.
> This book is highly recommended for the high school or college student and anyone who is looking for a more natural way of programming math and scientific functions
> As a teacher I highly recommend this book as a way to work with someone in learning both math and programming
Okay, so that's great. We have successfully used Python to make the learning experience of young learners more fun and immediately applicable. Can we derive more benefit from doing that? Like something for the future? We all love doing things for the future, don't we?
I think yes: if we teach young learners the things we have discussed today, it is a great base for someone wanting to go into data science or machine learning.
Statistics and visualising data are two very key factors of data science.
Differential calculus, and specifically the gradient descent method, gives us a simple but useful optimization method used in machine learning. Let's see a demo of using gradient descent to find the minimum value of a function.
Now, let's apply gradient descent as an optimizer in a Linear Regression problem.
### Great base for the future
*Statistics and Graphing data* -> *Data Science*
*Differential Calculus* -> *Machine learning*
### Application of differentiation
Use gradient descent to find a function's minimum value [(Notebook)](https://github.com/doingmathwithpython/pycon-us-2016/blob/master/notebooks/Gradient%20Descent.ipynb)
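The idea fits in one screenful: repeatedly step against the derivative until the updates become tiny. A minimal sketch (the linked notebook computes the derivative symbolically with SymPy instead of hand-coding it):
```python
def grad_descent(df, x0, step=0.01, tol=1e-6):
    """Minimise a 1-D function, given its derivative df, starting from x0."""
    x = x0
    while True:
        x_new = x - step * df(x)      # move against the gradient
        if abs(x_new - x) < tol:      # stop when updates become tiny
            return x_new
        x = x_new
# minimum of f(x) = x**2 + 2*x, whose derivative is 2*x + 2 (true minimum at x = -1)
grad_descent(lambda x: 2 * x + 2, x0=0.1)  # approximately -1.0
```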
### Predict the college admission score based on high school math score
Use gradient descent as the optimizer for single variable linear regression model [(Notebook)](https://github.com/doingmathwithpython/pycon-us-2016/blob/master/notebooks/Simple%20Linear%20Regression.ipynb)
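A rough sketch of the training loop in that notebook's spirit - the parameter names and learning rate here are illustrative, not the notebook's:
```python
def fit_line(xs, ys, lr=0.001, epochs=5000):
    """Fit y = m*x + b by gradient descent on the mean squared error."""
    m, b = 0.0, 0.0
    n = len(xs)
    for _ in range(epochs):
        # gradients of the MSE with respect to m and b
        grad_m = sum(2 * (m * x + b - y) * x for x, y in zip(xs, ys)) / n
        grad_b = sum(2 * (m * x + b - y) for x, y in zip(xs, ys)) / n
        m -= lr * grad_m
        b -= lr * grad_b
    return m, b
```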
### Advanced libraries
- [scipy](https://scipy.org)
- [numpy](http://www.numpy.org/)
- [scikit-learn](http://scikit-learn.org/stable/)
- [pandas](http://pandas.pydata.org/)
- [Statsmodels](http://statsmodels.sourceforge.net/)
### Dialogue
Questions, Thoughts, comments, discussions?
#### Online
- Twitter: @echorand
- Email: amitsaha.in@gmail.com
### PyCon Special!
*Use PYCONMATH code to get 30% off "Doing Math with Python" from [No Starch Press](https://www.nostarch.com/doingmathwithpython)*
(Valid from May 26th - June 8th)
Book Signing - May 31st - 2.00 PM - No Starch Press booth
### Acknowledgements
PyCon US Education Summit team for inviting me
Thanks to PyCon US for reduced registration rates
Massive thanks to my employer, Freelancer.com for sponsoring my travel and stay
### Links
- [Upcoming O'Reilly Webcast](http://www.oreilly.com/pub/e/3712)
- [Doing Math with Python](https://nostarch.com/doingmathwithpython)
- [Doing Math with Python Blog](https://doingmathwithpython.github.io)
- [Doing Math with Python on GitHub](https://github.com/doingmathwithpython)
|
{"hexsha": "7760f707bbfc05213b896561a6e4f7776f2c359d", "size": 52447, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "slides.ipynb", "max_stars_repo_name": "WhitakerLab/slides", "max_stars_repo_head_hexsha": "a1b36f4114ddb1d330795cd9b0967d476c493df2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slides.ipynb", "max_issues_repo_name": "WhitakerLab/slides", "max_issues_repo_head_hexsha": "a1b36f4114ddb1d330795cd9b0967d476c493df2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slides.ipynb", "max_forks_repo_name": "WhitakerLab/slides", "max_forks_repo_head_hexsha": "a1b36f4114ddb1d330795cd9b0967d476c493df2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.1377912867, "max_line_length": 13386, "alphanum_fraction": 0.7560584971, "converted": true, "num_tokens": 3221}
|
[STATEMENT]
lemma keys_empty [simp]: "keys DAList.empty = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. keys DAList.empty = {}
[PROOF STEP]
by transfer simp
|
{"llama_tokens": 64, "file": "Containers_AssocList", "length": 1}
|
C *********************************************************
C * *
C * TEST NUMBER: 09.01.03.02/12 *
C * TEST TITLE : Error 113 handled by system *
C * *
C * PHIGS Validation Tests, produced by NIST *
C * *
C *********************************************************
CALL INITGL ('09.01.03.02/12')
CALL E03113 (2)
CALL ENDIT
END
|
{"hexsha": "7f327af6c022afd675f0810428e079189c23f1a1", "size": 578, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "third_party/Phigs/PVT/PVT_fort/09/01/03/02/p12.f", "max_stars_repo_name": "n1ckfg/Telidon", "max_stars_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2017-07-08T02:34:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T03:42:48.000Z", "max_issues_repo_path": "third_party/Phigs/PVT/PVT_fort/09/01/03/02/p12.f", "max_issues_repo_name": "n1ckfg/Telidon", "max_issues_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/Phigs/PVT/PVT_fort/09/01/03/02/p12.f", "max_forks_repo_name": "n1ckfg/Telidon", "max_forks_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-02-03T04:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T15:31:18.000Z", "avg_line_length": 38.5333333333, "max_line_length": 60, "alphanum_fraction": 0.2370242215, "num_tokens": 105}
|
import argparse
import copy
from io import UnsupportedOperation
import json
import os
import sys
import os.path as osp
from collections import OrderedDict
from terminaltables import AsciiTable
try:
import apex
except ImportError:
print("No APEX!")
import numpy as np
import torch
import yaml
from det3d import __version__, torchie
from det3d.datasets import build_dataloader, build_dataset
from det3d.models import build_detector
from det3d.torchie import Config
from det3d.torchie.apis import (
batch_processor,
build_optimizer,
get_root_logger,
init_dist,
set_random_seed,
train_detector,
)
from det3d.torchie.trainer import get_dist_info, load_checkpoint
from det3d.torchie.trainer.utils import all_gather, synchronize
from torch.nn.parallel import DistributedDataParallel
import pickle
import time
def convert_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict into a module
"""
unexpected_keys = []
shape_mismatch_pairs = []
own_state = module.state_dict()
for name, param in state_dict.items():
        # a hacky fix to load a new voxelnet: remap the old fused 'backbone.middle_conv' layer indices onto the renamed conv stages
if name not in own_state:
if name[:20] == 'backbone.middle_conv':
index = int(name[20:].split('.')[1])
if index in [0, 1, 2]:
new_name = 'backbone.conv_input.{}.{}'.format(str(index), name[23:])
elif index in [3, 4]:
new_name = 'backbone.conv1.{}.{}'.format(str(index-3), name[23:])
elif index in [5, 6, 7, 8, 9]:
new_name = 'backbone.conv2.{}.{}'.format(str(index-5), name[23:])
elif index in [10, 11, 12, 13, 14]:
new_name = 'backbone.conv3.{}.{}'.format(str(index-10), name[24:])
elif index in [15, 16, 17, 18, 19]:
new_name = 'backbone.conv4.{}.{}'.format(str(index-15), name[24:])
elif index in [20, 21, 22]:
new_name = 'backbone.extra_conv.{}.{}'.format(str(index-20), name[24:])
else:
raise NotImplementedError(index)
if param.size() != own_state[new_name].size():
                    shape_mismatch_pairs.append([name, own_state[new_name].size(), param.size()])
continue
own_state[new_name].copy_(param)
print("load {}'s param from {}".format(new_name, name))
continue
unexpected_keys.append(name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
if param.size() != own_state[name].size():
shape_mismatch_pairs.append([name, own_state[name].size(), param.size()])
continue
own_state[name].copy_(param)
all_missing_keys = set(own_state.keys()) - set(state_dict.keys())
# ignore "num_batches_tracked" of BN layers
missing_keys = [key for key in all_missing_keys if "num_batches_tracked" not in key]
err_msg = []
if unexpected_keys:
err_msg.append(
"unexpected key in source state_dict: {}\n".format(
", ".join(unexpected_keys)
)
)
if missing_keys:
err_msg.append(
"missing keys in source state_dict: {}\n".format(", ".join(missing_keys))
)
if shape_mismatch_pairs:
mismatch_info = "these keys have mismatched shape:\n"
header = ["key", "expected shape", "loaded shape"]
table_data = [header] + shape_mismatch_pairs
table = AsciiTable(table_data)
err_msg.append(mismatch_info + table.table)
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(0, "The model and loaded state dict do not match exactly\n")
err_msg = "\n".join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def parse_args():
parser = argparse.ArgumentParser(description="Train a detector")
parser.add_argument("config", help="train config file path")
parser.add_argument("--work_dir", help="the dir to save logs and models")
parser.add_argument(
"--checkpoint", help="the dir to checkpoint which the model read from"
)
args = parser.parse_args()
return args
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
def save_checkpoint(model, filename, meta=None):
"""Save checkpoint to file.
    The checkpoint will have 2 fields: ``meta`` and ``state_dict``.
    By default ``meta`` will contain version and time info.
    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError("meta must be a dict or None, but got {}".format(type(meta)))
torchie.mkdir_or_exist(osp.dirname(filename))
if hasattr(model, "module"):
model = model.module
checkpoint = {"meta": meta, "state_dict": weights_to_cpu(model.state_dict())}
torch.save(checkpoint, filename)
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
checkpoint = torch.load(args.checkpoint, map_location='cpu')
state_dict = checkpoint['state_dict']
if list(state_dict.keys())[0].startswith("module."):
state_dict = {k[7:]: v for k, v in checkpoint["state_dict"].items()}
convert_state_dict(model, state_dict)
save_checkpoint(model, osp.join(args.work_dir, 'voxelnet_converted.pth'))
if __name__ == "__main__":
    main()
|
{"hexsha": "9b56fbceb8765892f3b382598c59758685a359bf", "size": 6303, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/convert_voxelnet.py", "max_stars_repo_name": "alsun-oven/CenterPoint", "max_stars_repo_head_hexsha": "cafd89c4008270e648e97202bc256aff968e8109", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1124, "max_stars_repo_stars_event_min_datetime": "2020-06-22T00:48:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:03:35.000Z", "max_issues_repo_path": "tools/convert_voxelnet.py", "max_issues_repo_name": "alsun-oven/CenterPoint", "max_issues_repo_head_hexsha": "cafd89c4008270e648e97202bc256aff968e8109", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 290, "max_issues_repo_issues_event_min_datetime": "2020-06-23T01:29:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:27:32.000Z", "max_forks_repo_path": "tools/convert_voxelnet.py", "max_forks_repo_name": "alsun-oven/CenterPoint", "max_forks_repo_head_hexsha": "cafd89c4008270e648e97202bc256aff968e8109", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 326, "max_forks_repo_forks_event_min_datetime": "2020-06-22T01:48:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:15:08.000Z", "avg_line_length": 33.8870967742, "max_line_length": 93, "alphanum_fraction": 0.6250991591, "include": true, "reason": "import numpy", "num_tokens": 1453}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import numpy as np
from scipy.special import erf
from ReplicatedFocusingBeliefPropagation import atanherf
__author__ = ["Nico Curti", "Daniele Dall'Olio"]
__email__ = ['nico.curti2@unibo.it', 'daniele.dallolio@studio.unibo.it']
def test_atanherf ():
'''
Tests:
- if the approximated formula is "equal" to the std one for both large (4-5) and small (0-1) x
'''
np.random.seed(42)
for _ in range(10):
x = np.random.uniform(low=4, high=5)
assert np.isclose(np.arctanh(erf(x)), atanherf(x), rtol=1e-4, atol=1e-4)
for _ in range(10):
x = np.random.uniform(low=0., high=1.)
assert np.isclose(np.arctanh(erf(x)), atanherf(x), rtol=1e-4, atol=1e-4)
|
{"hexsha": "dc97091f331e610e6c4413d335eee579910cffe0", "size": 772, "ext": "py", "lang": "Python", "max_stars_repo_path": "ReplicatedFocusingBeliefPropagation/rfbp/test/test_atanherf.py", "max_stars_repo_name": "DanielLenz/rFBP", "max_stars_repo_head_hexsha": "fc8ae71a8ff58858f6800eeb3a3f25a56c143d18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-12-03T17:45:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T15:46:41.000Z", "max_issues_repo_path": "ReplicatedFocusingBeliefPropagation/rfbp/test/test_atanherf.py", "max_issues_repo_name": "DanielLenz/rFBP", "max_issues_repo_head_hexsha": "fc8ae71a8ff58858f6800eeb3a3f25a56c143d18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-09-28T06:57:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-22T05:41:12.000Z", "max_forks_repo_path": "ReplicatedFocusingBeliefPropagation/rfbp/test/test_atanherf.py", "max_forks_repo_name": "DanielLenz/rFBP", "max_forks_repo_head_hexsha": "fc8ae71a8ff58858f6800eeb3a3f25a56c143d18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-11T08:59:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-11T08:59:41.000Z", "avg_line_length": 23.3939393939, "max_line_length": 76, "alphanum_fraction": 0.6852331606, "include": true, "reason": "import numpy,from scipy", "num_tokens": 253}
|
import copy
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mplt
from matplotlib import pyplot as plt
from labellines import labelLines
from disease_spread_model.config import Directories
from disease_spread_model.names import TRANSLATE
from model_runs import RunModel, HInfProb
class Plotter(ABC):
def __init__(self):
pass
@abstractmethod
def plot(self) -> None:
pass
class StabilityPlotter(Plotter):
"""PLots avg result from csvs in `Logger` with the same `run_purpose`."""
params_changing_pop_size = {'grid_size', 'N', 'customers_in_household'}
def __init__(self, run_purpose: str):
super().__init__()
self.run_purpose = run_purpose
self.important_fixed_params = self._get_important_fixed_params()
self.variable = self._get_variable_param()
self.plot_values_in_percent = \
self.variable in self.params_changing_pop_size
def _get_sub_df_from_csv(self) -> pd.DataFrame:
"""Returns `DataFrame` with given `run_purpose`."""
df = pd.read_csv(Directories.LOGGING_MODEL_RUNS_FNAME)
return df[df['run_purpose'] == self.run_purpose]
def _get_important_fixed_params(self) -> dict:
"""Return `{param_name: value}` for most important params."""
important_params = {
'beta', 'mortality', 'visibility', 'N', 'grid_size',
'infected_cashiers_at_start',
'percent_of_infected_customers_at_start'
}
sub_df = self._get_sub_df_from_csv()
first_row = sub_df.iloc[0]
all_params = first_row.to_dict()
return {k: v for k, v in all_params.items() if k in important_params}
def _get_variable_param(self) -> str:
"""Return `variable_param` based on `run_purpose` and remove
it from `self.important_fixed_params`."""
variable_param = None
for param in self.important_fixed_params:
if param in self.run_purpose:
variable_param = param
if variable_param:
self.important_fixed_params.pop(variable_param)
return variable_param
else:
raise ValueError("Can't find `variable_param` in `run_purpose`!")
@staticmethod
def _get_pop_size(ser: pd.Series) -> int:
"""Return population size (num of all customers on the grid.)"""
N = ser['N']
grid_size = eval(ser['grid_size'])
customers_in_household = ser['customers_in_household']
return N * grid_size[0] * grid_size[1] * customers_in_household
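# Worked example using the defaults from `Tester` below: N=800,
# grid_size=(30, 30) and customers_in_household=3 give
# 800 * 30 * 30 * 3 = 2,160,000 customers.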
def _get_fdirs(self) -> list[str]:
"""Returns fnames (full path) with given `run_purpose`."""
return list(self._get_sub_df_from_csv()['avg fname'])
@staticmethod
def _create_empty_fig_and_ax() -> tuple[mplt.figure.Figure, mplt.axes.Axes]:
sns.set_style("ticks")
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
sns.despine(fig, ax)
return fig, ax
@staticmethod
def _format_values_of_params(params: dict) -> dict[str, str]:
"""
Return new dict with nicely formatted (ready to put on the plot)
values of `params`.
Param names passed to this function must be
in their long English form.
"""
in_params = copy.deepcopy(params)
# Format these params; all other params are returned unchanged.
percent_0_digits = {'visibility'}.intersection(in_params)
percent_2_digits = {'beta', 'mortality'}.intersection(in_params)
float_0_digits = {
'N', 'infected_cashiers_at_start'}.intersection(in_params)
float_3_digits = {
'percent_of_infected_customers_at_start'}.intersection(in_params)
pct_0 = {k: f"{in_params[k] * 100:.0f}%" for k in percent_0_digits}
pct_2 = {k: f"{in_params[k] * 100:.2f}%" for k in percent_2_digits}
float_0 = {k: f"{in_params[k]:.0f}" for k in float_0_digits}
float_3 = {k: f"{in_params[k]:.3f}" for k in float_3_digits}
return in_params | pct_0 | pct_2 | float_0 | float_3
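# Illustration (values match the defaults used elsewhere in this module):
# {'beta': 0.02, 'visibility': 0.65, 'N': 800} ->
# {'beta': '2.00%', 'visibility': '65%', 'N': '800'}
# Note: the dict-union operator `|` requires Python >= 3.9.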
def _label_axis_and_titles(self,
fig: mplt.figure.Figure,
ax: mplt.axes.Axes) -> None:
"""Automatically label axis and `ax` and `fig` title."""
fig.suptitle(
"Czułość sumy zgonów na " + TRANSLATE.to_polish(self.variable))
ax.set_xlabel('t, dni')
ax.set_ylabel(
'Suma zgonów (% populacji)' if self.plot_values_in_percent
else 'Suma zgonów')
fixed_params_formatted = self._format_values_of_params(
self.important_fixed_params)
fixed_params_in_polish = TRANSLATE.to_polish(fixed_params_formatted)
ax_title = ''
for i, (k, v) in enumerate(fixed_params_in_polish.items()):
ax_title += f'{k}={v}'
ax_title += '\n' if (i + 1) % 4 == 0 else ' '
ax.set_title(ax_title)
def plot(self):
"""Plot death toll. One line for each avg_df."""
fig, ax = self._create_empty_fig_and_ax()
for _, df_row in self._get_sub_df_from_csv().iterrows():
avg_df = pd.read_csv(df_row['avg fname'])
death_toll = avg_df['Dead people']
param_value = df_row[self.variable]
if self.plot_values_in_percent:
death_toll /= self._get_pop_size(df_row)
death_toll *= 100
label = self._format_values_of_params(
{self.variable: param_value})[self.variable]
ax.plot(death_toll, label=label)
ax.legend(title=TRANSLATE.to_polish(self.variable))
labelLines(ax.get_lines(), zorder=2)
self._label_axis_and_titles(fig, ax)
plt.show()
class Tester(ABC):
def __init__(self):
self.model_fixed_params = {
'grid_size': (30, 30),
'N': 800,
'customers_in_household': 3,
'beta': 2.0 / 100,
'mortality': 2.0 / 100,
'visibility': 65 / 100,
'infected_cashiers_at_start': 30,
'percent_of_infected_customers_at_start': 0,
'housemate_infection_probability': HInfProb.BY_BETA,
'extra_shopping_boolean': True,
'max_steps': 150,
}
self.model_sweep_params = {
'beta': np.array([2 / 100]),
'mortality': np.array([2 / 100]),
'visibility': np.array([65 / 100]),
}
self.tester_params = {
'make_log': True,
'iterations': 12,
'run_purpose': 'not specified',
}
@abstractmethod
def _set_run_purpose(self) -> None:
pass
@property
def run_purpose(self) -> str:
return self.tester_params['run_purpose']
@abstractmethod
def run(self) -> None:
pass
class StabilityTester(Tester, ABC):
def __init__(self, param_range: np.ndarray):
super().__init__()
self._apply_param_range(param_range)
@abstractmethod
def _apply_param_range(self, param_range: np.ndarray) -> None:
pass
def run(self) -> None:
"""Make simulations and plot Results"""
self._set_run_purpose()
RunModel.run_simulation_to_test_sth(
**self.model_fixed_params,
sweep_params=self.model_sweep_params,
**self.tester_params,
)
StabilityPlotter(self.run_purpose).plot()
class BetaStabilityTester(StabilityTester):
def __init__(self, param_range: np.ndarray):
super().__init__(param_range)
def _set_run_purpose(self) -> None:
self.tester_params['run_purpose'] = 'test beta stability'
def _apply_param_range(self, param_range: np.ndarray) -> None:
self.model_sweep_params['beta'] = param_range
class MortalityStabilityTester(StabilityTester):
def __init__(self, param_range: np.ndarray):
super().__init__(param_range)
def _set_run_purpose(self) -> None:
self.tester_params['run_purpose'] = 'test mortality stability'
def _apply_param_range(self, param_range: np.ndarray) -> None:
self.model_sweep_params['mortality'] = param_range
class NStabilityTester(StabilityTester):
def __init__(self, param_range: np.ndarray):
super().__init__(param_range)
def _set_run_purpose(self) -> None:
self.tester_params['run_purpose'] = 'test N stability'
def _apply_param_range(self, param_range: np.ndarray) -> None:
self.model_sweep_params['N'] = param_range
class GridSizeStabilityTester(StabilityTester):
def __init__(self,
param_range: np.ndarray,
infected_cashiers_at_start: np.ndarray):
super().__init__(param_range)
self.nums_of_cashiers = infected_cashiers_at_start
self.grid_sizes = param_range
def _set_run_purpose(self) -> None:
self.tester_params['run_purpose'] = 'test grid_size stability'
def _apply_param_range(self, param_range: np.ndarray) -> None:
self.model_sweep_params['grid_size'] = param_range
def run(self) -> None:
"""Make simulations and plot Results"""
self._set_run_purpose()
iterable = zip(self.grid_sizes, self.nums_of_cashiers)
for grid_size, num_of_cashiers in iterable:
self.model_sweep_params['grid_size'] = [tuple(grid_size)]
self.model_sweep_params['infected_cashiers_at_start'] = [
num_of_cashiers]
RunModel.run_simulation_to_test_sth(
**self.model_fixed_params,
sweep_params=self.model_sweep_params,
**self.tester_params,
)
StabilityPlotter(self.run_purpose).plot()
def run_stability_testers() -> None:
# Example ranges; only the final (grid-size) assignment is used below.
param_range = np.linspace(2., 3.6, 1) / 100
# param_range = np.array([100, 200, 500, 800, 1000])
param_range = np.array([[i, i] for i in range(3, 40, 3)])
init_cashiers = (np.arange(3, 40, 3) / 3).astype(int)
# print(f"{param_range = }")
# BetaStabilityTester(param_range).run()
# MortalityStabilityTester(param_range).run()
# NStabilityTester(param_range).run()
GridSizeStabilityTester(param_range, init_cashiers).run()
def run_stability_plotter() -> None:
# stability_plotter = StabilityPlotter('test N stability')
StabilityPlotter('test beta stability').plot()
def main() -> None:
run_stability_testers()
# run_stability_plotter()
if __name__ == '__main__':
main()
|
{"hexsha": "6127382b480128ffd4659513bd567107c3274522", "size": 10583, "ext": "py", "lang": "Python", "max_stars_repo_path": "disease_spread_model/model/param_stability_tests.py", "max_stars_repo_name": "BartoszBiernacki/ABM", "max_stars_repo_head_hexsha": "6c993356aab86ce3d456e228c0a3aa41b2efa10c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "disease_spread_model/model/param_stability_tests.py", "max_issues_repo_name": "BartoszBiernacki/ABM", "max_issues_repo_head_hexsha": "6c993356aab86ce3d456e228c0a3aa41b2efa10c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "disease_spread_model/model/param_stability_tests.py", "max_forks_repo_name": "BartoszBiernacki/ABM", "max_forks_repo_head_hexsha": "6c993356aab86ce3d456e228c0a3aa41b2efa10c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5630769231, "max_line_length": 77, "alphanum_fraction": 0.6318624209, "include": true, "reason": "import numpy", "num_tokens": 2591}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Generate map plots.
Example::
$ python plot_maps.py -c : plot from files from combined
output file
$ python plot_maps.py -m max_id : plot from files with maximal subset
id of max_id
$ python plot_maps.py -h : display this help
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.colors as colors
from matplotlib.ticker import LogFormatter
import cartopy
import cartopy.crs as ccrs
# General plot settings.
cline_label_format_default = '%.1f'
n_fill_levels_default = 14
n_line_levels_default = 6
color_map = plt.get_cmap('YlOrRd') # 'coolwarm' 'RdBu' 'bwr' # plt.get_cmap('YlOrRd')
if __name__ == "__main__":
# TODO this has to be changed to work via class!
# from ..utils.convenience_utils import hour_to_date_str
from plotting_utils import read_dataset_user_input
# Load the processed data from the NetCDF files specified in the input.
nc = read_dataset_user_input()
# TODO remove - use config for this!
lons = nc['longitude'].values
lats = nc['latitude'].values
height_range_floor = 50.
height_range_ceilings = list(nc['height_range_ceiling'].values)
fixed_heights = list(nc['fixed_height'].values)
integration_range_ids = list(nc['integration_range_id'].values)
p_integral_mean = nc['p_integral_mean'].values
# Hours since 1900-01-01 00:00:00, see: print(nc['time'].values).
hours = nc['time'].values
# print("Analyzing " + hour_to_date_str(hours[0]) + " till "
# + hour_to_date_str(hours[-1]))
# TODO fix from config
else:
lons = list(np.arange(-20, 20.25, .25))
lats = list(np.arange(65, 29.75, -.25))
# #lons = np.arange(-12, -5.0, .25) # config.Data.all_lons
# #lats = np.arange(51, 56.25, .25) # config.Data.all_lats
# else:
# # TODO make more understandable
# # TODO make into utils -> use for map plots in production
# # TODO fix from config
# # Ireland
# # lons = list(np.arange(-12, -5.0, .25)) # -5.75, .25))
# # lats = list(np.arange(51, 56.25, .25))
# # Europe map
# lons = list(np.arange(-20, 20.25, .25))
# lats = list(np.arange(65, 29.75, -.25))
# Plotting map - region selection # TODO rework -> config
plot_northern_germany = False
label_cities = False
map_plot_aspect_ratio = 9 / 12.5 # len(lons)/len(lats) # TODO this makes sense - adapt fixed number later on -> adaptable
mrc = ccrs.Mercator()
def calc_fig_height(fig_width, subplot_shape, plot_frame_top,
plot_frame_bottom, plot_frame_left, plot_frame_right):
""""Calculate figure height, such that all maps have the same resolution.
Args:
fig_width (float): Figure width in inches.
subplot_shape (tuple of int): Containing number of rows and columns of
subplot.
plot_frame_top (float): Top of plot as a fraction of the figure window
height w.r.t. bottom.
plot_frame_bottom (float): Bottom of plot as a fraction of the figure
window height w.r.t. bottom.
plot_frame_left (float): Left side of plot as a fraction of the figure
window width w.r.t. left.
plot_frame_right (float): Right side of plot as a fraction of the
figure window width w.r.t. left.
Returns:
float: Figure height in inches.
"""
plot_frame_width = fig_width*(plot_frame_right - plot_frame_left)
plot_frame_height = plot_frame_width/(map_plot_aspect_ratio *
subplot_shape[1] / subplot_shape[0])
fig_height = plot_frame_height/(plot_frame_top - plot_frame_bottom)
return fig_height
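# Worked example (numbers from plot_single_panel below): fig_width=3,
# subplot_shape=(1, 1) and frames (.95, .15, 0., 1.) give
# plot_frame_width = 3, plot_frame_height = 3 / 0.72 ~= 4.17 and thus
# fig_height ~= 4.17 / 0.8 ~= 5.21 inches.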
def eval_contour_fill_levels(plot_items):
""""Evaluate the plot data, e.g. if values are within contour fill
levels limits.
Args:
plot_items (list of dict): List containing the plot property dicts.
"""
for i, item in enumerate(plot_items):
max_value = np.amax(item['data'])
min_value = np.amin(item['data'])
print("Max and min value of plot"
" {}: {:.3f} and {:.3f}".format(i, max_value, min_value))
if item['contour_fill_levels'][-1] < max_value:
print("Contour fills "
"(max={:.3f}) do not cover max value of plot {}"
.format(item['contour_fill_levels'][-1], i))
if item['contour_fill_levels'][0] > min_value:
print("Contour fills "
"(min={:.3f}) do not cover min value of plot {}"
.format(item['contour_fill_levels'][0], i))
def individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cline_label_format_default,
log_scale=False,
extend="neither",
overflow=None):
""""Individual plot of coastlines and contours.
Args:
z (ndarray): 2D array containing contour plot data.
cf_lvls (list): Contour fill levels.
cl_lvls (list): Contour line levels.
cline_label_format (str, optional): Contour line label format string.
Defaults to `cline_label_format_default`.
log_scale (bool): Logarithmic scaled contour levels are used if True,
linearly scaled if False.
extend (str): Setting for extension of contour fill levels.
overflow (float or list, optional): Threshold(s) beyond which a
separate overflow (and, for a two-element list, underflow)
colour range is used.
Returns:
QuadContourSet: Contour fills object.
"""
# Care if colorbar ticks are set beforehand, see plot_single_panel.
# colors_undersea = plt.cm.terrain(np.linspace(0, 0.17, 56))
# colors_land = plt.cm.terrain(np.linspace(0.25, 1, 200))
# # combine them and build a new colormap
# colors_stack = np.vstack((colors_undersea, colors_land))
# color_map = colors.LinearSegmentedColormap.from_list('color_map',
# colors_stack)
color_map = plt.get_cmap('YlOrRd')
if overflow is not None:
n_normal = 224
n_over = 32
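# The normal and overflow (plus, for a two-element overflow list,
# underflow) colour counts are chosen to sum to the 256 entries
# consumed by BoundaryNorm(..., ncolors=256) below.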
top_overflow = overflow
colors_underflow = []
underflow_bounds = []
min_val = np.min(z)
if isinstance(overflow, list):
top_overflow = overflow[1]
min_val = overflow[0]
n_over = int(n_over/2)
colors_underflow = list(plt.get_cmap('coolwarm')(
np.linspace(0, 0.21, n_over)))
underflow_bounds = list(np.linspace(np.min(z), min_val,
n_over+1))[:-1]
colors_normal = list(plt.get_cmap('YlOrRd')(
np.linspace(0, .9, n_normal)))
colors_overflow = list(
plt.get_cmap('Greens')(np.linspace(0.5, 1, n_over)))
all_colors = colors_underflow + colors_normal + colors_overflow
color_map = mpl.colors.LinearSegmentedColormap.from_list(
'color_map', all_colors)
normal_bounds = list(np.linspace(min_val,
top_overflow, n_normal+1))[:-1]
overflow_bounds = list(np.linspace(top_overflow,
np.max(z), n_over))
bounds = underflow_bounds + normal_bounds + overflow_bounds
norm = mpl.colors.BoundaryNorm(boundaries=bounds, ncolors=256)
elif log_scale:
norm = colors.LogNorm(vmin=cf_lvls[0], vmax=cf_lvls[-1])
else:
norm = None
if extend == 'neither':
# plot with appropriate parameters
# zorder: put the filled-contour below coastlines
contour_fills = plt.contourf(lons, lats, z, cf_lvls,
transform=cartopy.crs.PlateCarree(),
zorder=0.5,
cmap=color_map,
norm=norm)
else:
contour_fills = plt.contourf(lons, lats, z, cf_lvls,
transform=cartopy.crs.PlateCarree(),
zorder=0.5,
cmap=color_map,
norm=norm,
extend=extend)
contour_lines = plt.contour(lons, lats, z, cl_lvls, colors='0.1',
transform=cartopy.crs.PlateCarree(),
linewidths=1)
# Label levels with specially formatted floats
plt.rcParams['font.weight'] = 'bold'
plt.clabel(contour_lines, fmt=cline_label_format, inline=1, fontsize=9,
colors='k')
plt.rcParams['font.weight'] = 'normal'
if label_cities: # TODO remove/ better: test locations
HH = (53.551086, 9.993682)
Hannover = (52.373954, 9.741647)
Bremen = (53.075176, 8.801850)
city_labels = ['Hamburg', 'Hannover', 'Bremen']
# The original Basemap-style call `plt(...)` is not valid here; with
# cartopy, pass lon/lat directly and use a PlateCarree transform.
x_cities = [HH[1], Hannover[1], Bremen[1]]
y_cities = [HH[0], Hannover[0], Bremen[0]]
plt.plot(x_cities, y_cities, 'o', color='darkslategrey',
markersize=4, transform=ccrs.PlateCarree())
for label, xpt, ypt in zip(city_labels, x_cities, y_cities):
plt.text(xpt+0.5, ypt+0.01, label, color='darkslategrey',
fontsize=6, transform=ccrs.PlateCarree())
return contour_fills
def plot_single_panel(plot_item, plot_title='',
overflow=None):
""""Plot panel with one individual plot.
Args:
plot_item (dict): Individual properties of the plots.
plot_title (string, optional): Title to be written above the plot.
"""
# Set up figure, calculate figure height corresponding to desired width.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .95, 0.15, 0., 1.
bottom_pos_colorbar = .09
fig_width = 3
if plot_title == '':
plot_frame_top = 1.
plot_frame_width = plot_frame_right - plot_frame_left
width_colorbar = plot_frame_width*0.9
fig_height = calc_fig_height(fig_width, (1, 1), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
# Plot the data.
# Mapping individual properties of the plots.
z = plot_item['data']
cf_lvls = plot_item['contour_fill_levels']
cl_lvls = plot_item['contour_line_levels']
cb_ticks = plot_item['colorbar_ticks']
cb_tick_fmt = plot_item['colorbar_tick_fmt']
apply_log_scale = plot_item.get('log_scale', False)
extend = plot_item.get('extend', "neither")
cl_label_fmt = plot_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
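# e.g. "{:.1f}" becomes "%.1f", the old-style fmt expected by plt.clabel.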
plt.title(plot_title)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt,
log_scale=apply_log_scale,
extend=extend,
overflow=overflow)
# Add axis for colorbar.
i = 0
left_pos_colorbar = plot_frame_width*i + \
(plot_frame_width-width_colorbar)/2 + plot_frame_left
cbar_ax = fig.add_axes([left_pos_colorbar, bottom_pos_colorbar,
width_colorbar, 0.035])
if apply_log_scale:
formatter = LogFormatter(10, labelOnlyBase=False)
else:
formatter = None
cbar = plt.colorbar(contour_fills, orientation="horizontal",
cax=cbar_ax, ticks=cb_ticks, format=formatter)
cbar.ax.set_xticklabels([cb_tick_fmt.format(t) for t in cb_ticks])
cbar.set_label(plot_item['colorbar_label'])
def plot_panel_1x3(plot_items, column_titles, row_item):
""""Plot panel with 3 columns of individual plots.
Args:
plot_items (list of dict): Individual properties of the plots.
column_titles (list): Plot titles per column.
row_item (dict): General properties of the plots.
"""
# Set up figure, calculate figure height corresponding to desired width.
bottom_pos_colorbar = .09
fig_width = 9.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .95, 0, .035, 0.88
fig_height = calc_fig_height(fig_width, (1, 3), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, axs = plt.subplots(1, 3, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
# Mapping general properties of the plots.
cf_lvls = row_item['contour_fill_levels']
cb_tick_fmt = row_item.get('colorbar_tick_fmt', "{:.1f}")
cl_label_fmt = row_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
# Plot the data.
for ax, title, plot_item in zip(axs, column_titles, plot_items):
# Mapping individual properties of the plots.
z = plot_item['data']
cl_lvls = plot_item['contour_line_levels']
plt.axes(ax)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
plt.title(title)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt)
# Add axis for colorbar.
height_colorbar = .85
bottom_pos_colorbar = (plot_frame_top - height_colorbar)/2
cbar_ax = fig.add_axes([0.91, bottom_pos_colorbar, 0.02, height_colorbar])
cbar = fig.colorbar(contour_fills, cax=cbar_ax,
ticks=row_item['colorbar_ticks'])
cbar.ax.set_yticklabels([cb_tick_fmt.format(t)
for t in row_item['colorbar_ticks']])
cbar.set_label(row_item['colorbar_label'])
def plot_panel_1x3_seperate_colorbar(plot_items, column_titles):
""""Plot panel with 3 columns of individual plots using solely seperate
plot properties.
Args:
plot_items (list of dict): Individual properties of the plots.
column_titles (list): Plot titles per column.
"""
# Set up figure, calculate figure height corresponding to desired width.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .95, 0.17, 0., 1.
width_colorbar = .27
bottom_pos_colorbar = .1
fig_width = 9.*(0.88-.035)
if column_titles is None:
plot_frame_top = 1.
column_titles = [None]*3
plot_frame_width = plot_frame_right - plot_frame_left
fig_height = calc_fig_height(fig_width, (1, 3), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, axs = plt.subplots(1, 3, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
# Plot the data.
for i, (ax, title, plot_item) in enumerate(zip(axs, column_titles,
plot_items)):
# Mapping individual properties of the plots.
z = plot_item['data']
cf_lvls = plot_item['contour_fill_levels']
cl_lvls = plot_item['contour_line_levels']
cb_ticks = plot_item['colorbar_ticks']
cb_tick_fmt = plot_item['colorbar_tick_fmt']
apply_log_scale = plot_item.get('log_scale', False)
extend = plot_item.get('extend', "neither")
cl_label_fmt = plot_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
plt.axes(ax)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
plt.title(title)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt,
log_scale=apply_log_scale,
extend=extend)
# Add axis for colorbar.
left_pos_colorbar = plot_frame_width/3*i + \
(plot_frame_width/3-width_colorbar)/2 + plot_frame_left
cbar_ax = fig.add_axes([left_pos_colorbar, bottom_pos_colorbar,
width_colorbar, 0.035])
if apply_log_scale:
formatter = LogFormatter(10, labelOnlyBase=False)
else:
formatter = None
cbar = plt.colorbar(contour_fills, orientation="horizontal",
cax=cbar_ax, ticks=cb_ticks, format=formatter)
cbar.ax.set_xticklabels([cb_tick_fmt.format(t) for t in cb_ticks])
cbar.set_label(plot_item['colorbar_label'])
def plot_panel_2x3(plot_items, column_titles, row_items):
""""Plot panel with 2 rows and 3 columns of individual plots.
Args:
plot_items (list of dict): Individual properties of the plots.
column_titles (list): Plot titles per column.
row_items (list of dict): Properties of the plots shared per row.
"""
# Set up figure, calculate figure height corresponding to
# desired width.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .96, 0.0, .035, 0.88
fig_width = 9.
fig_height = calc_fig_height(fig_width, (2, 3), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, axs = plt.subplots(2, 3, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
# Positioning of colorbars.
height_colorbar = .4
right_pos_colorbar = .9
for i_row, row_item in enumerate(row_items):
# Mapping properties of the plots shared per row.
cb_tick_fmt = row_item.get('colorbar_tick_fmt', "{:.1f}")
extend = row_item.get('extend', "neither")
cl_label_fmt = row_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
cf_lvls = row_items[i_row]['contour_fill_levels']
# First row of plots.
for ax, plot_item in zip(axs[i_row, :], plot_items[i_row]):
# Mapping individual properties of the plots.
z = plot_item['data']
cl_lvls = plot_item['contour_line_levels']
plt.axes(ax)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt,
extend=extend)
# Add axis for colorbar.
bottom_pos_colorbar = (1-i_row)*plot_frame_top/2 + \
(plot_frame_top/2-height_colorbar)/2
cbar_ax = fig.add_axes([right_pos_colorbar, bottom_pos_colorbar,
0.02, height_colorbar])
cbar = fig.colorbar(contour_fills, cax=cbar_ax,
ticks=row_item['colorbar_ticks'])
cbar.ax.set_yticklabels([cb_tick_fmt.format(t)
for t in row_item['colorbar_ticks']])
cbar.set_label(row_item['colorbar_label'])
# Add subplot row and column labels.
row_titles = [r['title'] for r in row_items]
for ax, col in zip(axs[0], column_titles):
ax.annotate(col, xy=(0.5, 1), xytext=(0, 5.),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
for ax, row in zip(axs[:, 0], row_titles):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad + 2., 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center', rotation=90)
def percentile_plots(plot_var, i_case, plot_settings):
"""" Reading processed data and plotting the 5th, 32nd, 50th percentile
maps. Used for figure 3.
Args:
plot_var (str): Name of plotting variable in netCDF source file.
i_case (int): Id of plotted case.
plot_settings (dict): Individual and shared properties of the plots.
"""
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
plot_var_suffix = ["_perc5", "_perc32", "_perc50"]
# Read data from NetCDF source file.
plot_items = []
plot_data_max = 0
for s in plot_var_suffix:
d = nc[plot_var+s].values[i_case, :, :]
if plot_var[0] == "p":
d *= 1e-3
plot_items.append({'data': d})
if np.amax(d) > plot_data_max:
plot_data_max = np.amax(d)
# Mapping plot properties and splitting up into individual and
# shared properties.
plot_handling = plot_settings["plot_handling"]
contour_fill_levels = plot_handling["contour_fill_levels"]
contour_line_levels = plot_handling.get("contour_line_levels", 3 *
[contour_fill_levels])
colorbar_ticks = plot_handling.get("colorbar_ticks", contour_fill_levels)
colorbar_label = plot_settings["color_label"]
# Write the contour handling to plot_items.
for i, plot_item in enumerate(plot_items):
plot_item['contour_line_levels'] = contour_line_levels[i]
# Write the row dependent settings to row_items.
row_item = {
'colorbar_ticks': colorbar_ticks,
'colorbar_label': colorbar_label,
'contour_fill_levels': contour_fill_levels,
}
if 'colorbar_tick_fmt' in plot_handling:
row_item['colorbar_tick_fmt'] = plot_handling["colorbar_tick_fmt"]
if 'contour_line_label_fmt' in plot_handling:
row_item['contour_line_label_fmt'] = \
plot_handling["contour_line_label_fmt"]
plot_panel_1x3(plot_items, column_titles, row_item)
def percentile_plots_ref(plot_var, i_case, plot_var_ref, i_case_ref,
plot_settings_abs, plot_settings_rel):
"""" Reading processed data and plotting the 5th, 32nd, 50th percentile
maps on the first row and the relative
increase w.r.t the reference case on the second row. Used for figure 7.
Args:
plot_var (str): Name of plotting variable in netCDF source file.
i_case (int): Id of plotted case.
plot_var_ref (str): Name of reference variable in netCDF source file.
i_case_ref (int): Id of reference case
plot_settings_abs (dict): Individual and shared properties of the top
row plots.
plot_settings_rel (dict): Individual and shared properties of the
bottom row plots.
"""
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
row_titles = ['Absolute value', 'Relative to reference case']
plot_var_suffix = ["_perc5", "_perc32", "_perc50"]
# Read data from NetCDF source file.
plot_items = [[], []]
plot_data_max, plot_data_relative_max = 0, 0
for s in plot_var_suffix:
d = nc[plot_var+s].values[i_case, :, :]
if plot_var[0] == "p":
d *= 1e-3
plot_items[0].append({'data': d})
if np.amax(d) > plot_data_max:
plot_data_max = np.amax(d)
d_ref = nc[plot_var_ref+s].values[i_case_ref, :, :]
if plot_var[0] == "p":
d_ref *= 1e-3
d_relative = d/d_ref
plot_items[1].append({'data': d_relative})
if np.amax(d_relative) > plot_data_relative_max:
plot_data_relative_max = np.amax(d_relative)
print("Max absolute and relative value are respectively {:.2f} and {:.2f}"
.format(plot_data_max, plot_data_relative_max))
# Mapping plot properties and splitting up into individual and shared properties.
plot_handling = plot_settings_abs["plot_handling"]
contour_fill_levels = plot_handling["contour_fill_levels"]
contour_line_levels = plot_handling.get("contour_line_levels", 3*[contour_fill_levels])
colorbar_ticks = plot_handling.get("colorbar_ticks", contour_fill_levels)
contour_fill_levels_rel = plot_settings_rel["contour_fill_levels"]
contour_line_levels_rel = plot_settings_rel.get("contour_line_levels", 3*[contour_fill_levels_rel])
colorbar_ticks_rel = plot_settings_rel.get("colorbar_ticks", contour_fill_levels_rel)
# Write the contour handling to plot_items.
for i, plot_item in enumerate(plot_items[0]):
plot_item['contour_line_levels'] = contour_line_levels[i]
for i, plot_item in enumerate(plot_items[1]):
plot_item['contour_line_levels'] = contour_line_levels_rel[i]
# Write the row dependent settings to row_items.
row_items = []
for i in range(2):
row_items.append({
'title': row_titles[i],
})
row_items[0]['colorbar_ticks'] = colorbar_ticks
row_items[0]['colorbar_label'] = plot_settings_abs["color_label"]
row_items[0]['contour_fill_levels'] = contour_fill_levels
if 'colorbar_tick_fmt' in plot_handling:
row_items[0]['colorbar_tick_fmt'] = plot_handling["colorbar_tick_fmt"]
row_items[0]['contour_line_label_fmt'] = '%.1f'
row_items[1]['colorbar_ticks'] = colorbar_ticks_rel
row_items[1]['colorbar_label'] = "Increase factor [-]"
row_items[1]['contour_fill_levels'] = contour_fill_levels_rel
if 'colorbar_tick_fmt' in plot_settings_rel:
row_items[1]['colorbar_tick_fmt'] = plot_settings_rel["colorbar_tick_fmt"]
row_items[1]['extend'] = plot_settings_rel.get('extend', "neither")
plot_panel_2x3(plot_items, column_titles, row_items)
def plot_figure5():
"""" Generate integrated mean power plot. """
column_titles = ["50 - 150m", "10 - 500m", "Ratio"]
linspace0 = np.linspace(0, .31, 21)
plot_item0 = {
'data': p_integral_mean[0, :, :]*1e-6,
'contour_line_levels': linspace0[::4],
'contour_fill_levels': linspace0,
'colorbar_ticks': linspace0[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': '[$MWm/m^2$]',
}
linspace1 = np.linspace(0, 1.5, 21)
plot_item1 = {
'data': p_integral_mean[1, :, :]*1e-6,
'contour_line_levels': linspace1[::4],
'contour_fill_levels': linspace1,
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': '[$MWm/m^2$]',
}
logspace2 = np.logspace(np.log10(4), np.log10(28.0), num=17)
plot_item2 = {
'data': plot_item1['data']/plot_item0['data'],
'contour_line_levels': [10, 15],
'contour_fill_levels': logspace2,
'colorbar_ticks': logspace2[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Increase factor [-]',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure3():
"""" Generate fixed height wind speed plot. """
plot_settings = {
"color_label": 'Wind speed [m/s]',
"plot_handling": {
"contour_fill_levels": np.arange(0, 15.1, 0.5), # 13.1, 1),
"contour_line_levels": [
[1., 2., 3., 4.],
[3., 5., 7., 9.],
[5., 7., 9., 11.],
],
"colorbar_ticks": np.arange(0, 15, 2), # 13, 2),
"colorbar_tick_fmt": "{:.0f}",
'contour_line_label_fmt': '%.1f',
},
}
percentile_plots("v_fixed", 0, plot_settings)
def plot_figure4():
"""" Generate fixed height power density plot. """
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
fixed_height_ref = 100.
fixed_height_id = list(fixed_heights).index(fixed_height_ref)
linspace0 = np.linspace(0, 0.027, 21) # np.linspace(0, .033, 21)
plot_item0 = {
'data': nc["p_fixed_perc5"].values[fixed_height_id, :, :]*1e-3,
'contour_fill_levels': linspace0,
'contour_line_levels': sorted([.003]+list(linspace0[::5])),
'contour_line_label_fmt': '%.3f',
'colorbar_ticks': linspace0[::5],
'colorbar_tick_fmt': '{:.3f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace1 = np.linspace(0, 0.45, 21) # np.linspace(0, .45, 21)
plot_item1 = {
'data': nc["p_fixed_perc32"].values[fixed_height_id, :, :]*1e-3,
'contour_fill_levels': linspace1,
'contour_line_levels': sorted([.04]+list(linspace1[::4])),
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace2 = np.linspace(0, 0.95, 21) # np.linspace(0, 1, 21)
plot_item2 = {
'data': nc["p_fixed_perc50"].values[fixed_height_id, :, :]*1e-3,
'contour_fill_levels': linspace2,
'contour_line_levels': sorted([.1]+list(linspace2[::4])),
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace2[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure8():
"""" Generate baseline comparison wind speed plot. """
linspace_absolute = np.linspace(0, 15, 21) # np.arange(0, 15.1, 1)
plot_settings_absolute_row = {
"color_label": 'Wind speed [m/s]',
"plot_handling": {
"contour_fill_levels": linspace_absolute,
"colorbar_ticks": linspace_absolute[::2],
"contour_line_levels": [
linspace_absolute,
[5., 7., 9., 10.],
[7., 9., 11., 13.],
],
"colorbar_tick_fmt": "{:.0f}",
},
}
linspace_relative = np.linspace(0, 2, 21) # np.linspace(1., 2.2, 21)
plot_settings_relative_row = {
"contour_fill_levels": linspace_relative,
"colorbar_ticks": linspace_relative[::4],
"contour_line_levels": [
[1.1, 1.4, 1.7],
[1.1, 1.4, 1.7],
[1.1, 1.4, 1.7],
],
'extend': 'max',
}
percentile_plots_ref("v_ceiling", height_range_ceilings.index(500),
"v_fixed", fixed_heights.index(100),
plot_settings_absolute_row, plot_settings_relative_row)
def plot_figure9_upper():
"""" Generate baseline comparison wind power plot - upper part. """
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
linspace0 = np.linspace(0, .04, 21)
plot_item0 = {
'data': nc["p_ceiling_perc5"].values[height_ceiling_id, :, :]*1e-3,
'contour_fill_levels': linspace0,
'contour_line_levels': linspace0[::5],
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace0[::5],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace1 = np.linspace(0, .6, 21)
plot_item1 = {
'data': nc["p_ceiling_perc32"].values[height_ceiling_id, :, :]*1e-3,
'contour_fill_levels': linspace1,
'contour_line_levels': linspace1[::4],
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace2 = np.linspace(0, 1.3, 21)
plot_item2 = {
'data': nc["p_ceiling_perc50"].values[height_ceiling_id, :, :]*1e-3,
'contour_fill_levels': linspace2,
'contour_line_levels': linspace2[::4],
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace2[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure9_lower():
"""" Generate baseline comparison wind power plot - lower part. """
column_titles = None
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
fixed_height_ref = 100.
fixed_height_id = list(fixed_heights).index(fixed_height_ref)
linspace0 = np.linspace(0, 20, 21)
plot_item0 = {
'data': nc["p_ceiling_perc5"].values[height_ceiling_id, :, :]
/ nc["p_fixed_perc5"].values[fixed_height_id, :, :],
'contour_fill_levels': linspace0,
'contour_line_levels': np.arange(2., 5., 1.),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace0[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': 'Increase factor [-]',
'extend': 'max',
}
linspace1 = np.linspace(0, 10, 21)
plot_item1 = {
'data': nc["p_ceiling_perc32"].values[height_ceiling_id, :, :]
/ nc["p_fixed_perc32"].values[fixed_height_id, :, :],
'contour_fill_levels': linspace1,
'contour_line_levels': linspace1[::4],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': 'Increase factor [-]',
'extend': 'max',
}
linspace2 = np.linspace(0, 10, 21)
plot_item2 = {
'data': nc["p_ceiling_perc50"].values[height_ceiling_id, :, :]
/ nc["p_fixed_perc50"].values[fixed_height_id, :, :],
'contour_fill_levels': linspace2,
'contour_line_levels': linspace2[::4],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace2[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': 'Increase factor [-]',
'extend': 'max',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure10():
"""" Generate power availability plot. """
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
linspace00 = np.linspace(0, 100, 21)
plot_item00 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_id, :, :],
'contour_fill_levels': linspace00,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
linspace01 = np.linspace(0, 100, 21)
plot_item01 = {
'data': 100.-nc["p_ceiling_rank300"].values[height_ceiling_id, :, :],
'contour_fill_levels': linspace01,
'contour_line_levels': linspace01[::4][2:],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace01[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
}
linspace02 = np.linspace(0, 70, 21)
plot_item02 = {
'data': 100.-nc["p_ceiling_rank1600"].values[height_ceiling_id, :, :],
'contour_fill_levels': linspace02,
'contour_line_levels': linspace02[::4][2:],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace02[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
}
column_titles = ["40 $W/m^2$", "300 $W/m^2$", "1600 $W/m^2$"]
plot_items = [plot_item00, plot_item01, plot_item02]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
linspace10 = np.linspace(0., 50., 21)
plot_item10 = {
'data': (100.-nc["p_ceiling_rank40"].values[height_ceiling_id, :, :]) -
(100.-nc["p_fixed_rank40"].values[0, :, :]),
'contour_fill_levels': linspace10,
'contour_line_levels': sorted([1.1, 2.2]+list(linspace10[::4][:-2])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace10[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
linspace11 = np.linspace(0., 55., 21)
plot_item11 = {
'data': (100.-nc["p_ceiling_rank300"].values[height_ceiling_id, :, :]) -
(100.-nc["p_fixed_rank300"].values[0, :, :]),
'contour_fill_levels': linspace11,
'contour_line_levels': linspace11[::4][:-2],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace11[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
linspace12 = np.linspace(0., 45., 21)
plot_item12 = {
'data': (100.-nc["p_ceiling_rank1600"].values[height_ceiling_id, :, :]) -
(100.-nc["p_fixed_rank1600"].values[0, :, :]),
'contour_fill_levels': linspace12,
'contour_line_levels': linspace12[::4][:-2],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace12[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
column_titles = None
plot_items = [plot_item10, plot_item11, plot_item12]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure11():
"""" Generate 40 W/m^2 power availability plot for alternative height ceilings. """
height_ceilings = [300., 1000., 1250.]
height_ceiling_ids = [list(height_range_ceilings).index(height_ceiling) for height_ceiling in height_ceilings]
baseline_height_ceiling = 500.
baseline_height_ceiling_id = list(height_range_ceilings).index(baseline_height_ceiling)
linspace00 = np.linspace(0, 100, 21)
plot_item00 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[0], :, :],
'contour_fill_levels': linspace00,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
linspace01 = np.linspace(10, 100, 21)
plot_item01 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[1], :, :],
'contour_fill_levels': linspace01,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace01[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
linspace02 = np.linspace(10, 100, 21)
plot_item02 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[2], :, :],
'contour_fill_levels': linspace02,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace02[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
column_titles = ["300 m", "1000 m", "1250 m"]
plot_items = [plot_item00, plot_item01, plot_item02]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
linspace10 = np.linspace(0., 22., 21)
plot_item10 = {
'data': -(100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[0], :, :]) +
(100.-nc["p_ceiling_rank40"].values[baseline_height_ceiling_id, :, :]),
'contour_fill_levels': linspace10,
'contour_line_levels': sorted([1.1]+list(linspace10[::4])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace10[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability decrease [%]',
}
linspace11 = np.linspace(0., 38., 21)
plot_item11 = {
'data': (100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[1], :, :]) -
(100.-nc["p_ceiling_rank40"].values[baseline_height_ceiling_id, :, :]),
'contour_fill_levels': linspace11,
'contour_line_levels': sorted([2.3]+list(linspace11[::4])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace11[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
linspace12 = np.linspace(0., 50., 21)
plot_item12 = {
'data': (100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[2], :, :]) -
(100.-nc["p_ceiling_rank40"].values[baseline_height_ceiling_id, :, :]),
'contour_fill_levels': linspace12,
'contour_line_levels': sorted([3.8]+list(linspace12[::4])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace12[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
column_titles = None
plot_items = [plot_item10, plot_item11, plot_item12]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_mean_and_ratio(data_type='v',
fill_range=[0, 20],
ratio_range=[0, 2],
line_levels=[2, 5, 15, 20],
n_decimals=0):
if data_type == 'v':
label = r'v [m/s]'
scale = 1
ratio_levels = [1.1, 1.3, 1.6]
elif data_type == 'p':
label = r'Power density [$kW/m^2$]'
scale = 10**(-3)
ratio_levels = [1, 3, 4.5]
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
fixed_height_ref = 100.
fixed_height_id = list(fixed_heights).index(fixed_height_ref)
# TODO automatize with data?
plot_title = '500m ceiling'
linspace00 = np.linspace(fill_range[0], fill_range[1], 21)
plot_item = {
'data': nc['{}_ceiling_mean'.format(data_type)].values[height_ceiling_id, :, :]*scale,
'contour_fill_levels': linspace00,
'contour_line_levels': line_levels,
'contour_line_label_fmt': '%.{}f'.format(n_decimals),
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': label,
'extend': 'max',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
plot_title = '100m fixed'
linspace00 = np.linspace(fill_range[0], fill_range[1], 21)
plot_item = {
'data': nc['{}_fixed_mean'.format(data_type)].values[fixed_height_id, :, :]*scale,
'contour_fill_levels': linspace00,
'contour_line_levels': line_levels,
'contour_line_label_fmt': '%.{}f'.format(n_decimals),
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': label,
'extend': 'max',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
plot_title = 'Ratio using 100m'
linspace00 = np.linspace(ratio_range[0], ratio_range[1], 25)
plot_item = {
'data': nc['{}_ceiling_mean'.format(data_type)].values[height_ceiling_id, :, :]/nc[
'{}_fixed_mean'.format(data_type)].values[fixed_height_id, :, :],
'contour_fill_levels': linspace00,
'contour_line_levels': ratio_levels,
'contour_line_label_fmt': '%.{}f'.format(1),
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': '{}/{}_ref [-]'.format(data_type, data_type),
'extend': 'max',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
def plot_surface_elevation_from_geopotential():
from process_data_paper import get_surface_elevation
data = get_surface_elevation(lats, lons, remove_neg=False,
revert_lat=True)
data[np.logical_and(data < 20, data > 0)] = 0
plot_title = 'Topography'
# color_map = plt.get_cmap('terrain')
# Set range such that 0 is at blue part
min_range_data, max_range = np.min(data), np.max(data)
blue = 56/256.
min_range = blue/(1 - blue) * max_range
if -min_range > min_range_data:
print('Colour range does not cover the data minimum.')
linspace00 = np.linspace(-min_range, max_range, 42)
plot_item = {
'data': data,
'contour_fill_levels': linspace00,
'contour_line_levels': [-300, 300, 700, 1500],
'contour_line_label_fmt': '%.{}f'.format(0),
'colorbar_ticks': linspace00[::8],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'surface elevation [m]',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
def plot_all():
# plot_mean_and_ratio(data_type='v',
# fill_range=[0, 15], # [6, 13],
# line_levels=[7, 9, 11],
# ratio_range=[0, 2.5],
# n_decimals=0)
# plot_mean_and_ratio(data_type='p',
# fill_range=[0, 2.7],
# line_levels=[0.3, 1.1, 1.5, 2],
# ratio_range=[1, 17],
# n_decimals=1)
# plot_surface_elevation_from_geopotential()
plot_figure3()
plot_figure4()
plot_figure5()
plot_figure8()
plot_figure9_upper()
plot_figure9_lower()
plot_figure10()
plot_figure11()
plt.show()
if __name__ == "__main__":
plot_all()
|
{"hexsha": "ad40e4e8ed5fde41be59a65aad0b09d5dd9e6efd", "size": 46574, "ext": "py", "lang": "Python", "max_stars_repo_path": "AWERA/resource_analysis/plot_maps.py", "max_stars_repo_name": "lthUniBonn/AWERA", "max_stars_repo_head_hexsha": "fa7f210516318bcfcbe1c99abbb5954b0cbaf682", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AWERA/resource_analysis/plot_maps.py", "max_issues_repo_name": "lthUniBonn/AWERA", "max_issues_repo_head_hexsha": "fa7f210516318bcfcbe1c99abbb5954b0cbaf682", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AWERA/resource_analysis/plot_maps.py", "max_forks_repo_name": "lthUniBonn/AWERA", "max_forks_repo_head_hexsha": "fa7f210516318bcfcbe1c99abbb5954b0cbaf682", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5696864111, "max_line_length": 122, "alphanum_fraction": 0.6089663761, "include": true, "reason": "import numpy", "num_tokens": 12171}
|
from math import pow
import numpy as np
def DEFAULT_ESCORT(x):
"""Gives Shahshahani metric and KL-divergence."""
return x
def twisted_escort(x):
l = list(x)
return np.array([l[1], l[2], l[0]])
def power_escort(q):
"""Returns an escort function for the power q."""
def g(x):
y = []
for i in range(len(x)):
y.append(pow(x[i], q))
return np.array(y)
return g
def projection_escort(x):
# Apply the q=0 power escort to x (the original returned the unapplied
# function, which appears to be a bug).
return power_escort(0)(x)
def exponential_escort(x):
return np.exp(x)
# Can also use metric_from_escort to get the Euclidean metric.
def euclidean_metric(n=3):
I = np.identity(n)
def G(x):
return I
return G
def metric_from_escort(escort):
def G(x):
return np.diag(1. / escort(x))
return G
def shahshahani_metric():
return metric_from_escort(DEFAULT_ESCORT)
DEFAULT_METRIC = shahshahani_metric()
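# Usage sketch (illustrative values, not part of the original module):
#   >>> G = shahshahani_metric()
#   >>> G(np.array([0.2, 0.3, 0.5]))   # diag(1/x)
#   array([[5.        , 0.        , 0.        ],
#          [0.        , 3.33333333, 0.        ],
#          [0.        , 0.        , 2.        ]])
#   >>> power_escort(2)(np.array([0.2, 0.3, 0.5]))
#   array([0.04, 0.09, 0.25])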
|
{"hexsha": "1342fdaf8233925bf44deddcfaaf9845924526d6", "size": 906, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyed/geometries.py", "max_stars_repo_name": "jsafyan/pyed", "max_stars_repo_head_hexsha": "0d9c2b860a94f5ce0fc89ee48e1e76a86257750c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyed/geometries.py", "max_issues_repo_name": "jsafyan/pyed", "max_issues_repo_head_hexsha": "0d9c2b860a94f5ce0fc89ee48e1e76a86257750c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyed/geometries.py", "max_forks_repo_name": "jsafyan/pyed", "max_forks_repo_head_hexsha": "0d9c2b860a94f5ce0fc89ee48e1e76a86257750c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.7777777778, "max_line_length": 62, "alphanum_fraction": 0.6291390728, "include": true, "reason": "import numpy", "num_tokens": 246}
|
% Process from LaTeX via XML to XHTML with
% latexml --destination installguide.xml --xml installguide.tex
% latexmlpost --destination installguide.xhtml --format=xhtml installguide.xml
%
% Crude hack to remove ugly symbols:
% sed -e 's/[§]//g' -i installguide.xhtml
%
% Strip off header for pasting into the website at
% http://www.gromacs.org/Documentation/Installation_Instructions:
%
% grep -A 99999 "class=\"main\"" installguide.xhtml > installguide_web.xhtml
\documentclass[12pt,a4paper,twoside]{article}
\usepackage{hyperref}
% haven't made these work with LaTeXML yet...
%\usepackage[strings]{underscore}
%\usepackage[english]{babel}
\title{GROMACS installation guide}
% macros to keep style uniform
\newcommand{\gromacs}{GROMACS}
\newcommand{\nvidia}{NVIDIA}
\newcommand{\cuda}{CUDA}
\newcommand{\fftw}{FFTW}
\newcommand{\mkl}{MKL}
\newcommand{\mpi}{MPI}
\newcommand{\threadmpi}{ThreadMPI}
\newcommand{\openmpi}{OpenMPI}
\newcommand{\openmp}{OpenMP}
\newcommand{\openmm}{OpenMM}
\newcommand{\lammpi}{LAM/MPI}
\newcommand{\mpich}{MPICH}
\newcommand{\cmake}{CMake}
\newcommand{\sse}{SSE}
\newcommand{\ssetwo}{SSE2}
\newcommand{\avx}{AVX}
\newcommand{\fft}{FFT}
\newcommand{\blas}{BLAS}
\newcommand{\lapack}{LAPACK}
\newcommand{\vmd}{VMD}
\newcommand{\pymol}{PyMOL}
\newcommand{\grace}{Grace}
%\newcommand{\}{}
%\newcommand{\}{}
% later, make CMake keep this version current for us
\newcommand{\fftwversion}{3.3.2}
\newcommand{\cmakeversion}{2.8.0}
\newcommand{\cudaversion}{3.2}
\begin{document}
\section{Building GROMACS}
These instructions pertain to building \gromacs{} 4.6 and newer releases
using our new CMake-based build system.
For installations instructions for old \gromacs{} versions,
see the documentation at
\url{http://www.gromacs.org/Documentation/Installation_Instructions_4.5}.
\section{Quick and dirty installation}
\begin{enumerate}
\item Get the latest version of your compiler.
\item Check you have \cmake{} version 2.8.x or later.
\item Unpack the \gromacs{} tarball.
\item Make a separate build directory and change to it.
\item Run \cmake{} with the path to the source as an argument
\item Run make and make install
\end{enumerate}
Or, as a sequence of commands to execute:
\begin{verbatim}
tar xfz gromacs-4.6.5.tar.gz
cd gromacs-4.6.5
mkdir build
cd build
cmake .. -DGMX_BUILD_OWN_FFTW=ON
make
sudo make install
\end{verbatim}
This will first download and build the prerequisite FFT library and then build \gromacs{}. If you already have
FFTW installed, you can remove that argument to cmake. Overall, this build
of \gromacs{} will be correct and reasonably fast on the
machine upon which \cmake{} ran. It will generally be 30-50\% faster
than \gromacs{} 4.5.x, but if you want to get the maximum value
for your hardware with \gromacs{}, you'll have to read further.
Sadly, the interactions of hardware, libraries, and compilers
are only going to continue to get more complex.
\section{Prerequisites}
\subsection{Platform}
\gromacs{} can be compiled for any distribution of Linux, Mac OS X,
Windows (native, Cygwin or MinGW), BlueGene, Cray and many other architectures.
Technically, it can be compiled on any platform with an ANSI C
compiler and supporting libraries, such as the GNU C library. However, \gromacs{}
also comes with many hardware-specific extensions to provide very high performance
on those platforms, and to enable these we have slightly more specific requirements
since old compilers do not support new features, or they can be buggy.
\subsection{Compiler}
\gromacs{} requires an ANSI C compiler that complies with the C89
standard. For best performance, the \gromacs{} team strongly
recommends you get the most recent version of your preferred compiler
for your platform (e.g. GCC 4.7 or Intel 12.0 or newer on x86
hardware). There is a large amount of \gromacs{} code introduced in
version 4.6 that depends on effective compiler optimization to get
high performance - the old raw assembly-language kernel routines are all gone.
Unfortunately this makes \gromacs{} more sensitive to the compiler
used, and the binary will only work on the hardware for which it is compiled,
but the good news is that it has enabled us to significantly accelerate performance
compared to version 4.5.
\begin{itemize}
\item On Intel-based x86 hardware, we recommend the Intel compiler for
best performance. It is usually better at instruction scheduling, but
it does not hurt to try gcc too, since recent gcc versions can give
icc a run for its money.
\item On AMD-based x86 hardware up through the Magny-Cours architecture
(e.g. Opteron 6100-series processors), it is worth using the Intel compiler for
better performance, but gcc-4.7 and later are also reasonable.
\item On the AMD Bulldozer architecture (Opteron 6200), AMD introduced fused multiply-add
instructions and an ``FMA4'' instruction format not available on Intel x86 processors. Thus,
on the most recent AMD processors you want to use gcc-4.7 or later for better performance!
icc will only generate code for the subset also supported by Intel processors, and that
is significantly slower right now.
\item If you are running on Mac OS X, the best option is the Intel compiler.
Both clang and gcc will work, but they produce lower performance and each have some
shortcomings. Clang does not fully support OpenMP, and the current gcc ports do not
support AVX instructions.
\item For all non-x86 platforms, your best option is typically to use the vendor's
default compiler, and check for specialized information below.
\end{itemize}
\subsubsection{Running in parallel}
\gromacs{} can run in parallel on multiple cores of a single
workstation using its built-in \threadmpi. No user action is required
in order to enable this.
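For example, assuming the default thread-\mpi{} build, you can select
the total number of threads explicitly at run time with the
\verb+-nt+ option (the input file name here is just a placeholder):
\begin{verbatim}
mdrun -nt 8 -s topol.tpr
\end{verbatim}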
If you wish to use the excellent new native GPU support in \gromacs,
\nvidia{}'s \cuda{}
\url{http://www.nvidia.com/object/cuda_home_new.html} version
\cudaversion{} software development kit is required, and the latest
version is strongly encouraged. \nvidia{} GPUs with at least \nvidia{} compute
capability 2.0 are required, e.g. Fermi or Kepler cards.
The GPU support from \gromacs{} version 4.5 using \openmm{}
\url{https://simtk.org/home/openmm} is still contained in the code,
but in the ``user contributions'' section (\verb+src/contrib+). You
will need to set
\verb+-DGMX_OPENMM=on -DGMX_GPU=off -DGMX_MPI=off -DGMX_THREAD_MPI=off+
in order to build it. It also requires \cuda{},
and remains the only hardware-based acceleration available for
implicit solvent simulations in \gromacs{} at the moment. However, the
long-term plan is to enable this functionality in core \gromacs{}, and
not have the \openmm{} interface supported by the \gromacs{} team.
If you wish to run in parallel on multiple machines across a network,
you will need to have
\begin{itemize}
\item an \mpi{} library installed that supports the \mpi{} 1.3
standard, and
\item wrapper compilers that will compile code using that library.
\end{itemize}
The \gromacs{} team recommends \openmpi{}
\url{http://www.open-mpi.org/} version 1.4.1 (or higher), \mpich{}
\url{http://www.mpich.org/} version 1.4.1 (or higher), or your
hardware vendor's \mpi{} installation. The most recent version of
either of these is likely to be the best. More specialized networks
might depend on accelerations only available in the vendor's library.
\lammpi{}
\url{http://www.lam-mpi.org/} might work, but since it has been
deprecated for years, it is not supported.
In some cases, \openmp{} parallelism is an advantage for \gromacs{},
but support for this is generally built into your compiler and detected
automatically. The one common exception is Mac OS X, where the default
clang compiler currently does not fully support OpenMP. You can install
gcc-4.7 instead, but the currently available binary distribution of gcc
uses an old system assembler that does not support AVX acceleration
instructions. There are some examples on the internet where people have
hacked this to work, but presently the only straightforward way to get
both OpenMP and AVX support on Mac OS X is to get the Intel compiler.
In summary, for maximum performance you will need to
examine how you will use \gromacs{}, what hardware you plan to run
on, and whether you can afford a non-free compiler for slightly better
performance. The only way to find out is unfortunately to test different
options and parallelization schemes for the actual simulations you
want to run. You will still get {\em good}\, performance with the default
build and runtime options (better than in version 4.5), but if you truly
want to push your hardware to the performance limit the days of just blindly
starting programs like '\verb+mdrun+' are gone.
\subsection{CMake}
From version 4.6, \gromacs{} uses the build system
\cmake{}. The previous build system that used \verb+configure+ from
the GNU autotools package has been removed permanently. \cmake{}
permits the \gromacs{} team to support a very wide range of hardware,
compilers and build configurations while continuing to provide the
portability, robustness and performance for which \gromacs{} is known.
\gromacs{} requires \cmake{} version \cmakeversion{} or higher. Lower
versions will not work. You can check whether \cmake{} is installed,
and what version it is, with \verb+cmake --version+. If you need to
install \cmake{}, then first check whether your platform's package
management system provides a suitable version, or visit
\url{http://www.cmake.org/cmake/help/install.html} for pre-compiled
binaries, source code and installation instructions. The \gromacs{}
team recommends you install the most recent version of \cmake{} you
can. If you need to compile \cmake{} yourself and have a really old environment,
you might first have to compile a moderately recent version (say, 2.6) to
bootstrap version 2.8. This is a one-time job, and you can find lots of
documentation on the \cmake{} website if you run into problems.
\subsection{Fast Fourier Transform library}
Many simulations in \gromacs{} make extensive use of fast Fourier transforms,
and a software library to perform these is always required. We
recommend \fftw{} \url{http://www.fftw.org/} (version 3 or higher
only) or Intel's \mkl{} \url{http://software.intel.com/en-us/intel-mkl}.
\subsubsection{\fftw{}}
\fftw{} is likely to be available for your platform via its package
management system, but there can be compatibility and significant
performance issues associated with these packages. In particular,
\gromacs{} simulations are normally run in single floating-point
precision whereas the default \fftw{} package is normally in double
precision, and good compiler options to use for \fftw{} when linked to
\gromacs{} may not have been used. Accordingly, the \gromacs{} team
recommends either
\begin{itemize}
\item that you permit the \gromacs{} installation to download and
build \fftw{} \fftwversion{} from source automatically for you (use
\verb+cmake -DGMX_BUILD_OWN_FFTW=ON+), or
\item that you build \fftw{} from the source code.
Note that the GROMACS-managed download of the FFTW tarball has a
slight chance of posing a security risk. If you use this option, you
will see a warning that advises how you can eliminate this risk.
\end{itemize}
If you build \fftw{} from source yourself, get the most recent version
and follow its installation guide available from \url{http://www.fftw.org}.
Choose the precision (i.e. single or float vs.\ double) to match what you will
later require for \gromacs{}. There is no need to compile with
threading or \mpi{} support, but it does no harm. On x86 hardware,
compile \emph{only} with \verb+--enable-sse2+ (regardless of
precision) even if your processors can take advantage of \avx{}
extensions. Since \gromacs{} uses fairly short transform lengths we
do not benefit from the \fftw{} \avx{} acceleration, and because of
memory system performance limitations, it can even degrade \gromacs{}
performance by around 20\%. There is no way for \gromacs{} to
limit the use to \ssetwo{} acceleration at run time if \avx{}
support has been compiled into \fftw{}, so you need to set this at compile time.
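As a sketch, a suitable single-precision \fftw{} build (the
installation prefix is a placeholder) might look like:
\begin{verbatim}
./configure --enable-sse2 --enable-float --prefix=/opt/fftw
make
make install
\end{verbatim}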
\subsubsection{\mkl{}}
Using \mkl{} with icc 11 or higher is very simple. Set up your
compiler environment correctly, perhaps with a command like
\verb+source /path/to/compilervars.sh intel64+ (or consult your local
documentation). Then set \verb+-DGMX_FFT_LIBRARY=mkl+ when you run
\cmake{}. In this case, \gromacs{} will also use \mkl{} for \blas{}
and \lapack{} (see \hyperlink{linear-algebra}{here}).
Otherwise, you can configure \mkl{} by setting
\verb+-DGMX_FFT_LIBRARY=mkl+,
\verb+-DMKL_LIBRARIES="/full/path/to/libone.so;/full/path/to/libtwo.so"+
and \verb+-DMKL_INCLUDE_DIR="/full/path/to/mkl/include"+,
where the full list (and order!) of libraries you require are found in
Intel's \mkl{} documentation for your system.
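For example, a configuration might look like the following, where the
library and include paths are placeholders to be replaced by the ones
listed in Intel's documentation for your system:
\begin{verbatim}
cmake .. -DGMX_FFT_LIBRARY=mkl \
  -DMKL_LIBRARIES="/opt/intel/mkl/lib/intel64/libmkl_rt.so" \
  -DMKL_INCLUDE_DIR="/opt/intel/mkl/include"
\end{verbatim}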
\subsection{Optional build components}
\begin{itemize}
\item Hardware-optimized \blas{} and \lapack{} libraries are useful
for a few of the \gromacs{} utilities focused on normal modes and
matrix manipulation, but they do not provide any benefits for normal
simulations. Configuring these is discussed
\hyperlink{linear-algebra}{here}.
\item The built-in \gromacs{} trajectory viewer \verb+ngmx+ requires
X11 and Motif/Lesstif libraries and header files. Generally, the
\gromacs{} team rather recommends you use third-party software for
visualization, such as \vmd{}
\url{http://www.ks.uiuc.edu/Research/vmd/} or \pymol{}
\url{http://www.pymol.org/}.
\item A few \gromacs{} tools get some extra functionality when linked with the
GNU scientific library GSL.
\end{itemize}
\section{Doing a build of \gromacs}
This section will cover a general build of \gromacs{} with \cmake{},
but it is not an exhaustive discussion of how to use \cmake{}. There
are many resources available on the web, which we suggest you search
for when you encounter problems not covered here. The material below
applies specifically to builds on Unix-like systems, including Linux,
Mac OS X, MinGW and Cygwin. For other platforms, see the specialist
instructions below.
\subsection{Configuring with \cmake{}}
\cmake{} will run many tests on your system and do its best to work
out how to build \gromacs{} for you. If you are building \gromacs{} on
hardware that is identical to that where you will run \verb+mdrun+,
then you can be sure that the defaults will be pretty good. The build
configuration will for instance attempt to detect the specific hardware
instructions available in your processor. However, if
you want to control aspects of the build, there are plenty of things you
can set manually.
The best way to use \cmake{} to configure \gromacs{} is to do an
``out-of-source'' build, by making another directory from which you
will run \cmake{}. This can be a subdirectory or not; it doesn't
matter. It also means you can never corrupt your source code by trying
to build it! So, the only required argument on the \cmake{} command
line is the name of the directory containing the
\verb+CMakeLists.txt+ file of the code you want to build. For
example, download the source tarball and use
% TODO: keep up to date with new releases!
\begin{verbatim}
$ tar xfz gromacs-4.6.5.tar.gz
$ cd gromacs-4.6.5
$ mkdir build-cmake
$ cd build-cmake
$ cmake ..
\end{verbatim}
You will see \verb+cmake+ report the results of a large number of
tests on your system made by \cmake{} and by \gromacs{}. These are
written to the \cmake{} cache, kept in \verb+CMakeCache.txt+. You
can edit this file by hand, but this is not recommended because it is
easy to reach an inconsistent state. You should not attempt to move or
copy this file to do another build, because file paths are hard-coded
within it. If you mess things up, just delete this file and start
again with '\verb+cmake+'.
If there's a serious problem detected at this stage, then you will see
a fatal error and some suggestions for how to overcome it. If you're
not sure how to deal with that, please start by searching on the web
(most computer problems already have known solutions!) and then
consult the gmx-users mailing list. There are also informational
warnings that you might like to take on board or not. Piping the
output of \verb+cmake+ through \verb+less+ or \verb+tee+ can be
useful, too.
\cmake{} works in an iterative fashion, re-running each time a setting
is changed to try to make sure other things are consistent. Once
things seem consistent, the iterations stop. Once \verb+cmake+
returns, you can see all the settings that were chosen and information
about them by using e.g. the curses interface
\begin{verbatim}
$ ccmake ..
\end{verbatim}
You can actually use \verb+ccmake+ directly in the first step, but then
most of the status messages will merely blink in the lower part
of the terminal rather than be written to standard out. Some platforms
like Windows or Mac even have native graphical user interfaces for
\cmake{}, and it can create project files for almost any build environment
you want (including Visual Studio or Xcode).
Check out \url{http://www.cmake.org/cmake/help/runningcmake.html} for
general advice on what you are seeing and how to navigate and change
things. The settings you might normally want to change are already
presented. If you make any changes, then \verb+ccmake+ will notice
that and require that you re-configure (using '\verb+c+'), so that it
gets a chance to make changes that depend on yours and perform more
checking. This might require several configuration stages when you are
using \verb+ccmake+ - when you are using \verb+cmake+ the
iteration is done behind the scenes.
A key thing to consider here is the setting of
\verb+CMAKE_INSTALL_PREFIX+. You will need to be able to write to
this directory in order to install \gromacs{} later, and if you change
your mind later, changing it in the cache triggers a full re-build,
unfortunately. So if you do not have super-user privileges on your
machine, then you will need to choose a sensible location within your
home directory for your \gromacs{} installation.
When \verb+cmake+ or \verb+ccmake+ have completed iterating, the
cache is stable and a build tree can be generated, with '\verb+g+' in
\verb+ccmake+ or automatically with \verb+cmake+.
You should not attempt to change compilers after the initial run of
\cmake{}. If you need to change, clean up and start again.
\subsection{Using CMake command-line options}
Once you become comfortable with setting and changing options, you
may know in advance how you will configure \gromacs{}. If so, you can
speed things up by invoking \verb+cmake+ with a command like:
\begin{verbatim}
$ cmake .. -DGMX_GPU=ON -DGMX_MPI=ON -DCMAKE_INSTALL_PREFIX=/home/marydoe/programs
\end{verbatim}
to build with GPUs, MPI and install in a custom location. You can even
save that in a shell script to make it even easier next time. You can
also do this kind of thing with \verb+ccmake+, but you should avoid
this, because the options set with '\verb+-D+' will not be able to be
changed interactively in that run of \verb+ccmake+.
\subsection{CMake advanced options}
The options that can be seen with \verb+ccmake+ are ones that we
think a reasonable number of users might want to consider
changing. There are a lot more options available, which you can see by
toggling the advanced mode in \verb+ccmake+ on and off with
'\verb+t+'. Even there, most of the variables that you might want to
change have a '\verb+CMAKE_+' or '\verb+GMX_+' prefix.
\subsection{Helping CMake find the right libraries/headers/programs}
If libraries are installed in non-default locations their location can
be specified using the following environment variables:
\begin{itemize}
\item \verb+CMAKE_INCLUDE_PATH+ for header files
\item \verb+CMAKE_LIBRARY_PATH+ for libraries
\item \verb+CMAKE_PREFIX_PATH+ for header, libraries and binaries
(e.g. '\verb+/usr/local+').
\end{itemize}
The respective '\verb+include+', '\verb+lib+', or '\verb+bin+' is
appended to the path. For each of these variables, a list of paths can
be specified (on Unix, separated with ``:''). Note that these are
environment variables (and not \cmake{} command-line arguments) and in
a '\verb+bash+' shell are used like:
\begin{verbatim}
$ CMAKE_PREFIX_PATH=/opt/fftw:/opt/cuda cmake ..
\end{verbatim}
The \verb+CC+ and \verb+CXX+ environment variables are also useful
for indicating to \cmake{} which compilers to use, which can be very
important for maximising \gromacs{} performance. Similarly,
\verb+CFLAGS+/\verb+CXXFLAGS+ can be used to pass compiler
options, but note that these will be appended to those set by
\gromacs{} for your build platform and build type. You can customize
some of this with advanced options such as \verb+CMAKE_C_FLAGS+
and its relatives.
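For example, to ask \cmake{} to use the Intel compilers (the compiler
names may vary with your installation), you could invoke:
\begin{verbatim}
$ CC=icc CXX=icpc cmake ..
\end{verbatim}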
See also: \url{http://cmake.org/Wiki/CMake_Useful_Variables#Environment_Variables}
\subsection{Linear algebra libraries}\hypertarget{linear-algebra}{}
As mentioned above, sometimes vendor \blas{} and \lapack{} libraries
can provide performance enhancements for \gromacs{} when doing
normal-mode analysis or covariance analysis. For simplicity, the text
below will refer only to \blas{}, but the same options are available
for \lapack{}. By default, CMake will search for \blas{}, use it if it
is found, and otherwise fall back on a version of \blas{} internal to
\gromacs{}. The \cmake{} option \verb+GMX_EXTERNAL_BLAS+ will be set
accordingly. The internal versions are fine for normal use. If you
need to specify a non-standard path to search, use
\verb+-DCMAKE_PREFIX_PATH=/path/to/search+. If you need to specify a
library with a non-standard name (e.g. ESSL on AIX or BlueGene), then
set \verb+-DGMX_BLAS_USER=/path/to/reach/lib/libwhatever.a+.
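As a sketch, with a hypothetical ESSL installation path this might
look like:
\begin{verbatim}
cmake .. -DGMX_BLAS_USER=/opt/ibmmath/lib64/libessl.a
\end{verbatim}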
If you are using Intel's \mkl{} for \fft{}, then the \blas{} and
\lapack{} it provides are used automatically. This could be
over-ridden with \verb+GMX_BLAS_USER+, etc.
On Apple platforms where the Accelerate Framework is available, these
will be automatically used for \blas{} and \lapack{}. This could be
over-ridden with \verb+GMX_BLAS_USER+, etc.
\subsection{Native GPU acceleration}
If you have the \cuda{} Software Development Kit installed, you can
use \cmake{} with:
\begin{verbatim}
cmake .. -DGMX_GPU=ON -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda
\end{verbatim}
(or whichever path has your installation). Note that this will require
a working C++ compiler, and in some cases you might need to handle
this manually, e.g. with the advanced option
\verb+CUDA_HOST_COMPILER+.
Historically, Linux GPU builds have received the most testing, but we
want to support GPU builds under x86 Linux, Windows and Mac OS X, and
in the future on ARM. Any feedback on this build process (and fixes in
particular) is very welcome!
\subsection{Static linking}
Dynamic linking of the \gromacs{} executables will lead to a
smaller disk footprint when installed, and so is the default on
platforms where we believe it has been tested repeatedly and found to work.
In general, this includes Linux, Windows, Mac OS X and BSD systems.
Static binaries take much more space, but on some hardware and/or under
some conditions they are necessary, most commonly when you are running a parallel
simulation using MPI libraries.
\begin{itemize}
\item To link \gromacs{} binaries
statically against the internal \gromacs{} libraries, set
\verb+BUILD_SHARED_LIBS=OFF+.
\item To link statically against external
libraries as well, the \verb+GMX_PREFER_STATIC_LIBS=ON+ option can be
used (see the example after this list). Note that, in general, \cmake{}
picks up whatever is available, so this option only instructs \cmake{}
to prefer static libraries when both static and shared are available.
If no static version of an external library is available, even when
the aforementioned option is ON, the shared library will be used. Also
note that the resulting binaries will still be dynamically linked
against system libraries if that is all that is available (common on
Mac OS X).
\end{itemize}
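As a sketch, a configuration that prefers static linking throughout
might be requested with:
\begin{verbatim}
cmake .. -DBUILD_SHARED_LIBS=OFF -DGMX_PREFER_STATIC_LIBS=ON
\end{verbatim}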
\subsection{Changing the names of GROMACS binaries and libraries}
It is sometimes convenient to have different versions of the same
\gromacs{} libraries installed. The most common use cases have been
single and double precision, and with and without \mpi{}. By default,
\gromacs{} will suffix binaries and libraries for such builds with
'\verb+_d+' for double precision and/or '\verb+_mpi+' for \mpi{} (and
nothing otherwise). This can be controlled manually with
\verb+GMX_DEFAULT_SUFFIX (ON/OFF)+, \verb+GMX_BINARY_SUFFIX+ (takes
a string) and \verb+GMX_LIBS_SUFFIX+ (also takes a string).
This can also be useful for resolving library-naming conflicts with
existing packages (\verb+GMX_PREFIX_LIBMD+ can also be useful).
For instance, to set a custom suffix for binaries and libraries,
one might specify:
\begin{verbatim}
cmake .. -DGMX_DEFAULT_SUFFIX=OFF -DGMX_BINARY_SUFFIX=_mod -DGMX_LIBS_SUFFIX=_mod
\end{verbatim}
Thus the names of all binaries and libraries will be appended with
``\_mod''.
\subsection{Building \gromacs{}}
Once you have a stable cache, you can build \gromacs{}. If you're not
sure the cache is stable, you can re-run \verb+cmake ..+ or
\verb+ccmake ..+ to see. Then you can run \verb+make+ to start the
compilation. Before actual compilation starts, \verb+make+ checks
that the cache is stable, so if it isn't you will see \cmake{} run
again.
So long as any changes you've made to the configuration are sensible,
it is expected that the \verb+make+ procedure will always complete
successfully. The tests \gromacs{} makes on the settings you choose
are pretty extensive, but there are probably a few cases we haven't
thought of yet. Search the web first for solutions to problems, but if
you need help, ask on gmx-users, being sure to provide as much
information as possible about what you did, the system you are
building on, and what went wrong.
If you have a multi-core or multi-CPU machine with \verb+N+
processors, then using
\begin{verbatim}
$ make -j N
\end{verbatim}
will generally speed things up by quite a bit.
\subsection{Installing \gromacs{}}
Finally, \verb+make install+ will install \gromacs{} in the
directory given in \verb+CMAKE_INSTALL_PREFIX+. If this is a system
directory, then you will need permission to write there, and you
should use super-user privileges only for \verb+make install+ and
not the whole procedure.
\subsection{Getting access to \gromacs{} after installation}
\gromacs{} installs the script \verb+GMXRC+ in the \verb+bin+
subdirectory of the installation directory
(e.g. \verb+/usr/local/gromacs/bin/GMXRC+), which you should source
from your shell:
\begin{verbatim}
$ source your-installation-prefix-here/bin/GMXRC
\end{verbatim}
It will detect what kind of shell you are running and set up your
environment for using \gromacs{}. You may wish to arrange for your
login scripts to do this automatically; please search the web for
instructions on how to do this for your shell.
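For example, with a \verb+bash+ shell you could append a line like the
following to your \verb+~/.bashrc+ (adjusting the path to match your
installation):
\begin{verbatim}
source /usr/local/gromacs/bin/GMXRC
\end{verbatim}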
Many of the \gromacs{} programs rely on data installed in our
\verb+share/gromacs+ directory. By default, the programs will use
the environment variables set in the GMXRC script, and if this is not
available they will try to guess the path based on their own location.
This usually works well unless you change the names of directories
inside the install tree. If you still need to do that, you might want to recompile
with the new install location properly set, or edit the \verb+GMXRC+ script.
\subsection{Testing \gromacs{} for correctness}
Since 2011, \gromacs{} development has used an automated system in which
every new patch is subject to regression testing. While this improves
reliability quite a lot, not everything is tested, and since we
increasingly rely on cutting-edge compiler features, there is a
non-negligible risk that the default compiler on your system could
have bugs. We have tried our best to test and refuse to use known bad
versions in \cmake{}, but we strongly recommend that you run through
the regression tests yourself. It only takes a few minutes, after
which you can trust your build.
The simplest way to run the checks is to build \gromacs{} with
\verb+-DREGRESSIONTEST_DOWNLOAD=ON+, and run \verb+make check+.
\gromacs{} will automatically download and run the tests for you.
Alternatively, you can download and unpack the tarball yourself from
\url{http://gerrit.gromacs.org/download/regressiontests-4.6.5.tar.gz},
and use the advanced \cmake{} option \verb+REGRESSIONTEST_PATH+ to
specify the path to the unpacked tarball, which will then be used for
testing. If this doesn't work, then please read on.
The regression tests are available from the \gromacs{} website and ftp
site. Once you have downloaded them, unpack the tarball, source
\verb+GMXRC+ as described above, and run \verb+./gmxtest.pl all+
inside the regression tests folder. You can find more options
(e.g. adding \verb+double+ when using double precision) if you just
execute the script without options.
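As a sketch, the manual procedure might look like this (adjust the
version number and paths to match your setup):
\begin{verbatim}
tar xfz regressiontests-4.6.5.tar.gz
cd regressiontests-4.6.5
source /usr/local/gromacs/bin/GMXRC
./gmxtest.pl all
\end{verbatim}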
Hopefully you will get a report that all tests have passed. If there
are individual failed tests it could be a sign of a compiler bug, or
that a tolerance is just a tiny bit too tight. Check the output files
the script directs you to, and try a different or newer compiler if
the errors appear to be real. If you cannot get it to pass the
regression tests, you might try dropping a line to the gmx-users
mailing list, but then you should include a detailed description of
your hardware and an example logfile from mdrun (which contains
valuable information in the header).
\subsection{Testing \gromacs{} for performance}
We are still working on a set of benchmark systems for testing
the performance of \gromacs{}. Until that is ready, we recommend that
you start by comparing the performance to release 4.5, and also try
a few different parallelization options.
\subsection{Having difficulty?}
You're not alone - this can be a complex task! If you encounter a
problem with installing \gromacs{}, then there are a number of
locations where you can find assistance. It is recommended that you
follow these steps to find the solution:
\begin{enumerate}
\item Read the installation instructions again, checking that you
have followed each and every step correctly.
\item Search the \gromacs{} website and the gmx-users mailing list for
information on the error.
\item Search the internet using a search engine such as Google.
\item Post to the \gromacs{} users mailing list gmx-users for
assistance. Be sure to give a full description of what you have done
and why you think it didn't work. Give details about the system on
which you are installing.
Copy and paste your command line and as
much of the output as you think might be relevant - certainly from
the first indication of a problem. In particular, please try to include at
least the header from the mdrun logfile, and preferably the entire file.
People who might volunteer to
help you do not have time to ask you interactive detailed follow-up
questions, so you will get an answer faster if you provide as much
information as you think could possibly help. High quality bug reports
tend to receive rapid high quality answers.
\end{enumerate}
\section{Special instructions for some platforms}
\subsection{Building on Windows}
Building on Cygwin/MinGW/etc. works just like Unix. Please see the
instructions above.
Building on Windows using native compilers is rather similar to
building on Unix, so please start by reading the above. Then, download
and unpack the \gromacs{} source archive. The UNIX-standard \verb+.tar.gz+
format can be managed on Windows, but you may prefer to browse
\url{ftp://ftp.gromacs.org/pub/gromacs} to obtain a zip format file,
which doesn't need any external tools to unzip on recent Windows
systems. Make a folder in which to do the out-of-source build of
\gromacs{}. For example, make it within the folder unpacked from the
source archive, and call it ``build-cmake''.
For \cmake{}, you can either use the graphical user interface provided
on Windows, or you can use a command line shell with instructions
similar to the UNIX ones above. If you open a shell from within
your IDE (e.g. Microsoft Visual Studio), it will configure the
environment for you, but you might need to tweak this in order to
get either a 32-bit or 64-bit build environment. The latter provides the
fastest executable. If you use a normal Windows command shell, then
you will need to either set up the environment to find your compilers
and libraries yourself, or run the \verb+vcvarsall.bat+ batch script
provided by MSVC (just like sourcing a bash script under
Unix).
With the graphical user interface you will be asked about what compilers
to use at the initial configuration stage, and if you use the command line
they can be set in a similar way as under UNIX.
You will probably make your life easier and faster by using the
new facility to download and install \fftw{} automatically.
For the build, you can either load the generated solutions file into
e.g. Visual Studio, or use the command line with \verb+cmake --build .+
so the right tools get used.
\subsection{Building on Cray}
\gromacs{} builds mostly out of the box on modern Cray machines,
but you will want to use static libraries due to the peculiarities of
parallel job execution.
\subsection{Building on BlueGene}
\subsubsection{BlueGene/P}
There is currently no native acceleration on this platform and no
plans to make one. The default plain C kernels will work.
\subsubsection{BlueGene/Q}
There is currently native acceleration on this platform for the Verlet
cut-off scheme. Accelerated kernels for the group cut-off scheme may
come in the future, but the default plain C kernels will work.
Only static linking with XL compilers is supported by \gromacs{}. Dynamic
linking would be supported by the architecture and \gromacs{}, but has no
advantages other than disk space, and is generally discouraged on
BlueGene for performance reasons.
Computation on BlueGene floating-point units is always done in
double-precision. However, single-precision builds of \gromacs{} are
still normal and encouraged since they use cache more efficiently.
The BlueGene hardware automatically
converts values stored in single precision in memory to double
precision in registers for computation, converts the results back to
single precision correctly, and does so for no additional cost. As
with other platforms, doing the whole computation in double precision
normally shows no improvement in accuracy and costs twice as much time
moving memory around.
You need to arrange for FFTW to be installed correctly, following the
above instructions.
\verb+mpicc+ is used for compiling and linking. This can make it awkward to
attempt to use IBM's optimized BLAS/LAPACK called ESSL (see the
section on linear algebra). Since mdrun is the only part of \gromacs{}
that should normally run on the compute nodes, and there is nearly no
need for linear algebra support for mdrun, it is recommended to use
the \gromacs{} built-in linear algebra routines - it is rare for this
to be a bottleneck.
The recommended configuration is to use
\begin{verbatim}
cmake .. -DCMAKE_TOOLCHAIN_FILE=Platform/BlueGeneQ-static-XL-CXX \
-DCMAKE_PREFIX_PATH=/your/fftw/installation/prefix \
-DGMX_MPI=on
make mdrun
make install-mdrun
\end{verbatim}
which will build a statically-linked MPI-enabled mdrun for the back
end. Otherwise, the default \gromacs{} configuration behaviour applies.
It is possible to configure and make the remaining \gromacs{} tools
with the compute-node toolchain, but as none of those tools are
\mpi{}-aware and could then only run on the compute nodes, this
would not normally be useful. Instead, these should be planned
to run on the login node, and a separate \gromacs{} installation
performed for that using the login node's toolchain - not the
above platform file, or any other compute-node toolchain.
Note that only the MPI build is available for the compute-node
toolchains. The \gromacs{} \threadmpi{} or serial builds are not useful at
all on BlueGene/Q.
\subsubsection{Fujitsu PRIMEHPC}
This is the architecture of the K computer, which uses Fujitsu
SPARC64 VIIIfx chips. \gromacs{} 4.6 will build with default C kernels
on this architecture, and \gromacs{} 4.6.2 added accelerated group
kernels and a custom toolchain.
\section{Tested platforms}
While it is our best belief that \gromacs{} will build and run pretty
much everywhere, it's important that we tell you where we really know
it works because we've tested it. We do test on Linux, Windows, and
Mac with a range of compilers and libraries for a range of our
configuration options. Every commit in our git source code
repository is currently tested on x86 with gcc versions ranging
from 4.4 through 4.7, and versions 12 and 13 of the Intel compiler.
Under Windows we test both the Visual Studio compilers and icc. We
test irregularly on BlueGene/Q, Cray,
Fujitsu PRIMEHPC, Google Native Client and other environments. In
the future we expect ARM to be an important test target too, but this
is currently not included.
Contributions to this section are welcome.
Later we might set up the ability for users to contribute test results
to Jenkins.
\section{Other issues}
The \gromacs{} utility programs often write data files in formats
suitable for the \grace{} plotting tool, but it is straightforward to
use these files in other plotting programs, too.
\end{document}
|
{"hexsha": "602d3f2a673b48896969face372c6de184fca1bb", "size": 37402, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "gromacs-4.6.5/admin/installguide/installguide.tex", "max_stars_repo_name": "farajilab/gifs_release", "max_stars_repo_head_hexsha": "ffa674110bcd15de851a8b6a703b4f4bc96fcd2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-04T18:56:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T16:49:22.000Z", "max_issues_repo_path": "gromacs-4.6.5/admin/installguide/installguide.tex", "max_issues_repo_name": "farajilab/gifs_release", "max_issues_repo_head_hexsha": "ffa674110bcd15de851a8b6a703b4f4bc96fcd2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gromacs-4.6.5/admin/installguide/installguide.tex", "max_forks_repo_name": "farajilab/gifs_release", "max_forks_repo_head_hexsha": "ffa674110bcd15de851a8b6a703b4f4bc96fcd2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-08T00:11:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T00:11:00.000Z", "avg_line_length": 47.4043092522, "max_line_length": 107, "alphanum_fraction": 0.7804395487, "num_tokens": 9277}
|
"""
Module defining Population class and methods
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import map
from builtins import range
try:
    basestring
except NameError:
    basestring = str
from future import standard_library
standard_library.install_aliases()
from numpy import pi, sqrt, sin, cos, arccos
import numpy as np
from neuron import h # Import NEURON
###############################################################################
#
# POPULATION CLASS
#
###############################################################################
class Pop (object):
    """
    Class to represent a population of cells: stores the population
    parameters (tags) and creates the corresponding Cell objects.
    """

    def __init__(self, label, tags):
        self.tags = tags  # list of tags/attributes of population (eg. numCells, cellModel,...)
        self.tags['pop'] = label
        self.cellGids = []  # list of cell gids belonging to this pop
        self._setCellClass()  # set type of cell
        self.rand = h.Random()  # random number generator
    def _distributeCells(self, numCellsPop):
        """
        Distribute cells across compute nodes using round-robin
        """
        from .. import sim

        hostCells = {}
        for i in range(sim.nhosts):
            hostCells[i] = []
        for i in range(numCellsPop):
            hostCells[sim.nextHost].append(i)
            sim.nextHost += 1
            if sim.nextHost >= sim.nhosts:
                sim.nextHost = 0
        if sim.cfg.verbose:
            print(("Distributed population of %i cells on %s hosts: %s, next: %s"%(numCellsPop,sim.nhosts,hostCells,sim.nextHost)))
        return hostCells
    def createCells(self):
        """
        Function to instantiate Cell objects based on the characteristics of this population
        """
        # add individual cells
        if 'cellsList' in self.tags:
            cells = self.createCellsList()
        # create cells based on fixed number of cells
        elif 'numCells' in self.tags:
            cells = self.createCellsFixedNum()
        # create cells based on density (optional ynorm-dep)
        elif 'density' in self.tags:
            cells = self.createCellsDensity()
        # create cells based on grid spacing
        elif 'gridSpacing' in self.tags:
            cells = self.createCellsGrid()
        # not enough tags to create cells
        else:
            self.tags['numCells'] = 1
            print('Warning: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
            cells = self.createCellsFixedNum()
        return cells
    def createCellsFixedNum (self):
        """
        Create population cells based on fixed number of cells
        """
        from .. import sim

        cells = []
        self.rand.Random123(self.tags['numCells'], sim.net.lastGid, sim.cfg.seeds['loc'])
        self.rand.uniform(0, 1)
        vec = h.Vector(self.tags['numCells']*3)
        vec.setrand(self.rand)
        randLocs = np.array(vec).reshape(self.tags['numCells'], 3)  # create random x,y,z locations

        if sim.net.params.shape == 'cylinder':
            # use the x,z random values
            rho = randLocs[:,0]  # use x rand value as the radius rho in the interval [0, 1)
            phi = 2 * pi * randLocs[:,2]  # use z rand value as the angle phi in the interval [0, 2*pi)
            x = (1 + sqrt(rho) * cos(phi))/2.0
            z = (1 + sqrt(rho) * sin(phi))/2.0
            randLocs[:,0] = x
            randLocs[:,2] = z
        elif sim.net.params.shape == 'ellipsoid':
            # use the x,y,z random values
            rho = np.power(randLocs[:,0], 1.0/3.0)  # use x rand value as the radius rho in the interval [0, 1); cube root ensures uniform dist
            phi = 2 * pi * randLocs[:,1]  # use y rand value as the angle phi in the interval [0, 2*pi)
            costheta = (2 * randLocs[:,2]) - 1  # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist
            theta = arccos(costheta)  # obtain theta from cos(theta)
            x = (1 + rho * cos(phi) * sin(theta))/2.0
            y = (1 + rho * sin(phi) * sin(theta))/2.0
            z = (1 + rho * cos(theta))/2.0
            randLocs[:,0] = x
            randLocs[:,1] = y
            randLocs[:,2] = z

        for icoord, coord in enumerate(['x', 'y', 'z']):
            if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
                self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
            # constrain to range set by user
            if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
                minv = self.tags[coord+'normRange'][0]
                maxv = self.tags[coord+'normRange'][1]
                randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv

        numCells = int(sim.net.params.scale * self.tags['numCells'])
        for i in self._distributeCells(numCells)[sim.rank]:
            gid = sim.net.lastGid + i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
            cellTags['pop'] = self.tags['pop']
            cellTags['xnorm'] = randLocs[i,0]  # set x location (normalized)
            cellTags['ynorm'] = randLocs[i,1]  # set y location (normalized)
            cellTags['znorm'] = randLocs[i,2]  # set z location (normalized)
            cellTags['x'] = sim.net.params.sizeX * randLocs[i,0]  # set x location (um)
            cellTags['y'] = sim.net.params.sizeY * randLocs[i,1]  # set y location (um)
            cellTags['z'] = sim.net.params.sizeZ * randLocs[i,2]  # set z location (um)
            if 'spkTimes' in self.tags:  # if VecStim, copy spike times to params
                if isinstance(self.tags['spkTimes'][0], list):
                    try:
                        cellTags['params']['spkTimes'] = self.tags['spkTimes'][i]  # 2D list
                    except:
                        pass
                else:
                    cellTags['params']['spkTimes'] = self.tags['spkTimes']  # 1D list (same for all)
            if self.tags.get('diversity', False):  # if pop has cell diversity
                cellTags['fraction'] = float(i)/float(numCells)
            if 'dynamicRates' in self.tags:  # if NetStim, copy rates array to params
                if 'rates' in self.tags['dynamicRates'] and 'times' in self.tags['dynamicRates']:
                    if isinstance(self.tags['dynamicRates']['rates'][0], list):
                        try:
                            cellTags['params']['rates'] = [self.tags['dynamicRates']['rates'][i], self.tags['dynamicRates']['times']]  # 2D list
                        except:
                            pass
                    else:
                        cellTags['params']['rates'] = [self.tags['dynamicRates']['rates'], self.tags['dynamicRates']['times']]  # 1D list (same for all)
            cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
            if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, sim.net.params.scale * self.tags['numCells']-1, gid, self.tags['pop'], sim.rank)))
        sim.net.lastGid = sim.net.lastGid + self.tags['numCells']
        return cells
    def createCellsDensity (self):
        """
        Create population cells based on density
        """
        from .. import sim

        cells = []
        shape = sim.net.params.shape
        sizeX = sim.net.params.sizeX
        sizeY = sim.net.params.sizeY
        sizeZ = sim.net.params.sizeZ

        # calculate volume
        if shape == 'cuboid':
            volume = sizeY/1e3 * sizeX/1e3 * sizeZ/1e3
        elif shape == 'cylinder':
            volume = sizeY/1e3 * sizeX/1e3/2 * sizeZ/1e3/2 * pi
        elif shape == 'ellipsoid':
            volume = sizeY/1e3/2.0 * sizeX/1e3/2.0 * sizeZ/1e3/2.0 * pi * 4.0 / 3.0

        for coord in ['x', 'y', 'z']:
            if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
                self.tags[coord+'normRange'] = [point / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
            if coord+'normRange' in self.tags:  # if normalized range, rescale volume
                minv = self.tags[coord+'normRange'][0]
                maxv = self.tags[coord+'normRange'][1]
                volume = volume * (maxv-minv)

        funcLocs = None  # start with no locations as a function of density function
        if isinstance(self.tags['density'], basestring):  # check if density is given as a function
            if shape == 'cuboid':  # only available for cuboids
                strFunc = self.tags['density']  # string containing function
                strVars = [var for var in ['xnorm', 'ynorm', 'znorm'] if var in strFunc]  # get list of variables used
                if not len(strVars) == 1:
                    print('Error: density function (%s) for population %s does not include "xnorm", "ynorm" or "znorm"'%(strFunc,self.tags['pop']))
                    return
                coordFunc = strVars[0]
                lambdaStr = 'lambda ' + coordFunc + ': ' + strFunc  # convert to lambda function
                densityFunc = eval(lambdaStr)
                minRange = self.tags[coordFunc+'Range'][0]
                maxRange = self.tags[coordFunc+'Range'][1]

                interval = 0.001  # interval of location values to evaluate func in order to find the max cell density
                maxDensity = max(list(map(densityFunc, (np.arange(minRange, maxRange, interval)))))  # max cell density
                maxCells = volume * maxDensity  # max number of cells based on max value of density func

                self.rand.Random123(int(maxDensity), sim.net.lastGid, sim.cfg.seeds['loc'])
                locsAll = minRange + (maxRange-minRange) * np.array([self.rand.uniform(0, 1) for i in range(int(maxCells))])  # random location values
                locsProb = np.array(list(map(densityFunc, locsAll))) / maxDensity  # calculate normalized density for each location value (used to prune)
                allrands = np.array([self.rand.uniform(0, 1) for i in range(len(locsProb))])  # create an array of random numbers for checking each location pos

                makethiscell = locsProb > allrands  # perform test to see whether or not this cell should be included (pruning based on density func)
                funcLocs = [locsAll[i] for i in range(len(locsAll)) if i in np.array(makethiscell.nonzero()[0], dtype='int')]  # keep only subset of locs based on density func
                self.tags['numCells'] = len(funcLocs)  # final number of cells after pruning of location values based on density func
                if sim.cfg.verbose: print('Volume=%.2f, maxDensity=%.2f, maxCells=%.0f, numCells=%.0f'%(volume, maxDensity, maxCells, self.tags['numCells']))
            else:
                print('Error: Density functions are only implemented for cuboid shaped networks')
                exit(0)
        else:  # simple fixed density (no location dependence)
            self.tags['numCells'] = int(self.tags['density'] * volume)  # = density (cells/mm^3) * volume (mm^3)

        # calculate locations of cells
        self.rand.Random123(self.tags['numCells'], sim.net.lastGid, sim.cfg.seeds['loc'])
        self.rand.uniform(0, 1)
        vec = h.Vector(self.tags['numCells']*3)
        vec.setrand(self.rand)
        randLocs = np.array(vec).reshape(self.tags['numCells'], 3)  # create random x,y,z locations

        if sim.net.params.shape == 'cylinder':
            # use the x,z random values
            rho = randLocs[:,0]  # use x rand value as the radius rho in the interval [0, 1)
            phi = 2 * pi * randLocs[:,2]  # use z rand value as the angle phi in the interval [0, 2*pi)
            x = (1 + sqrt(rho) * cos(phi))/2.0
            z = (1 + sqrt(rho) * sin(phi))/2.0
            randLocs[:,0] = x
            randLocs[:,2] = z
        elif sim.net.params.shape == 'ellipsoid':
            # use the x,y,z random values
            rho = np.power(randLocs[:,0], 1.0/3.0)  # use x rand value as the radius rho in the interval [0, 1); cube root ensures uniform dist
            phi = 2 * pi * randLocs[:,1]  # use y rand value as the angle phi in the interval [0, 2*pi)
            costheta = (2 * randLocs[:,2]) - 1  # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist
            theta = arccos(costheta)  # obtain theta from cos(theta)
            x = (1 + rho * cos(phi) * sin(theta))/2.0
            y = (1 + rho * sin(phi) * sin(theta))/2.0
            z = (1 + rho * cos(theta))/2.0
            randLocs[:,0] = x
            randLocs[:,1] = y
            randLocs[:,2] = z

        for icoord, coord in enumerate(['x', 'y', 'z']):
            if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
                minv = self.tags[coord+'normRange'][0]
                maxv = self.tags[coord+'normRange'][1]
                randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv
            if funcLocs and coordFunc == coord+'norm':  # if locations for this coordinate calculated using density function
                randLocs[:,icoord] = funcLocs

        if sim.cfg.verbose and not funcLocs: print('Volume=%.4f, density=%.2f, numCells=%.0f'%(volume, self.tags['density'], self.tags['numCells']))
        for i in self._distributeCells(self.tags['numCells'])[sim.rank]:
            gid = sim.net.lastGid + i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
            cellTags['pop'] = self.tags['pop']
            cellTags['xnorm'] = randLocs[i,0]  # calculate x location (normalized)
            cellTags['ynorm'] = randLocs[i,1]  # calculate y location (normalized)
            cellTags['znorm'] = randLocs[i,2]  # calculate z location (normalized)
            cellTags['x'] = sizeX * randLocs[i,0]  # calculate x location (um)
            cellTags['y'] = sizeY * randLocs[i,1]  # calculate y location (um)
            cellTags['z'] = sizeZ * randLocs[i,2]  # calculate z location (um)
            cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
            if sim.cfg.verbose:
                print(('Cell %d/%d (gid=%d) of pop %s, pos=(%2.f, %2.f, %2.f), on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['pop'], cellTags['x'], cellTags['y'], cellTags['z'], sim.rank)))
        sim.net.lastGid = sim.net.lastGid + self.tags['numCells']
        return cells
    def createCellsList (self):
        """
        Create population cells based on list of individual cells
        """
        from .. import sim

        cells = []
        self.tags['numCells'] = len(self.tags['cellsList'])
        for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
            #if 'cellModel' in self.tags['cellsList'][i]:
            #    self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel'])  # select cell class to instantiate cells based on the cellModel tags
            gid = sim.net.lastGid + i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
            cellTags['pop'] = self.tags['pop']
            cellTags.update(self.tags['cellsList'][i])  # add tags specific to this cell
            for coord in ['x', 'y', 'z']:
                if coord in cellTags:  # if absolute coord exists
                    cellTags[coord+'norm'] = cellTags[coord] / getattr(sim.net.params, 'size'+coord.upper())  # calculate norm coord
                elif coord+'norm' in cellTags:  # elif norm coord exists
                    cellTags[coord] = cellTags[coord+'norm'] * getattr(sim.net.params, 'size'+coord.upper())  # calculate absolute coord
                else:
                    cellTags[coord+'norm'] = cellTags[coord] = 0
            if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'VecStim':  # if VecStim, copy spike times to params
                cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
            cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
            if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['pop'], sim.rank)))
        sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
        return cells
    def createCellsGrid (self):
        """
        Create population cells based on grid spacing
        """
        from .. import sim

        cells = []
        rangeLocs = [[0, getattr(sim.net.params, 'size'+coord)] for coord in ['X', 'Y', 'Z']]
        for icoord, coord in enumerate(['x', 'y', 'z']):
            # constrain to range set by user
            if coord+'normRange' in self.tags:  # if normalized range, convert to absolute
                self.tags[coord+'Range'] = [float(point) * getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'normRange']]
            if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
                self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
                rangeLocs[icoord] = [self.tags[coord+'Range'][0], self.tags[coord+'Range'][1]]

        gridSpacing = self.tags['gridSpacing']
        gridLocs = []
        if isinstance(gridSpacing, list):
            for x in np.arange(rangeLocs[0][0], rangeLocs[0][1]+1, gridSpacing[0]):
                for y in np.arange(rangeLocs[1][0], rangeLocs[1][1]+1, gridSpacing[1]):
                    for z in np.arange(rangeLocs[2][0], rangeLocs[2][1]+1, gridSpacing[2]):
                        gridLocs.append((x, y, z))
        else:
            for x in np.arange(rangeLocs[0][0], rangeLocs[0][1]+1, gridSpacing):
                for y in np.arange(rangeLocs[1][0], rangeLocs[1][1]+1, gridSpacing):
                    for z in np.arange(rangeLocs[2][0], rangeLocs[2][1]+1, gridSpacing):
                        gridLocs.append((x, y, z))

        numCells = len(gridLocs)
        for i in self._distributeCells(numCells)[sim.rank]:
            gid = sim.net.lastGid + i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
            cellTags['pop'] = self.tags['pop']
            cellTags['xnorm'] = gridLocs[i][0] / sim.net.params.sizeX  # set x location (normalized)
            cellTags['ynorm'] = gridLocs[i][1] / sim.net.params.sizeY  # set y location (normalized)
            cellTags['znorm'] = gridLocs[i][2] / sim.net.params.sizeZ  # set z location (normalized)
            cellTags['x'] = gridLocs[i][0]  # set x location (um)
            cellTags['y'] = gridLocs[i][1]  # set y location (um)
            cellTags['z'] = gridLocs[i][2]  # set z location (um)
            cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
            if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, numCells, gid, self.tags['pop'], sim.rank)))
        sim.net.lastGid = sim.net.lastGid + numCells
        return cells
    def _setCellClass (self):
        """
        Set cell class (CompartCell, PointCell, etc)
        """
        from .. import sim

        # Check whether it's a NeuroML2-based cell
        if 'originalFormat' in self.tags:
            if self.tags['originalFormat'] == 'NeuroML2':
                self.cellModelClass = sim.NML2Cell
            elif self.tags['originalFormat'] == 'NeuroML2_SpikeSource':
                self.cellModelClass = sim.NML2SpikeSource
        else:
            # set cell class: CompartCell for compartmental cells or PointCell for point neurons (NetStims, IntFire1,...)
            try:  # check if cellModel corresponds to an existing point process mechanism; if so, use PointCell
                tmp = getattr(h, self.tags['cellModel'])
                self.cellModelClass = sim.PointCell
                excludeTags = ['pop', 'cellModel', 'cellType', 'numCells', 'density', 'cellsList',
                               'xRange', 'yRange', 'zRange', 'xnormRange', 'ynormRange', 'znormRange', 'vref', 'spkTimes', 'dynamicRates']
                params = {k: v for k, v in self.tags.items() if k not in excludeTags}
                self.tags['params'] = params
                for k in self.tags['params']: self.tags.pop(k)
                sim.net.params.popTagsCopiedToCells.append('params')
            except:
                if self.tags.get('cellModel', None) in ['NetStim', 'DynamicNetStim', 'VecStim', 'IntFire1', 'IntFire2', 'IntFire4']:
                    print('Warning: could not find %s point process mechanism required for population %s' % (self.tags['cellModel'], self.tags['pop']))
                self.cellModelClass = sim.CompartCell  # otherwise assume it has sections and some cellParam rules apply to it; use CompartCell
    def calcRelativeSegCoords(self):
        """
        Calculate segment coordinates from 3d point coordinates.
        Used for LFP calc (one per population cell; assumes same morphology)
        """
        from .. import sim

        localPopGids = list(set(sim.net.gid2lid.keys()).intersection(set(self.cellGids)))
        if localPopGids:
            cell = sim.net.cells[sim.net.gid2lid[localPopGids[0]]]
        else:
            return -1

        ix = 0  # segment index
        p3dsoma = cell.getSomaPos()
        nseg = sum([sec['hObj'].nseg for sec in list(cell.secs.values())])

        p0 = np.zeros((3, nseg))  # hold the coordinates of segment starting points
        p1 = np.zeros((3, nseg))  # hold the coordinates of segment end points
        d0 = np.zeros(nseg)
        d1 = np.zeros(nseg)

        for sec in list(cell.secs.values()):
            hSec = sec['hObj']
            hSec.push()
            n3d = int(h.n3d())  # get number of n3d points in each section
            p3d = np.zeros((3, n3d))  # to hold locations of 3D morphology for the current section
            l3d = np.zeros(n3d)  # to hold arc lengths of 3D morphology for the current section
            diam3d = np.zeros(n3d)  # to hold diameters

            for i in range(n3d):
                p3d[0, i] = h.x3d(i) - p3dsoma[0]
                p3d[1, i] = h.y3d(i) - p3dsoma[1]  # shift coordinates so as to place the soma at the origin
                p3d[2, i] = h.z3d(i) - p3dsoma[2]
                diam3d[i] = h.diam3d(i)
                l3d[i] = h.arc3d(i)

            l3d /= hSec.L  # normalize
            nseg = hSec.nseg

            l0 = np.zeros(nseg)  # keep range of segment starting point
            l1 = np.zeros(nseg)  # keep range of segment ending point

            for iseg, seg in enumerate(hSec):
                l0[iseg] = seg.x - 0.5*1/nseg  # x (normalized distance along the section) for the beginning of the segment
                l1[iseg] = seg.x + 0.5*1/nseg  # x for the end of the segment

            p0[0, ix:ix+nseg] = np.interp(l0, l3d, p3d[0, :])
            p0[1, ix:ix+nseg] = np.interp(l0, l3d, p3d[1, :])
            p0[2, ix:ix+nseg] = np.interp(l0, l3d, p3d[2, :])
            d0[ix:ix+nseg] = np.interp(l0, l3d, diam3d[:])

            p1[0, ix:ix+nseg] = np.interp(l1, l3d, p3d[0, :])
            p1[1, ix:ix+nseg] = np.interp(l1, l3d, p3d[1, :])
            p1[2, ix:ix+nseg] = np.interp(l1, l3d, p3d[2, :])
            d1[ix:ix+nseg] = np.interp(l1, l3d, diam3d[:])
            ix += nseg
            h.pop_section()

        self._morphSegCoords = {}
        self._morphSegCoords['p0'] = p0
        self._morphSegCoords['p1'] = p1
        self._morphSegCoords['d0'] = d0
        self._morphSegCoords['d1'] = d1
        return self._morphSegCoords
    def __getstate__ (self):
        """
        Removes non-picklable h objects so the instance can be pickled and sent via py_alltoall
        """
        from .. import sim

        odict = self.__dict__.copy()  # copy the dict since we change it
        odict = sim.replaceFuncObj(odict)  # replace h objects with None so can be pickled
        #odict['cellModelClass'] = str(odict['cellModelClass'])
        del odict['cellModelClass']
        del odict['rand']
        return odict
|
{"hexsha": "fa0f88640632bfc0f79539a80dfff764b32da6e2", "size": 25337, "ext": "py", "lang": "Python", "max_stars_repo_path": "netpyne/network/pop.py", "max_stars_repo_name": "FernandoSBorges/netpyne", "max_stars_repo_head_hexsha": "e1a7adb56b94aa78f8461397319eb4e9754c2d75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "netpyne/network/pop.py", "max_issues_repo_name": "FernandoSBorges/netpyne", "max_issues_repo_head_hexsha": "e1a7adb56b94aa78f8461397319eb4e9754c2d75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "netpyne/network/pop.py", "max_forks_repo_name": "FernandoSBorges/netpyne", "max_forks_repo_head_hexsha": "e1a7adb56b94aa78f8461397319eb4e9754c2d75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.8775100402, "max_line_length": 200, "alphanum_fraction": 0.5663653945, "include": true, "reason": "import numpy,from numpy", "num_tokens": 6715}
|
# this follows `markdown.jl` but was spurred by specific bugs/issues
function inter(st::String)
steps = explore_md_steps(st)
return steps[:inter_md].inter_md, steps[:inter_html].inter_html
end
@testset "issue163" begin
st = raw"""A _B `C` D_ E""" * J.EOS
imd, ih = inter(st)
@test imd == "A _B ##JDINSERT## D_ E"
@test ih == "<p>A <em>B ##JDINSERT## D</em> E</p>\n"
st = raw"""A _`B` C D_ E""" * J.EOS
imd, ih = inter(st)
@test imd == "A _ ##JDINSERT## C D_ E"
@test ih == "<p>A <em>##JDINSERT## C D</em> E</p>\n"
st = raw"""A _B C `D`_ E""" * J.EOS
imd, ih = inter(st)
@test imd == "A _B C ##JDINSERT## _ E"
@test ih == "<p>A <em>B C ##JDINSERT##</em> E</p>\n"
st = raw"""A _`B` C `D`_ E""" * J.EOS
imd, ih = inter(st)
@test imd == "A _ ##JDINSERT## C ##JDINSERT## _ E"
@test ih == "<p>A <em>##JDINSERT## C ##JDINSERT##</em> E</p>\n"
end
@testset "TOC" begin
J.CUR_PATH[] = "pages/ff/aa.md"
h = raw"""
\toc
## Hello `jd`
#### weirdly nested
### Goodbye!
## Done
done.
""" * J.EOS |> seval
@test isapproxstr(h, raw"""
<div class="jd-toc">
<ol>
<li>
<a href="/pub/ff/aa.html#hello_jd">Hello <code>jd</code></a>
<ol>
<li><ol><li><a href="/pub/ff/aa.html#weirdly_nested">weirdly nested</a></li></ol></li>
<li><a href="/pub/ff/aa.html#goodbye">Goodbye!</a></li>
</ol>
</li>
<li><a href="/pub/ff/aa.html#done">Done</a></li>
</ol>
</div>
<h2 id="hello_jd"><a href="/pub/ff/aa.html#hello_jd">Hello <code>jd</code></a></h2>
<h4 id="weirdly_nested"><a href="/pub/ff/aa.html#weirdly_nested">weirdly nested</a></h4>
<h3 id="goodbye"><a href="/pub/ff/aa.html#goodbye">Goodbye!</a></h3>
<h2 id="done"><a href="/pub/ff/aa.html#done">Done</a></h2>done.
""")
end
@testset "TOC" begin
J.CUR_PATH[] = "pages/ff/aa.md"
s = raw"""
@def mintoclevel = 2
@def maxtoclevel = 3
\toc
# A
## B
#### C
### D
## E
### F
done.
""" |> seval
@test isapproxstr(s, raw"""
<div class="jd-toc">
<ol>
<li><a href="/pub/ff/aa.html#b">B</a>
<ol>
<li><a href="/pub/ff/aa.html#d">D</a></li>
</ol>
</li>
<li><a href="/pub/ff/aa.html#e">E</a>
<ol>
<li><a href="/pub/ff/aa.html#f">F</a></li>
</ol>
</li>
</ol>
</div>
<h1 id="a"><a href="/pub/ff/aa.html#a">A</a></h1>
<h2 id="b"><a href="/pub/ff/aa.html#b">B</a></h2>
<h4 id="c"><a href="/pub/ff/aa.html#c">C</a></h4>
<h3 id="d"><a href="/pub/ff/aa.html#d">D</a></h3>
<h2 id="e"><a href="/pub/ff/aa.html#e">E</a></h2>
<h3 id="f"><a href="/pub/ff/aa.html#f">F</a></h3> done.
""")
end
|
{"hexsha": "72604427c2f096f5bf7dcbbefefba8af0c850bf1", "size": 3119, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/converter/markdown2.jl", "max_stars_repo_name": "davidanthoff/JuDoc.jl", "max_stars_repo_head_hexsha": "9435d89859d5e3b434af43aedb482053ba5eef09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/converter/markdown2.jl", "max_issues_repo_name": "davidanthoff/JuDoc.jl", "max_issues_repo_head_hexsha": "9435d89859d5e3b434af43aedb482053ba5eef09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/converter/markdown2.jl", "max_forks_repo_name": "davidanthoff/JuDoc.jl", "max_forks_repo_head_hexsha": "9435d89859d5e3b434af43aedb482053ba5eef09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8265306122, "max_line_length": 102, "alphanum_fraction": 0.4376402693, "num_tokens": 1123}
|
module a
implicit none
contains
subroutine b()
print *, "b()"
call c()
contains
subroutine c()
print *, 5
end subroutine c
end subroutine b
end module
program nested_02
use a, only: b
implicit none
call b()
end
|
{"hexsha": "f3e60f0cd77dafe0966afd6d9aa82acfd9098762", "size": 229, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "integration_tests/nested_02.f90", "max_stars_repo_name": "Thirumalai-Shaktivel/lfortran", "max_stars_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 316, "max_stars_repo_stars_event_min_datetime": "2019-03-24T16:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:28:33.000Z", "max_issues_repo_path": "integration_tests/nested_02.f90", "max_issues_repo_name": "Thirumalai-Shaktivel/lfortran", "max_issues_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-29T04:58:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T16:40:06.000Z", "max_forks_repo_path": "integration_tests/nested_02.f90", "max_forks_repo_name": "Thirumalai-Shaktivel/lfortran", "max_forks_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-03-28T19:40:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:28:55.000Z", "avg_line_length": 9.5416666667, "max_line_length": 20, "alphanum_fraction": 0.6812227074, "num_tokens": 64}
|
import numpy as np
import math
import grid_generate as GridGen
import estimate_method as EstMeth
import frequency_oracle as FreOra
import itertools
import choose_granularity
class AG_Uniform_Grid_1_2_way_optimal:
def __init__(self, args = None):
self.args = args
self.group_attribute_num = 2 # to construct 2-D grids
self.group_num = 0
self.AG = [] # attribute_group
self.Grid_set = []
self.answer_list = []
self.weighted_update_answer_list = []
self.granularity = None # granularity g2
self.granularity_1_way = None # granularity g1
self.LDP_mechanism_list_divide_user = [] # LDP mechanism for each attribute group
self.set_granularity_1_2_way()
def set_granularity_1_2_way(self):
chooseGran = choose_granularity.choose_granularity_beta(args= self.args)
tmp_g1 = chooseGran.get_1_way_granularity_for_HDG(ep= self.args.epsilon)
tmp_g2 = chooseGran.get_2_way_granularity_for_HDG(ep= self.args.epsilon)
self.granularity_1_way = chooseGran.get_rounding_to_pow_2(gran= tmp_g1)
self.granularity = chooseGran.get_rounding_to_pow_2(gran= tmp_g2)
self.args.granularity_1_way = self.granularity_1_way
self.args.granularity = self.granularity
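# A hypothetical sketch of the power-of-two rounding step used above; the real
# get_rounding_to_pow_2 is defined in choose_granularity and may differ:
# import math
# def round_to_pow_2(gran):
#     return 2 ** max(1, round(math.log2(gran)))  # e.g. 6 -> 8, 5 -> 4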
def judge_sub_attribute_in_attribute_group(self, sub_attribute = None, attribute_group:list = None):
return sub_attribute in attribute_group
def get_C_W_list(self, sub_attribute_value = None, sub_attribute = None, relevant_attribute_group_list:list = None):
C_list = np.zeros(self.args.group_num) # C_i: number of cells of grid i that cover one value of the target attribute
C_reci_list = np.zeros(self.args.group_num) # reciprocals 1/C_i, used as consistency weights
for i in relevant_attribute_group_list:
tmp_grid = self.Grid_set[i]
if len(tmp_grid.attribute_set) == 1:
C_list[i] = self.args.granularity_1_way // self.args.granularity
else:
C_list[i] = self.args.granularity
C_reci_list[i] = 1.0 / C_list[i]
return C_list, C_reci_list
def get_T_A_a(self, sub_attribute_value = None, sub_attribute = None, relevant_attribute_group_list:list = None, C_reci_list = None):
sum_C_reci_list = sum(C_reci_list)
sum_T_V_i_a = 0
for i in relevant_attribute_group_list:
T_V_i_a = 0
tmp_grid = self.Grid_set[i]
if len(tmp_grid.attribute_set) == 1:
left_interval_1_way = sub_attribute_value * (self.args.granularity_1_way // self.args.granularity)
right_interval_1_way = (sub_attribute_value + 1) * (self.args.granularity_1_way // self.args.granularity) - 1
k = left_interval_1_way
while k <= right_interval_1_way:
tmp_cell = tmp_grid.cell_list[k]
T_V_i_a += tmp_cell.consistent_count
k += 1
else:
sub_attribute_index_in_grid = tmp_grid.attribute_set.index(sub_attribute)
for tmp_cell in tmp_grid.cell_list:
if tmp_cell.dimension_index_list[sub_attribute_index_in_grid] == sub_attribute_value:
T_V_i_a += tmp_cell.consistent_count
sum_T_V_i_a += (C_reci_list[i] * T_V_i_a)
T_A_a = sum_T_V_i_a / sum_C_reci_list
return T_A_a
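# The estimate above is a weighted average over all grids covering value a:
# with C_i cells of grid i aggregated per value, T_A(a) = (sum_i (1/C_i) *
# T_{V_i}(a)) / (sum_i 1/C_i), so finer grids (smaller C_i) get more weight.
# A minimal standalone sketch, assuming plain lists of counts and cell counts:
# def weighted_avg_estimate(T_list, C_list):
#     w = [1.0 / c for c in C_list]
#     return sum(wi * ti for wi, ti in zip(w, T_list)) / sum(w)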
def get_consistency_for_sub_attribute(self, sub_attribute = None):
relevant_attribute_group_list = []
for i in range(self.group_num):
if self.judge_sub_attribute_in_attribute_group(sub_attribute, self.AG[i]):
relevant_attribute_group_list.append(i)
sub_attribute_domain = range(self.args.granularity) # need to be changed for 3-way attribute group
for sub_attribute_value in sub_attribute_domain:
C_list, C_reci_list = self.get_C_W_list(sub_attribute_value, sub_attribute, relevant_attribute_group_list)
T_A_a = self.get_T_A_a(sub_attribute_value, sub_attribute, relevant_attribute_group_list, C_reci_list)
for i in relevant_attribute_group_list: #update T_V_i_c
T_V_i_a = 0
T_V_i_c_cell_list = []
tmp_grid = self.Grid_set[i]
if len(tmp_grid.attribute_set) == 1:
left_interval_1_way = sub_attribute_value * (self.args.granularity_1_way // self.args.granularity)
right_interval_1_way = (sub_attribute_value + 1) * (self.args.granularity_1_way // self.args.granularity) - 1
k = left_interval_1_way
while k <= right_interval_1_way:
tmp_cell = tmp_grid.cell_list[k]
T_V_i_c_cell_list.append(k)
T_V_i_a += tmp_cell.consistent_count
k += 1
else:
sub_attribute_index_in_grid = tmp_grid.attribute_set.index(sub_attribute)
for k in range(len(tmp_grid.cell_list)):
tmp_cell = tmp_grid.cell_list[k]
if tmp_cell.dimension_index_list[sub_attribute_index_in_grid] == sub_attribute_value:
T_V_i_c_cell_list.append(k)
T_V_i_a += tmp_cell.consistent_count
for k in T_V_i_c_cell_list:
tmp_cell = tmp_grid.cell_list[k]
tmp_cell.consistent_count = tmp_cell.consistent_count + (T_A_a - T_V_i_a) * C_reci_list[i]
return
def overall_consistency(self):
for i in range(self.args.attribute_num):
self.get_consistency_for_sub_attribute(i)
return
def get_consistent_Grid_set(self):
for tmp_grid in self.Grid_set:
tmp_grid.get_consistent_grid()
self.overall_consistency()
for i in range(self.args.consistency_iteration_num_max):
for tmp_grid in self.Grid_set:
tmp_grid.get_consistent_grid_iteration()
self.overall_consistency()
# end with the Non-Negativity step
for tmp_grid in self.Grid_set:
tmp_grid.get_consistent_grid_iteration()
return
#*************consistency end*******************************
def weighted_update_iteration(self, grid_1_way_list = None, grid_2_way = None):
# update using 1_way
for tmp_grid_1_way in grid_1_way_list:
tmp_1_way_attribute = tmp_grid_1_way.attribute_set[0]
tmp_1_way_attribute_index = grid_2_way.attribute_set.index(tmp_1_way_attribute)
for i in range(len(tmp_grid_1_way.cell_list)):
tmp_cell = tmp_grid_1_way.cell_list[i]
lower_bound = tmp_cell.left_interval_list[0]
upper_bound = tmp_cell.right_interval_list[0] + 1
if tmp_1_way_attribute_index == 0:
tmp_sum = np.sum(grid_2_way.weighted_update_matrix[lower_bound:upper_bound, :])
if tmp_sum == 0:
continue
grid_2_way.weighted_update_matrix[lower_bound:upper_bound, :] = grid_2_way.weighted_update_matrix[lower_bound:upper_bound, :] / tmp_sum * tmp_cell.consistent_count
else:
tmp_sum = np.sum(grid_2_way.weighted_update_matrix[:, lower_bound:upper_bound])
if tmp_sum == 0:
continue
grid_2_way.weighted_update_matrix[:, lower_bound:upper_bound] = grid_2_way.weighted_update_matrix[:, lower_bound:upper_bound] / tmp_sum * tmp_cell.consistent_count
# normalization
grid_2_way.weighted_update_matrix = grid_2_way.weighted_update_matrix / np.sum(grid_2_way.weighted_update_matrix) * self.args.user_num
# update using 2_way
for tmp_cell in grid_2_way.cell_list:
x_lower_bound = tmp_cell.left_interval_list[0]
x_upper_bound = tmp_cell.right_interval_list[0] + 1
y_lower_bound = tmp_cell.left_interval_list[1]
y_upper_bound = tmp_cell.right_interval_list[1] + 1
tmp_sum = np.sum(grid_2_way.weighted_update_matrix[x_lower_bound:x_upper_bound, y_lower_bound:y_upper_bound])
if tmp_sum == 0:
continue
grid_2_way.weighted_update_matrix[x_lower_bound:x_upper_bound, y_lower_bound:y_upper_bound] = grid_2_way.weighted_update_matrix[x_lower_bound:x_upper_bound, \
y_lower_bound:y_upper_bound] / tmp_sum * tmp_cell.consistent_count
# normalization
grid_2_way.weighted_update_matrix = grid_2_way.weighted_update_matrix / np.sum(grid_2_way.weighted_update_matrix) * self.args.user_num
return
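# weighted_update_iteration() is essentially iterative proportional fitting
# (IPF): each marginal constraint rescales the slice of the matrix it covers
# to match its consistent count, then the matrix is renormalized. A minimal
# dense-matrix sketch (illustrative; zero-sum slices would need the same
# guard as above):
# import numpy as np
# def ipf_step(M, row_targets, col_targets):
#     M = M * (row_targets / M.sum(axis=1))[:, None]  # match row sums
#     M = M * (col_targets / M.sum(axis=0))[None, :]  # match column sums
#     return M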
def get_weight_update_for_2_way_group(self):
for tmp_grid in self.Grid_set:
if len(tmp_grid.attribute_set) == 2:
grid_1_way_list = []
for tmp_grid_1_way in self.Grid_set:
if len(tmp_grid_1_way.attribute_set) == 1 and tmp_grid_1_way.attribute_set[0] in tmp_grid.attribute_set:
grid_1_way_list.append(tmp_grid_1_way)
tmp_grid.weighted_update_matrix = np.zeros((self.args.domain_size, self.args.domain_size))
# initialize
tmp_grid.weighted_update_matrix[:,:] = self.args.user_num / (self.args.domain_size * self.args.domain_size)
for i in range(self.args.weighted_update_iteration_num_max):
weighted_update_matrix_before = np.copy(tmp_grid.weighted_update_matrix)
self.weighted_update_iteration(grid_1_way_list, tmp_grid)
weighted_update_matrix_delta = np.sum(np.abs(tmp_grid.weighted_update_matrix - weighted_update_matrix_before))
if weighted_update_matrix_delta < 1:
break
return
def generate_attribute_group(self):
attribute_group_list = []
attribute_list = [i for i in range(self.args.attribute_num)]
for tmp_attribute in attribute_list:
attribute_group_list.append((tmp_attribute,))
attribute_group_2_way_list = list(itertools.combinations(attribute_list, self.group_attribute_num))
for tmp_attribute_group_2_way in attribute_group_2_way_list:
attribute_group_list.append(tmp_attribute_group_2_way)
self.group_num = len(attribute_group_list)
self.args.group_num = self.group_num
self.AG = attribute_group_list
for i in range(len(self.AG)):
self.AG[i] = list(self.AG[i])
def group_attribute(self):
self.generate_attribute_group()
return
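# For example, with attribute_num = 3 the groups built above are
# [0], [1], [2], [0, 1], [0, 2], [1, 2]: every 1-way marginal plus every
# 2-way pair from itertools.combinations(range(3), 2).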
def construct_Grid_set(self):
for i in range(self.group_num):
if len(self.AG[i]) == 1:
tmp_Grid = GridGen.UniformGrid(self.AG[i], granularity= self.granularity_1_way, args= self.args)
else:
tmp_Grid = GridGen.UniformGrid(self.AG[i], granularity= self.granularity, args= self.args)
tmp_Grid.Grid_index = i
tmp_Grid.Main()
self.Grid_set.append(tmp_Grid)
return
def get_user_record_in_attribute_group(self, user_record_i, attribute_group: int = None):
user_record_in_attribute_group = []
for tmp in self.AG[attribute_group]:
user_record_in_attribute_group.append(user_record_i[tmp])
return user_record_in_attribute_group
def get_LDP_Grid_set_divide_user(self, user_record):
print("HDG is working...")
self.LDP_mechanism_list_divide_user = [] # re-initialize each time user data is randomized
for j in range(self.group_num): # initialize LDP mechanism for each attribute group
tmp_Grid = self.Grid_set[j] # the j-th grid
tmp_domain_size = len(tmp_Grid.cell_list)
tmp_LDR = FreOra.OUE(domain_size=tmp_domain_size, epsilon= self.args.epsilon, sampling_factor=self.group_num, args=self.args)
# tmp_LDR = FreOra.OLH(domain_size=tmp_domain_size, epsilon= self.args.epsilon, sampling_factor=self.group_num, args=self.args)
self.LDP_mechanism_list_divide_user.append(tmp_LDR)
for i in range(self.args.user_num):
tmp_user_granularity = math.ceil(self.args.user_num / self.group_num)
group_index_of_user = i // tmp_user_granularity
j = group_index_of_user
# to count the user num of each group
self.LDP_mechanism_list_divide_user[j].group_user_num += 1
tmp_Grid = self.Grid_set[j]
user_record_in_attribute_group_j = self.get_user_record_in_attribute_group(user_record[i], j)
tmp_real_cell_index = tmp_Grid.get_cell_index_from_attribute_value_set(user_record_in_attribute_group_j)
tmp_LDP_mechanism = self.LDP_mechanism_list_divide_user[j]
tmp_LDP_mechanism.operation_perturb(tmp_real_cell_index)
# update the perturbed_count of each cell
for j in range(self.group_num):
tmp_LDP_mechanism = self.LDP_mechanism_list_divide_user[j]
tmp_LDP_mechanism.operation_aggregate()
tmp_Grid = self.Grid_set[j] # the j-th Grid
for k in range(len(tmp_Grid.cell_list)):
tmp_Grid.cell_list[k].perturbed_count = tmp_LDP_mechanism.aggregated_count[k]
return
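# For reference, OUE (Optimized Unary Encoding) perturbs a one-hot vector by
# keeping the true 1-bit with probability 1/2 and flipping each 0-bit to 1
# with probability 1/(e^eps + 1); aggregation then debiases the counts. A
# minimal sketch of the perturbation step (illustrative only; the actual
# logic lives in frequency_oracle.OUE):
# import numpy as np
# def oue_perturb(true_index, domain_size, eps):
#     q = 1.0 / (np.exp(eps) + 1.0)
#     y = (np.random.rand(domain_size) < q).astype(int)  # noisy zero bits
#     y[true_index] = int(np.random.rand() < 0.5)        # p = 1/2 for the true bit
#     return y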
def judge_sub_attribute_list_in_attribute_group(self, sub_attribute_list, attribute_group):
if len(sub_attribute_list) == 1:
return False
return all(sub_attribute in attribute_group for sub_attribute in sub_attribute_list)
def get_answer_range_query_attribute_group_list(self, selected_attribute_list):
answer_range_query_attribute_group_index_list = []
answer_range_query_attribute_group_list = []
for tmp_Grid in self.Grid_set:
#note that here we judge if tmp_Grid.attribute_set belongs to selected_attribute_list
if self.judge_sub_attribute_list_in_attribute_group(tmp_Grid.attribute_set, selected_attribute_list):
answer_range_query_attribute_group_index_list.append(tmp_Grid.Grid_index)
answer_range_query_attribute_group_list.append(tmp_Grid.attribute_set)
return answer_range_query_attribute_group_index_list, answer_range_query_attribute_group_list
def answer_range_query(self, range_query):
t_Grid_ans = []
answer_range_query_attribute_group_index_list, answer_range_query_attribute_group_list = \
self.get_answer_range_query_attribute_group_list(range_query.selected_attribute_list)
for k in answer_range_query_attribute_group_index_list:
tmp_Grid = self.Grid_set[k]
Grid_range_query_attribute_node_list = []
for tmp_attribute in tmp_Grid.attribute_set:
Grid_range_query_attribute_node_list.append(range_query.query_attribute_node_list[tmp_attribute])
t_Grid_ans.append(tmp_Grid.answer_range_query_with_weight_update_matrix(Grid_range_query_attribute_node_list))
if range_query.query_dimension == self.group_attribute_num: # answer the 2-way marginal
tans_weighted_update = t_Grid_ans[0]
else:
tt = EstMeth.EsimateMethod(args= self.args)
tans_weighted_update = tt.weighted_update(range_query, answer_range_query_attribute_group_list, t_Grid_ans)
return tans_weighted_update
def answer_range_query_list(self, range_query_list):
self.weighted_update_answer_list = []
for tmp_range_query in range_query_list:
tans_weighted_update = self.answer_range_query(tmp_range_query)
self.weighted_update_answer_list.append(tans_weighted_update)
return
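# Hypothetical end-to-end usage, with the call order inferred from this file
# (an actual driver would build `args` and `user_record` appropriately):
# hdg = AG_Uniform_Grid_1_2_way_optimal(args=args)
# hdg.group_attribute()
# hdg.construct_Grid_set()
# hdg.get_LDP_Grid_set_divide_user(user_record)  # randomize + aggregate
# hdg.get_consistent_Grid_set()                  # enforce consistency
# hdg.get_weight_update_for_2_way_group()        # IPF-style refinement
# hdg.answer_range_query_list(range_query_list)
# answers = hdg.weighted_update_answer_list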
|
{"hexsha": "031a077c992e5e52d6be7681ca35d8686fce976f", "size": 15980, "ext": "py", "lang": "Python", "max_stars_repo_path": "HDG.py", "max_stars_repo_name": "YangJianyu-bupt/privmdr", "max_stars_repo_head_hexsha": "c4b68c87aeeb811ae8c42db511704fd3cc258a3c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-09-16T18:51:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T09:55:31.000Z", "max_issues_repo_path": "HDG.py", "max_issues_repo_name": "YangJianyu-bupt/privmdr", "max_issues_repo_head_hexsha": "c4b68c87aeeb811ae8c42db511704fd3cc258a3c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HDG.py", "max_forks_repo_name": "YangJianyu-bupt/privmdr", "max_forks_repo_head_hexsha": "c4b68c87aeeb811ae8c42db511704fd3cc258a3c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-28T06:52:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T07:48:54.000Z", "avg_line_length": 49.6273291925, "max_line_length": 183, "alphanum_fraction": 0.6648310388, "include": true, "reason": "import numpy", "num_tokens": 3471}
|
import re
import os
import pandas as pd
import numpy as np
import datetime
# from sklearn.feature_extraction import DictVectorizer
# from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.preprocessing import Imputer
import pyodbc
from config import Config
conf = Config()
INPUT_DIR = conf.input_folder
OUTPUT_DIR = conf.output_folder
MODEL_LOCATION = conf.LDAModel_path
connection_str = conf.get_db_conn()
cnxn = pyodbc.connect(connection_str)
cursor = cnxn.cursor()
def unescape(s):
s = str(s)
s = re.sub(r"<.*?>", "", s)
s = s.replace("<", "less")
s = s.replace(">", "greater")
# this has to be last:
s = s.replace("&", "and")
s = s.replace("x0D", "")
s = s.replace("\n", " ")
s = s.replace("\r", " ")
s = s.replace("*", " ")
s = s.replace("&#;", '')
s = s.replace("\t", "")
s = s.replace("(", "")
s = s.replace(")", "")
s = s.replace("null,", "")
s = s.replace('\'', "")
# next step keeps only alphanumeric characters, commas, periods and spaces
s = re.sub('[^A-Za-z0-9,. ]+', '', s)
s = re.sub('[ \t]+', ' ', s)
return s
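# Illustrative check of unescape() on a synthetic string (not real notes data);
# modulo exact spacing the result is '5 less 6 and see notes ok':
# print(unescape("5 < 6 & x0D\r\n (see *notes*)\t'ok'"))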
cursor.execute(""" select distinct claimid, Ext_MethodOfEntry into #All_Auto_Claims_base from
(select claimid, Ext_MethodOfEntry, (case when PrimaryCoverage='10004' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10047' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10047' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='10048' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10049' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10049' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10049' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10050' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10050' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10051' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10051' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10051' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10052' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10053' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10054' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10057' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10058' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10060' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10060' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10061' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10063' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10064' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10064' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10064' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10065' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10065' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10066' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10066' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10066' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10067' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10067' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10069' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10071' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10071' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10078' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10081' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10082' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10085' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10087' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='10088' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10089' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10089' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10090' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10091' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10092' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10094' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10095' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10095' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10160' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10214' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10278' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10278' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10279' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10280' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10281' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10282' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10283' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10286' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10287' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10287' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10287' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10287' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10287' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10288' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10288' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10288' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10288' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10290' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10290' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10290' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10290' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10292' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10292' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10293' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10294' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10295' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10295' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10296' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10296' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10298' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10299' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10304' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='10318' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='10318' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10319' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10320' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10367' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10367' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10367' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10367' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10368' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10369' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10370' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10371' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10371' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10372' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10372' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10373' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10374' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10374' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10374' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10375' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10376' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10376' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10380' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10387' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10388' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10389' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10390' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10631' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10633' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10633' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10634' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10634' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10634' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10635' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10635' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10637' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10638' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10638' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10639' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10640' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10640' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10642' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10643' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10646' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10649' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10653' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10653' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10654' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10654' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10655' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10655' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10655' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10655' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10655' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10656' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10656' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10656' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10657' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10660' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10661' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10661' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10663' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10665' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10666' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10667' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10684' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10684' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10684' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10685' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10685' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10687' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10769' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10794' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10794' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10794' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10795' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10795' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10795' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10800' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10800' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10800' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10801' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10801' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10801' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10840' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10847' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10849' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='10895' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10895' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10896' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10896' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10896' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10903' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10905' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10981' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10982' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10982' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='10984' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10985' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10986' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10987' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10989' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='10999' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='10999' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='10999' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11000' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11000' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='11000' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11001' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11002' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='11002' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11003' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='11122' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='11122' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11143' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11144' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11208' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11225' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11225' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11248' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='11249' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='11258' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11258' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11264' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11265' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11265' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='11266' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11266' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11267' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11268' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11268' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11269' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11269' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11270' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11271' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11271' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='11273' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11273' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11274' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='11274' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11275' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='11275' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11276' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='11276' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11276' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11365' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11374' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11497' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='11614' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11634' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11716' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11724' and exposuretype_desc='General' then 'Auto'
when PrimaryCoverage='11726' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11767' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11880' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11881' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11882' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11883' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='11884' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='11885' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='11886' and exposuretype_desc='Property' then 'Auto'
when PrimaryCoverage='11887' and exposuretype_desc='Vehicle' then 'Auto'
when PrimaryCoverage='11889' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='' and exposuretype_desc='Towing and Labor' then 'Auto'
when PrimaryCoverage='' and exposuretype_desc='Bodily Injury' then 'Auto'
when PrimaryCoverage='' and exposuretype_desc='Med Pay' then 'Auto'
when PrimaryCoverage='' and exposuretype_desc='PIP' then 'Auto'
when PrimaryCoverage='' and exposuretype_desc='Vehicle' then 'Auto' end) as lob
from (select claim.id claimid,
exposure.CoverageID,
exposure.PrimaryCoverage,
exposure.ExposureType,
cctl_exposuretype.[DESCRIPTION] as exposuretype_desc,
claim.Ext_MethodOfEntry
from GWCC_ProdCopy.dbo.cc_exposure exposure inner join GWCC_ProdCopy.dbo.cc_claim claim
on claim.id=exposure.claimid
left join GWCC_ProdCopy.dbo.cctl_exposuretype
on exposure.exposuretype= GWCC_ProdCopy.dbo.cctl_exposuretype.ID
where claim.CloseDate is null ) as base) as base2
where lob='Auto' """)
ALL_AUTO_CLAIMS2 = pd.read_sql(("""select * from #All_Auto_Claims_base """), cnxn)
get_claim_details = """select distinct base_claim.CLAIMID,cc_claim.ClaimNumber,LOSSCAUSEID,
LOSSTYPEID,LOSSLOCATIONCITY,
LOSSLOCATIONCOUNTY,LOSSLOCATIONSTATEID,LOSSLOCATIONZIP,LOSSLOCATIONCOUNTRYID,
cc_claim.DESCRIPTION,LITIGATIONSTATUSID,
LITIGATIONDATE,SUBROGATIONSTATUSID,CLAIMCURRENCYID,
CLAIMSTATEID,base_claim.REOPENDATE,CLAIMANTFIRSTNAME,CLAIMANTLASTNAME,
CLAIMANTCOMPANYNAME,CLAIMANTTYPEID,REOPENEDREASONID,
cc_claim.closedate as CLAIMCLOSEDATE,LASTUPDATED,CLAIMTIERID,
base_claim.RETIRED,EXT_CLAIMSOURCESYSTEMID,
base_claim.EXT_SOURCECLAIMNUMBER,
EXT_VENDORNAMEID,
JURISDICTIONSTATEID,base_claim.LOBCODE,
CLAIMINFOID,base_claim.EXT_SOURCECATCODE,
base_claim.AGENCYID, base_claim.EXT_POLICYSOURCESYS,ETL_ASOFDATE,CLOSEDOUTCOMEID ,base_claim.WEATHERRELATED,
base_claim.ASSIGNEDGROUPID,base_claim.POLICYID,
base_claim.EXT_INSUREDREPORTINGCD,LOSSLOCATIONADDRESSLINE1,
LOSSLOCATIONADDRESSLINE2,LOSSLOCATIONADDRESSLINE3,base_claim.WEATHER,base_claim.CREATETIME,
CLAIMANTMIDDLENAME,
base_claim.FAULT,base_claim.FAULTRATING,
Auto_claims.Ext_MethodOfEntry, cc_claim.reporteddate, cc_claim.lossdate,
datediff(day, cc_claim.LossDate, cc_claim.ReportedDate) AS Report_lag,
cc_claim.Ext_ResponsibleParty,
cctl_ext_responsibleparty.[DESCRIPTION] AS responsibleparty_desc,
cc_claim.LossLocationID,
cc_claim.LocationOfTheft,
cctl_locationoftheft.[DESCRIPTION] as LocationOfTheft_desc,
cc_claim.Ext_LocationofLoss,
cctl_ext_locationofloss.[DESCRIPTION] as ext_locationofloss_desc,
cc_claim.LitigationStatus,
cctl_litigationstatus.[DESCRIPTION] as litigationstatus_desc,
cc_claim.ClaimantDenormID,
cc_claim.InsuredDenormID,
upper(INS.NAME) AS INSURED_NAME,
upper(CLM.NAME) AS CLAIMANT_NAME
from GWCC_Datamart_ProdCopy.dbo.c_claim_d base_claim
inner join
#All_Auto_Claims_base Auto_claims
on base_claim.CLAIMID = Auto_claims.CLAIMID
left join
GWCC_Prodcopy.dbo.cc_claim cc_claim
on base_claim.CLAIMID = cc_claim.id
left join GWCC_ProdCopy.dbo.cctl_ext_responsibleparty cctl_ext_responsibleparty
on cc_claim.ext_responsibleparty=cctl_ext_responsibleparty.ID
left join GWCC_ProdCopy.dbo.cctl_locationoftheft
on cc_claim.LocationOfTheft=GWCC_ProdCopy.dbo.cctl_locationoftheft.ID
left join GWCC_ProdCopy.dbo.cctl_litigationstatus
on cc_claim.litigationstatus=GWCC_ProdCopy.dbo.cctl_litigationstatus.ID
left join GWCC_ProdCopy.dbo.cctl_ext_locationofloss
on cc_claim.ext_locationofloss=GWCC_ProdCopy.dbo.cctl_ext_locationofloss.ID
left join (select * from
GWCC_DataMart_ProdCopy.dbo.o_cc_contact
where ETL_ISCURRENTRECORD = 1) INS
on cc_claim.InsuredDenormID=INS.id
left join (select * from
GWCC_DataMart_ProdCopy.dbo.o_cc_contact
where ETL_ISCURRENTRECORD = 1) CLM
on cc_claim.ClaimantDenormID=CLM.id
WHERE base_claim.ETL_ISCURRENTRECORD = 1 """
CLAIM_BASE = pd.read_sql(get_claim_details, cnxn)
EXPOSURE_BASE = pd.read_sql(("""select distinct base_exposure.CLAIMID as CLAIMID_exp, base_exposure.EXPOSUREID ,base_exposure.EXPOSURESTATEID ,base_exposure.
EXPOSURETYPEID ,base_exposure.
CLAIMORDER ,base_exposure.EXPOSURECLOSEDATE ,base_exposure.CLOSEDOUTCOMEID ,base_exposure.LOSSCATEGORYID ,base_exposure.
LOSSPARTYID ,base_exposure.EXPOSUREREOPENDATE ,base_exposure.REOPENEDREASONID ,base_exposure.SETTLEDATE ,base_exposure.SETTLEMETHODID ,base_exposure.INJURYTYPEID ,base_exposure.
DETAILEDINJURYTYPEID ,base_exposure.CLAIMANTFIRSTNAME ,base_exposure.CLAIMANTLASTNAME ,base_exposure.CLAIMANTCOMPANY ,
exposure.ClaimantType as Claimanttypeid ,
cctl_claimanttype.[DESCRIPTION] AS ClaimantType_DESC ,base_exposure.
EXPOSURETIERID ,base_exposure.CREATEDATE ,base_exposure.EXT_CLOSEDWITHOUTPAY ,base_exposure.ETL_ASOFDATE ,base_exposure.INCIDENTID ,base_exposure.
VEHICLEID ,base_exposure.VIN ,base_exposure.
CLAIMANTDENORMID as CLAIMANTDENORMID_dm ,base_exposure.MAKE ,base_exposure.
MODEL ,base_exposure.YEAR ,base_exposure.DATEVEHICLESOLD ,base_exposure.TOTALLOSS ,base_exposure.TOTALLOSSPOINTS ,base_exposure.Speed ,
ext_methodofentry as ext_methodofentry_exp,
exposure.ClaimantDenormID as ClaimantDenormID_exp,
upper(con.Name) as Name_exp,
exposure.PrimaryCoverage,
cctl_coveragetype.[DESCRIPTION] as coveragetype_desc,
exposure.ExposureType,
cctl_exposuretype.[DESCRIPTION] as exposuretype_desc
from GWCC_Datamart_ProdCopy.dbo.c_exposure_d base_exposure
inner join
#All_Auto_Claims_base Auto_claims
on base_exposure.CLAIMID = Auto_claims.CLAIMID
left join
(select * from
GWCC_DataMart_ProdCopy.dbo.o_cc_exposure
where ETL_ISCURRENTRECORD = 1 ) exposure
on base_exposure.exposureid=exposure.id
left join GWCC_ProdCopy.dbo.cctl_claimanttype cctl_claimanttype
on exposure.ClaimantType=cctl_claimanttype.ID
left join GWCC_ProdCopy.dbo.cctl_exposuretype
on exposure.exposuretype=GWCC_ProdCopy.dbo.cctl_exposuretype.ID
left join GWCC_ProdCopy.dbo.cctl_coveragetype
on exposure.PrimaryCoverage=GWCC_ProdCopy.dbo.cctl_coveragetype.ID
left join (select * from GWCC_DataMart_Prodcopy.dbo.o_cc_contact
where ETL_ISCURRENTRECORD =1 ) con
on exposure.ClaimantDenormID=con.id
where base_exposure.ETL_ISCURRENTRECORD = 1 """), cnxn)
COV = pd.read_csv(os.path.join(INPUT_DIR, 'coverage.csv'))
EXPOSURE_BASE_F = EXPOSURE_BASE.merge(COV, on=['PrimaryCoverage', 'exposuretype_desc'], how='left')
CLAIM_COV = EXPOSURE_BASE_F.loc[:, ['CLAIMID_exp', 'cov_rollup_3']]
CLAIM_COV_GRP = []
for name, group in CLAIM_COV[['CLAIMID_exp', 'cov_rollup_3']].groupby('CLAIMID_exp', as_index=False):
if any(g in ['Auto BI', 'PIP', 'Med Pay'] for g in group['cov_rollup_3']):
CLAIM_COV_GRP.append({'CLAIMID_exp': name, 'BI': 1})
else:
CLAIM_COV_GRP.append({'CLAIMID_exp': name, 'BI': 0})
CLAIM_COV_GRP = pd.DataFrame(CLAIM_COV_GRP)
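# An equivalent vectorized form of the loop above, kept as a comment for
# reference (untested against edge cases such as empty groups):
# CLAIM_COV_GRP = (CLAIM_COV['cov_rollup_3'].isin(['Auto BI', 'PIP', 'Med Pay'])
#                  .groupby(CLAIM_COV['CLAIMID_exp']).any().astype(int)
#                  .rename('BI').reset_index())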
EXPOSURE_BASE_FINAL = EXPOSURE_BASE_F.merge(CLAIM_COV_GRP, on=['CLAIMID_exp'], how='left')
CLAIM_BASE_FINAL = CLAIM_BASE.merge(CLAIM_COV_GRP, right_on=['CLAIMID_exp'], left_on=['CLAIMID'], how='left')
CLAIM_BASE_FINAL = CLAIM_BASE_FINAL.drop_duplicates()
INCIDENT_BASE_FINAL = pd.read_sql(("""select base_incident.CLAIMID ,base_incident.ID as incidentid ,base_incident.Ext_LossPartyType ,
base_incident.VehicleID ,base_incident.VehicleLossParty ,
base_incident.VehicleType ,base_incident.Description ,base_incident.VehiclePolStatus ,base_incident.DateSalvageAssigned ,
base_incident.OwnerRetainingSalvage ,base_incident.Ext_DidAccidentInvolve ,base_incident.Ext_DriverNumber ,base_incident.Ext_IsVehPhysicalDamaged ,
base_incident.Ext_VehDamageEstDetermined ,base_incident.UpdateTime ,base_incident.Subtype ,base_incident.Ext_LowImpactIncident ,base_incident.CreateTime ,
base_incident.VehicleDriveable,base_incident.Collision,
base_incident.Ext_RearEndCollision,
veh_type.[DESCRIPTION] as vehicletype_desc
from GWCC_Datamart_ProdCopy.dbo.o_cc_incident base_incident
inner join #All_Auto_Claims_base Auto_claims
on base_incident.CLAIMID = Auto_claims.CLAIMID
left join
GWCC_ProdCopy.dbo.cctl_vehicletype veh_type
on base_incident.vehicletype=veh_type.id
where base_incident.ETL_ISCURRENTRECORD = 1 """), cnxn)
BODY_BASE_FINAL = pd.read_sql("""select claimid as claimid,
coalesce(count(case when primarybodypart='10001' then claimid end),0) as Body_Head,
coalesce(count(case when primarybodypart='10002' then claimid end),0) as Body_Neck,
coalesce(count(case when primarybodypart='10003' then claimid end),0) as Body_Upper_ext,
coalesce(count(case when primarybodypart='10004' then claimid end),0) as Body_Trunk,
coalesce(count(case when primarybodypart='10005' then claimid end),0) as Body_Lower_ext,
coalesce(count(case when primarybodypart='10006' then claimid end),0) as Body_Unknown,
coalesce(count(case when primarybodypart='10007' then claimid end),0) as Body_Multiple
from (select a.claimid, a.id as incidentid, b.primarybodypart
from GWCC_DataMart_ProdCopy.dbo.o_cc_incident a left join GWCC_Prodcopy.dbo.cc_bodypart b
on a.ID=b.IncidentID
where a.ClaimID in (select distinct ClaimID from #All_Auto_Claims_base)
) a
group by ClaimID """, cnxn)
PAYMENT_BASE_FINAL = pd.read_sql(("""select a.claimid
,sum(case when b.TRANSACTIONTYPE_TYPECODE = 'Reserve' then b.TRANSACTIONAMOUNT else 0 end) as RESERVE,
sum(case when b.TRANSACTIONTYPE_TYPECODE = 'Payment' then b.TRANSACTIONAMOUNT else 0 end) as PAYMENT
from #All_Auto_Claims_base as a
left join
GWCC_Prodcopy.dbo.cc_claim as cc_claim
on a.claimid = cc_claim.id
left join
(select CLAIMNUMBER, ISSUEDATE, TRANSACTIONAMOUNT, TRANSACTIONTYPE_TYPECODE from
GWCC_DataMart_Prodcopy.dbo.EXT_TRANSACTION_F
WHERE TRANSACTIONTYPE_TYPECODE in ('Payment', 'Reserve') ) AS b
on cc_claim.CLAIMNUMBER = b.CLAIMNUMBER
group by a.claimid
order by a.claimid """), cnxn)
CONTRIBFACTOR_BASE_FINAL = pd.read_sql((""" select distinct a.claimid, a.pricontributingfactors , c.[description] as contrib_desc
from GWCC_ProdCopy.dbo.cc_contribfactor as a
inner join
#All_Auto_Claims_base as b
on a.claimid = b.claimid
left join GWCC_Prodcopy.dbo.cctl_pricontributingfactors c
on a.PriContributingFactors=c.ID
order by a.claimid """), cnxn)
CC_POLICY_BASE_FINAL = pd.read_sql((""" select PolicyNumber, POL.ID AS POLICYID, c.claimid, EffectiveDate, OrigEffectiveDate, ExpirationDate,PolicyType,
ReportingDate,Ext_CustomerNumber,UnderwritingGroup,TotalVehicles
from GWCC_ProdCopy.dbo.cc_policy pol
inner join
(select policyid, claimid from GWCC_ProdCopy.dbo.cc_claim as cc_claim
inner join #All_Auto_Claims_base as b
on cc_claim.id = b.claimid ) as c
on pol.id = c.policyid
where year(pol.effectivedate)>=1984 """), cnxn)
CC_POLICY_ALL_FINAL = pd.read_sql(("""select distinct ID as POLICYID,POLICYNUMBER, EFFECTIVEDATE, ORIGEFFECTIVEDATE, EXPIRATIONDATE,POLICYTYPE
from GWCC_ProdCopy.dbo.cc_policy pol
"""), cnxn)
CC_EXPOSURE_ALL_FINAL = pd.read_sql(("""select distinct ID as EXPOSUREID, CLAIMID, CLAIMANTDENORMID AS CLAIMANTDENORMID_EXP,
LOSSPARTY
from GWCC_ProdCopy.dbo.cc_EXPOSURE
"""), cnxn)
CC_CLAIM_ALL_FINAL = pd.read_sql(("""select distinct ID AS CLAIMID,CLAIMNUMBER, REPORTEDDATE, POLICYID, CLAIMANTDENORMID, INSUREDDENORMID
from GWCC_ProdCopy.dbo.cc_claiM pol
"""), cnxn)
CC_CONTACT_BASE_FINAL = pd.read_sql(("""
select DISTINCT ID as contactid,
DateOfBirth,
Gender,
Occupation,
upper(Name) as Name,
upper(FirstName) as FirstName,
upper(LastName) as LastName,
primaryaddressID
from GWCC_Prodcopy.dbo.cc_contact con"""), cnxn)
CC_ADDRESS_BASE_FINAL = pd.read_sql(("""select ID as addressid, AddressLine1 as contact_address, city as contact_city,
PostalCode as contact_postalcode, State as contact_state
from
gwcc_prodcopy.dbo.cc_address cc_address
inner join
(select primaryaddressID from gwcc_prodcopy.dbo.cc_contact as a
inner join
(select distinct CLAIMANTDENORMID from GWCC_Datamart_ProdCopy.dbo.C_EXPOSURE_D as a
inner join
#All_Auto_Claims_base as b
on a.CLAIMID = b.claimid ) c
on a.ID = c.CLAIMANTDENORMID ) as d
on cc_address.ID = d.PrimaryAddressID """), cnxn)
AVG_NOTES_PER_DAY = pd.read_sql(("""select a.claimid , (a.cnt_notes*1.0/b.days_open*1.0)*1.0 avg_notes_perday from
(select claimid, count(*) cnt_notes
from GWCC_ProdCopy.dbo.cc_note a
where claimid in (select claimid from #All_Auto_Claims_base )
and YEAR(authoringdate) = year(GETDATE()) and month(AuthoringDate) = month(GETDATE())
group by claimid ) a
left join
(select id,
case when (year(reporteddate)*100+month(reporteddate)) != (year(GETDATE())*100+month(GETDATE()))
then day(getdate()) else DATEDIFF(day, ReportedDate, getdate()) end as days_open
from GWCC_ProdCopy.dbo.cc_claim
where id in (select claimid from #All_Auto_Claims_base )) b
on a.claimid = b.id
order by (a.cnt_notes/b.days_open) desc """), cnxn)
cursor.execute("""SELECT a.CLAIMID, STUFF(
(SELECT ',' + b.Body
FROM [GWCC_Prodcopy].[dbo].[cc_note] b
WHERE a.claimID = b.claimID
order by claimid,authoringdate asc
FOR XML PATH('')),1,1,'') AS Adjuster_Notes
into #AN_REFERRED
FROM #All_Auto_Claims_base a""")
UNST_DATA_PROD = pd.read_sql(("""select * from #AN_REFERRED """), cnxn)
UNST_DATA_PROD['Adjuster_Notes'] = UNST_DATA_PROD['Adjuster_Notes'].apply(unescape)
# Converting all column names into upper case
UNST_DATA_PROD.columns = [col.upper() for col in UNST_DATA_PROD.columns]
CLAIM_BASE_FINAL.columns = [col.upper() for col in CLAIM_BASE_FINAL.columns]
EXPOSURE_BASE_FINAL.columns = [col.upper() for col in EXPOSURE_BASE_FINAL.columns]
INCIDENT_BASE_FINAL.columns = [col.upper() for col in INCIDENT_BASE_FINAL.columns]
BODY_BASE_FINAL.columns = [col.upper() for col in BODY_BASE_FINAL.columns]
PAYMENT_BASE_FINAL.columns = [col.upper() for col in PAYMENT_BASE_FINAL.columns]
CONTRIBFACTOR_BASE_FINAL.columns = [col.upper() for col in CONTRIBFACTOR_BASE_FINAL.columns]
CC_POLICY_BASE_FINAL.columns = [col.upper() for col in CC_POLICY_BASE_FINAL.columns]
CC_CONTACT_BASE_FINAL.columns = [col.upper() for col in CC_CONTACT_BASE_FINAL.columns]
CC_ADDRESS_BASE_FINAL.columns = [col.upper() for col in CC_ADDRESS_BASE_FINAL.columns]
ALL_AUTO_CLAIMS2.columns = [col.upper() for col in ALL_AUTO_CLAIMS2.columns]
COV.columns = [col.upper() for col in COV.columns]
AVG_NOTES_PER_DAY.columns = [col.upper() for col in AVG_NOTES_PER_DAY.columns]
# Normalizing text to lower
UNST_DATA_PROD['ADJUSTER_NOTES'] = UNST_DATA_PROD['ADJUSTER_NOTES'].astype(str).apply(str.lower)
red_flags = {'fire_flags': ['FIRE', 'EXPLOSION', 'BLAST', 'ARSON'],
'theft_flags': ['THEFT', 'BURGLARY', 'STEAL', 'STOLE', 'ROBBERY', 'LARCENY'],
'total_loss_flags': ['TOTALLOSS', 'TOTAL LOSS'],
'attorney_flags': ['ATTORNEY', 'LAWYER', 'COUNSEL', 'ADVOCATE', 'LITIGATION'],
'chiro_unst_flags': ['CHIROPRACTOR', 'PHYSIOTHERAPY', 'CHIROPRACTIC']}
UNST_DATA_PROD['FIRE_UNST_F'] = UNST_DATA_PROD['ADJUSTER_NOTES'].apply(
lambda x: 1 if any(flag.lower() in x for flag in red_flags['fire_flags']) else 0)
UNST_DATA_PROD['THEFT_UNST_F'] = UNST_DATA_PROD['ADJUSTER_NOTES'].apply(
lambda x: 1 if any(flag.lower() in x for flag in red_flags['theft_flags']) else 0)
UNST_DATA_PROD['TOTALLOSS_UNST_F'] = UNST_DATA_PROD['ADJUSTER_NOTES'].apply(
lambda x: 1 if any(flag.lower() in x for flag in red_flags['total_loss_flags']) else 0)
UNST_DATA_PROD['ATTORNEY_UNST_F'] = UNST_DATA_PROD['ADJUSTER_NOTES'].apply(
lambda x: 1 if any(flag.lower() in x for flag in red_flags['attorney_flags']) else 0)
UNST_DATA_PROD['CHIRO_UNST_F'] = UNST_DATA_PROD['ADJUSTER_NOTES'].apply(
lambda x: 1 if any(flag.lower() in x for flag in red_flags['chiro_unst_flags']) else 0)
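# The five flag columns above could also be built in one pass with a joined
# regex per flag set (illustrative; notes are already lower-cased above):
# for name, words in red_flags.items():
#     pattern = '|'.join(w.lower() for w in words)
#     UNST_DATA_PROD[name.upper() + '_F'] = (
#         UNST_DATA_PROD['ADJUSTER_NOTES'].str.contains(pattern).astype(int))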
CLAIM_BASE_FINAL['REPORTEDDATE'] = pd.to_datetime(CLAIM_BASE_FINAL['REPORTEDDATE'])
CLAIM_BASE_FINAL['FINAL_YR_MNTH'] = CLAIM_BASE_FINAL['REPORTEDDATE'].map(lambda x: 100 * x.year + x.month)
CLAIM_EXP_FINAL = pd.merge(CLAIM_BASE_FINAL, EXPOSURE_BASE_FINAL, how='left', left_on='CLAIMID', right_on='CLAIMID_EXP')
CLAIM_INC_FINAL = pd.merge(CLAIM_BASE_FINAL, INCIDENT_BASE_FINAL, how='left', left_on='CLAIMID', right_on='CLAIMID')
CLAIM_EXP_INC_FINAL = pd.merge(CLAIM_EXP_FINAL, INCIDENT_BASE_FINAL, how='left', left_on='INCIDENTID',
right_on='INCIDENTID')
dummy_col2 = ['CLAIMID_x', 'EXPOSUREID', 'INCIDENTID', 'CLAIMANTDENORMID', 'LOSSPARTYID', 'FINAL_YR_MNTH',
'COV_ROLLUP_3',
'CLAIMANTTYPEID_y', 'VEHICLETYPE']
AUTO_DATA_EXP_AGG = CLAIM_EXP_INC_FINAL[dummy_col2]
AUTO_DATA_EXP_AGG = AUTO_DATA_EXP_AGG.drop_duplicates()
denorm_unique = pd.DataFrame(AUTO_DATA_EXP_AGG[['CLAIMID_x', 'CLAIMANTTYPEID_y', 'CLAIMANTDENORMID']].groupby(
['CLAIMID_x', 'CLAIMANTTYPEID_y'], as_index=False)['CLAIMANTDENORMID'].apply(
lambda x: len(x.unique()))).reset_index()
denorm_unique['CLAIMANTTYPEID_y'] = denorm_unique['CLAIMANTTYPEID_y'].astype(int)
denorm_unique = denorm_unique.groupby(['CLAIMID_x', 'CLAIMANTTYPEID_y', 'CLAIMANTDENORMID']).count().reset_index()
denorm_unique = denorm_unique.pivot(index='CLAIMID_x', columns='CLAIMANTTYPEID_y',
values='CLAIMANTDENORMID').reset_index().fillna(0).drop(0, axis=1)
cols = ['CLAIMANTDENORMID_{}'.format(col) for col in denorm_unique.columns if str(col).startswith('1')]
cols.insert(0, 'CLAIMID_x')
denorm_unique.columns = cols
AUTO_DATA_EXP_AGG['INSURED_NEW_V'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10001.0 and x.VEHICLETYPE == 10003.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['TP_NEW_V'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10002.0 and x.VEHICLETYPE == 10003.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['INSURED_RENTED'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10001.0 and x.VEHICLETYPE in [10005.0, 10006.0] else 0, axis=1)
AUTO_DATA_EXP_AGG['TP_RENTED'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10002.0 and x.VEHICLETYPE in [10005.0, 10006.0] else 0, axis=1)
AUTO_DATA_EXP_AGG['INSURED_OWNED'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10001.0 and x.VEHICLETYPE == 10004.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['TP_OWNED'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10002.0 and x.VEHICLETYPE == 10004.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['INSURED_LISTED'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10001.0 and x.VEHICLETYPE == 10002.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['TP_LISTED'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10002.0 and x.VEHICLETYPE == 10002.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['INSURED_OTHER'] = AUTO_DATA_EXP_AGG.apply(
lambda x: 1 if x.LOSSPARTYID == 10001.0 and x.VEHICLETYPE == 10009.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['TP_OTHER'] = AUTO_DATA_EXP_AGG.apply(
    lambda x: 1 if x.LOSSPARTYID == 10002.0 and x.VEHICLETYPE == 10009.0 else 0, axis=1)
AUTO_DATA_EXP_AGG['EXPOSUREID'] = AUTO_DATA_EXP_AGG['EXPOSUREID'].astype(str)
AUTO_DATA_EXP_AGG['INCIDENTID'] = AUTO_DATA_EXP_AGG['INCIDENTID'].astype(str)
AUTO_DATA_EXP_AGG2 = AUTO_DATA_EXP_AGG.groupby(['CLAIMID_x'], as_index=False).agg(
{'INSURED_NEW_V': np.sum,
'TP_NEW_V': np.sum,
'INSURED_RENTED': np.sum,
'TP_RENTED': np.sum,
'INSURED_OWNED': np.sum,
'TP_OWNED': np.sum,
'INSURED_LISTED': np.sum,
'TP_LISTED': np.sum,
     'INSURED_OTHER': np.sum,
     'TP_OTHER': np.sum,
'EXPOSUREID': pd.Series.nunique,
'INCIDENTID': pd.Series.nunique})
AUTO_DATA_EXP_AGG2 = AUTO_DATA_EXP_AGG2.merge(denorm_unique, on='CLAIMID_x')
CLAIM_BASE_FINAL2 = pd.merge(CLAIM_BASE_FINAL,
                             AUTO_DATA_EXP_AGG2[['CLAIMID_x', 'CLAIMANTDENORMID_10001', 'CLAIMANTDENORMID_10002',
'CLAIMANTDENORMID_10003', 'CLAIMANTDENORMID_10004',
'CLAIMANTDENORMID_10005', 'CLAIMANTDENORMID_10006',
'CLAIMANTDENORMID_10007', 'CLAIMANTDENORMID_10008',
'CLAIMANTDENORMID_10009', 'CLAIMANTDENORMID_10010',
'CLAIMANTDENORMID_10011', 'CLAIMANTDENORMID_10014', 'INSURED_NEW_V',
'TP_NEW_V', 'INSURED_RENTED', 'TP_RENTED', 'INSURED_OWNED', 'TP_OWNED',
'INSURED_LISTED', 'TP_LISTED', 'INSURED_OTHER',
'TP_OTHER']].drop_duplicates(), how='left', left_on='CLAIMID',
right_on='CLAIMID_x')
VEH_DATA1 = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'EXPOSUREID', 'FINAL_YR_MNTH', 'LOSSPARTYID', 'LOSSDATE', 'YEAR']]
VEH_DATA1['LOSSDATE'] = pd.to_datetime(VEH_DATA1['LOSSDATE'])
VEH_DATA1['V_AGE'] = VEH_DATA1['LOSSDATE'].map(lambda X: X.year) - VEH_DATA1['YEAR']
VEH_DATA1['LOSSPARTYID'] = VEH_DATA1['LOSSPARTYID'].fillna(0)
VEH_DATA1['FP_V_AGE'] = VEH_DATA1.apply(lambda x: x.V_AGE if int(x.LOSSPARTYID) == 10001 else 0, axis=1)
VEH_DATA1['TP_V_AGE'] = VEH_DATA1.apply(lambda x: x.V_AGE if int(x.LOSSPARTYID) == 10002 else 0, axis=1)
VEH_DATA2 = VEH_DATA1.groupby(['CLAIMID_x'], as_index=False).agg({'FP_V_AGE': np.max, 'TP_V_AGE': np.max})
VEH_DATA2['FP_V_AGE'] = VEH_DATA2['FP_V_AGE'].fillna(4)
VEH_DATA2['TP_V_AGE'] = VEH_DATA2['TP_V_AGE'].fillna(4)
VEH_DATA2['FP_V_AGE'] = VEH_DATA2['FP_V_AGE'].apply(lambda X: 0 if X < 0 else X)
VEH_DATA2['TP_V_AGE'] = VEH_DATA2['TP_V_AGE'].apply(lambda X: 0 if X < 0 else X)
VEH_DATA2['FP_V_AGE'] = VEH_DATA2['FP_V_AGE'].apply(lambda X: 50 if X > 50 else X)
VEH_DATA2['TP_V_AGE'] = VEH_DATA2['TP_V_AGE'].apply(lambda X: 50 if X > 50 else X)
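# Equivalent hedged sketch: the four apply() clamps above collapse into a single
# clip call with the same [0, 50] bounds; `_age_demo` matches the clamped columns.
_age_demo = VEH_DATA2[['FP_V_AGE', 'TP_V_AGE']].clip(lower=0, upper=50)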
AUTO_FINAL_DATA = pd.merge(CLAIM_BASE_FINAL, VEH_DATA2, how='left', left_on='CLAIMID', right_on='CLAIMID_x')
POLICY_DATA_BASE = CC_POLICY_BASE_FINAL
POLICY_DATA_ALL = CC_POLICY_ALL_FINAL
POLICY_DATA_ALL.columns = map(str.upper, POLICY_DATA_ALL.columns)
POLICY_DATA_BASE.columns = map(str.upper, POLICY_DATA_BASE.columns)
POLICY_DATA_BASE['EFFECTIVEDATE'] = pd.to_datetime(POLICY_DATA_BASE['EFFECTIVEDATE'])
POLICY_DATA_BASE['EXPIRATIONDATE'] = pd.to_datetime(POLICY_DATA_BASE['EXPIRATIONDATE'])
POLICY_DATA_BASE2 = pd.merge(POLICY_DATA_BASE, POLICY_DATA_ALL, on='POLICYNUMBER')
del POLICY_DATA_BASE['CLAIMID'] # not required as we have PolicyID to join on
AUTO_FINAL_DATA3 = pd.merge(AUTO_FINAL_DATA, POLICY_DATA_BASE, how='left', on='POLICYID')
AUTO_FINAL_DATA3['REPORTEDDATE'] = pd.to_datetime(AUTO_FINAL_DATA3['REPORTEDDATE'])
AUTO_FINAL_DATA3['EXPIRATIONDATE'] = pd.to_datetime(AUTO_FINAL_DATA3['EXPIRATIONDATE'])
AUTO_FINAL_DATA3['LOSSDATE'] = pd.to_datetime(AUTO_FINAL_DATA3['LOSSDATE'])
AUTO_FINAL_DATA3['PC_ISSUE_REPORT_LAG'] = (AUTO_FINAL_DATA3.REPORTEDDATE - AUTO_FINAL_DATA3.EFFECTIVEDATE).astype(
'timedelta64[D]')
AUTO_FINAL_DATA3['PC_REPORT_EXPRN_LAG'] = (AUTO_FINAL_DATA3.EXPIRATIONDATE - AUTO_FINAL_DATA3.REPORTEDDATE).astype(
'timedelta64[D]')
AUTO_FINAL_DATA3['PC_LOSS_EXPRN_LAG'] = (AUTO_FINAL_DATA3.EXPIRATIONDATE - AUTO_FINAL_DATA3.LOSSDATE).astype(
'timedelta64[D]')
AUTO_FINAL_DATA3['PC_REPORT_EXPRN_LAG'] = AUTO_FINAL_DATA3['PC_REPORT_EXPRN_LAG'].fillna(0)
AUTO_FINAL_DATA3['PC_ISSUE_REPORT_LAG'] = AUTO_FINAL_DATA3['PC_ISSUE_REPORT_LAG'].fillna(0)
LOSS_DESC_DATA = CLAIM_BASE_FINAL[['CLAIMID', 'DESCRIPTION']].drop_duplicates()
LOSS_DESC_DATA['DESCRIPTION'] = LOSS_DESC_DATA['DESCRIPTION'].astype(str).apply(str.upper)
COLL_DATA2 = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'COLLISION', 'EXT_REARENDCOLLISION']].drop_duplicates()
COLL_DATA2['COLLISION2'] = COLL_DATA2['COLLISION'] + COLL_DATA2['EXT_REARENDCOLLISION']
REA_DATA = COLL_DATA2.groupby(['CLAIMID_x'], as_index=False).agg({'EXT_REARENDCOLLISION': np.sum})
COLL_DATA3 = COLL_DATA2.groupby(['CLAIMID_x'], as_index=False).agg({'COLLISION2': np.sum})
LOSS_DESC_DATA['REA'] = LOSS_DESC_DATA['DESCRIPTION'].str.contains('REA') + LOSS_DESC_DATA['DESCRIPTION'].str.contains(
'REAR')
LOSS_DESC_DATA['COLLISION'] = LOSS_DESC_DATA['DESCRIPTION'].str.contains('REA') + LOSS_DESC_DATA[
'DESCRIPTION'].str.contains('REAR') + LOSS_DESC_DATA['DESCRIPTION'].str.contains('COLL') + LOSS_DESC_DATA[
'DESCRIPTION'].str.contains('COLI')
LOSS_DESC_DATA['THEFT_FLAG'] = LOSS_DESC_DATA['DESCRIPTION'].str.contains('THEFT') + LOSS_DESC_DATA[
'DESCRIPTION'].str.contains('STOLEN') + LOSS_DESC_DATA['DESCRIPTION'].str.contains('BURGLARY') + LOSS_DESC_DATA[
'DESCRIPTION'].str.contains('ROBBERY') + LOSS_DESC_DATA['DESCRIPTION'].str.contains(
'STEAL') + LOSS_DESC_DATA['DESCRIPTION'].str.contains('LARCENY')
LOSS_DESC_DATA['FIRE_FLAG'] = LOSS_DESC_DATA['DESCRIPTION'].str.contains('FIRE') + LOSS_DESC_DATA[
'DESCRIPTION'].str.contains('BURN') + LOSS_DESC_DATA['DESCRIPTION'].str.contains('EXPLOSION') + LOSS_DESC_DATA[
'DESCRIPTION'].str.contains('FLAME') + LOSS_DESC_DATA['DESCRIPTION'].str.contains(
'ARSON') + LOSS_DESC_DATA['DESCRIPTION'].str.contains('BLAST')
LOSS_DESC_DATA2 = pd.merge(LOSS_DESC_DATA, COLL_DATA3, how='inner', left_on='CLAIMID', right_on='CLAIMID_x')
LOSS_DESC_DATA2['COLLISION2'] = LOSS_DESC_DATA2['COLLISION2'].astype(object)
LOSS_DESC_DATA2['COLLISION_F'] = LOSS_DESC_DATA2['COLLISION'] + LOSS_DESC_DATA2['COLLISION2']
LOSS_DESC_DATA2['COLLISION_F2'] = LOSS_DESC_DATA2['COLLISION_F'].apply(lambda x: 1 if x else 0)
LOSS_DESC_DATA3 = pd.merge(LOSS_DESC_DATA2, REA_DATA, how='inner', on='CLAIMID_x')
LOSS_DESC_DATA3['REAR_END_FINAL'] = LOSS_DESC_DATA3['REA'] + LOSS_DESC_DATA3['EXT_REARENDCOLLISION']
LOSS_DESC_DATA3['REAR_END_FINAL'] = LOSS_DESC_DATA3['REAR_END_FINAL'].apply(lambda x: 1 if x else 0)
TOTALLOSS_DATA = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'TOTALLOSS']].drop_duplicates()
TOTALLOSS_DATA2 = TOTALLOSS_DATA.groupby(['CLAIMID_x'], as_index=False).agg({'TOTALLOSS': np.sum})
TOTALLOSS_DATA2['TOTALLOSS_F'] = TOTALLOSS_DATA2['TOTALLOSS'].apply(lambda x: 1 if x else 0)
LOSS_DESC_DATA4 = pd.merge(LOSS_DESC_DATA3, TOTALLOSS_DATA2, how='inner', on='CLAIMID_x')
LOW_IMPACT_DATA = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'EXT_LOWIMPACTINCIDENT']].drop_duplicates()
LOW_IMPACT_DATA2 = LOW_IMPACT_DATA.groupby(['CLAIMID_x'], as_index=False).agg({'EXT_LOWIMPACTINCIDENT': np.sum})
LOW_IMPACT_DATA2['EXT_LOWIMPACTINCIDENT_F'] = LOW_IMPACT_DATA2['EXT_LOWIMPACTINCIDENT'].apply(lambda x: 1 if x else 0)
LOSS_DESC_DATA5 = pd.merge(LOSS_DESC_DATA4, LOW_IMPACT_DATA2, how='inner', on='CLAIMID_x')
VEH_DRIVE_DATA = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'VEHICLEDRIVEABLE']].drop_duplicates()
VEH_DRIVE_DATA2 = VEH_DRIVE_DATA.groupby(['CLAIMID_x'], as_index=False).agg({'VEHICLEDRIVEABLE': np.sum})
VEH_DRIVE_DATA2['VEHICLEDRIVEABLE_F'] = VEH_DRIVE_DATA2['VEHICLEDRIVEABLE'].apply(lambda x: 1 if x else 0)
LOSS_DESC_DATA6 = pd.merge(LOSS_DESC_DATA5, VEH_DRIVE_DATA2, how='inner', on='CLAIMID_x')
LOSS_DESC_DATA7 = LOSS_DESC_DATA6[
['CLAIMID_x', 'THEFT_FLAG', 'FIRE_FLAG', 'COLLISION_F2', 'REAR_END_FINAL', 'TOTALLOSS_F', 'EXT_LOWIMPACTINCIDENT_F',
'VEHICLEDRIVEABLE_F']]
AUTO_FINAL_DATA7 = pd.merge(AUTO_FINAL_DATA3, LOSS_DESC_DATA7, how='left', left_on='CLAIMID_x', right_on='CLAIMID_x')
cursor.execute("""select claimid, matchreasons
into #iso_data
from GWCC_ProdCopy.dbo.cc_claimisomatchreport
where ClaimID in (select distinct ClaimID from #All_Auto_Claims_base)""")
# Deduplicate claim / matchreasons pairs ahead of claim-level concatenation
cursor.execute("""select distinct claimid, matchreasons into #iso_distinct
from
#iso_data
select * from #iso_distinct
order by claimid""")
# Split the comma-delimited match reasons into per-position columns (Val1..Val7)
cursor.execute("""DECLARE @inpTbl TABLE(claimid VARCHAR(100), matchreasons VARCHAR(100));
INSERT INTO @inpTbl
select * from #iso_distinct;
WITH ConvertToXMLLikeStrings AS
(
SELECT claimid, CAST('<x>' + REPLACE(matchreasons,',','</x><x>') + '</x>' AS XML) AS MyData
FROM @inpTbl AS it
)
SELECT claimid, MyData.value('x[1]','varchar(max)') AS Val1
,MyData.value('x[2]','varchar(max)') AS Val2
,MyData.value('x[3]','varchar(max)') AS Val3
,MyData.value('x[4]','varchar(max)') AS Val4
,MyData.value('x[5]','varchar(max)') AS Val5
,MyData.value('x[6]','varchar(max)') AS Val6
,MyData.value('x[7]','varchar(max)') AS Val7
into #iso_parsed
FROM ConvertToXMLLikeStrings;""")
cursor.execute("""select * into #iso_final from (
(select claimid, val1 as matchreason from #iso_parsed where Val1 is not null)
union
(select claimid, Val2 from #iso_parsed where Val2 is not null)
union
(select claimid, Val3 from #iso_parsed where Val3 is not null)
union
(select claimid, Val4 from #iso_parsed where Val4 is not null)
union
(select claimid, Val5 from #iso_parsed where Val5 is not null)
union
(select claimid, Val6 from #iso_parsed where Val6 is not null)
union
(select claimid, Val7 from #iso_parsed where Val7 is not null)
) a
order by claimid""")
cursor.execute("""SELECT a.claimid, STUFF(
(SELECT ',' + b.matchreason
FROM #iso_final b
WHERE a.claimID = b.claimID
FOR XML PATH('')),1,1,'') AS iso_concat
into #concat_iso
FROM #iso_final AS a
GROUP BY a.claimid""")
# Sum ISO match-reason occurrences per claim
cursor.execute("""DECLARE @inpTbl1 TABLE(claimid VARCHAR(100), matchreasons VARCHAR(100));
INSERT INTO @inpTbl1
select * from #iso_data;
WITH ConvertToXMLLikeStrings AS
(
SELECT claimid, CAST('<x>' + REPLACE(matchreasons,',','</x><x>') + '</x>' AS XML) AS MyData
FROM @inpTbl1 AS it
)
SELECT claimid, MyData.value('x[1]','varchar(max)') AS Val1
,MyData.value('x[2]','varchar(max)') AS Val2
,MyData.value('x[3]','varchar(max)') AS Val3
,MyData.value('x[4]','varchar(max)') AS Val4
,MyData.value('x[5]','varchar(max)') AS Val5
,MyData.value('x[6]','varchar(max)') AS Val6
,MyData.value('x[7]','varchar(max)') AS Val7
into #iso_parsed_sum
FROM ConvertToXMLLikeStrings;""")
cursor.execute("""select * into #iso_row
from
(SELECT row_number() over( order by claimid) as row, claimid, val1, val2, val3, val4, val5, val6, val7
from #iso_parsed_sum) a""")
cursor.execute("""select distinct row, claimid
into #iso_row_distinct
from #iso_row""")
cursor.execute("""select * into #iso_final_sum from (
(select row, val1 as matchreason from #iso_row where Val1 is not null)
union
(select row, Val2 from #iso_row where Val2 is not null)
union
(select row, Val3 from #iso_row where Val3 is not null)
union
(select row, Val4 from #iso_row where Val4 is not null)
union
(select row, Val5 from #iso_row where Val5 is not null)
union
(select row, Val6 from #iso_row where Val6 is not null)
union
(select row, Val7 from #iso_row where Val7 is not null)
) a
order by row""")
cursor.execute("""select a.row, b.claimid, a.matchreason
into #iso_sum_data
from #iso_final_sum a left join #iso_row_distinct b
on a.row=b.row""")
cursor.execute("""select claimid, (case when matchreason='A' then 1 else 0 end) as ISO_address,
(case when matchreason='D' then 1 else 0 end) as ISO_DL,
(case when matchreason='L' then 1 else 0 end) as ISO_Licenseplatenumber,
(case when matchreason='LL' then 1 else 0 end) as ISO_Losslocation,
(case when matchreason='N' then 1 else 0 end) as ISO_Name,
(case when matchreason='P' then 1 else 0 end) as ISO_Phone,
(case when matchreason='S' then 1 else 0 end) as ISO_SSN,
(case when matchreason='V' then 1 else 0 end) as ISO_VIN
into #iso_flag_sum
from #iso_sum_data""")
cursor.execute("""select claimid,
SUM(ISO_address) as ISO_address,
SUM(iso_dl) as iso_dl,
SUM(ISO_Licenseplatenumber) as ISO_Licenseplatenumber,
SUM(ISO_Losslocation) as ISO_Losslocation,
SUM(ISO_Name) as ISO_Name,
SUM(ISO_Phone) as ISO_Phone,
SUM(ISO_SSN) as ISO_SSN,
SUM(ISO_VIN) as ISO_VIN
into #iso_matchreason_f
from #iso_flag_sum
group by claimid""")
ISO_DATA = pd.read_sql("select * from #iso_matchreason_f", cnxn)
ISO_DATA.columns = [col.upper() for col in ISO_DATA.columns]
AUTO_FINAL_DATA10 = pd.merge(AUTO_FINAL_DATA7, ISO_DATA, how='left', on='CLAIMID')
AUTO_FINAL_DATA12 = pd.merge(AUTO_FINAL_DATA10, PAYMENT_BASE_FINAL, how='left', on=['CLAIMID'])
cursor.execute("""select ClaimID, PriContributingFactors INTO #CONTRIB
from GWCC_ProdCopy.dbo.cc_contribfactor
WHERE ClaimID IN (SELECT DISTINCT ClaimID FROM #All_Auto_Claims_base)""")
cursor.execute("""select claimid, (case when PriContributingFactors in ('10077','10078','10079') then 1 else 0 end) as glass_damage,
(case when PriContributingFactors in ('10153','10024') then 1 else 0 end) as rear_end_pc,
(case when PriContributingFactors in ('10094') then 1 else 0 end) as intersection_accident,
(case when PriContributingFactors in ('10083','10154','10169') then 1 else 0 end) as speed_headon_racing,
(case when PriContributingFactors in ('10141') then 1 else 0 end) as parked_vehicle,
(case when PriContributingFactors in ('10069') then 1 else 0 end) as following_closely
into #contri_factor
from #CONTRIB""")
cursor.execute("""select ClaimID,
sum(glass_damage) as glass_damage,
sum(rear_end_pc) as rear_end_pc,
sum(intersection_accident) as intersection_accident,
sum(speed_headon_racing) as speed_headon_racing,
sum(parked_vehicle) as parked_vehicle,
sum(following_closely) as following_closely
into #claim_contri_1
from #contri_factor
group by ClaimID""")
cursor.execute("""select ClaimID,
(case when glass_damage=0 then 0 else 1 end) as glass_damage,
(case when rear_end_pc=0 then 0 else 1 end) as rear_end_pc,
(case when intersection_accident=0 then 0 else 1 end) as intersection_accident,
(case when speed_headon_racing=0 then 0 else 1 end) as speed_headon_racing,
(case when parked_vehicle=0 then 0 else 1 end) as parked_vehicle,
(case when following_closely=0 then 0 else 1 end) as following_closely
into #claim_contri
from #claim_contri_1""")
CONTRIBFACTOR_17 = pd.read_sql("select * from #claim_contri", cnxn)
CONTRIBFACTOR_17.columns = [col.upper() for col in CONTRIBFACTOR_17.columns]
AUTO_FINAL_DATA15 = pd.merge(AUTO_FINAL_DATA12, CONTRIBFACTOR_17, how='left', on=['CLAIMID'])
AUTO_FINAL_DATA16 = pd.merge(AUTO_FINAL_DATA15, BODY_BASE_FINAL, how='left', on='CLAIMID')
AVG_NOTES_PER_DAY.columns = [col.upper() for col in AVG_NOTES_PER_DAY.columns]
AUTO_FINAL_DATA18 = pd.merge(AUTO_FINAL_DATA16, AVG_NOTES_PER_DAY, how='left', left_on='CLAIMID', right_on='CLAIMID')
CC_CLAIM_EXPOSURE = pd.merge(CC_CLAIM_ALL_FINAL, CC_EXPOSURE_ALL_FINAL, how='left', on='CLAIMID')
CC_CLAIM_EXPOSURE_POLICY = pd.merge(CC_CLAIM_EXPOSURE, CC_POLICY_ALL_FINAL, how='left', on='POLICYID')
CC_CLAIM_EXPOSURE_POLICY_CON_1 = pd.merge(CC_CLAIM_EXPOSURE_POLICY, CC_CONTACT_BASE_FINAL[['CONTACTID', 'NAME']],
how='left', left_on='CLAIMANTDENORMID_EXP', right_on='CONTACTID')
CC_CLAIM_EXPOSURE_POLICY_CON_2 = pd.merge(CC_CLAIM_EXPOSURE_POLICY_CON_1, CC_CONTACT_BASE_FINAL[['CONTACTID', 'NAME']],
how='left', left_on='CLAIMANTDENORMID', right_on='CONTACTID')
CC_CLAIM_EXPOSURE_POLICY_CON_3 = pd.merge(CC_CLAIM_EXPOSURE_POLICY_CON_2, CC_CONTACT_BASE_FINAL[['CONTACTID', 'NAME']],
how='left', left_on='INSUREDDENORMID', right_on='CONTACTID')
CC_CLAIM_EXPOSURE_POLICY_CON_3.columns = ['CLAIMID', 'CLAIMNUMBER', 'REPORTEDDATE', 'POLICYID',
'CLAIMANTDENORMID', 'INSUREDDENORMID', 'EXPOSUREID',
'CLAIMANTDENORMID_EXP', 'LOSSPARTY', 'POLICYNUMBER', 'EFFECTIVEDATE',
'ORIGEFFECTIVEDATE', 'EXPIRATIONDATE', 'POLICYTYPE', 'CONTACTID_x',
'NAME_EXP', 'CONTACTID_y', 'CLAIMANTNAME', 'CONTACTID', 'INSUREDNAME']
CC_CLAIM_EXPOSURE_POLICY_CONTACT = CC_CLAIM_EXPOSURE_POLICY_CON_3[['EXPOSUREID', 'CLAIMID', 'LOSSPARTY',
'CLAIMNUMBER', 'REPORTEDDATE', 'POLICYID',
'CLAIMANTDENORMID_EXP', 'CONTACTID_x', 'NAME_EXP',
'CLAIMANTDENORMID', 'CONTACTID_y', 'CLAIMANTNAME',
'INSUREDDENORMID',
'CONTACTID', 'INSUREDNAME', 'POLICYNUMBER',
'EFFECTIVEDATE', 'ORIGEFFECTIVEDATE',
                                                   'EXPIRATIONDATE', 'POLICYTYPE']].copy()
CC_CLAIM_EXPOSURE_POLICY_CONTACT['LOSSPARTY'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT['LOSSPARTY'].fillna(0)
# Row-level name resolution: prefer the exposure-level name, falling back to the
# claimant or insured name depending on loss party (10001 = insured, 10002 = third party).
def func(row):
    if pd.isnull(row['NAME_EXP']) and not pd.isnull(row['CLAIMANTNAME']):
        return row['CLAIMANTNAME']
    elif pd.isnull(row['NAME_EXP']) and pd.isnull(row['CLAIMANTNAME']) and int(row['LOSSPARTY']) == 10001:
        return row['INSUREDNAME']
    else:
        return row['NAME_EXP']
def func1(row):
    if pd.isnull(row['NAME_EXP']) and not pd.isnull(row['CLAIMANTNAME']) and int(row['LOSSPARTY']) == 10002:
        return row['CLAIMANTNAME']
    elif pd.isnull(row['NAME_EXP']) and pd.isnull(row['CLAIMANTNAME']) and not pd.isnull(
            row['INSUREDNAME']) and int(row['LOSSPARTY']) == 10001:
        return row['INSUREDNAME']
    else:
        return row['NAME_EXP']
def func2(row):
    if pd.isnull(row['INSUREDNAME']) and pd.isnull(row['NAME_EXP']) and not pd.isnull(
            row['CLAIMANTNAME']) and int(row['LOSSPARTY']) == 10001:
        return row['CLAIMANTNAME']
    elif pd.isnull(row['INSUREDNAME']) and not pd.isnull(row['NAME_EXP']) and int(row['LOSSPARTY']) == 10001:
        return row['NAME_EXP']
    else:
        return row['INSUREDNAME']
def func3(row):
    if not pd.isnull(row['NAME_EXP']) and int(row['LOSSPARTY']) == 10002:
        return row['NAME_EXP']
    else:
        return row['CLAIMANTNAME']
CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT.apply(func, axis=1)
CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME_1'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT.apply(func1, axis=1)
CC_CLAIM_EXPOSURE_POLICY_CONTACT['INSUREDNAME_F'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT.apply(func2, axis=1)
CC_CLAIM_EXPOSURE_POLICY_CONTACT['CLAIMANTNAME_F'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT.apply(func3, axis=1)
CC_CLAIM_EXPOSURE_POLICY_CONTACT.columns = ['EXPOSUREID_N', 'CLAIMID_N', 'LOSSPARTY_N', 'CLAIMNUMBER_N',
'REPORTEDDATE_N',
'POLICYID_N', 'CLAIMANTDENORMID_EXP_N', 'CONTACTID_x_N', 'NAME_EXP_N',
'CLAIMANTDENORMID_N', 'CONTACTID_y_N', 'CLAIMANTNAME_N',
'INSUREDDENORMID_N',
'CONTACTID_N', 'INSUREDNAME_N', 'POLICYNUMBER_N', 'EFFECTIVEDATE_N',
'ORIGEFFECTIVEDATE_N', 'EXPIRATIONDATE_N', 'POLICYTYPE_N', 'FINALNAME_N',
'FINALNAME_1_N', 'INSUREDNAME_F_N', 'CLAIMANTNAME_F_N']
CLAIM_EXP_INC_FINAL_CON = pd.merge(CLAIM_EXP_INC_FINAL, CC_CLAIM_EXPOSURE_POLICY_CONTACT, how='left',
left_on=['CLAIMID_x', 'EXPOSUREID'], right_on=['CLAIMID_N', 'EXPOSUREID_N'])
NAME_CLAIM = CC_CLAIM_EXPOSURE_POLICY_CONTACT[CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME_1_N'].isnull() == False][
['CLAIMID_N', 'FINALNAME_1_N']].drop_duplicates()
PREV_CLAIMS_CLM1 = pd.merge(CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'CLAIMANTNAME_F_N']].drop_duplicates(), NAME_CLAIM,
how='left', left_on='CLAIMANTNAME_F_N', right_on='FINALNAME_1_N')
PREV_CLAIMS_CLM3 = PREV_CLAIMS_CLM1.groupby(['CLAIMID_x', 'CLAIMANTNAME_F_N'], as_index=False).CLAIMID_N.count()
PREV_CLAIMS_1 = pd.merge(CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'FINALNAME_1_N']].drop_duplicates(), NAME_CLAIM,
how='left', on='FINALNAME_1_N')
PREV_CLAIMS_3 = PREV_CLAIMS_1.groupby(['CLAIMID_x', 'FINALNAME_1_N'], as_index=False).CLAIMID_N.count()
CON_CLM_1 = CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'CLAIMANTNAME_F_N', 'CLAIMANTDENORMID_EXP_N']]
CON_CLM_2 = pd.merge(CON_CLM_1, PREV_CLAIMS_CLM3, how='left', on=['CLAIMID_x', 'CLAIMANTNAME_F_N'])
CON_CLM_2.columns = ['CLAIMID', 'CLAIMANTNAME_F_N', 'CLAIMANTDENORMID_EXP_N', 'CLAIM_CNT_NAME_CLM']
CON_EXP_1 = CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'FINALNAME_1_N', 'CLAIMANTDENORMID_EXP_N']]
CON_EXP_2 = pd.merge(CON_EXP_1, PREV_CLAIMS_3, how='left', on=['CLAIMID_x', 'FINALNAME_1_N'])
CON_INS_1 = CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'INSUREDNAME_F_N', 'CLAIMANTDENORMID_EXP_N']]
PREV_CLAIMS_INS1 = pd.merge(CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'INSUREDNAME_F_N']].drop_duplicates(), NAME_CLAIM,
how='left', left_on='INSUREDNAME_F_N', right_on='FINALNAME_1_N')
PREV_CLAIMS_INS3 = PREV_CLAIMS_INS1.groupby(['CLAIMID_x', 'INSUREDNAME_F_N'], as_index=False).CLAIMID_N.count()
CON_EXP_2.columns = ['CLAIMID', 'FINALNAME_1_N', 'CLAIMANTDENORMID_EXP_N', 'CLAIM_CNT_NAME_EXP']
CON_INS_2 = pd.merge(CON_INS_1, PREV_CLAIMS_INS3, how='left', on=['CLAIMID_x', 'INSUREDNAME_F_N'])
CON_INS_2.columns = ['CLAIMID', 'INSUREDNAME_F_N', 'CLAIMANTDENORMID_EXP_N', 'CLAIM_CNT_NAME_INS']
CON_EXP_2['CLAIM_CNT_NAME_EXP'] = CON_EXP_2['CLAIM_CNT_NAME_EXP'].fillna(1)
CON_INS_2['CLAIM_CNT_NAME_INS'] = CON_INS_2['CLAIM_CNT_NAME_INS'].fillna(1)
CON_CLM_2['CLAIM_CNT_NAME_CLM'] = CON_CLM_2['CLAIM_CNT_NAME_CLM'].fillna(1)
CON_EXP_2['RANK1'] = CON_EXP_2.sort_values(['CLAIM_CNT_NAME_EXP', 'FINALNAME_1_N'], ascending=[0, 0]).groupby(
['CLAIMID']).cumcount() + 1
CON_INS_2['RANK1'] = CON_INS_2.sort_values(['CLAIM_CNT_NAME_INS', 'INSUREDNAME_F_N'], ascending=[0, 0]).groupby(
['CLAIMID']).cumcount() + 1
CON_CLM_2['RANK1'] = CON_CLM_2.sort_values(['CLAIM_CNT_NAME_CLM', 'CLAIMANTNAME_F_N'], ascending=[0, 0]).groupby(
['CLAIMID']).cumcount() + 1
CON_EXP_3 = CON_EXP_2[CON_EXP_2['RANK1'] == 1]
CON_INS_3 = CON_INS_2[CON_INS_2['RANK1'] == 1]
CON_CLM_3 = CON_CLM_2[CON_CLM_2['RANK1'] == 1]
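# The cumcount-based RANK1 pattern above keeps, per claim, the row with the highest
# count (name as tie-breaker). An equivalent hedged sketch for one of the frames,
# identical up to tie ordering: sort descending, then keep the first row per CLAIMID.
_con_exp_top = (CON_EXP_2.sort_values(['CLAIM_CNT_NAME_EXP', 'FINALNAME_1_N'],
                                      ascending=[False, False])
                .drop_duplicates('CLAIMID', keep='first'))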
CON_CLM_4 = CON_CLM_3[['CLAIMID', 'CLAIM_CNT_NAME_CLM']]
CON_INS_4 = CON_INS_3[['CLAIMID', 'CLAIM_CNT_NAME_INS']]
CON_CLM_INS_4 = pd.merge(CON_CLM_4, CON_INS_4, how='left', on=['CLAIMID'])
AUTO_FINAL_DATA19 = pd.merge(AUTO_FINAL_DATA18, CON_CLM_INS_4, how='left', on=['CLAIMID'])
PREV_RENEWALS = CC_CLAIM_EXPOSURE_POLICY_CONTACT.groupby("POLICYNUMBER_N", as_index=False).agg(
{"EFFECTIVEDATE_N": pd.Series.nunique})
PREV_POL_CLAIMS = CC_CLAIM_EXPOSURE_POLICY_CONTACT.groupby("POLICYNUMBER_N", as_index=False).agg(
{"CLAIMID_N": pd.Series.nunique})
AUTO_FINAL_DATA20 = pd.merge(AUTO_FINAL_DATA19, PREV_RENEWALS, how='left', left_on='POLICYNUMBER',
right_on='POLICYNUMBER_N')
TEN_LIST_COLS = ['CLAIMID_N', 'EFFECTIVEDATE_N', 'ORIGEFFECTIVEDATE_N', 'FINALNAME_1_N']
TEN_LIST_EXP = CC_CLAIM_EXPOSURE_POLICY_CONTACT[~CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME_1_N'].isnull()][
    TEN_LIST_COLS].drop_duplicates()
CC_CLAIM_EXPOSURE_POLICY_CONTACT['INS_DEN'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT.apply(
    lambda x: x.CLAIMANTDENORMID_EXP_N if pd.isnull(x.INSUREDDENORMID_N) else x.INSUREDDENORMID_N, axis=1)
CC_CLAIM_EXPOSURE_POLICY_CONTACT['CLM_DEN'] = CC_CLAIM_EXPOSURE_POLICY_CONTACT.apply(
    lambda x: x.CLAIMANTDENORMID_EXP_N if pd.isnull(x.INSUREDDENORMID_N) else x.INSUREDDENORMID_N, axis=1)
NAME_CLAIM_NULL_EXP = \
CC_CLAIM_EXPOSURE_POLICY_CONTACT[CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME_1_N'].isnull()][
['CLAIMID_N', 'CLAIMANTDENORMID_EXP_N']].drop_duplicates()
NAME_CLAIM_NULL_INS = \
CC_CLAIM_EXPOSURE_POLICY_CONTACT[CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME_1_N'].isnull()][
['CLAIMID_N', 'INS_DEN']].drop_duplicates()
NAME_CLAIM_NULL_CLM = \
CC_CLAIM_EXPOSURE_POLICY_CONTACT[CC_CLAIM_EXPOSURE_POLICY_CONTACT['FINALNAME_1_N'].isnull()][
['CLAIMID_N', 'CLM_DEN']].drop_duplicates()
CC_CLAIM_EXPOSURE_POLICY_CONTACT_T = CC_CLAIM_EXPOSURE_POLICY_CONTACT.drop_duplicates()
TEN_LIST_EXP = \
    CC_CLAIM_EXPOSURE_POLICY_CONTACT_T[~CC_CLAIM_EXPOSURE_POLICY_CONTACT_T['FINALNAME_1_N'].isnull()][
        ['CLAIMID_N', 'EFFECTIVEDATE_N', 'ORIGEFFECTIVEDATE_N', 'FINALNAME_1_N']].drop_duplicates()
TEN_LIST_EXP['ORIGEFFECTIVEDATE_N'] = pd.to_datetime(TEN_LIST_EXP['ORIGEFFECTIVEDATE_N'], errors='coerce')
TEN_LIST_EXP['EFFECTIVEDATE_N'] = pd.to_datetime(TEN_LIST_EXP['EFFECTIVEDATE_N'], errors='coerce')
# Assume today's date when the effective date is missing
TEN_LIST_EXP['EFFECTIVEDATE_N'] = TEN_LIST_EXP['EFFECTIVEDATE_N'].fillna(pd.Timestamp('today'))
TEN_LIST_EXP['ORIG_EFF_YR'] = TEN_LIST_EXP['ORIGEFFECTIVEDATE_N'].dt.year
TEN_LIST_EXP['EFF_DATE_F'] = TEN_LIST_EXP.apply(
    lambda x: x.EFFECTIVEDATE_N if pd.isnull(x.ORIG_EFF_YR) else x.ORIGEFFECTIVEDATE_N, axis=1)
TEN_LIST_EXP_NULL = \
    CC_CLAIM_EXPOSURE_POLICY_CONTACT_T[CC_CLAIM_EXPOSURE_POLICY_CONTACT_T['FINALNAME_1_N'].isnull()][
        ['CLAIMID_N', 'EFFECTIVEDATE_N', 'ORIGEFFECTIVEDATE_N', 'INS_DEN']].drop_duplicates()
TEN_LIST_EXP_NULL['ORIGEFFECTIVEDATE_N'] = pd.to_datetime(TEN_LIST_EXP_NULL['ORIGEFFECTIVEDATE_N'], errors='coerce')
TEN_LIST_EXP_NULL['EFFECTIVEDATE_N'] = pd.to_datetime(TEN_LIST_EXP_NULL['EFFECTIVEDATE_N'], errors='coerce')
TEN_LIST_EXP_NULL['ORIG_EFF_YR'] = TEN_LIST_EXP_NULL['ORIGEFFECTIVEDATE_N'].dt.year
TEN_LIST_EXP_NULL['EFF_DATE_F'] = TEN_LIST_EXP_NULL.apply(
    lambda x: x.EFFECTIVEDATE_N if pd.isnull(x.ORIG_EFF_YR) else x.ORIGEFFECTIVEDATE_N, axis=1)
TEN_LIST_EXP_NULL_F = pd.DataFrame(TEN_LIST_EXP_NULL.groupby('INS_DEN', as_index=False)['EFF_DATE_F'].min())
CLAIM_EXP_INC_FINAL_CON['INS_DEN'] = CLAIM_EXP_INC_FINAL_CON.apply(
    lambda x: x.CLAIMANTDENORMID_EXP_N if pd.isnull(x.INSUREDDENORMID_N) else x.INSUREDDENORMID_N, axis=1)
CLAIM_EXP_INC_FINAL_CON['CLM_DEN'] = CLAIM_EXP_INC_FINAL_CON.apply(
    lambda x: x.CLAIMANTDENORMID_EXP_N if pd.isnull(x.INSUREDDENORMID_N) else x.INSUREDDENORMID_N, axis=1)
TEN_INS_1 = CLAIM_EXP_INC_FINAL_CON[['CLAIMID_x', 'INSUREDNAME_F_N', 'INS_DEN']]
TEN_INS_2 = pd.merge(TEN_INS_1, TEN_LIST_EXP_NULL_F, how='left', on='INS_DEN')
TEN_LIST_EXP_F = pd.DataFrame(TEN_LIST_EXP.groupby('FINALNAME_1_N', as_index=False)['EFF_DATE_F'].min())
TEN_INS_3 = pd.merge(TEN_INS_2, TEN_LIST_EXP_F, how='left', left_on='INSUREDNAME_F_N', right_on='FINALNAME_1_N')
TEN_INS_3['EFF_DATE_FINAL'] = TEN_INS_3['EFF_DATE_F_y']
TEN_INS_3['EFF_DATE_FINAL'] = TEN_INS_3['EFF_DATE_FINAL'].fillna(TEN_INS_3['EFF_DATE_F_x'])
TEN_INS_3['EFF_DATE_FINAL'] = pd.to_datetime(TEN_INS_3['EFF_DATE_FINAL'], errors='coerce')
TEN_INS_4 = TEN_INS_3[['CLAIMID_x', 'EFF_DATE_FINAL']].drop_duplicates()
TEN_INS_4['RANK1'] = TEN_INS_4.sort_values(['EFF_DATE_FINAL'], ascending=[1]).groupby(['CLAIMID_x']).cumcount() + 1
TEN_INS_5 = TEN_INS_4[TEN_INS_4['RANK1'] == 1]
CLM_REP = AUTO_FINAL_DATA19[['CLAIMID_x', 'REPORTEDDATE']].drop_duplicates()
CLM_REP['REPORTEDDATE'] = pd.to_datetime(CLM_REP['REPORTEDDATE'])
TENURE_1 = pd.merge(TEN_INS_5, CLM_REP, how='left', on='CLAIMID_x')
TENURE_1['LENGTH'] = TENURE_1['REPORTEDDATE'].sub(TENURE_1['EFF_DATE_FINAL'], axis=0)
TENURE_1['LENGTH'] = TENURE_1['LENGTH'] / np.timedelta64(1, 'D')
TENURE_1['LENGTH'] = TENURE_1['LENGTH'].fillna(1625)
TENURE_1['LENGTH'] = TENURE_1['LENGTH'].astype(int)
del TENURE_1['RANK1']
TENURE_1.columns = ['CLAIMID_x', 'EFF_DATE_FINAL', 'REPORTEDDATE', 'CLM_TENURE']
AUTO_FINAL_DATA21 = pd.merge(AUTO_FINAL_DATA20, TENURE_1[['CLAIMID_x', 'CLM_TENURE']], how='left', on='CLAIMID_x')
LOSSCAUSE = pd.read_csv('/home/e06315e/Data/FINAL_MASTERDATA/LOSSCAUSE_GROUPING2.csv')
AUTO_FINAL_DATA25 = pd.merge(AUTO_FINAL_DATA21, LOSSCAUSE, how='left', on='LOSSCAUSEID')
AUTO_FINAL_DATA25_1 = pd.merge(AUTO_FINAL_DATA25, AUTO_DATA_EXP_AGG2, how='left', left_on='CLAIMID',
right_on='CLAIMID_x')
coverage_rollup = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'EXPOSUREID', 'INCIDENTID', 'COV_ROLLUP_3']].copy()
cov_list = ['Collision', 'Comprehensive', 'Auto PD', 'Rental Reimbursement', 'UIM/UM', 'Towing and Labor',
            'Inland Marine', 'Building', 'GL PD', 'nan', 'All Other', 'GL BI', 'Contents', 'PIP', 'Auto BI', 'Med Pay',
            'Loss of Use']
coverage_rollup['COV_ROLLUP_3'] = coverage_rollup['COV_ROLLUP_3'].astype(str)
for elem in cov_list:
    coverage_rollup['COV_ROLLUP_3_' + elem] = (coverage_rollup['COV_ROLLUP_3'] == elem).astype(int)
coverage_rollup2 = coverage_rollup.groupby(['CLAIMID_x'], as_index=False).agg({'COV_ROLLUP_3_Collision': np.sum,
'COV_ROLLUP_3_Comprehensive': np.sum,
'COV_ROLLUP_3_Auto PD': np.sum,
'COV_ROLLUP_3_Rental Reimbursement': np.sum,
'COV_ROLLUP_3_UIM/UM': np.sum,
'COV_ROLLUP_3_Towing and Labor': np.sum,
'COV_ROLLUP_3_Inland Marine': np.sum,
'COV_ROLLUP_3_Building': np.sum,
'COV_ROLLUP_3_GL PD': np.sum,
'COV_ROLLUP_3_nan': np.sum,
'COV_ROLLUP_3_All Other': np.sum,
'COV_ROLLUP_3_GL BI': np.sum,
'COV_ROLLUP_3_Contents': np.sum,
'COV_ROLLUP_3_PIP': np.sum,
'COV_ROLLUP_3_Auto BI': np.sum,
'COV_ROLLUP_3_Med Pay': np.sum,
                                                                       'COV_ROLLUP_3_Loss of Use': np.sum})
AUTO_FINAL_DATA25_2 = pd.merge(AUTO_FINAL_DATA25_1, coverage_rollup2, how='left', left_on='CLAIMID',
right_on='CLAIMID_x')
AUTO_FINAL_DATA25_2['LOSSDATE'] = pd.to_datetime(AUTO_FINAL_DATA25_2['LOSSDATE'])
AUTO_FINAL_DATA25_2['LOSSHOUR'] = AUTO_FINAL_DATA25_2['LOSSDATE'].apply(lambda x: x.hour)
AUTO_FINAL_DATA25_2['LOSS_TIMEOFDAY'] = AUTO_FINAL_DATA25_2['LOSSHOUR'].apply(
lambda x: '0 to 5' if (x >= 0 and x <= 5) else '6 to 11' if (x >= 6 and x <= 11) else '12 to 17' if (
x >= 12 and x <= 17) else '18 to 23')
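# The nested ternary above buckets the loss hour into four six-hour windows;
# pd.cut expresses the same binning declaratively. A hedged sketch: right-closed
# bins (-1,5], (5,11], (11,17], (17,23] reproduce the four labels exactly.
_tod_demo = pd.cut(AUTO_FINAL_DATA25_2['LOSSHOUR'],
                   bins=[-1, 5, 11, 17, 23],
                   labels=['0 to 5', '6 to 11', '12 to 17', '18 to 23'])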
PREV_POL_CLAIMS2 = CC_CLAIM_EXPOSURE_POLICY_CONTACT.groupby("POLICYNUMBER_N", as_index=False).agg(
{"CLAIMID_N": pd.Series.nunique})
PREV_POL_CLAIMS2.columns = ['POLICYNUMBER', 'PREV_POL_CLAIMS']
AUTO_FINAL_DATA25_3 = pd.merge(AUTO_FINAL_DATA25_2, PREV_POL_CLAIMS2, how='left', on='POLICYNUMBER')
AUTO_FINAL_DATA25_3['DAYS_OPEN'] = (pd.Timestamp(datetime.date.today()) - AUTO_FINAL_DATA25_3.REPORTEDDATE).astype(
    'timedelta64[D]')
AUTO_FINAL_DATA25_3['PAYMENT_PERDAY'] = AUTO_FINAL_DATA25_3['PAYMENT'] / AUTO_FINAL_DATA25_3['DAYS_OPEN']
lossparty = CLAIM_EXP_INC_FINAL[['CLAIMID_x', 'EXPOSUREID', 'INCIDENTID', 'LOSSPARTYID']].copy()
lossparty['LOSSPARTYID'] = lossparty['LOSSPARTYID'].astype(str)
lossparty_list = ['10001', '10002', 'nan']
for elem in lossparty_list:
    lossparty['LOSSPARTYID_' + elem] = (lossparty['LOSSPARTYID'] == elem).astype(int)
lossparty2 = lossparty.groupby(['CLAIMID_x'], as_index=False).agg({'LOSSPARTYID_10001': np.sum,
'LOSSPARTYID_10002': np.sum,
'LOSSPARTYID_nan': np.sum})
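# The manual one-hot loops above (here and for cov_list) are what pd.get_dummies
# does natively -- a hedged sketch producing the same per-claim indicator sums.
# Note the dummy column names take whatever string form astype(str) produced
# (e.g. '10001' vs '10001.0' if LOSSPARTYID arrived as a float column).
_lp_dummies = pd.get_dummies(lossparty['LOSSPARTYID'], prefix='LOSSPARTYID', prefix_sep='_')
_lossparty_demo = (pd.concat([lossparty[['CLAIMID_x']], _lp_dummies], axis=1)
                   .groupby('CLAIMID_x', as_index=False).sum())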
AUTO_FINAL_DATA25_4 = pd.merge(AUTO_FINAL_DATA25_3, lossparty2, how='left', left_on='CLAIMID', right_on='CLAIMID_x')
injured_cnt = CLAIM_EXP_INC_FINAL[
    ['CLAIMID_x', 'LOSSPARTYID', 'CLAIMANTDENORMID', 'INSUREDDENORMID', 'CLAIMANTDENORMID_DM', 'BI_x']].copy()
injured_cnt['FP_INJURED_CNT'] = injured_cnt.apply(
lambda x: x.CLAIMANTDENORMID_DM if x.LOSSPARTYID == 10001.0 and x.BI_x == 1 else 0, axis=1)
injured_cnt['TP_INJURED_CNT'] = injured_cnt.apply(
lambda x: x.CLAIMANTDENORMID_DM if x.LOSSPARTYID == 10002.0 and x.BI_x == 1 else 0, axis=1)
injured_cnt = injured_cnt.groupby(['CLAIMID_x'], as_index=False).agg(
{'FP_INJURED_CNT': pd.Series.nunique, 'TP_INJURED_CNT': pd.Series.nunique})
AUTO_FINAL_DATA25_5 = pd.merge(AUTO_FINAL_DATA25_4, injured_cnt, how='left', left_on='CLAIMID', right_on='CLAIMID_x')
UNST_DATA_PROD['FIRE_UNST_F2'] = UNST_DATA_PROD['FIRE_UNST_F'].apply(lambda X: 1 if X >= 1 else 0)
UNST_DATA_PROD['THEFT_UNST_F2'] = UNST_DATA_PROD['THEFT_UNST_F'].apply(lambda X: 1 if X >= 1 else 0)
UNST_DATA_PROD['TOTALLOSS_UNST_F2'] = UNST_DATA_PROD['TOTALLOSS_UNST_F'].apply(lambda X: 1 if X >= 1 else 0)
UNST_DATA_PROD['ATTORNEY_UNST_F2'] = UNST_DATA_PROD['ATTORNEY_UNST_F'].apply(lambda X: 1 if X >= 1 else 0)
UNST_DATA_PROD['CHIRO_UNST_F2'] = UNST_DATA_PROD['CHIRO_UNST_F'].apply(lambda X: 1 if X >= 1 else 0)
UNST_DATA_PROD_FLAG = UNST_DATA_PROD[
['CLAIMID', 'FIRE_UNST_F2', 'THEFT_UNST_F2', 'TOTALLOSS_UNST_F2', 'ATTORNEY_UNST_F2', 'CHIRO_UNST_F2']]
AUTO_FINAL_DATA25_6 = pd.merge(AUTO_FINAL_DATA25_5, UNST_DATA_PROD_FLAG, how='left', left_on='CLAIMID_x',
right_on='CLAIMID')
AUTO_FINAL_DATA25_6.to_csv('AUTO_FINAL_DATA25_6.csv', index=False)
|
{"hexsha": "278293950a2c2b5650a02cda4984924294f26e22", "size": 76674, "ext": "py", "lang": "Python", "max_stars_repo_path": "reference/auto_refresh_final-test-20170818T100421Z-001/auto_refresh_final-test/original_code.py", "max_stars_repo_name": "aniruddhasanyal/auto_wrk", "max_stars_repo_head_hexsha": "970156f7cb5205a3595bd757981e267c9879d08e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reference/auto_refresh_final-test-20170818T100421Z-001/auto_refresh_final-test/original_code.py", "max_issues_repo_name": "aniruddhasanyal/auto_wrk", "max_issues_repo_head_hexsha": "970156f7cb5205a3595bd757981e267c9879d08e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reference/auto_refresh_final-test-20170818T100421Z-001/auto_refresh_final-test/original_code.py", "max_forks_repo_name": "aniruddhasanyal/auto_wrk", "max_forks_repo_head_hexsha": "970156f7cb5205a3595bd757981e267c9879d08e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.4946695096, "max_line_length": 177, "alphanum_fraction": 0.7356600673, "include": true, "reason": "import numpy", "num_tokens": 21858}
|
from multiprocessing import Process
import netCDF4 as nc
import numpy as np
import pandas as pd
import sys
import json
import time
import matplotlib.pyplot as plt
from urllib.request import urlopen
from math import sin, cos, sqrt, atan2, radians
# CLASS FOR ORGANIZING THE TOP CITIES
class City:
def __init__(self, lat, lon, name):
self.lat = lat
self.lon = lon
self.name = name
# CALCULATING THE DISTANCE GIVEN 2 LAT LONG POINTS
def calcDistance(lat1, lon1, lat2, lon2):
R = 6373.0
latitude1 = radians(lat1)
longitude1 = radians(lon1)
latitude2 = radians(lat2)
longitude2 = radians(lon2)
dlon = longitude2 - longitude1
dlat = latitude2 - latitude1
a = sin(dlat / 2)**2 + cos(latitude1) * cos(latitude2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return (R * c)
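# Hedged sanity check: with the commonly cited city-center coordinates below,
# San Francisco -> Los Angeles should come out near 559 km great-circle.
assert abs(calcDistance(37.7749, -122.4194, 34.0522, -118.2437) - 559) < 5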
# ATTACH A CITY TO THE ROW IF POSSIBLE
def processRow(lat, lon, cities):
distance = sys.maxsize
city = ''
for c in cities:
temp = calcDistance(lat, lon, c.lat, c.lon)
        # find the closest city
if (temp < distance):
city = c.name
distance = temp
# IF THE POINT IS WITHIN 40KM OF CITY
if (distance <= 40):
return city
else:
return ''
# FIND ALL ROWS CLOSE TO A MAJOR CITY IN THE STATE
def processState(s, states_dict):
curr_state = ''
s['city'] = ''
# FOR EACH ROW FIND A CITY IF POSSIBLE
for index,row in s.iterrows():
curr_state = row['StatProv']
lat = row['LATITUDE']
lon = row['LONGITUDE']
cities = states_dict[row['StatProv']]
city = processRow(lat, lon, cities)
        s.at[index, 'city'] = city
    # DELETING ROWS WITH NO CITY
    s = s[s['city'] != '']
if (curr_state != ''):
file_name = curr_state + '.csv'
# create the csv for state with the city added
s.to_csv(file_name, sep=',')
# MAIN METHOD
if __name__ == '__main__':
states_dict = {}
statesConv = {
'AL':'"AL"',
'AK':'"AK"',
'AZ':'"AZ"',
'AR':'"AR"',
'CA':'"CA"',
'CO':'"CO"',
'CT':'"CT"',
'DE':'"DE"',
'FL':'"FL"',
'GA':'"GA"',
'HI':'"HI"',
'ID':'"ID"',
'IL':'"IL"',
'IN':'"IN"',
'IA':'"IA"',
'KS':'"KS"',
'KY':'"KY"',
'LA':'"LA"',
'ME':'"ME"',
'MD':'"MD"',
'MA':'"MA"',
'MI':'"MI"',
'MN':'"MN"',
'MS':'"MS"',
'MO':'"MO"',
'MT':'"MT"',
'NE':'"NE"',
'NV':'"NV"',
'NH':'"NH"',
'NJ':'"NJ"',
'NM':'"NM"',
'NY':'"NY"',
'NC':'"NC"',
'ND':'"ND"',
'OH':'"OH"',
'OK':'"OK"',
'OR':'"OR"',
'PA':'"PA"',
'RI':'"RI"',
'SC':'"SC"',
'SD':'"SD"',
'TN':'"TN"',
'TX':'"TX"',
'UT':'"UT"',
'VT':'"VT"',
'VA':'"VA"',
'WA':'"WA"',
'WV':'"WV"',
'WI':'"WI"',
'WY':'"WY"',
'DC':'"DC"'
}
major_cities = pd.read_csv('topCities.csv')
    # Change name of file to the PFW subset being processed
    birdfeeder_df = pd.read_csv('PFW2011-12_Subset.csv')
done = []
state_dfs = []
# ORGANIZING BIRDFEEDER DATA BY STATE
for index,row in major_cities.iterrows():
curr_state = row['state_abbrv']
if curr_state not in done:
done.append(curr_state)
state = statesConv[curr_state]
string = 'StatProv==' + state
state_subset = birdfeeder_df.query(string).copy()
state_dfs.append(state_subset)
# UNCOMMENT THE CODE BELOW IF YOU WANT STATE SPECIFIC CSV's of the PFW data
# if not state_subset.empty:
# csv_title = 'PFW2011-12_' + curr_state + '.csv'
# state_subset.to_csv(csv_title, sep=',')
# CREATING A HASHMAP (STATE : CITES LIST)
for index,row in major_cities.iterrows():
        x = City(row['lat'], row['long'], row['City'])
        states_dict.setdefault(row['state_abbrv'], []).append(x)
    # MAIN LOOP FOR GOING THROUGH ALL OF THE STATES
    # Parallel processes: one worker per state
    processes = []
    for s in state_dfs:
        p = Process(target=processState, args=(s, states_dict))
        p.start()
        processes.append(p)
    # Wait for every state worker to finish before exiting
    for p in processes:
        p.join()
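    # Alternative hedged sketch: a process pool instead of one Process per state
    # (same processState / states_dict as above; Pool sizes itself to the CPU
    # count). Shown commented out so it does not re-run the states:
    # from multiprocessing import Pool
    # from functools import partial
    # with Pool() as pool:
    #     pool.map(partial(processState, states_dict=states_dict), state_dfs)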
|
{"hexsha": "e12d043abb87bcd3ef2be1903ee9f817dc5aa1a6", "size": 4253, "ext": "py", "lang": "Python", "max_stars_repo_path": "appendix/adding_cities.py", "max_stars_repo_name": "siddkahal/MastersThesis_Latex_Doc", "max_stars_repo_head_hexsha": "6eff0b7138abd77ab46c7f6149a6bdf1054f7c40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "appendix/adding_cities.py", "max_issues_repo_name": "siddkahal/MastersThesis_Latex_Doc", "max_issues_repo_head_hexsha": "6eff0b7138abd77ab46c7f6149a6bdf1054f7c40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appendix/adding_cities.py", "max_forks_repo_name": "siddkahal/MastersThesis_Latex_Doc", "max_forks_repo_head_hexsha": "6eff0b7138abd77ab46c7f6149a6bdf1054f7c40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.4200913242, "max_line_length": 84, "alphanum_fraction": 0.5412649894, "include": true, "reason": "import numpy", "num_tokens": 1324}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi_label_head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.head import head_utils as test_lib
from tensorflow_estimator.python.estimator.head import multi_label_head as head_lib
def _sigmoid_cross_entropy(labels, logits):
"""Returns sigmoid cross entropy averaged over classes."""
sigmoid_logits = 1 / (1 + np.exp(-logits))
unreduced_result = (
-labels * np.log(sigmoid_logits)
-(1 - labels) * np.log(1 - sigmoid_logits))
# Mean over classes
return np.mean(unreduced_result, axis=-1, keepdims=True)
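# Hedged sanity check of the helper: with logits 0, sigmoid(0) = 0.5, so the
# per-class loss is -log(0.5) = ln 2 (~0.6931) for either label value.
assert np.isclose(
    _sigmoid_cross_entropy(labels=np.array([[1.]]), logits=np.array([[0.]])),
    np.log(2.))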
class MultiLabelHead(test.TestCase):
def test_n_classes_is_none(self):
with self.assertRaisesRegexp(
ValueError,
r'n_classes must be > 1 for multi-label classification\. Given: None'):
head_lib.MultiLabelHead(n_classes=None)
def test_n_classes_is_1(self):
with self.assertRaisesRegexp(
ValueError,
r'n_classes must be > 1 for multi-label classification\. Given: 1'):
head_lib.MultiLabelHead(n_classes=1)
def test_threshold_too_small(self):
with self.assertRaisesRegexp(
ValueError,
r'thresholds must be in \(0, 1\) range\. Given: 0\.0'):
head_lib.MultiLabelHead(n_classes=2, thresholds=[0., 0.5])
def test_threshold_too_large(self):
with self.assertRaisesRegexp(
ValueError,
r'thresholds must be in \(0, 1\) range\. Given: 1\.0'):
head_lib.MultiLabelHead(n_classes=2, thresholds=[0.5, 1.0])
def test_label_vocabulary_dict(self):
with self.assertRaisesRegexp(
ValueError,
r'label_vocabulary must be a list or tuple\. '
r'Given type: <(type|class) \'dict\'>'):
head_lib.MultiLabelHead(n_classes=2, label_vocabulary={'foo': 'bar'})
def test_label_vocabulary_wrong_size(self):
with self.assertRaisesRegexp(
ValueError,
r'Length of label_vocabulary must be n_classes \(3\). Given: 2'):
head_lib.MultiLabelHead(n_classes=3, label_vocabulary=['foo', 'bar'])
def test_invalid_loss_reduction(self):
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):
head_lib.MultiLabelHead(
n_classes=3, loss_reduction='invalid_loss_reduction')
with self.assertRaisesRegexp(
ValueError, r'Invalid loss_reduction: none'):
head_lib.MultiLabelHead(
n_classes=3, loss_reduction=losses.Reduction.NONE)
def test_loss_fn_arg_labels_missing(self):
def _loss_fn(logits):
del logits # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: labels\. '
r'Given arguments: \(\'logits\',\)'):
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_logits_missing(self):
def _loss_fn(labels):
del labels # unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn must contain argument: logits\. '
r'Given arguments: \(\'labels\',\)'):
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_features_ok(self):
def _loss_fn(labels, logits, features):
del labels, logits, features # Unused
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_loss_fn_arg_invalid(self):
def _loss_fn(labels, logits, name=None):
del labels, logits, name # Unused
with self.assertRaisesRegexp(
ValueError,
r'loss_fn has unexpected args: \[\'name\'\]'):
head_lib.MultiLabelHead(n_classes=3, loss_fn=_loss_fn)
def test_classes_for_class_based_metrics_invalid(self):
with self.assertRaisesRegexp(
ValueError,
r'All classes_for_class_based_metrics must be in range \[0, 2\]\. '
r'Given: -1'):
head_lib.MultiLabelHead(
n_classes=3, classes_for_class_based_metrics=[2, -1])
def test_classes_for_class_based_metrics_string_invalid(self):
with self.assertRaisesRegexp(
ValueError, r'\'z\' is not in list'):
head_lib.MultiLabelHead(
n_classes=3, label_vocabulary=['a', 'b', 'c'],
classes_for_class_based_metrics=['c', 'z'])
def test_predict(self):
n_classes = 4
head = head_lib.MultiLabelHead(n_classes)
self.assertEqual(n_classes, head.logits_dimension)
logits = np.array(
[[0., 1., 2., -1.], [-1., -2., -3., 1.]], dtype=np.float32)
expected_probabilities = nn.sigmoid(logits)
expected_export_classes = [[b'0', b'1', b'2', b'3']] * 2
keys = prediction_keys.PredictionKeys
preds = head.predictions(logits, [keys.LOGITS, keys.PROBABILITIES])
self.assertAllClose(logits, self.evaluate(preds[keys.LOGITS]))
self.assertAllClose(expected_probabilities,
self.evaluate(preds[keys.PROBABILITIES]))
if context.executing_eagerly():
return
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(test_lib._DEFAULT_SERVING_KEY, 'predict', 'classification'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
self.assertAllClose(
expected_probabilities,
sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].scores))
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].classes))
def test_weight_should_not_impact_prediction(self):
n_classes = 4
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
self.assertEqual(n_classes, head.logits_dimension)
logits = np.array(
[[0., 1., 2., -1.], [-1., -2., -3., 1.]], dtype=np.float32)
expected_probabilities = nn.sigmoid(logits)
weights_2x1 = [[1.], [2.]]
features = {
'x': np.array(((42,),), dtype=np.int32),
'example_weights': weights_2x1
}
keys = prediction_keys.PredictionKeys
preds = head.predictions(logits, [keys.LOGITS, keys.PROBABILITIES])
self.assertAllClose(logits, self.evaluate(preds[keys.LOGITS]))
self.assertAllClose(expected_probabilities,
self.evaluate(preds[keys.PROBABILITIES]))
if context.executing_eagerly():
return
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
# Assert predictions and export_outputs.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits,
predictions[prediction_keys.PredictionKeys.LOGITS])
self.assertAllClose(
expected_probabilities,
predictions[prediction_keys.PredictionKeys.PROBABILITIES])
def test_eval_create_loss(self):
"""Tests head.loss for eval mode."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = (labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))) / 2
expected_training_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
actual_training_loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.EVAL)
self.assertAllClose(expected_training_loss,
self.evaluate(actual_training_loss))
def test_eval_create_loss_large_logits(self):
"""Tests head.loss for eval mode and large logits."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# For large logits, this is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits
expected_training_loss = 0.5 * np.sum(
np.array([[(10. + 10.) / 2.], [(15. + 0.) / 2.]], dtype=np.float32))
actual_training_loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.EVAL)
self.assertAllClose(expected_training_loss,
self.evaluate(actual_training_loss), atol=1e-4)
def test_eval_create_loss_labels_wrong_shape(self):
"""Tests head.loss for eval mode when labels has the wrong shape."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
labels_2x1 = np.array([[1], [1]], dtype=np.int64)
labels_2 = np.array([1, 1], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
if context.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'Expected labels dimension=2'):
head.loss(logits=logits, labels=labels_2x1, features=features,
mode=model_fn.ModeKeys.EVAL)
with self.assertRaisesRegexp(ValueError, 'Expected labels dimension=2'):
head.loss(logits=logits, labels=labels_2, features=features,
mode=model_fn.ModeKeys.EVAL)
else:
labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
actual_training_loss = head.loss(
logits=logits, labels=labels_placeholder, features=features,
mode=model_fn.ModeKeys.EVAL)
with self.cached_session():
test_lib._initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[expected_labels_shape: \] \[2 2\] \[labels_shape: \] \[2 1\]'):
actual_training_loss.eval({
labels_placeholder: labels_2x1
})
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'labels shape must be \[D0, D1, ... DN, 2\]\..*'
r'\[Received shape: \] \[2\]'):
actual_training_loss.eval({
labels_placeholder: labels_2
})
def test_eval_create_loss_loss_fn(self):
"""Tests head.loss for eval mode and custom loss_fn."""
loss = np.array([[1.], [2.]], dtype=np.float32)
logits_input = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels_input = np.array([[1, 0], [1, 1]], dtype=np.int64)
def _loss_fn(labels, logits):
check_labels = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(labels, labels_input)),
data=[labels])
check_logits = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(logits, logits_input)),
data=[logits])
with ops.control_dependencies([check_labels, check_logits]):
return constant_op.constant(loss)
head = head_lib.MultiLabelHead(n_classes=2, loss_fn=_loss_fn)
actual_training_loss = head.loss(
logits=logits_input,
labels=labels_input,
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL)
self.assertAllClose(np.sum(loss) / 2., self.evaluate(actual_training_loss))
def test_eval_create_loss_loss_fn_wrong_shape(self):
"""Tests custom loss_fn that returns Tensor of unexpected shape."""
loss = np.array([1., 2.], dtype=np.float32)
def _loss_fn(labels, logits):
del labels, logits # Unused
return constant_op.constant(loss)
head = head_lib.MultiLabelHead(n_classes=2, loss_fn=_loss_fn)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
if context.executing_eagerly():
with self.assertRaisesRegexp(
ValueError,
'loss_shape'):
head.loss(logits=logits, labels=labels, features=features,
mode=model_fn.ModeKeys.EVAL)
else:
actual_training_loss = head.loss(
logits=logits, labels=labels, features=features,
mode=model_fn.ModeKeys.EVAL)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[loss_fn must return Tensor of shape \[D0, D1, ... DN, 1\]\. \] '
r'\[logits_shape: \] \[2 2\] \[loss_shape: \] \[2\]'):
self.evaluate(actual_training_loss)
def test_eval_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib.MultiLabelHead(n_classes=2)
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.loss(
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=None,
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL)
def _test_eval(
self, head, logits, labels, expected_loss, expected_metrics,
features=None, regularization_losses=None):
tol = 1e-3
if context.executing_eagerly():
loss = head.loss(
logits, labels, features=features or {}, mode=model_fn.ModeKeys.EVAL,
regularization_losses=regularization_losses)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
eval_metrics = head.metrics(regularization_losses=regularization_losses)
updated_metrics = head.update_metrics(
eval_metrics, features or {}, logits, labels,
regularization_losses=regularization_losses)
self.assertItemsEqual(expected_metrics.keys(), updated_metrics.keys())
self.assertAllClose(
expected_metrics,
{k: updated_metrics[k].result() for k in updated_metrics},
rtol=tol,
atol=tol)
return
spec = head.create_estimator_spec(
features=features or {},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels,
regularization_losses=regularization_losses)
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
test_lib._assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, _ = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of value ops (in `metrics`).
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol,
atol=tol)
def test_eval(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_sparse_labels(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = sparse_tensor.SparseTensor(
values=[0, 0, 1],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_regularization_losses(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
# unregularized_loss = sum(
# labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))) / batch_size
expected_unregularized_loss = np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits)) / 2.
expected_regularized_loss = (
expected_unregularized_loss + expected_regularization_loss)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_unregularized_loss,
keys.LOSS_REGULARIZATION: expected_regularization_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_regularized_loss,
expected_metrics=expected_metrics,
regularization_losses=regularization_losses)
def test_eval_with_label_vocabulary(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes, label_vocabulary=['class0', 'class1'])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = sparse_tensor.SparseTensor(
values=['class0', 'class0', 'class1'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_label_vocabulary_with_multi_hot_input(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes, label_vocabulary=['class0', 'class1'])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
}
self._test_eval(
head=head,
logits=logits,
labels=labels_multi_hot,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_thresholds(self):
n_classes = 2
thresholds = [0.25, 0.5, 0.75]
head = head_lib.MultiLabelHead(n_classes, thresholds=thresholds)
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
keys.ACCURACY_AT_THRESHOLD % thresholds[0]: 2. / 4.,
keys.PRECISION_AT_THRESHOLD % thresholds[0]: 2. / 3.,
keys.RECALL_AT_THRESHOLD % thresholds[0]: 2. / 3.,
keys.ACCURACY_AT_THRESHOLD % thresholds[1]: 1. / 4.,
keys.PRECISION_AT_THRESHOLD % thresholds[1]: 1. / 2.,
keys.RECALL_AT_THRESHOLD % thresholds[1]: 1. / 3.,
keys.ACCURACY_AT_THRESHOLD % thresholds[2]: 2. / 4.,
keys.PRECISION_AT_THRESHOLD % thresholds[2]: 1. / 1.,
keys.RECALL_AT_THRESHOLD % thresholds[2]: 1. / 3.,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_classes_for_class_based_metrics(self):
head = head_lib.MultiLabelHead(
n_classes=2, classes_for_class_based_metrics=[0, 1])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
keys.PROBABILITY_MEAN_AT_CLASS % 0:
math_ops.reduce_sum(nn.sigmoid(logits[:, 0])) / 2.,
keys.AUC_AT_CLASS % 0: 0.,
keys.AUC_PR_AT_CLASS % 0: 1.,
keys.PROBABILITY_MEAN_AT_CLASS % 1:
math_ops.reduce_sum(nn.sigmoid(logits[:, 1])) / 2.,
keys.AUC_AT_CLASS % 1: 1.,
keys.AUC_PR_AT_CLASS % 1: 1.,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_classes_for_class_based_metrics_string(self):
head = head_lib.MultiLabelHead(
n_classes=2, label_vocabulary=['a', 'b'],
classes_for_class_based_metrics=['a', 'b'])
logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
labels = sparse_tensor.SparseTensor(
values=['a', 'a', 'b'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
labels_onehot = np.array([[1, 0], [1, 1]], dtype=np.int64)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# Sum over examples, divide by batch_size.
expected_loss = 0.5 * np.sum(
_sigmoid_cross_entropy(labels=labels_onehot, logits=logits))
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over examples.
keys.LOSS_MEAN: expected_loss,
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7689,
keys.PROBABILITY_MEAN_AT_NAME % 'a':
math_ops.reduce_sum(nn.sigmoid(logits[:, 0])) / 2.,
keys.AUC_AT_NAME % 'a': 0.,
keys.AUC_PR_AT_NAME % 'a': 1.,
keys.PROBABILITY_MEAN_AT_NAME % 'b':
math_ops.reduce_sum(nn.sigmoid(logits[:, 1])) / 2.,
keys.AUC_AT_NAME % 'b': 1.,
keys.AUC_PR_AT_NAME % 'b': 1.,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
def test_eval_with_weights(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {
'x': np.array([[41], [42]], dtype=np.int32),
'example_weights': np.array([[1.], [2.]], dtype=np.float32),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, weighted sum over examples, divide by batch_size.
# loss = (1 * (10 + 10) / 2 + 2 * (15 + 0) / 2) / 2
expected_loss = 12.5
keys = metric_keys.MetricKeys
expected_metrics = {
# Average loss over weighted examples (denominator is sum(weights)).
keys.LOSS_MEAN: expected_loss * (2. / 3.),
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.2000,
keys.AUC_PR: 0.7280,
}
self._test_eval(
head=head,
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics,
features=features)
def test_train_create_loss_large_logits(self):
"""Tests head.create_loss for train mode and large logits."""
n_classes = 2
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
features = {
'x': np.array(((42,),), dtype=np.int32),
'example_weights': weights
}
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# For large logits, this is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits
# expected_unreduced_loss = [[(10. + 10.) / 2.], [(15. + 0.) / 2.]]
# expected_weights = [[1.], [2.]]
expected_training_loss = (1. * (10. + 10.) / 2. + 2. * (15. + 0.) / 2.) / 2.
training_loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.TRAIN)
self.assertAllClose(
expected_training_loss, self.evaluate(training_loss), atol=1e-4)
def test_train_create_loss_loss_reduction(self):
"""Tests head.create_loss with loss_reduction."""
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes, weight_column='example_weights',
loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
# loss = labels * -log(sigmoid(logits)) +
# (1 - labels) * -log(1 - sigmoid(logits))
# For large logits, this is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits
# expected_unreduced_loss = [[(10. + 10.) / 2.], [(15. + 0.) / 2.]]
# expected_weights = [[1.], [2.]]
expected_training_loss = (1. * (10. + 10.) / 2. + 2. * (15. + 0.) / 2.) / 2.
training_loss = head.loss(
logits=logits,
labels=labels,
features={
'x': np.array(((42,),), dtype=np.int32),
'example_weights': weights
},
mode=model_fn.ModeKeys.TRAIN)
self.assertAllClose(
expected_training_loss, self.evaluate(training_loss), atol=1e-4)
def test_train_labels_none(self):
"""Tests that error is raised when labels is None."""
head = head_lib.MultiLabelHead(n_classes=2)
with self.assertRaisesRegexp(
ValueError, r'You must provide a labels Tensor\. Given: None\.'):
head.loss(
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=None,
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN)
def test_train_invalid_indicator_labels(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# The value 2 is outside the allowed range.
labels = np.array([[2, 0], [1, 1]], dtype=np.int64)
if context.executing_eagerly():
with self.assertRaisesRegexp(
ValueError,
r'labels must be an integer indicator Tensor with values in '
r'\[0, 1\]'):
head.loss(
logits=logits,
labels=labels,
features={},
mode=model_fn.ModeKeys.TRAIN)
return
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'labels must be an integer indicator Tensor with values in '
r'\[0, 1\]'):
sess.run(spec.loss)
def test_train_invalid_sparse_labels(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# The value 2 is outside the allowed range.
labels = sparse_tensor.SparseTensor(
values=[2, 0, 1],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
if context.executing_eagerly():
with self.assertRaisesRegexp(
ValueError,
r'labels must be an integer SparseTensor with values in \[0, 2\)'):
head.loss(
logits=logits,
labels=labels,
features={},
mode=model_fn.ModeKeys.TRAIN)
return
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'labels must be an integer SparseTensor with values in \[0, 2\)'):
sess.run(spec.loss)
def _test_train(self, head, logits, labels, expected_loss):
tol = 1e-3
features = {'x': np.array(((42,),), dtype=np.int32)}
if context.executing_eagerly():
loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
test_lib._assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
test_lib._assert_simple_summaries(
self, {metric_keys.MetricKeys.LOSS: expected_loss}, summary_str, tol)
def test_train(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
self._test_train(
head=head, logits=logits, labels=labels, expected_loss=expected_loss)
def test_train_sparse_labels(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = sparse_tensor.SparseTensor(
values=[0, 0, 1],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
self._test_train(
head=head, logits=logits, labels=labels, expected_loss=expected_loss)
def test_train_with_label_vocabulary(self):
head = head_lib.MultiLabelHead(
n_classes=2, label_vocabulary=['class0', 'class1'])
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
# Equivalent to multi_hot = [[1, 0], [1, 1]]
labels = sparse_tensor.SparseTensor(
values=['class0', 'class0', 'class1'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
self._test_train(
head=head, logits=logits, labels=labels, expected_loss=expected_loss)
def test_train_with_optimizer(self):
head = head_lib.MultiLabelHead(n_classes=2)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {'x': np.array(((42,),), dtype=np.int32)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, sum over examples, divide by batch_size.
# loss = ((10 + 10) / 2 + (15 + 0) / 2 ) / 2
expected_loss = 8.75
tol = 1e-3
loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)
if context.executing_eagerly():
return
expected_train_result = 'my_train_op'
class _Optimizer(object):
def minimize(self, loss, global_step):
del global_step
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
optimizer=_Optimizer())
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
def test_train_with_regularization_losses(self):
head = head_lib.MultiLabelHead(
n_classes=2, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
regularization_losses = [1.5, 0.5]
features = {'x': np.array(((42,),), dtype=np.int32)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes and over batch and add regularization loss.
expected_loss = 35. / 4. + 2.
expected_summaries = {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS_REGULARIZATION: 2.,
}
tol = 1e-3
loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.TRAIN,
regularization_losses=regularization_losses)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)
if context.executing_eagerly():
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn,
regularization_losses=regularization_losses)
# Assert predictions, loss, train_op, and summaries.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
test_lib._assert_simple_summaries(
self, expected_summaries, summary_str, tol)
def test_train_with_weights(self):
n_classes = 2
head = head_lib.MultiLabelHead(n_classes, weight_column='example_weights')
logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
features = {
'x': np.array([[41], [42]], dtype=np.int32),
'example_weights': np.array([[1.], [2.]], dtype=np.float32),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# Average over classes, weighted sum over examples, divide by batch_size.
# loss = (1 * (10 + 10) / 2 + 2 * (15 + 0) / 2) / 2
expected_loss = 12.5
tol = 1e-3
loss = head.loss(
logits=logits,
labels=labels,
features=features,
mode=model_fn.ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), rtol=tol, atol=tol)
if context.executing_eagerly():
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = head.create_estimator_spec(
features=features,
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
test_lib._assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
test_lib._assert_simple_summaries(
self, {metric_keys.MetricKeys.LOSS: expected_loss,}, summary_str, tol)
def test_multi_dim_weighted_train_create_loss(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]],
[[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# unreduced_loss =
# [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
# = [[20/3, 10/3], [4, 8]]
# expected_unreduced_loss = [[[20./3.], [10./3.]], [[4.], [8.]]]
# weights are reshaped to [2, 2, 1] to match logits.
# expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]
# loss = (1*20/3 + 1.5*10/3 + 2*4 + 2.5*8) / 4 = 9.9167
expected_training_loss = 9.9167
training_loss = head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN)
atol = 1.e-3
self.assertAllClose(
expected_training_loss, self.evaluate(training_loss), atol=atol)
def test_multi_dim_weighted_train(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]],
[[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
# = [[20/3, 10/3], [4, 8]]
# loss = (1*20/3 + 1.5*10/3 + 2*4 + 2.5*8) / 4 = 9.9167
expected_loss = 9.9167
atol = 1.e-3
loss = head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN)
self.assertIsNotNone(loss)
self.assertAllClose(expected_loss, self.evaluate(loss), atol=atol)
if context.executing_eagerly():
return
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
with self.cached_session() as sess:
test_lib._initialize_variables(self, monitored_session.Scaffold())
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, atol=atol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
def test_multi_dim_weights_wrong_inner_dim(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 1]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]],
[[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
weights = np.array([[1.], [2.]], dtype=np.float32)
if context.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'weights shape'):
head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN)
return
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
with self.cached_session():
test_lib._initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 1\]'):
spec.loss.eval()
def test_multi_dim_weights_wrong_outer_dim(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2, 3]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]],
[[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
weights = np.array([[[1., 1., 1.], [1.5, 1.5, 1.5]],
[[2., 2., 2.], [2.5, 2.5, 2.5]]], dtype=np.float32)
if context.executing_eagerly():
with self.assertRaisesRegexp(ValueError, 'weights shape'):
head.loss(
logits=logits,
labels=labels,
features={'weights': weights},
mode=model_fn.ModeKeys.TRAIN)
return
weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)
def _train_op_fn(loss):
del loss
return control_flow_ops.no_op()
spec = head.create_estimator_spec(
features={'weights': weights_placeholder},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
with self.cached_session():
test_lib._initialize_variables(self, monitored_session.Scaffold())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 2 3\]'):
spec.loss.eval({weights_placeholder: weights})
def test_multi_dim_weighted_eval(self):
"""Logits and labels of shape [2, 2, 3], weights [2, 2]."""
head = head_lib.MultiLabelHead(n_classes=3, weight_column='weights')
logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
[[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
labels = np.array([[[1, 0, 0], [1, 0, 0]],
[[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
# loss = [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
# = [[20/3, 10/3], [4, 8]]
# loss = (1*20/3 + 1.5*10/3 + 2*4 + 2.5*8) / 4 = 9.9167
expected_loss = 9.9167
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS_MEAN: expected_loss * (4. / np.sum(weights)),
# auc and auc_pr cannot be reliably calculated for only 4 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.4977,
keys.AUC_PR: 0.5461,
}
self._test_eval(
head=head,
features={'weights': weights},
logits=logits,
labels=labels,
expected_loss=expected_loss,
expected_metrics=expected_metrics)
@test_util.deprecated_graph_mode_only
class MultiLabelHeadForEstimator(test.TestCase):
"""Tests for create_estimator_spec running in Graph mode only."""
def test_predict_with_label_vocabulary(self):
n_classes = 4
head = head_lib.MultiLabelHead(
n_classes, label_vocabulary=['foo', 'bar', 'foobar', 'barfoo'])
logits = np.array(
[[0., 1., 2., -1.], [-1., -2., -3., 1.]], dtype=np.float32)
expected_export_classes = [[b'foo', b'bar', b'foobar', b'barfoo']] * 2
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
self.assertAllEqual(
expected_export_classes,
sess.run(spec.export_outputs[test_lib._DEFAULT_SERVING_KEY].classes))
def test_train_with_update_ops(self):
with ops.Graph().as_default():
w = variables.Variable(1)
update_op = w.assign_add(1)
t = variables.Variable('')
expected_train_result = b'my_train_op'
def _train_op_fn(loss):
del loss
return t.assign(expected_train_result)
head = head_lib.MultiLabelHead(n_classes=2, update_ops=[update_op])
spec = head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
labels=np.array([[1, 0], [1, 1]], dtype=np.int64),
train_op_fn=_train_op_fn)
with self.cached_session() as sess:
test_lib._initialize_variables(self, spec.scaffold)
sess.run(spec.train_op)
w_value, t_value = sess.run([w, t])
self.assertEqual(2, w_value)
self.assertEqual(expected_train_result, t_value)
def test_lookup_tables_in_graph(self):
n_classes = 2
head = head_lib.MultiLabelHead(
n_classes=n_classes, label_vocabulary=['class0', 'class1'])
feature_columns = [feature_column.numeric_column('x')]
# Create dnn estimator.
est = dnn.DNNEstimator(
head=head,
hidden_units=(2, 2),
feature_columns=feature_columns)
def input_fn():
return (
{'x': np.array(((42,), (43,),), dtype=np.int32)},
np.array([[1, 0], [1, 1]], dtype=np.int64))
# Train.
num_steps = 1
est.train(input_fn, steps=num_steps)
# Eval.
eval_results = est.evaluate(input_fn, steps=num_steps)
self.assertEqual(num_steps, eval_results[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(eval_results))
# Predict.
est.predict(input_fn)
if __name__ == '__main__':
test.main()
|
{"hexsha": "8a1d6fdd27a9d8b75beb4bdb6065d672b7922654", "size": 54953, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_estimator/python/estimator/head/multi_label_head_test.py", "max_stars_repo_name": "ziky90/estimator", "max_stars_repo_head_hexsha": "825c02ce244ce21ec4f01360dfdf90cbf92f6bde", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow_estimator/python/estimator/head/multi_label_head_test.py", "max_issues_repo_name": "ziky90/estimator", "max_issues_repo_head_hexsha": "825c02ce244ce21ec4f01360dfdf90cbf92f6bde", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_estimator/python/estimator/head/multi_label_head_test.py", "max_forks_repo_name": "ziky90/estimator", "max_forks_repo_head_hexsha": "825c02ce244ce21ec4f01360dfdf90cbf92f6bde", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1703216374, "max_line_length": 83, "alphanum_fraction": 0.6319218241, "include": true, "reason": "import numpy", "num_tokens": 14545}
|
[STATEMENT]
lemma MSB_eq0_D: "MSB I = 0 \<Longrightarrow> x < length I \<Longrightarrow> I ! x = {||}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>MSB I = 0; x < length I\<rbrakk> \<Longrightarrow> I ! x = {||}
[PROOF STEP]
unfolding MSB_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(if \<forall>P\<in>set I. P = {||} then 0 else Suc (Max (\<Union> (fset ` set I)))) = 0; x < length I\<rbrakk> \<Longrightarrow> I ! x = {||}
[PROOF STEP]
by (auto split: if_splits)
|
{"llama_tokens": 214, "file": "Formula_Derivatives_WS1S_Prelim", "length": 2}
|
# -*- coding: utf-8 -*-
r'''This module contains a number of classes for modeling neural nets in Theano.
Neural networks are really just a concise, computational way of describing a
mathematical model of some data. Before getting into the models below, we'll
first set up the ideas and notation that are used on this page.
At a high level, a feedforward neural network describes a parametric mapping
.. math::
F_\theta: \mathcal{S} \to \mathcal{T}
between a source space :math:`\mathcal{S}` and a target space
:math:`\mathcal{T}`, using parameters :math:`\theta`. For the MNIST digits, for
example we could think of :math:`\mathcal{S} = \mathbb{R}^{28 \times 28} =
\mathbb{R}^{784}` (i.e., the space of all 28×28 images), and for classifying the
MNIST digits we could think of :math:`\mathcal{T} = \mathbb{R}^{10}`.
This mapping is assumed to be fairly complex. If it were not -- if you could
capture the mapping using a simple expression like :math:`F_a(x) = ax^2` -- then
we would just use the expression directly and not need to deal with an entire
network. So if the mapping is complex, we will do a couple of things to make our
problem tractable. First, we will assume some structure for :math:`F_\theta`.
Second, we will fit our model to some set of data that we have obtained, so that
our parameters :math:`\theta` are tuned to the problem at hand.
Graph structure
---------------
.. image:: _static/feedforward_layers.svg
The mapping :math:`F_\theta` is implemented in neural networks by assuming a
specific, layered form. Computation nodes -- also called units or (sometimes)
neurons -- are arranged in a :math:`k+1`-partite graph, with layer :math:`k`
containing :math:`n_k` nodes. The number of input nodes in the graph is referred
to below as :math:`n_0`.
A **weight matrix** :math:`W^k \in \mathbb{R}^{n_{k-1} \times n_k}` specifies
the strength of the connection between nodes in layer :math:`k` and those in
layer :math:`k-1` -- all other pairs of nodes are typically not connected. Each
layer of nodes also has a **bias vector** that determines the offset of each
node from the origin. Together, the parameters :math:`\theta` of the model are
these :math:`k` weight matrices and :math:`k` bias vectors (there are no weights
or biases for the input nodes in the graph).
Local computation
-----------------
.. image:: _static/feedforward_neuron.svg
In a standard feedforward network, each node :math:`i` in layer :math:`k`
receives inputs from all nodes in layer :math:`k-1`, then transforms the
weighted sum of these inputs:
.. math::
z_i^k = \sigma\left( b_i^k + \sum_{j=1}^{n_{k-1}} w^k_{ji} z_j^{k-1} \right)
where :math:`\sigma: \mathbb{R} \to \mathbb{R}` is an "activation function."
Although many functions will work, typical choices of the activation function
are:
:linear: :math:`\sigma(z) = z`
:rectified linear: :math:`\sigma(z) = \max(0, z)`
:logistic sigmoid: :math:`\sigma(z) = (1 + e^{-z})^{-1}`.
Most activation functions are chosen to incorporate a nonlinearity, since a
model built from only linear layers, no matter how many, cannot capture
nonlinear phenomena (a composition of linear maps is itself linear). Nodes
in the input layer are assumed to have linear activation (i.e., the input nodes
simply represent the state of the input data), and nodes in the output layer
might have linear or nonlinear activations depending on the modeling task.
Usually all hidden nodes in a network share the same activation function, but
this is not required.
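As a concrete illustration (a minimal sketch, not part of this module's API),
the activation functions above, and the per-layer computation they plug into,
can be written directly with numpy::
    import numpy as np
    def linear(z):
        return z
    def relu(z):  # rectified linear
        return np.maximum(0., z)
    def logistic(z):  # logistic sigmoid
        return 1. / (1. + np.exp(-z))
    def layer_output(z_prev, W, b, sigma=logistic):
        # z_prev: (n_{k-1},), W: (n_{k-1}, n_k), b: (n_k,)
        return sigma(b + z_prev.dot(W))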
'''
import climate
import gzip
import numpy as np
import pickle
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
logging = climate.get_logger(__name__)
from . import layers
FLOAT = theano.config.floatX
def load(filename, **kwargs):
'''Load an entire network from a pickle file on disk.
If this function is called without extra keyword arguments, a new network
will be created using the keyword arguments that were originally used to
create the pickled network. If this helper function is called with extra
keyword arguments, they will override arguments that were originally used to
create the pickled network. This override allows one to, for example, load a
network that was created with one activation function, and apply a different
activation function to the existing weights. Some options will cause errors
if overridden, such as `layers` or `tied_weights`, since they change the
number of parameters in the model.
Parameters
----------
filename : str
Load the keyword arguments and parameters of a network from a pickle
file at the named path. If this name ends in ".gz" then the input will
automatically be gunzipped; otherwise the input will be treated as a
"raw" pickle.
Returns
-------
network : :class:`Network`
A newly-constructed network, with topology and parameters loaded from
the given pickle file.
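    Example (illustrative; the file name and the activation override are
    hypothetical)::
        net = load('model.pkl.gz', hidden_activation='relu')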
'''
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'rb')
pkl = pickle.load(handle)
handle.close()
kw = pkl['kwargs']
kw.update(kwargs)
net = pkl['klass'](**kw)
net.load_params(filename)
return net
class Network(object):
'''The network class encapsulates a fully-connected feedforward net.
In addition to defining standard functionality for feedforward nets, there
are also many options for specifying topology and regularization, several of
which must be provided to the constructor at initialization time.
Parameters
----------
layers : sequence of int, tuple, dict, or :class:`Layer <layers.Layer>`
A sequence of values specifying the layer configuration for the network.
For more information, please see :ref:`creating-specifying-layers`.
hidden_activation : str, optional
The name of an activation function to use on hidden network layers by
default. Defaults to 'logistic'.
output_activation : str, optional
The name of an activation function to use on the output layer by
default. Defaults to 'linear'.
rng : theano RandomStreams object, optional
Use a specific Theano random number generator. A new one will be created
if this is None.
input_noise : float, optional
Standard deviation of desired noise to inject into input.
hidden_noise : float, optional
Standard deviation of desired noise to inject into hidden unit
activation output.
input_dropouts : float in [0, 1], optional
Proportion of input units to randomly set to 0.
hidden_dropouts : float in [0, 1], optional
Proportion of hidden unit activations to randomly set to 0.
decode_from : positive int, optional
Any of the hidden layers can be tapped at the output. Just specify a
value greater than 1 to tap the last N hidden layers. The default is 1,
        which decodes from just the last hidden layer.
Attributes
----------
layers : list of :class:`theanets.Layer`
A list of the layers in this network model.
kwargs : dict
A dictionary containing the keyword arguments used to construct the
network.
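    Example (a minimal sketch; the layer sizes are arbitrary)::
        net = Network(layers=(784, 100, 10))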
'''
def __init__(self, **kwargs):
self.layers = []
self.kwargs = kwargs
self.inputs = list(self.setup_vars())
self.setup_layers()
def setup_vars(self):
'''Setup Theano variables required by our network.
The default variable for a network is simply `x`, which represents the
input to the network.
Subclasses may override this method to specify additional variables. For
example, a supervised model might specify an additional variable that
represents the target output for a particular input.
Returns
-------
vars : list of theano variables
A list of the variables that this network requires as inputs.
'''
# x is a proxy for our network's input, and y for its output.
self.x = TT.matrix('x')
return [self.x]
def setup_layers(self):
'''Set up a computation graph for our network.
The default implementation constructs a series of feedforward
layers---called the "encoder" layers---and then calls
:func:`setup_decoder` to construct the decoding apparatus in the
network.
Subclasses may override this method to construct alternative network
topologies.
'''
if 'layers' not in self.kwargs:
return
specs = list(self.encoding_layers)
rng = self.kwargs.get('rng') or RandomStreams()
# setup input layer.
self.layers.append(layers.build('input', specs.pop(0),
rng=rng,
name='in',
dropout=self.kwargs.get('input_dropouts', 0),
noise=self.kwargs.get('input_noise', 0)))
# setup "encoder" layers.
for i, spec in enumerate(specs):
# if spec is a Layer instance, just add it and move on.
if isinstance(spec, layers.Layer):
self.layers.append(spec)
continue
# here we set up some defaults for constructing a new layer.
form = 'feedforward'
kwargs = dict(
nin=self.layers[-1].nout,
rng=rng,
name='hid{}'.format(len(self.layers)),
noise=self.kwargs.get('hidden_noise', 0),
dropout=self.kwargs.get('hidden_dropouts', 0),
batch_size=self.kwargs.get('batch_size', 64),
activation=self.kwargs.get('hidden_activation', 'logistic'),
)
# by default, spec is assumed to be a lowly integer, giving the
# number of units in the layer.
if isinstance(spec, int):
kwargs['nout'] = spec
# if spec is a tuple, assume that it contains one or more of the following:
# - the type of layer to construct (layers.Layer subclass)
            # - the name of a layer class registered in layers.Layer (str)
# - the name of an activation function (str; otherwise)
# - the number of units in the layer (int)
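            # e.g. the tuple spec (100, 'relu') yields a 100-unit feedforward
            # layer with a relu activation (illustrative example).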
if isinstance(spec, (tuple, list)):
for el in spec:
try:
if issubclass(el, layers.Layer):
form = el.__name__
except TypeError:
pass
if isinstance(el, str):
if el.lower() in layers.Layer._registry:
form = el
else:
kwargs['activation'] = el
if isinstance(el, int):
kwargs['nout'] = el
kwargs['name'] = '{}{}'.format(form, len(self.layers))
# if spec is a dictionary, try to extract a form and size for the
# layer, and override our default keyword arguments with the rest.
if isinstance(spec, dict):
if 'form' in spec:
form = spec['form'].lower()
kwargs['name'] = '{}{}'.format(form, len(self.layers))
if 'size' in spec:
kwargs['nout'] = spec['size']
kwargs.update(spec)
if isinstance(form, str) and form.lower() == 'bidirectional':
kwargs['name'] = 'bd{}{}'.format(
kwargs.get('worker', 'rnn'), len(self.layers))
self.layers.append(layers.build(form, **kwargs))
# setup output layer.
self.setup_decoder()
def setup_decoder(self):
'''Set up the "decoding" computations from layer activations to output.
The default decoder constructs a single weight matrix for each of the
hidden layers in the network that should be used for decoding (see the
`decode_from` parameter) and outputs the sum of the decoders.
This method can be overridden by subclasses to implement alternative
decoding strategies.
Parameters
----------
decode_from : int, optional
Compute the activation of the output vector using the activations of
the last N hidden layers in the network. Defaults to 1, which
results in a traditional setup that decodes only from the
penultimate layer in the network.
'''
sizes = [l.nout for l in self.layers]
back = self.kwargs.get('decode_from', 1)
self.layers.append(layers.build(
'feedforward',
name='out',
nin=sizes[-1] if back <= 1 else sizes[-back:],
nout=self.kwargs['layers'][-1],
activation=self.output_activation))
@property
def output_activation(self):
return self.kwargs.get('output_activation', 'linear')
@property
def encoding_layers(self):
'''Determine the layers that will be part of the network encoder.
This method is used by the default implementation of
:func:`setup_layers` to determine which layers in the network will be
treated as "encoding" layers. The default is to treat all but the last
layer as encoders.
Returns
-------
layers : list of int
A list of integers specifying sizes of the encoder network layers.
'''
return self.kwargs['layers'][:-1]
def _connect(self):
'''Connect the layers in this network to form a computation graph.
Returns
-------
outputs : list of theano variables
A list of expressions giving the output of each layer in the graph.
monitors : list of (name, expression) tuples
A list of expressions to use when monitoring the network.
updates : list of update tuples
A list of updates that should be performed by a theano function that
computes something using this graph.
'''
outputs = []
monitors = []
updates = []
for i, layer in enumerate(self.layers):
if i == 0:
# input to first layer is data.
inputs = self.x
elif i == len(self.layers) - 1:
                # inputs to the last layer are the outputs of the layers to
                # decode from.
inputs = outputs[-self.kwargs.get('decode_from', 1):]
else:
# inputs to other layers are outputs of previous layer.
inputs = outputs[-1]
out, mon, upd = layer.output(inputs)
outputs.append(out)
monitors.extend(mon)
updates.extend(upd)
return outputs, monitors, updates
@property
def outputs(self):
return self._connect()[0]
@property
def _monitors(self):
return self._connect()[1]
@property
def updates(self):
return self._connect()[2]
@property
def monitors(self):
'''A sequence of name-value pairs for monitoring the network.
Names in this sequence are strings, and values are theano variables
describing how to compute the relevant quantity.
These monitor expressions are used by network trainers to compute
quantities of interest during training. The default set of monitors
consists of:
- err: the unregularized error of the network
- X<0.1: percent of units in layer X such that :math:`|a_i| < 0.1`
- X<0.9: percent of units in layer X such that :math:`|a_i| < 0.9`
'''
yield 'err', self.error
for name, value in self._monitors:
yield name, value
@property
def params(self):
'''Get a list of the learnable theano parameters for this network.
This attribute is mostly used by :class:`theanets.trainer.Trainer`
implementations to compute the set of parameters that are tunable in a
network.
Returns
-------
params : list of theano variables
A list of parameters that can be learned in this model.
'''
return [p for l in self.layers for p in l.params]
def find(self, layer, param):
'''Get a parameter from a layer in the network.
Parameters
----------
layer : int or str
The layer that owns the parameter to return.
If this is an integer, then 0 refers to the input layer, 1 refers
to the first hidden layer, 2 to the second, and so on.
If this is a string, the layer with the corresponding name, if any,
will be used.
param : int or str
Name of the parameter to retrieve from the specified layer, or its
index in the parameter list of the layer.
Raises
------
KeyError
If there is no such layer, or if there is no such parameter in the
specified layer.
Returns
-------
param : theano shared variable
A shared parameter variable from the indicated layer.
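        Example (illustrative; both the layer name 'hid1' and the parameter
        name 'w' are assumptions)::
            w = net.find('hid1', 'w')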
'''
for i, l in enumerate(self.layers):
if layer == i or layer == l.name:
return l.find(param)
raise KeyError(layer)
def feed_forward(self, x):
'''Compute a forward pass of all layers from the given input.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
layers : list of ndarray (num-examples, num-units)
            The activation values of each layer in the network when given
input `x`. For each of the hidden layers, an array is returned
containing one row per input example; the columns of each array
correspond to units in the respective layer. The "output" of the
network is the last element of this list.
'''
if not hasattr(self, '_compute'):
outputs, _, updates = self._connect()
self._compute = theano.function([self.x], outputs, updates=updates)
return self._compute(x)
def predict(self, x):
'''Compute a forward pass of the inputs, returning the network output.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
        y : ndarray (num-examples, num-variables)
Returns the values of the network output units when given input `x`.
Rows in this array correspond to examples, and columns to output
variables.
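        Example (illustrative)::
            y = net.predict(x)  # or equivalently net(x), via __call__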
'''
return self.feed_forward(x)[-1]
__call__ = predict
def save(self, filename):
'''Save the state of this network to a pickle file on disk.
Parameters
----------
filename : str
Save the parameters of this network to a pickle file at the named
path. If this name ends in ".gz" then the output will automatically
be gzipped; otherwise the output will be a "raw" pickle.
'''
state = dict(klass=self.__class__, kwargs=self.kwargs)
for layer in self.layers:
key = '{}-values'.format(layer.name)
state[key] = [p.get_value() for p in layer.params]
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'wb')
pickle.dump(state, handle, -1)
handle.close()
logging.info('%s: saved model parameters', filename)
def load_params(self, filename):
'''Load the parameters for this network from disk.
Parameters
----------
filename : str
Load the parameters of this network from a pickle file at the named
path. If this name ends in ".gz" then the input will automatically
be gunzipped; otherwise the input will be treated as a "raw" pickle.
'''
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'rb')
saved = pickle.load(handle)
handle.close()
for layer in self.layers:
for p, v in zip(layer.params, saved['{}-values'.format(layer.name)]):
p.set_value(v)
logging.info('%s: loaded model parameters', filename)
def loss(self, **kwargs):
'''Return a variable representing the loss for this network.
The loss includes both the error for the network as well as any
regularizers that are in place.
Parameters
----------
weight_l1 : float, optional
Regularize the L1 norm of unit connection weights by this constant.
weight_l2 : float, optional
Regularize the L2 norm of unit connection weights by this constant.
hidden_l1 : float, optional
Regularize the L1 norm of hidden unit activations by this constant.
hidden_l2 : float, optional
Regularize the L2 norm of hidden unit activations by this constant.
contractive : float, optional
Regularize model using the Frobenius norm of the hidden Jacobian.
Returns
-------
loss : theano variable
A variable representing the loss of this network.
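        Example (illustrative; the regularization constants are arbitrary)::
            cost = net.loss(weight_l2=1e-4, hidden_l1=0.1)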
'''
hiddens = self.outputs[1:-1]
regularizers = dict(
weight_l1=(abs(w).sum() for l in self.layers for w in l.params),
weight_l2=((w * w).sum() for l in self.layers for w in l.params),
hidden_l1=(abs(h).mean(axis=0).sum() for h in hiddens),
hidden_l2=((h * h).mean(axis=0).sum() for h in hiddens),
contractive=(TT.sqr(TT.grad(h.mean(axis=0).sum(), self.x)).sum()
for h in hiddens),
)
return self.error + sum(TT.cast(kwargs[weight], FLOAT) * sum(expr)
for weight, expr in regularizers.items()
if kwargs.get(weight, 0) > 0)
class Autoencoder(Network):
r'''An autoencoder attempts to reproduce its input.
Some types of neural network models have been shown to learn useful features
from a set of data without requiring any label information. This learning
task is often referred to as feature learning or manifold learning. A class
of neural network architectures known as autoencoders are ideally suited for
this task. An autoencoder takes as input a data sample and attempts to
produce the same data sample as its output. Formally, an autoencoder defines
a mapping from a source space to itself:
.. math::
F_\theta: \mathcal{S} \to \mathcal{S}
Often, this mapping can be decomposed into an "encoding" stage
:math:`f_\alpha(\cdot)` and a corresponding "decoding" stage
:math:`g_\beta(\cdot)` to and from some latent space :math:`\mathcal{Z} =
\mathbb{R}^{n_z}`:
.. math::
\begin{eqnarray*}
f_\alpha &:& \mathcal{S} \to \mathcal{Z} \\
g_\beta &:& \mathcal{Z} \to \mathcal{S}
\end{eqnarray*}
Autoencoders form an interesting class of models for several reasons. They:
- require only "unlabeled" data (which is typically easy to obtain),
- are generalizations of many popular density estimation techniques, and
- can be used to model the "manifold" or density of a dataset.
If we have a labeled dataset containing :math:`m` :math:`d`-dimensional
input samples :math:`X \in \mathbb{R}^{m \times d}`, then the loss that the
autoencoder model optimizes with respect to the model parameters
:math:`\theta` is:
.. math::
\begin{eqnarray*}
\mathcal{L}(X, \theta) &=& \frac{1}{m} \sum_{i=1}^m \| F_\theta(x_i) - x_i \|_2^2 + R(X, \theta) \\
&=& \frac{1}{m} \sum_{i=1}^m \| g_\beta(f_\alpha(x_i)) - x_i \|_2^2 + R(X, \alpha, \beta)
\end{eqnarray*}
where :math:`R` is a regularization function.
A generic autoencoder can be defined in ``theanets`` by using the
:class:`Autoencoder <theanets.feedforward.Autoencoder>` class::
exp = theanets.Experiment(theanets.Autoencoder)
The ``layers`` parameter is required to define such a model; it can be
provided on the command-line by using ``--layers A B C ... A``, or in your
code::
exp = theanets.Experiment(
theanets.Autoencoder,
layers=(A, B, C, ..., A))
Autoencoders retain all attributes of the parent :class:`Network` class,
but additionally can have "tied weights", if the layer configuration is
palindromic.
Attributes
----------
tied_weights : bool, optional
Construct decoding weights using the transpose of the encoding weights
on corresponding layers. Defaults to False, which means decoding weights
will be constructed using a separate weight matrix.
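    Example of a palindromic layer specification suitable for tied weights (a
    minimal sketch; the sizes are arbitrary)::
        exp = theanets.Experiment(
            theanets.Autoencoder,
            layers=(784, 256, 64, 256, 784),
            tied_weights=True)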
'''
def setup_decoder(self):
'''Set up weights for the decoder layers of an autoencoder.
This implementation allows for decoding weights to be tied to encoding
weights. If `tied_weights` is False, the decoder is set up using
:func:`Network.setup_decoder`; if True, then the decoder is set up to be
a mirror of the encoding layers, using transposed weights.
Parameters
----------
input_noise : float, optional
Standard deviation of desired noise to inject into input.
hidden_noise : float, optional
Standard deviation of desired noise to inject into hidden unit
activation output.
input_dropouts : float in [0, 1], optional
Proportion of input units to randomly set to 0.
hidden_dropouts : float in [0, 1], optional
Proportion of hidden unit activations to randomly set to 0.
tied_weights : bool, optional
If True, use decoding weights that are "tied" to the encoding
weights. This only makes sense for a limited set of "autoencoder"
layer configurations. Defaults to False.
decode_from : int, optional
For networks without tied weights, compute the activation of the
output vector using the activations of the last N hidden layers in
the network. Defaults to 1, which results in a traditional setup
that decodes only from the penultimate layer in the network.
        '''
if not self.tied_weights:
return super(Autoencoder, self).setup_decoder()
kw = {}
kw.update(self.kwargs)
kw.update(noise=self.kwargs.get('hidden_noise', 0),
dropout=self.kwargs.get('hidden_dropouts', 0))
for i in range(len(self.layers) - 1, 1, -1):
self.layers.append(layers.build('tied', self.layers[i], **kw))
kw = {}
kw.update(self.kwargs)
kw.update(activation=self.output_activation)
self.layers.append(layers.build('tied', self.layers[1], **kw))
@property
def encoding_layers(self):
'''Compute the layers that will be part of the network encoder.
        This implementation checks that --layers is compatible with
        --tied-weights. If the weights are tied, the layer sizes must form an
        odd-length palindrome, and the encoding layers are the first half of
        the layers in the network. If the weights are not tied, all but the
        final layer are considered encoding layers.
Returns
-------
layers : list of int
A list of integers specifying sizes of the encoder network layers.
'''
if not self.tied_weights:
return super(Autoencoder, self).encoding_layers
error = 'with --tied-weights, --layers must be an odd-length palindrome'
sizes = []
for layer in self.kwargs['layers']:
if isinstance(layer, layers.Layer):
sizes.append(layer.nout)
if isinstance(layer, int):
sizes.append(layer)
if isinstance(layer, dict):
sizes.append(layer.get('size', layer.get('nout', -1)))
assert len(sizes) % 2 == 1, error
k = len(sizes) // 2
encode = np.asarray(sizes[:k])
decode = np.asarray(sizes[k+1:])
assert (encode == decode[::-1]).all(), error
return self.kwargs['layers'][:k+1]
@property
def tied_weights(self):
'''A boolean indicating whether this network uses tied weights.'''
return self.kwargs.get('tied_weights', False)
@property
def error(self):
'''Returns a theano expression for computing the mean squared error.'''
err = self.outputs[-1] - self.x
return TT.mean((err * err).sum(axis=1))
def encode(self, x, layer=None, sample=False):
'''Encode a dataset using the hidden layer activations of our network.
Parameters
----------
x : ndarray
A dataset to encode. Rows of this dataset capture individual data
points, while columns represent the variables in each data point.
layer : int, optional
The index of the hidden layer activation to use. By default, we use
the "middle" hidden layer---for example, for a 4,2,4 or 4,3,2,3,4
autoencoder, we use the "2" layer (index 1 or 2, respectively).
sample : bool, optional
If True, then draw a sample using the hidden activations as
independent Bernoulli probabilities for the encoded data. This
assumes the hidden layer has a logistic sigmoid activation function.
Returns
-------
ndarray :
The given dataset, encoded by the appropriate hidden layer
activation.
'''
enc = self.feed_forward(x)[(layer or len(self.layers) // 2)]
if sample:
return np.random.binomial(n=1, p=enc).astype(np.uint8)
return enc
def decode(self, z, layer=None):
'''Decode an encoded dataset by computing the output layer activation.
Parameters
----------
z : ndarray
A matrix containing encoded data from this autoencoder.
layer : int, optional
The index of the hidden layer that was used to encode `z`.
Returns
-------
ndarray :
The decoded dataset.
'''
if not hasattr(self, '_decoders'):
self._decoders = {}
layer = layer or len(self.layers) // 2
if layer not in self._decoders:
outputs, _, updates = self._connect()
self._decoders[layer] = theano.function(
[outputs[layer]], [outputs[-1]], updates=updates)
return self._decoders[layer](z)[0]
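# Editorial sketch, not part of the original module: a plain-numpy
# illustration of the encode() conventions documented above. The layer sizes
# and activation values below are hypothetical.
def _encode_sketch():
    '''Mimic the default layer choice and Bernoulli sampling of encode().'''
    layer_sizes = (4, 3, 2, 3, 4)  # a tied-weights palindrome
    middle = len(layer_sizes) // 2  # encode() defaults to this index (2 here)
    # pretend these are logistic sigmoid activations from the middle layer
    enc = np.array([[0.9, 0.1], [0.2, 0.8]])
    # with sample=True, each activation is an independent Bernoulli probability
    bits = np.random.binomial(n=1, p=enc).astype(np.uint8)
    return middle, bits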
class Regressor(Network):
r'''A regression model attempts to produce a target output.
Regression models are trained by optimizing a (possibly regularized) loss
that centers around some measurement of error with respect to the target
outputs. This regression model implementation uses the mean squared error.
If we have a labeled dataset containing :math:`m` :math:`d`-dimensional
input samples :math:`X \in \mathbb{R}^{m \times d}` and :math:`m`
:math:`e`-dimensional paired target outputs :math:`Y \in \mathbb{R}^{m
\times e}`, then the loss that the Regressor model optimizes with respect to
the model parameters :math:`\theta` is:
.. math::
\mathcal{L}(X, Y, \theta) = \frac{1}{m} \sum_{i=1}^m \| F_\theta(x_i) - y_i \|_2^2 + R(X, \theta)
where :math:`F_\theta` is the feedforward function that computes the network
output, and :math:`R` is a regularization function.
'''
def setup_vars(self):
'''Setup Theano variables for our network.
Returns
-------
vars : list of theano variables
A list of the variables that this network requires as inputs.
'''
super(Regressor, self).setup_vars()
# this variable holds the target outputs for input x.
self.targets = TT.matrix('targets')
return [self.x, self.targets]
@property
def error(self):
'''Returns a theano expression for computing the mean squared error.'''
err = self.outputs[-1] - self.targets
return TT.mean((err * err).sum(axis=1))
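# Editorial sketch, not part of the original module: the mean squared error
# expression above written out in plain numpy; the arrays are hypothetical
# stand-ins for the network output and the target matrix.
def _mse_sketch():
    '''Compute the numpy analogue of TT.mean((err * err).sum(axis=1)).'''
    preds = np.array([[0.5, 1.0], [2.0, 0.0]])
    targets = np.array([[0.0, 1.0], [2.0, 1.0]])
    err = preds - targets
    # sum squared error over output dimensions, then average over examples
    return (err * err).sum(axis=1).mean()  # (0.25 + 1.0) / 2 = 0.625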
class Classifier(Network):
r'''A classifier attempts to match a 1-hot target output.
Classification models in ``theanets`` are trained by optimizing a (possibly
regularized) loss that centers around the categorical cross-entropy. This
error computes the difference between the distribution generated by the
classification model and the empirical distribution of the labeled data.
If we have a labeled dataset containing :math:`m` :math:`d`-dimensional
input samples :math:`X \in \mathbb{R}^{m \times d}` and :math:`m` paired
target outputs :math:`Y \in \mathbb{R}^m`, then the loss that the
``Classifier`` model optimizes with respect to the model parameters
:math:`\theta` is:
.. math::
\mathcal{L}(X, Y, \theta) = \frac{1}{m} \sum_{i=1}^m -\log F_\theta(x_i)_{y_i} + R(X, \theta)
where :math:`F_\theta` is the softmax output generated by the classification
model and :math:`R` is a regularization function.
'''
def setup_vars(self):
'''Setup Theano variables for our network.
Returns
-------
vars : list of theano variables
A list of the variables that this network requires as inputs.
'''
super(Classifier, self).setup_vars()
# for a classifier, this specifies the correct labels for a given input.
self.labels = TT.ivector('labels')
return [self.x, self.labels]
@property
def output_activation(self):
return 'softmax'
@property
def error(self):
'''Returns a theano computation of cross entropy.'''
out = self.outputs[-1]
prob = out[TT.arange(self.labels.shape[0]), self.labels]
return -TT.mean(TT.log(prob))
@property
def accuracy(self):
'''Returns a theano computation of percent correct classifications.'''
out = self.outputs[-1]
predict = TT.argmax(out, axis=1)
return TT.cast(100, FLOAT) * TT.mean(TT.eq(predict, self.labels))
@property
def monitors(self):
'''A sequence of name-value pairs for monitoring the network.
Names in this sequence are strings, and values are theano variables
describing how to compute the relevant quantity.
These monitor expressions are used by network trainers to compute
quantities of interest during training. The default set of monitors
consists of everything from :func:`Network.monitors`, plus:
- acc: the classification `accuracy` of the network
'''
for name, value in super(Classifier, self).monitors:
yield name, value
yield 'acc', self.accuracy
def classify(self, x):
'''Compute a greedy classification for the given set of data.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing examples to classify. Examples are given as the
rows in this array.
Returns
-------
k : ndarray (num-examples, )
A vector of class index values, one per row of input data.
'''
return self.predict(x).argmax(axis=-1)
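# Editorial sketch, not part of the original module: the cross-entropy and
# accuracy expressions above written out in plain numpy with hypothetical
# softmax outputs and labels.
def _classifier_metrics_sketch():
    '''Mirror Classifier.error and Classifier.accuracy with numpy.'''
    out = np.array([[0.9, 0.1], [0.4, 0.6], [0.2, 0.8]])
    labels = np.array([0, 1, 0])
    # error: negative mean log-probability assigned to the correct class
    prob = out[np.arange(labels.shape[0]), labels]
    error = -np.mean(np.log(prob))
    # accuracy: percentage of argmax predictions that match the labels
    accuracy = 100.0 * np.mean(out.argmax(axis=1) == labels)
    return error, accuracy  # accuracy is roughly 66.7 for these values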
|
{"hexsha": "725d5c280b18b1085fdb8e0d1270b24257f3ff2e", "size": 35996, "ext": "py", "lang": "Python", "max_stars_repo_path": "theanets/feedforward.py", "max_stars_repo_name": "hknerdgn/theanets", "max_stars_repo_head_hexsha": "64f9f65c3edd8a9bd74e8a414f2812b95557ff79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-02-12T16:49:03.000Z", "max_stars_repo_stars_event_max_datetime": "2015-02-12T16:49:03.000Z", "max_issues_repo_path": "theanets/feedforward.py", "max_issues_repo_name": "hknerdgn/theanets", "max_issues_repo_head_hexsha": "64f9f65c3edd8a9bd74e8a414f2812b95557ff79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theanets/feedforward.py", "max_forks_repo_name": "hknerdgn/theanets", "max_forks_repo_head_hexsha": "64f9f65c3edd8a9bd74e8a414f2812b95557ff79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.426067908, "max_line_length": 106, "alphanum_fraction": 0.6234303812, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 7967}
|
import numpy as np
from scipy import optimize
class Portfolio:
def __init__(self, identifier, exp_ret, exp_vol, exp_cov, weights):
self.identifier = identifier
self.exp_ret = exp_ret
self.exp_vol = exp_vol
self.exp_cov = exp_cov
self.weights = weights
    def portfolio_vol(self):
        # portfolio variance is w'Cw: build the outer product of the weights
        # and contract it with the covariance matrix
        weights_matrix = np.array(self.weights).reshape(-1, 1) * np.array(self.weights)
        port_vol = np.sqrt(np.sum(np.multiply(weights_matrix, self.exp_cov)))
        return port_vol
def portfolio_ret(self):
port_ret = np.dot(self.exp_ret, self.weights)
return port_ret
    def sharpe(self, rf=0):
        # note: returns the *negated* Sharpe ratio, matching the sign
        # convention of the minimization objective used in optimize()
        return -((self.portfolio_ret() - rf) / self.portfolio_vol())
    def optimize(self, rf=0, tolerance=1e-6, allow_short=False):
        '''Maximize the Sharpe ratio by minimizing its negation with SLSQP.'''
        portfolio_size = len(self.exp_ret)
        # objective function: negated Sharpe ratio, -(port_ret - rf)/port_vol
        def neg_sharpe(weights, exp_ret, exp_cov):
            port_ret = np.dot(exp_ret, weights)
            weights_matrix = np.array(weights).reshape(-1, 1) * np.array(weights)
            port_vol = np.sqrt(np.sum(np.multiply(weights_matrix, exp_cov)))
            return -((port_ret - rf) / port_vol)
        # initial guess: equal weights
        xinit = np.repeat(1.0 / portfolio_size, portfolio_size)
        # bounds: long-only weights in [0, 1] unless shorting is allowed
        if allow_short:
            bounds = None
        else:
            bounds = tuple((0, 1) for _ in xinit)
        # constraint: weights must sum to 1
        cons = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1},)
        result = optimize.minimize(neg_sharpe, x0=xinit,
                                   args=(self.exp_ret, self.exp_cov),
                                   method='SLSQP', bounds=bounds,
                                   constraints=cons, tol=tolerance)
        # return a new Portfolio carrying the optimal weights; the optimal
        # Sharpe ratio itself is -result.fun
        return Portfolio(self.identifier, self.exp_ret, self.exp_vol,
                         self.exp_cov, result.x)
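# Editorial usage sketch, not part of the original module: the asset numbers
# below are hypothetical.
if __name__ == '__main__':
    exp_ret = np.array([0.08, 0.12, 0.10])  # expected returns
    exp_vol = np.array([0.15, 0.25, 0.20])  # expected volatilities
    exp_cov = np.diag(exp_vol ** 2)  # toy diagonal covariance matrix
    start = np.repeat(1.0 / 3, 3)  # equal-weight starting point
    port = Portfolio('demo', exp_ret, exp_vol, exp_cov, start)
    best = port.optimize(rf=0.02)
    print(best.weights, best.portfolio_ret(), best.portfolio_vol())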
|
{"hexsha": "b4e6f8c75f5debb4d0ba606879e5d115b1b725b6", "size": 2580, "ext": "py", "lang": "Python", "max_stars_repo_path": "financier/portfolio.py", "max_stars_repo_name": "JakeVestal/financier", "max_stars_repo_head_hexsha": "0ace41f3b39bb2e4252cb90eeecf316767679e34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "financier/portfolio.py", "max_issues_repo_name": "JakeVestal/financier", "max_issues_repo_head_hexsha": "0ace41f3b39bb2e4252cb90eeecf316767679e34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "financier/portfolio.py", "max_forks_repo_name": "JakeVestal/financier", "max_forks_repo_head_hexsha": "0ace41f3b39bb2e4252cb90eeecf316767679e34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-21T20:52:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T20:52:41.000Z", "avg_line_length": 36.338028169, "max_line_length": 93, "alphanum_fraction": 0.6166666667, "include": true, "reason": "import numpy,from scipy", "num_tokens": 629}
|
"""Evaluation utils for BLEU metrics."""
from collections import Counter
import math
import subprocess
import numpy as np
def bleu_stats(hypothesis, reference):
"""Compute statistics for BLEU."""
stats = []
stats.append(len(hypothesis))
stats.append(len(reference))
for n in range(1, 5):
s_ngrams = Counter(
[tuple(hypothesis[i:i + n]) for i in range(len(hypothesis) + 1 - n)]
)
r_ngrams = Counter(
[tuple(reference[i:i + n]) for i in range(len(reference) + 1 - n)]
)
stats.append(max([sum((s_ngrams & r_ngrams).values()), 0]))
stats.append(max([len(hypothesis) + 1 - n, 0]))
return stats
def bleu(stats):
"""Compute BLEU given n-gram statistics."""
if len(list(filter(lambda x: x == 0, stats))) > 0:
return 0
(c, r) = stats[:2]
log_bleu_prec = sum(
[math.log(float(x) / y) for x, y in zip(stats[2::2], stats[3::2])]
) / 4.
return math.exp(min([0, 1 - float(r) / c]) + log_bleu_prec)
def get_bleu(hypotheses, reference):
"""Get validation BLEU score for dev set."""
    stats = np.zeros(10)
for hyp, ref in zip(hypotheses, reference):
stats += np.array(bleu_stats(hyp, ref))
return 100 * bleu(stats)
def get_bleu_moses(hypotheses, reference):
"""Get BLEU score with moses bleu score."""
with open('tmp_hypotheses.txt', 'w') as f:
for hypothesis in hypotheses:
f.write(' '.join(hypothesis) + '\n')
with open('tmp_reference.txt', 'w') as f:
for ref in reference:
f.write(' '.join(ref) + '\n')
hypothesis_pipe = '\n'.join([' '.join(hyp) for hyp in hypotheses])
pipe = subprocess.Popen(
["perl", 'multi-bleu.perl', '-lc', 'tmp_reference.txt'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
    # subprocess pipes carry bytes under Python 3, so encode and decode
    # explicitly (assuming UTF-8 tokens)
    pipe.stdin.write(hypothesis_pipe.encode('utf-8'))
    pipe.stdin.close()
    return pipe.stdout.read().decode('utf-8')
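# Editorial usage sketch, not part of the original module: the token lists
# are hypothetical.
if __name__ == '__main__':
    hyps = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
    refs = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
    print(get_bleu(hyps, refs))  # identical sentences score 100.0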
|
{"hexsha": "af482050c2a55271554137ecaaa9913c18c748da", "size": 1984, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/bleu.py", "max_stars_repo_name": "dingchaoz/suammarize_radiology", "max_stars_repo_head_hexsha": "7964b00596ced172fd75ad977435d00eef4a8a88", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2018-10-25T20:08:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T12:23:23.000Z", "max_issues_repo_path": "utils/bleu.py", "max_issues_repo_name": "dingchaoz/suammarize_radiology", "max_issues_repo_head_hexsha": "7964b00596ced172fd75ad977435d00eef4a8a88", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-10-20T01:14:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T05:43:44.000Z", "max_forks_repo_path": "utils/bleu.py", "max_forks_repo_name": "dingchaoz/suammarize_radiology", "max_forks_repo_head_hexsha": "7964b00596ced172fd75ad977435d00eef4a8a88", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2018-10-25T02:57:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T21:12:17.000Z", "avg_line_length": 30.0606060606, "max_line_length": 80, "alphanum_fraction": 0.5892137097, "include": true, "reason": "import numpy", "num_tokens": 562}
|