from pandas import Series,concat
import pandas as pd
from matplotlib import pyplot
from pandas.plotting import lag_plot
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error,mean_absolute_error
from IPython.core.pylabtools import figsize
import numpy as np
#series = Series.from_csv('daily-minimum-temperatures.csv', header=0)
speed_train = open("/home/sunyanru19s/solar_wind_coding/LSTM/data/speed_train.txt")
speed_val = open("/home/sunyanru19s/solar_wind_coding/LSTM/data/speed_val(2016).txt")
# load the training and validation series (one value per line)
train_speed_list = []
val_speed_list = []
speed_line = speed_train.readline()
while speed_line:
speed = list(map(float, speed_line.split()))
train_speed_list.append(speed)
speed_line = speed_train.readline()
train_speed_list = np.array(train_speed_list, dtype='float64')
print(train_speed_list.shape)#(43824, 1)
#df_train = pd.DataFrame(train_speed_list)
speed_line_val = speed_val.readline()
while speed_line_val:
speed = list(map(float, speed_line_val.split()))
val_speed_list.append(speed)
speed_line_val = speed_val.readline()
val_speed_list = np.array(val_speed_list, dtype='float64')
print(val_speed_list.shape)#(8784, 1)
#df_test = pd.DataFrame(val_speed_list)
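# A more concise load would use np.loadtxt (sketch, not used below; assumes
# the same whitespace-separated single-column layout as the files above):
#   train_speed_list = np.loadtxt(path_to_speed_train).reshape(-1, 1)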
'''
# scatter plots of value vs. lagged value
lag_plot(df_train, lag=24)
pyplot.title('AM_train')
pyplot.savefig('AM_check_train.jpg')
pyplot.show()
lag_plot(df_test, lag=24)
pyplot.title('AM_test')
pyplot.savefig('AM_check_test.jpg')
pyplot.show()
# compute lag correlation coefficients
dataframe_train = concat([df_train.shift(48), df_train], axis=1)
dataframe_train.columns = ['t-48', 't']
result = dataframe_train.corr()  # correlation matrix between t-48 and t
print(result)
dataframe_test = concat([df_test.shift(48), df_test], axis=1)
dataframe_test.columns = ['t-48', 't']
result_test = dataframe_test.corr()
print(result_test)
'''
'''output
t-24 t
t-24 1.000000 0.688119
t 0.688119 1.000000
t-24 t
t-24 1.000000 0.718967
t 0.718967 1.000000
'''
'''
t-1 t
t-1 1.000000 0.990758
t 0.990758 1.000000
t-1 t
t-1 1.000000 0.992352
t 0.992352 1.000000
'''
'''
t-48 t
t-48 1.000000 0.399389
t 0.399389 1.000000
t-48 t
t-48 1.000000 0.398535
t 0.398535 1.000000
'''
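# For reference, the lag correlations above can be computed directly with
# pandas (sketch; assumes df_train is the single-column DataFrame built above):
#   for lag in (1, 24, 48):
#       print(lag, df_train[0].autocorr(lag=lag))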
'''
# ACF / PACF line plots
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
plot_acf(df_train, lags=31)
#pyplot.figure(figsize=(12, 6))
pyplot.title('acf')
pyplot.savefig('acf.jpg')
plot_pacf(df_train,lags=31)
#pyplot.figure(figsize=(12, 6))
pyplot.title('pacf')
pyplot.savefig('pacf.jpg')
pyplot.show()
'''
# split dataset
#X = series.values
#train, test = X[1:len(X)-7], X[len(X)-7:]
X_train_speed = []
y_train_speed = []
X_test_speed = []
y_test_speed = []
# build (value, value 24 steps ahead) pairs for train and validation;
# note: these lagged pairs are assembled but the AR fit below uses the raw series
print("lenlenlenlen:", len(train_speed_list), len(val_speed_list))  # lenlenlenlen: 43824 8784
for index in range(len(train_speed_list) - 25 + 1):  # stop so index + 24 stays in range
    X_train_speed.append(train_speed_list[index])
    y_train_speed.append(train_speed_list[index + 25 - 1])  # value 24 steps later
for index in range(len(val_speed_list) - 25 + 1):
    X_test_speed.append(val_speed_list[index])
    y_test_speed.append(val_speed_list[index + 25 - 1])
# train autoregression
model = AR(train_speed_list)
model_fit = model.fit()
print('Lag: %s' % model_fit.k_ar)
print('Coefficients: %s' % model_fit.params)
# The AR model selected the previous 55 hours as input; the printed coefficients
# show how much each hourly lag contributes to the prediction.
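# To pair each fitted coefficient with its lag (assumption: with the default
# trend='c', params[0] is the intercept and params[1:] correspond to lags
# 1..k_ar):
for lag, coef in enumerate(model_fit.params[1:], start=1):
    print("t-%dh coefficient: %f" % (lag, coef))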
def computecc(targets, outputs):
    """Compute the Pearson correlation coefficient between targets and outputs."""
    targets = np.array(targets)
    outputs = np.array(outputs)
    print("***************", targets.shape, outputs.shape)
    xBar = targets.mean()
    yBar = outputs.mean()
    print(xBar, yBar)
    SSR = 0   # accumulates the numerator (covariance terms)
    varX = 0  # accumulates squared deviations of targets
    varY = 0  # accumulates squared deviations of outputs
    for i in range(0, targets.shape[0]):
        diffXXBar = targets[i] - xBar
        diffYYBar = outputs[i] - yBar
        SSR += (diffXXBar * diffYYBar)
        varX += diffXXBar ** 2
        varY += diffYYBar ** 2
    SST = np.sqrt(varX * varY)  # denominator
    return SSR / SST
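# Sanity check (sketch): computecc should match numpy's built-in Pearson
# correlation once both arrays are flattened to 1-D:
def computecc_np(targets, outputs):
    t = np.asarray(targets).ravel()
    o = np.asarray(outputs).ravel()
    return np.corrcoef(t, o)[0, 1]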
# make predictions
# note: this forecasts the entire validation span in a single call
predictions = model_fit.predict(start=len(train_speed_list), end=len(train_speed_list)+len(val_speed_list)-1, dynamic=False)
print("@@@@@@@@@@", len(predictions), len(val_speed_list))
#for i in range(len(predictions)):
#    print('predicted=%f, expected=%f' % (predictions[i], val_speed_list[i]))
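# Because the call above forecasts 8784 steps past the end of the training
# data, the AR recursion feeds on its own outputs and decays toward the series
# mean, which is why the CC reported below is near zero. A one-step-ahead
# alternative (sketch; assumes params[0] is the intercept and params[1:] are
# the coefficients for lags 1..k_ar):
#   history = list(train_speed_list.ravel())
#   k, coefs = model_fit.k_ar, model_fit.params
#   rolling = []
#   for obs in val_speed_list.ravel():
#       lags = history[-k:][::-1]                 # most recent value first
#       rolling.append(coefs[0] + np.dot(coefs[1:], lags))
#       history.append(obs)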
error = mean_squared_error(val_speed_list, predictions)
print('Test MSE: %.3f' % error)
n = len(val_speed_list)
#mse = sum(np.square(y_test_speed - predictions)) / n
mae = mean_absolute_error(val_speed_list, predictions)
#print("均方误差(MSE):均方根误差(RMSE)mse%%%%%%%%%%%%%%%",mse)
print("平均绝对误差(MAE)mae^^^^^^^^^^^^^^^",mae)
test_cc = computecc(val_speed_list, predictions)
print("啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊啊val_CC:",test_cc)
np.savetxt('predict_val_predict_24.txt', predictions)
# plot results
figsize(20, 5)
pyplot.plot(val_speed_list,color='cyan', label='true', linewidth=1)
pyplot.plot(predictions, color='magenta', label='predict', linewidth=1)
pyplot.legend(loc='upper right', fontsize=10)
pyplot.title('Autoregression Model')
pyplot.savefig('Autoregression Model_24.jpg')
pyplot.show()
'''output(28)
Lag: 55
Coefficients: [ ]
Test MSE: 11423.790
Mean absolute error (MAE): 82.82515577233744
*************** (8784, 1) (8784,)
446.4091530054645 412.6698854116349
val_CC: [0.00075527]
'''
'''(25)
Lag: 55
Coefficients: [ ]
Test MSE: 11423.790
Mean absolute error (MAE): 82.82515577233744
*************** (8784, 1) (8784,)
446.4091530054645 412.6698854116349
val_CC: [0.00075527]
'''
#CUDA_VISIBLE_DEVICES="" PYTHONHASHSEED=0 python -u AM.py | tee ./AM_24
{"hexsha": "b90b8799709665b15a8cfc5f28987eefd95e056e", "size": 6286, "ext": "py", "lang": "Python", "max_stars_repo_path": "compare_model/AM_model/AM.py", "max_stars_repo_name": "syrGitHub/TDAM", "max_stars_repo_head_hexsha": "c66a45a9383681e3a001bacefbbf04b3c471ac47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-21T13:10:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-21T13:10:18.000Z", "max_issues_repo_path": "compare_model/AM_model/AM.py", "max_issues_repo_name": "syrGitHub/TDAM", "max_issues_repo_head_hexsha": "c66a45a9383681e3a001bacefbbf04b3c471ac47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compare_model/AM_model/AM.py", "max_forks_repo_name": "syrGitHub/TDAM", "max_forks_repo_head_hexsha": "c66a45a9383681e3a001bacefbbf04b3c471ac47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5699481865, "max_line_length": 125, "alphanum_fraction": 0.6748329621, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 2134}
"""
DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
This material is based upon work supported by the United States Air Force under
Air Force Contract No. FA8702-15-D-0001. Any opinions, findings, conclusions
or recommendations expressed in this material are those of the author(s) and
do not necessarily reflect the views of the United States Air Force.
(c) 2020 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the U.S. Government with Unlimited Rights, as defined in
DFARS Part 252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright
notice, U.S. Government rights in this work are defined by DFARS 252.227-7013
or DFARS 252.227-7014 as detailed above. Use of this work other than as
specifically authorized by the U.S. Government may violate any copyrights
that exist in this work.
"""
import pandas as pd
import pod
import scipy.io as sio
import os, sys, tarfile
class Experiment:
def __init__(self, podfile):
csv = pd.read_csv(podfile)
csv['pod'] = csv.apply(lambda x: pod.Pod(x['label'],x['mac'],x['position']), axis='columns')
self.pd_pods = csv
def AddDataFile(self, filename):
# idx = self.pd_pods.label.apply(lambda x: f"{x}-" in filename)
# for p in self.pd_pods['pod'][idx]:
# p.AddDataFile(filename)
if os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
for name in files:
path = os.path.join(root, name)
idx = self.pd_pods.label.apply(lambda x: f"/{x}/" in path)
for p in self.pd_pods['pod'][idx]:
with open(path) as f:
p.AddDataFile(f, path)
elif os.path.isfile(filename):
if filename.endswith('.tar.gz') or filename.endswith('.tgz'):
tar = tarfile.open(filename)
for m in tar.getmembers():
if m.isfile():
f = tar.extractfile(m)
idx = self.pd_pods.label.apply(lambda x: f"/{x}/" in m.name)
for p in self.pd_pods['pod'][idx]:
p.AddDataFile(f, m.name)
elif filename.endswith('.json'):
idx = self.pd_pods.label.apply(lambda x: f"/{x}/" in filename)
for p in self.pd_pods['pod'][idx]:
with open(filename) as f:
p.AddDataFile(f, filename)
else:
print(f"WARNING: skipping file I don't know how to handle: {filename}")
def TimeSort(self):
for p in self.pd_pods['pod']:
p.TimeSort()
def UpdateLocationInfo(self):
for p in self.pd_pods['pod']:
p.UpdateLocalSendLocations()
p.UpdateLocalReceiveLocations()
for p in self.pd_pods['pod']:
for psender in self.pd_pods['pod']:
if p != psender:
p.UpdateSenderInfo(psender)
for p in self.pd_pods['pod']:
p.UpdateTransmitDistances()
def FilterUnknownTx(self):
for p in self.pd_pods['pod']:
p.FilterUnknownTx()
def WriteOutput(self, filename):
sigvars = ['ts', 'rssi', 'address', 'rx_x', 'rx_y', 'rx_z', 'tx_x', 'tx_y', 'tx_z', 'd']
#sigvars = ['ts', 'rssi', 'rx_x', 'rx_y', 'rx_z', 'tx_x', 'tx_y', 'tx_z', 'd']
if filename.endswith('.mat'):
m = {}
for p in self.pd_pods['pod']:
print("len signalData is ", len(p.signalData))
label = p.label.replace('-','_')
if len(p.signalData):
m[label] = {name: col.values for name, col in p.signalData[sigvars].items()}
m[label]['mac'] = p.mac
else:
m[label] = {'mac': p.mac}
#m[p.label] = pd.DataFrame(columns=sigvars)
sio.savemat(filename, m)
else:
sys.exit(f"ERROR: unable to write file type for {filename}")
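# Hypothetical usage sketch (file names are illustrative; the pods CSV is
# read in __init__ and must carry label, mac, and position columns):
#
#   exp = Experiment("pods.csv")
#   exp.AddDataFile("capture.tar.gz")   # or a directory / a single .json
#   exp.TimeSort()
#   exp.UpdateLocationInfo()
#   exp.FilterUnknownTx()
#   exp.WriteOutput("capture.mat")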
{"hexsha": "857774211862aaedefa15c045f7004d8fc824c6a", "size": 4174, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/experiment.py", "max_stars_repo_name": "mit-ll/pact-echo", "max_stars_repo_head_hexsha": "df9a4b53e97b6ed0ccd90dbffe3853850fc28730", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/experiment.py", "max_issues_repo_name": "mit-ll/pact-echo", "max_issues_repo_head_hexsha": "df9a4b53e97b6ed0ccd90dbffe3853850fc28730", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-11-27T16:31:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T12:43:12.000Z", "max_forks_repo_path": "analysis/experiment.py", "max_forks_repo_name": "mit-ll/pact-echo", "max_forks_repo_head_hexsha": "df9a4b53e97b6ed0ccd90dbffe3853850fc28730", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.74, "max_line_length": 100, "alphanum_fraction": 0.5577383805, "include": true, "reason": "import scipy", "num_tokens": 1000}
//
// eigen_utils.hpp
//
// Created By Davis Blalock on 3/2/16.
// Copyright (c) 2016 Davis Blalock. All rights reserved.
//
#ifndef __EIGEN_UTILS_HPP
#define __EIGEN_UTILS_HPP
#define EIGEN_DONT_PARALLELIZE // ensure no multithreading
#include <Eigen/Dense>
#include <unsupported/Eigen/CXX11/Tensor>
// ================================================================
// typealiases
// ================================================================
template <class T, int Rows = Eigen::Dynamic, int Cols = Eigen::Dynamic>
using RowMatrix = Eigen::Matrix<T, Rows, Cols, Eigen::RowMajor>;
template <class T, int Rows = Eigen::Dynamic, int Cols = Eigen::Dynamic>
using ColMatrix = Eigen::Matrix<T, Rows, Cols, Eigen::ColMajor>;
template <class T> using ColVector = Eigen::Matrix<T, Eigen::Dynamic, 1>;
template <class T>
using RowVector = Eigen::Matrix<T, 1, Eigen::Dynamic, Eigen::RowMajor>;
#endif
{"hexsha": "1c56cfc709e125c6d404f2a45b94c2aa63fe8825", "size": 912, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "maddness/cpp/src/include/eigen_utils.hpp", "max_stars_repo_name": "joennlae/halutmatmul", "max_stars_repo_head_hexsha": "69340d3386298401d421b0e67dcb0649534b0c12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maddness/cpp/src/include/eigen_utils.hpp", "max_issues_repo_name": "joennlae/halutmatmul", "max_issues_repo_head_hexsha": "69340d3386298401d421b0e67dcb0649534b0c12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maddness/cpp/src/include/eigen_utils.hpp", "max_forks_repo_name": "joennlae/halutmatmul", "max_forks_repo_head_hexsha": "69340d3386298401d421b0e67dcb0649534b0c12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5, "max_line_length": 73, "alphanum_fraction": 0.6184210526, "num_tokens": 230}
# Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import json
import os
from collections.abc import Collection, MutableSequence
import pytest
from signac.synced_collections import SyncedList
from signac.synced_collections.backends.collection_json import JSONDict
from signac.synced_collections.numpy_utils import NumpyConversionWarning
from signac.synced_collections.utils import (
AbstractTypeResolver,
SyncedCollectionJSONEncoder,
)
try:
import numpy
NUMPY = True
except ImportError:
NUMPY = False
def test_type_resolver():
resolver = AbstractTypeResolver(
{
"dict": lambda obj: isinstance(obj, dict),
"tuple": lambda obj: isinstance(obj, tuple),
"str": lambda obj: isinstance(obj, str),
"mutablesequence": lambda obj: isinstance(obj, MutableSequence),
"collection": lambda obj: isinstance(obj, Collection),
"set": lambda obj: isinstance(obj, set),
}
)
assert resolver.get_type({}) == "dict"
assert resolver.get_type((0, 1)) == "tuple"
assert resolver.get_type("abc") == "str"
assert resolver.get_type([]) == "mutablesequence"
# Make sure that order matters; the "collection" check precedes the "set"
# check, so a set resolves as "collection".
assert resolver.get_type(set()) == "collection"
def test_json_encoder(tmpdir):
# Raw dictionaries should be encoded transparently.
data = {"foo": 1, "bar": 2, "baz": 3}
json_str_data = '{"foo": 1, "bar": 2, "baz": 3}'
assert json.dumps(data) == json_str_data
assert json.dumps(data, cls=SyncedCollectionJSONEncoder) == json_str_data
assert json.dumps(data, cls=SyncedCollectionJSONEncoder) == json.dumps(data)
fn = os.path.join(tmpdir, "test_json_encoding.json")
synced_data = JSONDict(fn)
synced_data.update(data)
with pytest.raises(TypeError):
json.dumps(synced_data)
assert json.dumps(synced_data, cls=SyncedCollectionJSONEncoder) == json_str_data
if NUMPY:
# Test both scalar and array numpy types since they could have
# different problems.
array = numpy.array(3)
with pytest.warns(NumpyConversionWarning):
synced_data["foo"] = array
assert isinstance(synced_data["foo"], int)
array = numpy.random.rand(3)
with pytest.warns(NumpyConversionWarning):
synced_data["foo"] = array
assert isinstance(synced_data["foo"], SyncedList)
assert (
json.loads(json.dumps(synced_data, cls=SyncedCollectionJSONEncoder))
== synced_data()
)
{"hexsha": "3df1774ba5f258761c7d756cd9c2c8fecf29c701", "size": 2662, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_synced_collections/test_utils.py", "max_stars_repo_name": "rohanbabbar04/signac", "max_stars_repo_head_hexsha": "dfc28cbfdd11ea2f2e226f87719f323595e0f4ff", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 100, "max_stars_repo_stars_event_min_datetime": "2019-01-31T01:37:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:35:34.000Z", "max_issues_repo_path": "tests/test_synced_collections/test_utils.py", "max_issues_repo_name": "rohanbabbar04/signac", "max_issues_repo_head_hexsha": "dfc28cbfdd11ea2f2e226f87719f323595e0f4ff", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 607, "max_issues_repo_issues_event_min_datetime": "2019-01-31T14:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:51:48.000Z", "max_forks_repo_path": "tests/test_synced_collections/test_utils.py", "max_forks_repo_name": "daico007/signac", "max_forks_repo_head_hexsha": "a20d815bd87af3d8992c71871071024062cada07", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2019-01-31T14:36:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T03:48:32.000Z", "avg_line_length": 34.1282051282, "max_line_length": 84, "alphanum_fraction": 0.6803155522, "include": true, "reason": "import numpy", "num_tokens": 603}
**********************************************************************
**********************************************************************
integer function poslc(ndum,indlc,lcmax,index)
*
* used to find actual number of lens OR cell
* with index 'ndum' ( searches with bisection in ordered (!) vector)
*
* input:
* ndum - index of cell whose actual number is wanted
* indlc - vector with actual numbers of lenses/cells
* lcmax - maximal number of lenses/cells
* index - total number of (ordered) lenses /cells up to now
*
* output:
* position - function value (positive: exists at this position;
* negative: does not exist, but belongs at - position;
* zero: not possible)
*
* called in:
* pos
*
* use of subroutines:
* none
*
* use of functions:
* none
* October 31, 1991; J. Wambsganss
*
implicit none
integer ndum,lcmax,index,lower,upper,middle,debug,indlc(lcmax)
common/test/debug
*
* bisection with bracketing values lower, upper:
*
lower = 0
upper = index + 1
10 if(upper-lower.gt.1)then
middle = (lower + upper)/2
if(ndum.gt.indlc(middle))then
lower = middle
else
upper = middle
endif
goto 10
endif
*
if(upper.le.index.and.ndum.eq.indlc(upper))then
poslc = upper
elseif(lower.gt.0.and.ndum.eq.indlc(lower))then
poslc = lower
else
poslc = - upper
endif
*
return
end
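# The bisection above, mirrored in Python for illustration (1-based Fortran
# indices shifted to 0-based; the sign convention is preserved):
def poslc(ndum, indlc, index):
    lower, upper = 0, index + 1
    while upper - lower > 1:
        middle = (lower + upper) // 2
        if ndum > indlc[middle - 1]:
            lower = middle
        else:
            upper = middle
    if upper <= index and ndum == indlc[upper - 1]:
        return upper    # exists at this (1-based) position
    if lower > 0 and ndum == indlc[lower - 1]:
        return lower
    return -upper       # does not exist; belongs at -result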
{"hexsha": "74ed6cc86e1f4fdaae27a3d9d56cb3cb10d503c3", "size": 3175, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "sntd/microlens/poslc.f", "max_stars_repo_name": "srodney/sntd", "max_stars_repo_head_hexsha": "c091e3ce76fcb6a73d31eb2a719bbb4fc649016c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sntd/microlens/poslc.f", "max_issues_repo_name": "srodney/sntd", "max_issues_repo_head_hexsha": "c091e3ce76fcb6a73d31eb2a719bbb4fc649016c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sntd/microlens/poslc.f", "max_forks_repo_name": "srodney/sntd", "max_forks_repo_head_hexsha": "c091e3ce76fcb6a73d31eb2a719bbb4fc649016c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-08T13:06:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T13:06:50.000Z", "avg_line_length": 55.701754386, "max_line_length": 72, "alphanum_fraction": 0.2658267717, "num_tokens": 443}
from __future__ import annotations
import warnings
import numpy as np
import pandas as pd
import pytest
from pandas.tseries.frequencies import to_offset
import xarray as xr
from xarray import DataArray, Dataset, Variable
from xarray.core.groupby import _consolidate_slices
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
create_test_data,
requires_dask,
requires_flox,
requires_scipy,
)
@pytest.fixture
def dataset():
ds = xr.Dataset(
{
"foo": (("x", "y", "z"), np.random.randn(3, 4, 2)),
"baz": ("x", ["e", "f", "g"]),
},
{"x": ["a", "b", "c"], "y": [1, 2, 3, 4], "z": [1, 2]},
)
ds["boo"] = (("z", "y"), [["f", "g", "h", "j"]] * 2)
return ds
@pytest.fixture
def array(dataset):
return dataset["foo"]
def test_consolidate_slices() -> None:
assert _consolidate_slices([slice(3), slice(3, 5)]) == [slice(5)]
assert _consolidate_slices([slice(2, 3), slice(3, 6)]) == [slice(2, 6)]
assert _consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)]) == [slice(2, 6, 1)]
slices = [slice(2, 3), slice(5, 6)]
assert _consolidate_slices(slices) == slices
with pytest.raises(ValueError):
_consolidate_slices([slice(3), 4])
def test_groupby_dims_property(dataset) -> None:
assert dataset.groupby("x").dims == dataset.isel(x=1).dims
assert dataset.groupby("y").dims == dataset.isel(y=1).dims
stacked = dataset.stack({"xy": ("x", "y")})
assert stacked.groupby("xy").dims == stacked.isel(xy=0).dims
def test_multi_index_groupby_map(dataset) -> None:
# regression test for GH873
ds = dataset.isel(z=1, drop=True)[["foo"]]
expected = 2 * ds
actual = (
ds.stack(space=["x", "y"])
.groupby("space")
.map(lambda x: 2 * x)
.unstack("space")
)
assert_equal(expected, actual)
def test_reduce_numeric_only(dataset) -> None:
gb = dataset.groupby("x", squeeze=False)
with xr.set_options(use_flox=False):
expected = gb.sum()
with xr.set_options(use_flox=True):
actual = gb.sum()
assert_identical(expected, actual)
def test_multi_index_groupby_sum() -> None:
# regression test for GH873
ds = xr.Dataset(
{"foo": (("x", "y", "z"), np.ones((3, 4, 2)))},
{"x": ["a", "b", "c"], "y": [1, 2, 3, 4]},
)
expected = ds.sum("z")
actual = ds.stack(space=["x", "y"]).groupby("space").sum("z").unstack("space")
assert_equal(expected, actual)
def test_groupby_da_datetime() -> None:
# test groupby with a DataArray of dtype datetime for GH1132
# create test data
times = pd.date_range("2000-01-01", periods=4)
foo = xr.DataArray([1, 2, 3, 4], coords=dict(time=times), dims="time")
# create test index
dd = times.to_pydatetime()
reference_dates = [dd[0], dd[2]]
labels = reference_dates[0:1] * 2 + reference_dates[1:2] * 2
ind = xr.DataArray(
labels, coords=dict(time=times), dims="time", name="reference_date"
)
g = foo.groupby(ind)
actual = g.sum(dim="time")
expected = xr.DataArray(
[3, 7], coords=dict(reference_date=reference_dates), dims="reference_date"
)
assert_equal(expected, actual)
def test_groupby_duplicate_coordinate_labels() -> None:
# fix for http://stackoverflow.com/questions/38065129
array = xr.DataArray([1, 2, 3], [("x", [1, 1, 2])])
expected = xr.DataArray([3, 3], [("x", [1, 2])])
actual = array.groupby("x").sum()
assert_equal(expected, actual)
def test_groupby_input_mutation() -> None:
# regression test for GH2153
array = xr.DataArray([1, 2, 3], [("x", [2, 2, 1])])
array_copy = array.copy()
expected = xr.DataArray([3, 3], [("x", [1, 2])])
actual = array.groupby("x").sum()
assert_identical(expected, actual)
assert_identical(array, array_copy) # should not modify inputs
@pytest.mark.parametrize(
"obj",
[
xr.DataArray([1, 2, 3, 4, 5, 6], [("x", [1, 1, 1, 2, 2, 2])]),
xr.Dataset({"foo": ("x", [1, 2, 3, 4, 5, 6])}, {"x": [1, 1, 1, 2, 2, 2]}),
],
)
def test_groupby_map_shrink_groups(obj) -> None:
expected = obj.isel(x=[0, 1, 3, 4])
actual = obj.groupby("x").map(lambda f: f.isel(x=[0, 1]))
assert_identical(expected, actual)
@pytest.mark.parametrize(
"obj",
[
xr.DataArray([1, 2, 3], [("x", [1, 2, 2])]),
xr.Dataset({"foo": ("x", [1, 2, 3])}, {"x": [1, 2, 2]}),
],
)
def test_groupby_map_change_group_size(obj) -> None:
def func(group):
if group.sizes["x"] == 1:
result = group.isel(x=[0, 0])
else:
result = group.isel(x=[0])
return result
expected = obj.isel(x=[0, 0, 1])
actual = obj.groupby("x").map(func)
assert_identical(expected, actual)
def test_da_groupby_map_func_args() -> None:
def func(arg1, arg2, arg3=0):
return arg1 + arg2 + arg3
array = xr.DataArray([1, 1, 1], [("x", [1, 2, 3])])
expected = xr.DataArray([3, 3, 3], [("x", [1, 2, 3])])
actual = array.groupby("x").map(func, args=(1,), arg3=1)
assert_identical(expected, actual)
def test_ds_groupby_map_func_args() -> None:
def func(arg1, arg2, arg3=0):
return arg1 + arg2 + arg3
dataset = xr.Dataset({"foo": ("x", [1, 1, 1])}, {"x": [1, 2, 3]})
expected = xr.Dataset({"foo": ("x", [3, 3, 3])}, {"x": [1, 2, 3]})
actual = dataset.groupby("x").map(func, args=(1,), arg3=1)
assert_identical(expected, actual)
def test_da_groupby_empty() -> None:
empty_array = xr.DataArray([], dims="dim")
with pytest.raises(ValueError):
empty_array.groupby("dim")
def test_da_groupby_quantile() -> None:
array = xr.DataArray(
data=[1, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x"
)
# Scalar quantile
expected = xr.DataArray(
data=[2, 5], coords={"x": [1, 2], "quantile": 0.5}, dims="x"
)
actual = array.groupby("x").quantile(0.5)
assert_identical(expected, actual)
# Vector quantile
expected = xr.DataArray(
data=[[1, 3], [4, 6]],
coords={"x": [1, 2], "quantile": [0, 1]},
dims=("x", "quantile"),
)
actual = array.groupby("x").quantile([0, 1])
assert_identical(expected, actual)
array = xr.DataArray(
data=[np.NaN, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x"
)
for skipna in (True, False, None):
e = [np.NaN, 5] if skipna is False else [2.5, 5]
expected = xr.DataArray(data=e, coords={"x": [1, 2], "quantile": 0.5}, dims="x")
actual = array.groupby("x").quantile(0.5, skipna=skipna)
assert_identical(expected, actual)
# Multiple dimensions
array = xr.DataArray(
data=[[1, 11, 26], [2, 12, 22], [3, 13, 23], [4, 16, 24], [5, 15, 25]],
coords={"x": [1, 1, 1, 2, 2], "y": [0, 0, 1]},
dims=("x", "y"),
)
actual_x = array.groupby("x").quantile(0, dim=...)
expected_x = xr.DataArray(
data=[1, 4], coords={"x": [1, 2], "quantile": 0}, dims="x"
)
assert_identical(expected_x, actual_x)
actual_y = array.groupby("y").quantile(0, dim=...)
expected_y = xr.DataArray(
data=[1, 22], coords={"y": [0, 1], "quantile": 0}, dims="y"
)
assert_identical(expected_y, actual_y)
actual_xx = array.groupby("x").quantile(0)
expected_xx = xr.DataArray(
data=[[1, 11, 22], [4, 15, 24]],
coords={"x": [1, 2], "y": [0, 0, 1], "quantile": 0},
dims=("x", "y"),
)
assert_identical(expected_xx, actual_xx)
actual_yy = array.groupby("y").quantile(0)
expected_yy = xr.DataArray(
data=[[1, 26], [2, 22], [3, 23], [4, 24], [5, 25]],
coords={"x": [1, 1, 1, 2, 2], "y": [0, 1], "quantile": 0},
dims=("x", "y"),
)
assert_identical(expected_yy, actual_yy)
times = pd.date_range("2000-01-01", periods=365)
x = [0, 1]
foo = xr.DataArray(
np.reshape(np.arange(365 * 2), (365, 2)),
coords={"time": times, "x": x},
dims=("time", "x"),
)
g = foo.groupby(foo.time.dt.month)
actual = g.quantile(0, dim=...)
expected = xr.DataArray(
data=[
0.0,
62.0,
120.0,
182.0,
242.0,
304.0,
364.0,
426.0,
488.0,
548.0,
610.0,
670.0,
],
coords={"month": np.arange(1, 13), "quantile": 0},
dims="month",
)
assert_identical(expected, actual)
actual = g.quantile(0, dim="time")[:2]
expected = xr.DataArray(
data=[[0.0, 1], [62.0, 63]],
coords={"month": [1, 2], "x": [0, 1], "quantile": 0},
dims=("month", "x"),
)
assert_identical(expected, actual)
# method keyword
array = xr.DataArray(data=[1, 2, 3, 4], coords={"x": [1, 1, 2, 2]}, dims="x")
expected = xr.DataArray(
data=[1, 3], coords={"x": [1, 2], "quantile": 0.5}, dims="x"
)
actual = array.groupby("x").quantile(0.5, method="lower")
assert_identical(expected, actual)
def test_ds_groupby_quantile() -> None:
ds = xr.Dataset(
data_vars={"a": ("x", [1, 2, 3, 4, 5, 6])}, coords={"x": [1, 1, 1, 2, 2, 2]}
)
# Scalar quantile
expected = xr.Dataset(
data_vars={"a": ("x", [2, 5])}, coords={"quantile": 0.5, "x": [1, 2]}
)
actual = ds.groupby("x").quantile(0.5)
assert_identical(expected, actual)
# Vector quantile
expected = xr.Dataset(
data_vars={"a": (("x", "quantile"), [[1, 3], [4, 6]])},
coords={"x": [1, 2], "quantile": [0, 1]},
)
actual = ds.groupby("x").quantile([0, 1])
assert_identical(expected, actual)
ds = xr.Dataset(
data_vars={"a": ("x", [np.NaN, 2, 3, 4, 5, 6])},
coords={"x": [1, 1, 1, 2, 2, 2]},
)
for skipna in (True, False, None):
e = [np.NaN, 5] if skipna is False else [2.5, 5]
expected = xr.Dataset(
data_vars={"a": ("x", e)}, coords={"quantile": 0.5, "x": [1, 2]}
)
actual = ds.groupby("x").quantile(0.5, skipna=skipna)
assert_identical(expected, actual)
# Multiple dimensions
ds = xr.Dataset(
data_vars={
"a": (
("x", "y"),
[[1, 11, 26], [2, 12, 22], [3, 13, 23], [4, 16, 24], [5, 15, 25]],
)
},
coords={"x": [1, 1, 1, 2, 2], "y": [0, 0, 1]},
)
actual_x = ds.groupby("x").quantile(0, dim=...)
expected_x = xr.Dataset({"a": ("x", [1, 4])}, coords={"x": [1, 2], "quantile": 0})
assert_identical(expected_x, actual_x)
actual_y = ds.groupby("y").quantile(0, dim=...)
expected_y = xr.Dataset({"a": ("y", [1, 22])}, coords={"y": [0, 1], "quantile": 0})
assert_identical(expected_y, actual_y)
actual_xx = ds.groupby("x").quantile(0)
expected_xx = xr.Dataset(
{"a": (("x", "y"), [[1, 11, 22], [4, 15, 24]])},
coords={"x": [1, 2], "y": [0, 0, 1], "quantile": 0},
)
assert_identical(expected_xx, actual_xx)
actual_yy = ds.groupby("y").quantile(0)
expected_yy = xr.Dataset(
{"a": (("x", "y"), [[1, 26], [2, 22], [3, 23], [4, 24], [5, 25]])},
coords={"x": [1, 1, 1, 2, 2], "y": [0, 1], "quantile": 0},
).transpose()
assert_identical(expected_yy, actual_yy)
times = pd.date_range("2000-01-01", periods=365)
x = [0, 1]
foo = xr.Dataset(
{"a": (("time", "x"), np.reshape(np.arange(365 * 2), (365, 2)))},
coords=dict(time=times, x=x),
)
g = foo.groupby(foo.time.dt.month)
actual = g.quantile(0, dim=...)
expected = xr.Dataset(
{
"a": (
"month",
[
0.0,
62.0,
120.0,
182.0,
242.0,
304.0,
364.0,
426.0,
488.0,
548.0,
610.0,
670.0,
],
)
},
coords={"month": np.arange(1, 13), "quantile": 0},
)
assert_identical(expected, actual)
actual = g.quantile(0, dim="time").isel(month=slice(None, 2))
expected = xr.Dataset(
data_vars={"a": (("month", "x"), [[0.0, 1], [62.0, 63]])},
coords={"month": [1, 2], "x": [0, 1], "quantile": 0},
)
assert_identical(expected, actual)
ds = xr.Dataset(data_vars={"a": ("x", [1, 2, 3, 4])}, coords={"x": [1, 1, 2, 2]})
# method keyword
expected = xr.Dataset(
data_vars={"a": ("x", [1, 3])}, coords={"quantile": 0.5, "x": [1, 2]}
)
actual = ds.groupby("x").quantile(0.5, method="lower")
assert_identical(expected, actual)
@pytest.mark.parametrize("as_dataset", [False, True])
def test_groupby_quantile_interpolation_deprecated(as_dataset) -> None:
array = xr.DataArray(data=[1, 2, 3, 4], coords={"x": [1, 1, 2, 2]}, dims="x")
arr: xr.DataArray | xr.Dataset
arr = array.to_dataset(name="name") if as_dataset else array
with pytest.warns(
FutureWarning,
match="`interpolation` argument to quantile was renamed to `method`",
):
actual = arr.quantile(0.5, interpolation="lower")
expected = arr.quantile(0.5, method="lower")
assert_identical(actual, expected)
with warnings.catch_warnings(record=True):
with pytest.raises(TypeError, match="interpolation and method keywords"):
arr.quantile(0.5, method="lower", interpolation="lower")
def test_da_groupby_assign_coords() -> None:
actual = xr.DataArray(
[[3, 4, 5], [6, 7, 8]], dims=["y", "x"], coords={"y": range(2), "x": range(3)}
)
actual1 = actual.groupby("x").assign_coords({"y": [-1, -2]})
actual2 = actual.groupby("x").assign_coords(y=[-1, -2])
expected = xr.DataArray(
[[3, 4, 5], [6, 7, 8]], dims=["y", "x"], coords={"y": [-1, -2], "x": range(3)}
)
assert_identical(expected, actual1)
assert_identical(expected, actual2)
repr_da = xr.DataArray(
np.random.randn(10, 20, 6, 24),
dims=["x", "y", "z", "t"],
coords={
"z": ["a", "b", "c", "a", "b", "c"],
"x": [1, 1, 1, 2, 2, 3, 4, 5, 3, 4],
"t": pd.date_range("2001-01-01", freq="M", periods=24),
"month": ("t", list(range(1, 13)) * 2),
},
)
@pytest.mark.parametrize("dim", ["x", "y", "z", "month"])
@pytest.mark.parametrize("obj", [repr_da, repr_da.to_dataset(name="a")])
def test_groupby_repr(obj, dim) -> None:
actual = repr(obj.groupby(dim))
expected = f"{obj.__class__.__name__}GroupBy"
expected += ", grouped over %r" % dim
expected += "\n%r groups with labels " % (len(np.unique(obj[dim])))
if dim == "x":
expected += "1, 2, 3, 4, 5."
elif dim == "y":
expected += "0, 1, 2, 3, 4, 5, ..., 15, 16, 17, 18, 19."
elif dim == "z":
expected += "'a', 'b', 'c'."
elif dim == "month":
expected += "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12."
assert actual == expected
@pytest.mark.parametrize("obj", [repr_da, repr_da.to_dataset(name="a")])
def test_groupby_repr_datetime(obj) -> None:
actual = repr(obj.groupby("t.month"))
expected = f"{obj.__class__.__name__}GroupBy"
expected += ", grouped over 'month'"
expected += "\n%r groups with labels " % (len(np.unique(obj.t.dt.month)))
expected += "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12."
assert actual == expected
def test_groupby_drops_nans() -> None:
# GH2383
# nan in 2D data variable (requires stacking)
ds = xr.Dataset(
{
"variable": (("lat", "lon", "time"), np.arange(60.0).reshape((4, 3, 5))),
"id": (("lat", "lon"), np.arange(12.0).reshape((4, 3))),
},
coords={"lat": np.arange(4), "lon": np.arange(3), "time": np.arange(5)},
)
ds["id"].values[0, 0] = np.nan
ds["id"].values[3, 0] = np.nan
ds["id"].values[-1, -1] = np.nan
grouped = ds.groupby(ds.id)
# non reduction operation
expected = ds.copy()
expected.variable.values[0, 0, :] = np.nan
expected.variable.values[-1, -1, :] = np.nan
expected.variable.values[3, 0, :] = np.nan
actual = grouped.map(lambda x: x).transpose(*ds.variable.dims)
assert_identical(actual, expected)
# reduction along grouped dimension
actual = grouped.mean()
stacked = ds.stack({"xy": ["lat", "lon"]})
expected = (
stacked.variable.where(stacked.id.notnull())
.rename({"xy": "id"})
.to_dataset()
.reset_index("id", drop=True)
.drop_vars(["lon", "lat"])
.assign(id=stacked.id.values)
.dropna("id")
.transpose(*actual.dims)
)
assert_identical(actual, expected)
# reduction operation along a different dimension
actual = grouped.mean("time")
expected = ds.mean("time").where(ds.id.notnull())
assert_identical(actual, expected)
# NaN in non-dimensional coordinate
array = xr.DataArray([1, 2, 3], [("x", [1, 2, 3])])
array["x1"] = ("x", [1, 1, np.nan])
expected_da = xr.DataArray(3, [("x1", [1])])
actual = array.groupby("x1").sum()
assert_equal(expected_da, actual)
# NaT in non-dimensional coordinate
array["t"] = (
"x",
[
np.datetime64("2001-01-01"),
np.datetime64("2001-01-01"),
np.datetime64("NaT"),
],
)
expected_da = xr.DataArray(3, [("t", [np.datetime64("2001-01-01")])])
actual = array.groupby("t").sum()
assert_equal(expected_da, actual)
# test for repeated coordinate labels
array = xr.DataArray([0, 1, 2, 4, 3, 4], [("x", [np.nan, 1, 1, np.nan, 2, np.nan])])
expected_da = xr.DataArray([3, 3], [("x", [1, 2])])
actual = array.groupby("x").sum()
assert_equal(expected_da, actual)
def test_groupby_grouping_errors() -> None:
dataset = xr.Dataset({"foo": ("x", [1, 1, 1])}, {"x": [1, 2, 3]})
with pytest.raises(
ValueError, match=r"None of the data falls within bins with edges"
):
dataset.groupby_bins("x", bins=[0.1, 0.2, 0.3])
with pytest.raises(
ValueError, match=r"None of the data falls within bins with edges"
):
dataset.to_array().groupby_bins("x", bins=[0.1, 0.2, 0.3])
with pytest.raises(ValueError, match=r"All bin edges are NaN."):
dataset.groupby_bins("x", bins=[np.nan, np.nan, np.nan])
with pytest.raises(ValueError, match=r"All bin edges are NaN."):
dataset.to_array().groupby_bins("x", bins=[np.nan, np.nan, np.nan])
with pytest.raises(ValueError, match=r"Failed to group data."):
dataset.groupby(dataset.foo * np.nan)
with pytest.raises(ValueError, match=r"Failed to group data."):
dataset.to_array().groupby(dataset.foo * np.nan)
def test_groupby_reduce_dimension_error(array) -> None:
grouped = array.groupby("y")
with pytest.raises(ValueError, match=r"cannot reduce over dimensions"):
grouped.mean()
with pytest.raises(ValueError, match=r"cannot reduce over dimensions"):
grouped.mean("huh")
with pytest.raises(ValueError, match=r"cannot reduce over dimensions"):
grouped.mean(("x", "y", "asd"))
grouped = array.groupby("y", squeeze=False)
assert_identical(array, grouped.mean())
assert_identical(array.mean("x"), grouped.reduce(np.mean, "x"))
assert_allclose(array.mean(["x", "z"]), grouped.reduce(np.mean, ["x", "z"]))
def test_groupby_multiple_string_args(array) -> None:
with pytest.raises(TypeError):
array.groupby("x", "y")
def test_groupby_bins_timeseries() -> None:
ds = xr.Dataset()
ds["time"] = xr.DataArray(
pd.date_range("2010-08-01", "2010-08-15", freq="15min"), dims="time"
)
ds["val"] = xr.DataArray(np.ones(ds["time"].shape), dims="time")
time_bins = pd.date_range(start="2010-08-01", end="2010-08-15", freq="24H")
actual = ds.groupby_bins("time", time_bins).sum()
expected = xr.DataArray(
96 * np.ones((14,)),
dims=["time_bins"],
coords={"time_bins": pd.cut(time_bins, time_bins).categories},
).to_dataset(name="val")
assert_identical(actual, expected)
def test_groupby_none_group_name() -> None:
# GH158
# xarray should not fail if a DataArray's name attribute is None
data = np.arange(10) + 10
da = xr.DataArray(data) # da.name = None
key = xr.DataArray(np.floor_divide(data, 2))
mean = da.groupby(key).mean()
assert "group" in mean.dims
def test_groupby_getitem(dataset) -> None:
assert_identical(dataset.sel(x="a"), dataset.groupby("x")["a"])
assert_identical(dataset.sel(z=1), dataset.groupby("z")[1])
assert_identical(dataset.foo.sel(x="a"), dataset.foo.groupby("x")["a"])
assert_identical(dataset.foo.sel(z=1), dataset.foo.groupby("z")[1])
actual = dataset.groupby("boo")["f"].unstack().transpose("x", "y", "z")
expected = dataset.sel(y=[1], z=[1, 2]).transpose("x", "y", "z")
assert_identical(expected, actual)
def test_groupby_dataset() -> None:
data = Dataset(
{"z": (["x", "y"], np.random.randn(3, 5))},
{"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)},
)
groupby = data.groupby("x")
assert len(groupby) == 3
expected_groups = {"a": 0, "b": 1, "c": 2}
assert groupby.groups == expected_groups
expected_items = [
("a", data.isel(x=0)),
("b", data.isel(x=1)),
("c", data.isel(x=2)),
]
for actual, expected in zip(groupby, expected_items):
assert actual[0] == expected[0]
assert_equal(actual[1], expected[1])
def identity(x):
return x
for k in ["x", "c", "y"]:
actual = data.groupby(k, squeeze=False).map(identity)
assert_equal(data, actual)
def test_groupby_dataset_returns_new_type() -> None:
data = Dataset({"z": (["x", "y"], np.random.randn(3, 5))})
actual = data.groupby("x").map(lambda ds: ds["z"])
expected = data["z"]
assert_identical(expected, actual)
actual = data["z"].groupby("x").map(lambda x: x.to_dataset())
expected_ds = data
assert_identical(expected_ds, actual)
def test_groupby_dataset_iter() -> None:
data = create_test_data()
for n, (t, sub) in enumerate(list(data.groupby("dim1"))[:3]):
assert data["dim1"][n] == t
assert_equal(data["var1"][n], sub["var1"])
assert_equal(data["var2"][n], sub["var2"])
assert_equal(data["var3"][:, n], sub["var3"])
def test_groupby_dataset_errors() -> None:
data = create_test_data()
with pytest.raises(TypeError, match=r"`group` must be"):
data.groupby(np.arange(10))
with pytest.raises(ValueError, match=r"length does not match"):
data.groupby(data["dim1"][:3])
with pytest.raises(TypeError, match=r"`group` must be"):
data.groupby(data.coords["dim1"].to_index())
def test_groupby_dataset_reduce() -> None:
data = Dataset(
{
"xy": (["x", "y"], np.random.randn(3, 4)),
"xonly": ("x", np.random.randn(3)),
"yonly": ("y", np.random.randn(4)),
"letters": ("y", ["a", "a", "b", "b"]),
}
)
expected = data.mean("y")
expected["yonly"] = expected["yonly"].variable.set_dims({"x": 3})
actual = data.groupby("x").mean(...)
assert_allclose(expected, actual)
actual = data.groupby("x").mean("y")
assert_allclose(expected, actual)
letters = data["letters"]
expected = Dataset(
{
"xy": data["xy"].groupby(letters).mean(...),
"xonly": (data["xonly"].mean().variable.set_dims({"letters": 2})),
"yonly": data["yonly"].groupby(letters).mean(),
}
)
actual = data.groupby("letters").mean(...)
assert_allclose(expected, actual)
@pytest.mark.parametrize("squeeze", [True, False])
def test_groupby_dataset_math(squeeze) -> None:
def reorder_dims(x):
return x.transpose("dim1", "dim2", "dim3", "time")
ds = create_test_data()
ds["dim1"] = ds["dim1"]
grouped = ds.groupby("dim1", squeeze=squeeze)
expected = reorder_dims(ds + ds.coords["dim1"])
actual = grouped + ds.coords["dim1"]
assert_identical(expected, reorder_dims(actual))
actual = ds.coords["dim1"] + grouped
assert_identical(expected, reorder_dims(actual))
ds2 = 2 * ds
expected = reorder_dims(ds + ds2)
actual = grouped + ds2
assert_identical(expected, reorder_dims(actual))
actual = ds2 + grouped
assert_identical(expected, reorder_dims(actual))
def test_groupby_math_more() -> None:
ds = create_test_data()
grouped = ds.groupby("numbers")
zeros = DataArray([0, 0, 0, 0], [("numbers", range(4))])
expected = (ds + Variable("dim3", np.zeros(10))).transpose(
"dim3", "dim1", "dim2", "time"
)
actual = grouped + zeros
assert_equal(expected, actual)
actual = zeros + grouped
assert_equal(expected, actual)
with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
grouped + ds
with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
ds + grouped
with pytest.raises(TypeError, match=r"only support binary ops"):
grouped + 1
with pytest.raises(TypeError, match=r"only support binary ops"):
grouped + grouped
with pytest.raises(TypeError, match=r"in-place operations"):
ds += grouped
ds = Dataset(
{
"x": ("time", np.arange(100)),
"time": pd.date_range("2000-01-01", periods=100),
}
)
with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
ds + ds.groupby("time.month")
@pytest.mark.parametrize("indexed_coord", [True, False])
def test_groupby_bins_math(indexed_coord) -> None:
N = 7
da = DataArray(np.random.random((N, N)), dims=("x", "y"))
if indexed_coord:
da["x"] = np.arange(N)
da["y"] = np.arange(N)
g = da.groupby_bins("x", np.arange(0, N + 1, 3))
mean = g.mean()
expected = da.isel(x=slice(1, None)) - mean.isel(x_bins=("x", [0, 0, 0, 1, 1, 1]))
actual = g - mean
assert_identical(expected, actual)
def test_groupby_math_nD_group() -> None:
N = 40
da = DataArray(
np.random.random((N, N)),
dims=("x", "y"),
coords={
"labels": (
"x",
np.repeat(["a", "b", "c", "d", "e", "f", "g", "h"], repeats=N // 8),
),
},
)
da["labels2d"] = xr.broadcast(da.labels, da)[0]
g = da.groupby("labels2d")
mean = g.mean()
expected = da - mean.sel(labels2d=da.labels2d)
expected["labels"] = expected.labels.broadcast_like(expected.labels2d)
actual = g - mean
assert_identical(expected, actual)
da["num"] = (
"x",
np.repeat([1, 2, 3, 4, 5, 6, 7, 8], repeats=N // 8),
)
da["num2d"] = xr.broadcast(da.num, da)[0]
g = da.groupby_bins("num2d", bins=[0, 4, 6])
mean = g.mean()
idxr = np.digitize(da.num2d, bins=(0, 4, 6), right=True)[:30, :] - 1
expanded_mean = mean.drop_vars("num2d_bins").isel(num2d_bins=(("x", "y"), idxr))
expected = da.isel(x=slice(30)) - expanded_mean
expected["labels"] = expected.labels.broadcast_like(expected.labels2d)
expected["num"] = expected.num.broadcast_like(expected.num2d)
expected["num2d_bins"] = (("x", "y"), mean.num2d_bins.data[idxr])
actual = g - mean
assert_identical(expected, actual)
def test_groupby_dataset_math_virtual() -> None:
ds = Dataset({"x": ("t", [1, 2, 3])}, {"t": pd.date_range("20100101", periods=3)})
grouped = ds.groupby("t.day")
actual = grouped - grouped.mean(...)
expected = Dataset({"x": ("t", [0, 0, 0])}, ds[["t", "t.day"]])
assert_identical(actual, expected)
def test_groupby_math_dim_order() -> None:
da = DataArray(
np.ones((10, 10, 12)),
dims=("x", "y", "time"),
coords={"time": pd.date_range("2001-01-01", periods=12, freq="6H")},
)
grouped = da.groupby("time.day")
result = grouped - grouped.mean()
assert result.dims == da.dims
def test_groupby_dataset_nan() -> None:
# nan should be excluded from groupby
ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, {"bar": ("x", [1, 1, 2, np.nan])})
actual = ds.groupby("bar").mean(...)
expected = Dataset({"foo": ("bar", [1.5, 3]), "bar": [1, 2]})
assert_identical(actual, expected)
def test_groupby_dataset_order() -> None:
# groupby should preserve variable order
ds = Dataset()
for vn in ["a", "b", "c"]:
ds[vn] = DataArray(np.arange(10), dims=["t"])
data_vars_ref = list(ds.data_vars.keys())
ds = ds.groupby("t").mean(...)
data_vars = list(ds.data_vars.keys())
assert data_vars == data_vars_ref
# coords are now at the end of the list, so the test below fails
# all_vars = list(ds.variables.keys())
# all_vars_ref = list(ds.variables.keys())
# .assertEqual(all_vars, all_vars_ref)
def test_groupby_dataset_fillna():
ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]})
expected = Dataset({"a": ("x", range(4))}, {"x": [0, 1, 2, 3]})
for target in [ds, expected]:
target.coords["b"] = ("x", [0, 0, 1, 1])
actual = ds.groupby("b").fillna(DataArray([0, 2], dims="b"))
assert_identical(expected, actual)
actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])}))
assert_identical(expected, actual)
# attrs with groupby
ds.attrs["attr"] = "ds"
ds.a.attrs["attr"] = "da"
actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])}))
assert actual.attrs == ds.attrs
assert actual.a.name == "a"
assert actual.a.attrs == ds.a.attrs
def test_groupby_dataset_where():
# groupby
ds = Dataset({"a": ("x", range(5))}, {"c": ("x", [0, 0, 1, 1, 1])})
cond = Dataset({"a": ("c", [True, False])})
expected = ds.copy(deep=True)
expected["a"].values = [0, 1] + [np.nan] * 3
actual = ds.groupby("c").where(cond)
assert_identical(expected, actual)
# attrs with groupby
ds.attrs["attr"] = "ds"
ds.a.attrs["attr"] = "da"
actual = ds.groupby("c").where(cond)
assert actual.attrs == ds.attrs
assert actual.a.name == "a"
assert actual.a.attrs == ds.a.attrs
def test_groupby_dataset_assign():
ds = Dataset({"a": ("x", range(3))}, {"b": ("x", ["A"] * 2 + ["B"])})
actual = ds.groupby("b").assign(c=lambda ds: 2 * ds.a)
expected = ds.merge({"c": ("x", [0, 2, 4])})
assert_identical(actual, expected)
actual = ds.groupby("b").assign(c=lambda ds: ds.a.sum())
expected = ds.merge({"c": ("x", [1, 1, 2])})
assert_identical(actual, expected)
actual = ds.groupby("b").assign_coords(c=lambda ds: ds.a.sum())
expected = expected.set_coords("c")
assert_identical(actual, expected)
def test_groupby_dataset_map_dataarray_func():
# regression GH6379
ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, coords={"x": [0, 0, 1, 1]})
actual = ds.groupby("x").map(lambda grp: grp.foo.mean())
expected = DataArray([1.5, 3.5], coords={"x": [0, 1]}, dims="x", name="foo")
assert_identical(actual, expected)
def test_groupby_dataarray_map_dataset_func():
# regression GH6379
da = DataArray([1, 2, 3, 4], coords={"x": [0, 0, 1, 1]}, dims="x", name="foo")
actual = da.groupby("x").map(lambda grp: grp.mean().to_dataset())
expected = xr.Dataset({"foo": ("x", [1.5, 3.5])}, coords={"x": [0, 1]})
assert_identical(actual, expected)
@requires_flox
@pytest.mark.parametrize("kwargs", [{"method": "map-reduce"}, {"engine": "numpy"}])
def test_groupby_flox_kwargs(kwargs):
ds = Dataset({"a": ("x", range(5))}, {"c": ("x", [0, 0, 1, 1, 1])})
with xr.set_options(use_flox=False):
expected = ds.groupby("c").mean()
with xr.set_options(use_flox=True):
actual = ds.groupby("c").mean(**kwargs)
assert_identical(expected, actual)
class TestDataArrayGroupBy:
@pytest.fixture(autouse=True)
def setup(self):
self.attrs = {"attr1": "value1", "attr2": 2929}
self.x = np.random.random((10, 20))
self.v = Variable(["x", "y"], self.x)
self.va = Variable(["x", "y"], self.x, self.attrs)
self.ds = Dataset({"foo": self.v})
self.dv = self.ds["foo"]
self.mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=("level_1", "level_2")
)
self.mda = DataArray([0, 1, 2, 3], coords={"x": self.mindex}, dims="x")
self.da = self.dv.copy()
self.da.coords["abc"] = ("y", np.array(["a"] * 9 + ["c"] + ["b"] * 10))
self.da.coords["y"] = 20 + 100 * self.da["y"]
def test_stack_groupby_unsorted_coord(self):
data = [[0, 1], [2, 3]]
data_flat = [0, 1, 2, 3]
dims = ["x", "y"]
y_vals = [2, 3]
arr = xr.DataArray(data, dims=dims, coords={"y": y_vals})
actual1 = arr.stack(z=dims).groupby("z").first()
midx1 = pd.MultiIndex.from_product([[0, 1], [2, 3]], names=dims)
expected1 = xr.DataArray(data_flat, dims=["z"], coords={"z": midx1})
assert_equal(actual1, expected1)
# GH: 3287. Note that y coord values are not in sorted order.
arr = xr.DataArray(data, dims=dims, coords={"y": y_vals[::-1]})
actual2 = arr.stack(z=dims).groupby("z").first()
midx2 = pd.MultiIndex.from_product([[0, 1], [3, 2]], names=dims)
expected2 = xr.DataArray(data_flat, dims=["z"], coords={"z": midx2})
assert_equal(actual2, expected2)
def test_groupby_iter(self):
for ((act_x, act_dv), (exp_x, exp_ds)) in zip(
self.dv.groupby("y"), self.ds.groupby("y")
):
assert exp_x == act_x
assert_identical(exp_ds["foo"], act_dv)
for ((_, exp_dv), act_dv) in zip(self.dv.groupby("x"), self.dv):
assert_identical(exp_dv, act_dv)
def test_groupby_properties(self):
grouped = self.da.groupby("abc")
expected_groups = {"a": range(0, 9), "c": [9], "b": range(10, 20)}
assert expected_groups.keys() == grouped.groups.keys()
for key in expected_groups:
assert_array_equal(expected_groups[key], grouped.groups[key])
assert 3 == len(grouped)
@pytest.mark.parametrize(
"by, use_da", [("x", False), ("y", False), ("y", True), ("abc", False)]
)
@pytest.mark.parametrize("shortcut", [True, False])
@pytest.mark.parametrize("squeeze", [True, False])
def test_groupby_map_identity(self, by, use_da, shortcut, squeeze) -> None:
expected = self.da
if use_da:
by = expected.coords[by]
def identity(x):
return x
grouped = expected.groupby(by, squeeze=squeeze)
actual = grouped.map(identity, shortcut=shortcut)
assert_identical(expected, actual)
def test_groupby_sum(self):
array = self.da
grouped = array.groupby("abc")
expected_sum_all = Dataset(
{
"foo": Variable(
["abc"],
np.array(
[
self.x[:, :9].sum(),
self.x[:, 10:].sum(),
self.x[:, 9:10].sum(),
]
).T,
),
"abc": Variable(["abc"], np.array(["a", "b", "c"])),
}
)["foo"]
assert_allclose(expected_sum_all, grouped.reduce(np.sum, dim=...))
assert_allclose(expected_sum_all, grouped.sum(...))
expected = DataArray(
[
array["y"].values[idx].sum()
for idx in [slice(9), slice(10, None), slice(9, 10)]
],
[["a", "b", "c"]],
["abc"],
)
actual = array["y"].groupby("abc").map(np.sum)
assert_allclose(expected, actual)
actual = array["y"].groupby("abc").sum(...)
assert_allclose(expected, actual)
expected_sum_axis1 = Dataset(
{
"foo": (
["x", "abc"],
np.array(
[
self.x[:, :9].sum(1),
self.x[:, 10:].sum(1),
self.x[:, 9:10].sum(1),
]
).T,
),
"abc": Variable(["abc"], np.array(["a", "b", "c"])),
}
)["foo"]
assert_allclose(expected_sum_axis1, grouped.reduce(np.sum, "y"))
assert_allclose(expected_sum_axis1, grouped.sum("y"))
@pytest.mark.parametrize("method", ["sum", "mean", "median"])
def test_groupby_reductions(self, method):
array = self.da
grouped = array.groupby("abc")
reduction = getattr(np, method)
expected = Dataset(
{
"foo": Variable(
["x", "abc"],
np.array(
[
reduction(self.x[:, :9], axis=-1),
reduction(self.x[:, 10:], axis=-1),
reduction(self.x[:, 9:10], axis=-1),
]
).T,
),
"abc": Variable(["abc"], np.array(["a", "b", "c"])),
}
)["foo"]
with xr.set_options(use_flox=False):
actual_legacy = getattr(grouped, method)(dim="y")
with xr.set_options(use_flox=True):
actual_npg = getattr(grouped, method)(dim="y")
assert_allclose(expected, actual_legacy)
assert_allclose(expected, actual_npg)
def test_groupby_count(self):
array = DataArray(
[0, 0, np.nan, np.nan, 0, 0],
coords={"cat": ("x", ["a", "b", "b", "c", "c", "c"])},
dims="x",
)
actual = array.groupby("cat").count()
expected = DataArray([1, 1, 2], coords=[("cat", ["a", "b", "c"])])
assert_identical(actual, expected)
@pytest.mark.skip("needs to be fixed for shortcut=False, keep_attrs=False")
def test_groupby_reduce_attrs(self):
array = self.da
array.attrs["foo"] = "bar"
for shortcut in [True, False]:
for keep_attrs in [True, False]:
print(f"shortcut={shortcut}, keep_attrs={keep_attrs}")
actual = array.groupby("abc").reduce(
np.mean, keep_attrs=keep_attrs, shortcut=shortcut
)
expected = array.groupby("abc").mean()
if keep_attrs:
expected.attrs["foo"] = "bar"
assert_identical(expected, actual)
def test_groupby_map_center(self):
def center(x):
return x - np.mean(x)
array = self.da
grouped = array.groupby("abc")
expected_ds = array.to_dataset()
exp_data = np.hstack(
[center(self.x[:, :9]), center(self.x[:, 9:10]), center(self.x[:, 10:])]
)
expected_ds["foo"] = (["x", "y"], exp_data)
expected_centered = expected_ds["foo"]
assert_allclose(expected_centered, grouped.map(center))
def test_groupby_map_ndarray(self):
# regression test for #326
array = self.da
grouped = array.groupby("abc")
actual = grouped.map(np.asarray)
assert_equal(array, actual)
def test_groupby_map_changes_metadata(self):
def change_metadata(x):
x.coords["x"] = x.coords["x"] * 2
x.attrs["fruit"] = "lemon"
return x
array = self.da
grouped = array.groupby("abc")
actual = grouped.map(change_metadata)
expected = array.copy()
expected = change_metadata(expected)
assert_equal(expected, actual)
@pytest.mark.parametrize("squeeze", [True, False])
def test_groupby_math_squeeze(self, squeeze):
array = self.da
grouped = array.groupby("x", squeeze=squeeze)
expected = array + array.coords["x"]
actual = grouped + array.coords["x"]
assert_identical(expected, actual)
actual = array.coords["x"] + grouped
assert_identical(expected, actual)
ds = array.coords["x"].to_dataset(name="X")
expected = array + ds
actual = grouped + ds
assert_identical(expected, actual)
actual = ds + grouped
assert_identical(expected, actual)
def test_groupby_math(self):
array = self.da
grouped = array.groupby("abc")
expected_agg = (grouped.mean(...) - np.arange(3)).rename(None)
actual = grouped - DataArray(range(3), [("abc", ["a", "b", "c"])])
actual_agg = actual.groupby("abc").mean(...)
assert_allclose(expected_agg, actual_agg)
with pytest.raises(TypeError, match=r"only support binary ops"):
grouped + 1
with pytest.raises(TypeError, match=r"only support binary ops"):
grouped + grouped
with pytest.raises(TypeError, match=r"in-place operations"):
array += grouped
def test_groupby_math_not_aligned(self):
array = DataArray(
range(4), {"b": ("x", [0, 0, 1, 1]), "x": [0, 1, 2, 3]}, dims="x"
)
other = DataArray([10], coords={"b": [0]}, dims="b")
actual = array.groupby("b") + other
expected = DataArray([10, 11, np.nan, np.nan], array.coords)
assert_identical(expected, actual)
other = DataArray([10], coords={"c": 123, "b": [0]}, dims="b")
actual = array.groupby("b") + other
expected.coords["c"] = (["x"], [123] * 2 + [np.nan] * 2)
assert_identical(expected, actual)
other = Dataset({"a": ("b", [10])}, {"b": [0]})
actual = array.groupby("b") + other
expected = Dataset({"a": ("x", [10, 11, np.nan, np.nan])}, array.coords)
assert_identical(expected, actual)
def test_groupby_restore_dim_order(self):
array = DataArray(
np.random.randn(5, 3),
coords={"a": ("x", range(5)), "b": ("y", range(3))},
dims=["x", "y"],
)
for by, expected_dims in [
("x", ("x", "y")),
("y", ("x", "y")),
("a", ("a", "y")),
("b", ("x", "b")),
]:
result = array.groupby(by).map(lambda x: x.squeeze())
assert result.dims == expected_dims
def test_groupby_restore_coord_dims(self):
array = DataArray(
np.random.randn(5, 3),
coords={
"a": ("x", range(5)),
"b": ("y", range(3)),
"c": (("x", "y"), np.random.randn(5, 3)),
},
dims=["x", "y"],
)
for by, expected_dims in [
("x", ("x", "y")),
("y", ("x", "y")),
("a", ("a", "y")),
("b", ("x", "b")),
]:
result = array.groupby(by, restore_coord_dims=True).map(
lambda x: x.squeeze()
)["c"]
assert result.dims == expected_dims
def test_groupby_first_and_last(self):
array = DataArray([1, 2, 3, 4, 5], dims="x")
by = DataArray(["a"] * 2 + ["b"] * 3, dims="x", name="ab")
expected = DataArray([1, 3], [("ab", ["a", "b"])])
actual = array.groupby(by).first()
assert_identical(expected, actual)
expected = DataArray([2, 5], [("ab", ["a", "b"])])
actual = array.groupby(by).last()
assert_identical(expected, actual)
array = DataArray(np.random.randn(5, 3), dims=["x", "y"])
expected = DataArray(array[[0, 2]], {"ab": ["a", "b"]}, ["ab", "y"])
actual = array.groupby(by).first()
assert_identical(expected, actual)
actual = array.groupby("x").first()
expected = array # should be a no-op
assert_identical(expected, actual)
def make_groupby_multidim_example_array(self):
return DataArray(
[[[0, 1], [2, 3]], [[5, 10], [15, 20]]],
coords={
"lon": (["ny", "nx"], [[30, 40], [40, 50]]),
"lat": (["ny", "nx"], [[10, 10], [20, 20]]),
},
dims=["time", "ny", "nx"],
)
def test_groupby_multidim(self):
array = self.make_groupby_multidim_example_array()
for dim, expected_sum in [
("lon", DataArray([5, 28, 23], coords=[("lon", [30.0, 40.0, 50.0])])),
("lat", DataArray([16, 40], coords=[("lat", [10.0, 20.0])])),
]:
actual_sum = array.groupby(dim).sum(...)
assert_identical(expected_sum, actual_sum)
def test_groupby_multidim_map(self):
array = self.make_groupby_multidim_example_array()
actual = array.groupby("lon").map(lambda x: x - x.mean())
expected = DataArray(
[[[-2.5, -6.0], [-5.0, -8.5]], [[2.5, 3.0], [8.0, 8.5]]],
coords=array.coords,
dims=array.dims,
)
assert_identical(expected, actual)
def test_groupby_bins(self):
array = DataArray(np.arange(4), dims="dim_0")
# the first value should not be part of any group ("right" binning)
array[0] = 99
# bins follow conventions for pandas.cut
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
bins = [0, 1.5, 5]
bin_coords = pd.cut(array["dim_0"], bins).categories
expected = DataArray(
[1, 5], dims="dim_0_bins", coords={"dim_0_bins": bin_coords}
)
actual = array.groupby_bins("dim_0", bins=bins).sum()
assert_identical(expected, actual)
actual = array.groupby_bins("dim_0", bins=bins).map(lambda x: x.sum())
assert_identical(expected, actual)
# make sure original array dims are unchanged
assert len(array.dim_0) == 4
da = xr.DataArray(np.ones((2, 3, 4)))
bins = [-1, 0, 1, 2]
with xr.set_options(use_flox=False):
actual = da.groupby_bins("dim_0", bins).mean(...)
with xr.set_options(use_flox=True):
expected = da.groupby_bins("dim_0", bins).mean(...)
assert_allclose(actual, expected)
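    # Hedged illustration (not part of the original suite): pandas.cut uses
    # right-closed bins by default, so with bins=[0, 1.5, 5] the value 0 falls
    # outside every interval, which is why array[0] is excluded above.
    def _demo_right_closed_bins(self):
        cut = pd.cut([0, 1, 2, 3], bins=[0, 1.5, 5])
        assert cut.isna()[0]  # 0 does not belong to (0, 1.5]
        assert str(cut[1]) == "(0.0, 1.5]"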
def test_groupby_bins_empty(self):
array = DataArray(np.arange(4), [("x", range(4))])
# one of these bins will be empty
bins = [0, 4, 5]
bin_coords = pd.cut(array["x"], bins).categories
actual = array.groupby_bins("x", bins).sum()
expected = DataArray([6, np.nan], dims="x_bins", coords={"x_bins": bin_coords})
assert_identical(expected, actual)
# make sure original array is unchanged
# (was a problem in earlier versions)
assert len(array.x) == 4
def test_groupby_bins_multidim(self):
array = self.make_groupby_multidim_example_array()
bins = [0, 15, 20]
bin_coords = pd.cut(array["lat"].values.flat, bins).categories
expected = DataArray([16, 40], dims="lat_bins", coords={"lat_bins": bin_coords})
actual = array.groupby_bins("lat", bins).map(lambda x: x.sum())
assert_identical(expected, actual)
# modify the array coordinates to be non-monotonic after unstacking
array["lat"].data = np.array([[10.0, 20.0], [20.0, 10.0]])
expected = DataArray([28, 28], dims="lat_bins", coords={"lat_bins": bin_coords})
actual = array.groupby_bins("lat", bins).map(lambda x: x.sum())
assert_identical(expected, actual)
bins = [-2, -1, 0, 1, 2]
field = DataArray(np.ones((5, 3)), dims=("x", "y"))
by = DataArray(
np.array([[-1.5, -1.5, 0.5, 1.5, 1.5] * 3]).reshape(5, 3), dims=("x", "y")
)
actual = field.groupby_bins(by, bins=bins).count()
bincoord = np.array(
[
pd.Interval(left, right, closed="right")
for left, right in zip(bins[:-1], bins[1:])
],
dtype=object,
)
expected = DataArray(
np.array([6, np.nan, 3, 6]),
dims="group_bins",
coords={"group_bins": bincoord},
)
assert_identical(actual, expected)
def test_groupby_bins_sort(self):
data = xr.DataArray(
np.arange(100), dims="x", coords={"x": np.linspace(-100, 100, num=100)}
)
binned_mean = data.groupby_bins("x", bins=11).mean()
assert binned_mean.to_index().is_monotonic_increasing
with xr.set_options(use_flox=True):
actual = data.groupby_bins("x", bins=11).count()
with xr.set_options(use_flox=False):
expected = data.groupby_bins("x", bins=11).count()
assert_identical(actual, expected)
def test_groupby_assign_coords(self):
array = DataArray([1, 2, 3, 4], {"c": ("x", [0, 0, 1, 1])}, dims="x")
actual = array.groupby("c").assign_coords(d=lambda a: a.mean())
expected = array.copy()
expected.coords["d"] = ("x", [1.5, 1.5, 3.5, 3.5])
assert_identical(actual, expected)
def test_groupby_fillna(self):
a = DataArray([np.nan, 1, np.nan, 3], coords={"x": range(4)}, dims="x")
fill_value = DataArray([0, 1], dims="y")
actual = a.fillna(fill_value)
expected = DataArray(
[[0, 1], [1, 1], [0, 1], [3, 3]], coords={"x": range(4)}, dims=("x", "y")
)
assert_identical(expected, actual)
b = DataArray(range(4), coords={"x": range(4)}, dims="x")
expected = b.copy()
for target in [a, expected]:
target.coords["b"] = ("x", [0, 0, 1, 1])
actual = a.groupby("b").fillna(DataArray([0, 2], dims="b"))
assert_identical(expected, actual)
class TestDataArrayResample:
def test_resample(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
array = DataArray(np.arange(10), [("time", times)])
actual = array.resample(time="24H").mean()
expected = DataArray(array.to_series().resample("24H").mean())
assert_identical(expected, actual)
actual = array.resample(time="24H").reduce(np.mean)
assert_identical(expected, actual)
# Our use of `loffset` may change if we align our API with pandas' changes.
# ref https://github.com/pydata/xarray/pull/4537
actual = array.resample(time="24H", loffset="-12H").mean()
expected_ = array.to_series().resample("24H").mean()
expected_.index += to_offset("-12H")
expected = DataArray.from_series(expected_)
assert_identical(actual, expected)
with pytest.raises(ValueError, match=r"index must be monotonic"):
array[[2, 0, 1]].resample(time="1D")
def test_da_resample_func_args(self):
def func(arg1, arg2, arg3=0.0):
return arg1.mean("time") + arg2 + arg3
times = pd.date_range("2000", periods=3, freq="D")
da = xr.DataArray([1.0, 1.0, 1.0], coords=[times], dims=["time"])
expected = xr.DataArray([3.0, 3.0, 3.0], coords=[times], dims=["time"])
actual = da.resample(time="D").map(func, args=(1.0,), arg3=1.0)
assert_identical(actual, expected)
def test_resample_first(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
array = DataArray(np.arange(10), [("time", times)])
actual = array.resample(time="1D").first()
expected = DataArray([0, 4, 8], [("time", times[::4])])
assert_identical(expected, actual)
# verify that labels don't use the first value
actual = array.resample(time="24H").first()
expected = DataArray(array.to_series().resample("24H").first())
assert_identical(expected, actual)
# missing values
array = array.astype(float)
array[:2] = np.nan
actual = array.resample(time="1D").first()
expected = DataArray([2, 4, 8], [("time", times[::4])])
assert_identical(expected, actual)
actual = array.resample(time="1D").first(skipna=False)
expected = DataArray([np.nan, 4, 8], [("time", times[::4])])
assert_identical(expected, actual)
# regression test for http://stackoverflow.com/questions/33158558/
array = Dataset({"time": times})["time"]
actual = array.resample(time="1D").last()
expected_times = pd.to_datetime(
["2000-01-01T18", "2000-01-02T18", "2000-01-03T06"]
)
expected = DataArray(expected_times, [("time", times[::4])], name="time")
assert_identical(expected, actual)
def test_resample_bad_resample_dim(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
array = DataArray(np.arange(10), [("__resample_dim__", times)])
with pytest.raises(ValueError, match=r"Proxy resampling dimension"):
array.resample(**{"__resample_dim__": "1D"}).first()
@requires_scipy
def test_resample_drop_nondim_coords(self):
xs = np.arange(6)
ys = np.arange(3)
times = pd.date_range("2000-01-01", freq="6H", periods=5)
data = np.tile(np.arange(5), (6, 3, 1))
xx, yy = np.meshgrid(xs * 5, ys * 2.5)
tt = np.arange(len(times), dtype=int)
array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
xcoord = DataArray(xx.T, {"x": xs, "y": ys}, ("x", "y"))
ycoord = DataArray(yy.T, {"x": xs, "y": ys}, ("x", "y"))
tcoord = DataArray(tt, {"time": times}, ("time",))
ds = Dataset({"data": array, "xc": xcoord, "yc": ycoord, "tc": tcoord})
ds = ds.set_coords(["xc", "yc", "tc"])
# Select the data now, with the auxiliary coordinates in place
array = ds["data"]
# Re-sample
actual = array.resample(time="12H", restore_coord_dims=True).mean("time")
assert "tc" not in actual.coords
# Up-sample - filling
actual = array.resample(time="1H", restore_coord_dims=True).ffill()
assert "tc" not in actual.coords
# Up-sample - interpolation
actual = array.resample(time="1H", restore_coord_dims=True).interpolate(
"linear"
)
assert "tc" not in actual.coords
def test_resample_keep_attrs(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
array = DataArray(np.ones(10), [("time", times)])
array.attrs["meta"] = "data"
result = array.resample(time="1D").mean(keep_attrs=True)
expected = DataArray([1, 1, 1], [("time", times[::4])], attrs=array.attrs)
assert_identical(result, expected)
with pytest.warns(
UserWarning, match="Passing ``keep_attrs`` to ``resample`` has no effect."
):
array.resample(time="1D", keep_attrs=True)
def test_resample_skipna(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
array = DataArray(np.ones(10), [("time", times)])
array[1] = np.nan
result = array.resample(time="1D").mean(skipna=False)
expected = DataArray([np.nan, 1, 1], [("time", times[::4])])
assert_identical(result, expected)
def test_upsample(self):
times = pd.date_range("2000-01-01", freq="6H", periods=5)
array = DataArray(np.arange(5), [("time", times)])
# Forward-fill
actual = array.resample(time="3H").ffill()
expected = DataArray(array.to_series().resample("3H").ffill())
assert_identical(expected, actual)
# Backward-fill
actual = array.resample(time="3H").bfill()
expected = DataArray(array.to_series().resample("3H").bfill())
assert_identical(expected, actual)
# As frequency
actual = array.resample(time="3H").asfreq()
expected = DataArray(array.to_series().resample("3H").asfreq())
assert_identical(expected, actual)
# Pad
actual = array.resample(time="3H").pad()
expected = DataArray(array.to_series().resample("3H").ffill())
assert_identical(expected, actual)
# Nearest
rs = array.resample(time="3H")
actual = rs.nearest()
new_times = rs._full_index
expected = DataArray(array.reindex(time=new_times, method="nearest"))
assert_identical(expected, actual)
def test_upsample_nd(self):
# Same as before, but now we try on multi-dimensional DataArrays.
xs = np.arange(6)
ys = np.arange(3)
times = pd.date_range("2000-01-01", freq="6H", periods=5)
data = np.tile(np.arange(5), (6, 3, 1))
array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
# Forward-fill
actual = array.resample(time="3H").ffill()
expected_data = np.repeat(data, 2, axis=-1)
expected_times = times.to_series().resample("3H").asfreq().index
expected_data = expected_data[..., : len(expected_times)]
expected = DataArray(
expected_data,
{"time": expected_times, "x": xs, "y": ys},
("x", "y", "time"),
)
assert_identical(expected, actual)
# Backward-fill
        actual = array.resample(time="3H").bfill()
expected_data = np.repeat(np.flipud(data.T).T, 2, axis=-1)
expected_data = np.flipud(expected_data.T).T
expected_times = times.to_series().resample("3H").asfreq().index
expected_data = expected_data[..., : len(expected_times)]
expected = DataArray(
expected_data,
{"time": expected_times, "x": xs, "y": ys},
("x", "y", "time"),
)
assert_identical(expected, actual)
# As frequency
actual = array.resample(time="3H").asfreq()
expected_data = np.repeat(data, 2, axis=-1).astype(float)[..., :-1]
expected_data[..., 1::2] = np.nan
expected_times = times.to_series().resample("3H").asfreq().index
expected = DataArray(
expected_data,
{"time": expected_times, "x": xs, "y": ys},
("x", "y", "time"),
)
assert_identical(expected, actual)
# Pad
actual = array.resample(time="3H").pad()
expected_data = np.repeat(data, 2, axis=-1)
expected_data[..., 1::2] = expected_data[..., ::2]
expected_data = expected_data[..., :-1]
expected_times = times.to_series().resample("3H").asfreq().index
expected = DataArray(
expected_data,
{"time": expected_times, "x": xs, "y": ys},
("x", "y", "time"),
)
assert_identical(expected, actual)
def test_upsample_tolerance(self):
# Test tolerance keyword for upsample methods bfill, pad, nearest
times = pd.date_range("2000-01-01", freq="1D", periods=2)
times_upsampled = pd.date_range("2000-01-01", freq="6H", periods=5)
array = DataArray(np.arange(2), [("time", times)])
# Forward fill
actual = array.resample(time="6H").ffill(tolerance="12H")
expected = DataArray([0.0, 0.0, 0.0, np.nan, 1.0], [("time", times_upsampled)])
assert_identical(expected, actual)
# Backward fill
actual = array.resample(time="6H").bfill(tolerance="12H")
expected = DataArray([0.0, np.nan, 1.0, 1.0, 1.0], [("time", times_upsampled)])
assert_identical(expected, actual)
# Nearest
actual = array.resample(time="6H").nearest(tolerance="6H")
expected = DataArray([0, 0, np.nan, 1, 1], [("time", times_upsampled)])
assert_identical(expected, actual)
@requires_scipy
def test_upsample_interpolate(self):
from scipy.interpolate import interp1d
xs = np.arange(6)
ys = np.arange(3)
times = pd.date_range("2000-01-01", freq="6H", periods=5)
z = np.arange(5) ** 2
data = np.tile(z, (6, 3, 1))
array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
expected_times = times.to_series().resample("1H").asfreq().index
# Split the times into equal sub-intervals to simulate the 6 hour
# to 1 hour up-sampling
new_times_idx = np.linspace(0, len(times) - 1, len(times) * 5)
for kind in ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]:
actual = array.resample(time="1H").interpolate(kind)
f = interp1d(
np.arange(len(times)),
data,
kind=kind,
axis=-1,
bounds_error=True,
assume_sorted=True,
)
expected_data = f(new_times_idx)
expected = DataArray(
expected_data,
{"time": expected_times, "x": xs, "y": ys},
("x", "y", "time"),
)
            # Use assert_allclose because there are small floating point
            # differences between how we upsample the time series and the
            # integer-indexed interpolation computed here
assert_allclose(expected, actual, rtol=1e-16)
@requires_scipy
def test_upsample_interpolate_bug_2197(self):
dates = pd.date_range("2007-02-01", "2007-03-01", freq="D")
da = xr.DataArray(np.arange(len(dates)), [("time", dates)])
result = da.resample(time="M").interpolate("linear")
expected_times = np.array(
[np.datetime64("2007-02-28"), np.datetime64("2007-03-31")]
)
expected = xr.DataArray([27.0, np.nan], [("time", expected_times)])
assert_equal(result, expected)
@requires_scipy
def test_upsample_interpolate_regression_1605(self):
dates = pd.date_range("2016-01-01", "2016-03-31", freq="1D")
expected = xr.DataArray(
np.random.random((len(dates), 2, 3)),
dims=("time", "x", "y"),
coords={"time": dates},
)
actual = expected.resample(time="1D").interpolate("linear")
assert_allclose(actual, expected, rtol=1e-16)
@requires_dask
@requires_scipy
@pytest.mark.parametrize("chunked_time", [True, False])
def test_upsample_interpolate_dask(self, chunked_time):
from scipy.interpolate import interp1d
xs = np.arange(6)
ys = np.arange(3)
times = pd.date_range("2000-01-01", freq="6H", periods=5)
z = np.arange(5) ** 2
data = np.tile(z, (6, 3, 1))
array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
chunks = {"x": 2, "y": 1}
if chunked_time:
chunks["time"] = 3
expected_times = times.to_series().resample("1H").asfreq().index
# Split the times into equal sub-intervals to simulate the 6 hour
# to 1 hour up-sampling
new_times_idx = np.linspace(0, len(times) - 1, len(times) * 5)
for kind in ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]:
actual = array.chunk(chunks).resample(time="1H").interpolate(kind)
actual = actual.compute()
f = interp1d(
np.arange(len(times)),
data,
kind=kind,
axis=-1,
bounds_error=True,
assume_sorted=True,
)
expected_data = f(new_times_idx)
expected = DataArray(
expected_data,
{"time": expected_times, "x": xs, "y": ys},
("x", "y", "time"),
)
            # Use assert_allclose because there are small floating point
            # differences between how we upsample the time series and the
            # integer-indexed interpolation computed here
assert_allclose(expected, actual, rtol=1e-16)
class TestDatasetResample:
def test_resample_and_first(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
actual = ds.resample(time="1D").first(keep_attrs=True)
expected = ds.isel(time=[0, 4, 8])
assert_identical(expected, actual)
# upsampling
expected_time = pd.date_range("2000-01-01", freq="3H", periods=19)
expected = ds.reindex(time=expected_time)
actual = ds.resample(time="3H")
for how in ["mean", "sum", "first", "last"]:
method = getattr(actual, how)
result = method()
assert_equal(expected, result)
for method in [np.mean]:
result = actual.reduce(method)
assert_equal(expected, result)
def test_resample_min_count(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
# inject nan
ds["foo"] = xr.where(ds["foo"] > 2.0, np.nan, ds["foo"])
actual = ds.resample(time="1D").sum(min_count=1)
expected = xr.concat(
[
ds.isel(time=slice(i * 4, (i + 1) * 4)).sum("time", min_count=1)
for i in range(3)
],
dim=actual["time"],
)
assert_allclose(expected, actual)
def test_resample_by_mean_with_keep_attrs(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
ds.attrs["dsmeta"] = "dsdata"
resampled_ds = ds.resample(time="1D").mean(keep_attrs=True)
actual = resampled_ds["bar"].attrs
expected = ds["bar"].attrs
assert expected == actual
actual = resampled_ds.attrs
expected = ds.attrs
assert expected == actual
with pytest.warns(
UserWarning, match="Passing ``keep_attrs`` to ``resample`` has no effect."
):
ds.resample(time="1D", keep_attrs=True)
def test_resample_loffset(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
ds.attrs["dsmeta"] = "dsdata"
# Our use of `loffset` may change if we align our API with pandas' changes.
# ref https://github.com/pydata/xarray/pull/4537
actual = ds.resample(time="24H", loffset="-12H").mean().bar
expected_ = ds.bar.to_series().resample("24H").mean()
expected_.index += to_offset("-12H")
expected = DataArray.from_series(expected_)
assert_allclose(actual, expected)
def test_resample_by_mean_discarding_attrs(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
ds.attrs["dsmeta"] = "dsdata"
resampled_ds = ds.resample(time="1D").mean(keep_attrs=False)
assert resampled_ds["bar"].attrs == {}
assert resampled_ds.attrs == {}
def test_resample_by_last_discarding_attrs(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
ds.attrs["dsmeta"] = "dsdata"
resampled_ds = ds.resample(time="1D").last(keep_attrs=False)
assert resampled_ds["bar"].attrs == {}
assert resampled_ds.attrs == {}
@requires_scipy
def test_resample_drop_nondim_coords(self):
xs = np.arange(6)
ys = np.arange(3)
times = pd.date_range("2000-01-01", freq="6H", periods=5)
data = np.tile(np.arange(5), (6, 3, 1))
xx, yy = np.meshgrid(xs * 5, ys * 2.5)
tt = np.arange(len(times), dtype=int)
array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
xcoord = DataArray(xx.T, {"x": xs, "y": ys}, ("x", "y"))
ycoord = DataArray(yy.T, {"x": xs, "y": ys}, ("x", "y"))
tcoord = DataArray(tt, {"time": times}, ("time",))
ds = Dataset({"data": array, "xc": xcoord, "yc": ycoord, "tc": tcoord})
ds = ds.set_coords(["xc", "yc", "tc"])
# Re-sample
actual = ds.resample(time="12H").mean("time")
assert "tc" not in actual.coords
# Up-sample - filling
actual = ds.resample(time="1H").ffill()
assert "tc" not in actual.coords
# Up-sample - interpolation
actual = ds.resample(time="1H").interpolate("linear")
assert "tc" not in actual.coords
def test_resample_old_api(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
ds = Dataset(
{
"foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
"bar": ("time", np.random.randn(10), {"meta": "data"}),
"time": times,
}
)
with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
ds.resample("1D", "time")
with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
ds.resample("1D", dim="time", how="mean")
with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
ds.resample("1D", dim="time")
def test_resample_ds_da_are_the_same(self):
time = pd.date_range("2000-01-01", freq="6H", periods=365 * 4)
ds = xr.Dataset(
{
"foo": (("time", "x"), np.random.randn(365 * 4, 5)),
"time": time,
"x": np.arange(5),
}
)
assert_allclose(
ds.resample(time="M").mean()["foo"], ds.foo.resample(time="M").mean()
)
def test_ds_resample_apply_func_args(self):
def func(arg1, arg2, arg3=0.0):
return arg1.mean("time") + arg2 + arg3
times = pd.date_range("2000", freq="D", periods=3)
ds = xr.Dataset({"foo": ("time", [1.0, 1.0, 1.0]), "time": times})
expected = xr.Dataset({"foo": ("time", [3.0, 3.0, 3.0]), "time": times})
actual = ds.resample(time="D").map(func, args=(1.0,), arg3=1.0)
assert_identical(expected, actual)
|
{"hexsha": "f0b16bc42c76131d820d5092eb512b2f12675d3f", "size": 71007, "ext": "py", "lang": "Python", "max_stars_repo_path": "xarray/tests/test_groupby.py", "max_stars_repo_name": "ianthomas23/xarray", "max_stars_repo_head_hexsha": "aa1d1d19b822897399c8ed2cf346afbac71f45b3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xarray/tests/test_groupby.py", "max_issues_repo_name": "ianthomas23/xarray", "max_issues_repo_head_hexsha": "aa1d1d19b822897399c8ed2cf346afbac71f45b3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xarray/tests/test_groupby.py", "max_forks_repo_name": "ianthomas23/xarray", "max_forks_repo_head_hexsha": "aa1d1d19b822897399c8ed2cf346afbac71f45b3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6281986954, "max_line_length": 88, "alphanum_fraction": 0.5467911614, "include": true, "reason": "import numpy,from scipy", "num_tokens": 20241}
|
from typing import Optional
from copy import copy
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from scipy import stats
from scipy.special import inv_boxcox, boxcox
from statsmodels.tsa.api import STLForecast
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
from fedot.core.data.data import InputData
from fedot.core.log import Log
from fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations import \
ts_to_table, prepare_target
from fedot.core.operations.evaluation. \
operation_implementations.implementation_interfaces import ModelImplementation
from fedot.core.pipelines.ts_wrappers import _update_input, exception_if_not_ts_task
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.utilities.ts_gapfilling import SimpleGapFiller
from sklearn.preprocessing import StandardScaler
class ARIMAImplementation(ModelImplementation):
def __init__(self, log: Log = None, **params):
super().__init__(log)
self.params = params
self.arima = None
self.lambda_value = None
self.scope = None
self.actual_ts_len = None
self.sts = None
def fit(self, input_data):
""" Class fit arima model on data
:param input_data: data with features, target and ids to process
"""
source_ts = np.array(input_data.features)
# Save actual time series length
self.actual_ts_len = len(source_ts)
self.sts = source_ts
        # Box-Cox transformation requires strictly positive values
        min_value = np.min(source_ts)
        if min_value <= 0:
            # Shift the series to positive values
            self.scope = abs(min_value) + 1
            source_ts = source_ts + self.scope
        _, self.lambda_value = stats.boxcox(source_ts)
        transformed_ts = boxcox(source_ts, self.lambda_value)
# Set parameters
p = int(self.params.get('p'))
d = int(self.params.get('d'))
q = int(self.params.get('q'))
params = {'order': (p, d, q)}
self.arima = ARIMA(transformed_ts, **params).fit()
return self.arima
def predict(self, input_data, is_fit_pipeline_stage: bool):
""" Method for time series prediction on forecast length
:param input_data: data with features, target and ids to process
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return output_data: output data with the forecasted time series
"""
input_data = copy(input_data)
parameters = input_data.task.task_params
forecast_length = parameters.forecast_length
old_idx = input_data.idx
target = input_data.target
# For training pipeline get fitted data
if is_fit_pipeline_stage:
fitted_values = self.arima.fittedvalues
fitted_values = self._inverse_boxcox(predicted=fitted_values,
lambda_param=self.lambda_value)
# Undo shift operation
fitted_values = self._inverse_shift(fitted_values)
diff = int(self.actual_ts_len - len(fitted_values))
            # The model may skip the first elements
            if diff != 0:
                # Pad the head of the series with the first fitted value
first_element = fitted_values[0]
first_elements = [first_element] * diff
first_elements.extend(list(fitted_values))
fitted_values = np.array(first_elements)
_, predict = ts_to_table(idx=old_idx,
time_series=fitted_values,
window_size=forecast_length)
new_idx, target_columns = ts_to_table(idx=old_idx,
time_series=target,
window_size=forecast_length)
# Update idx and target
input_data.idx = new_idx
input_data.target = target_columns
# For predict stage we can make prediction
else:
start_id = old_idx[-1] - forecast_length + 1
end_id = old_idx[-1]
predicted = self.arima.predict(start=start_id,
end=end_id)
predicted = self._inverse_boxcox(predicted=predicted,
lambda_param=self.lambda_value)
# Undo shift operation
predict = self._inverse_shift(predicted)
# Convert one-dim array as column
predict = np.array(predict).reshape(1, -1)
new_idx = np.arange(start_id, end_id + 1)
# Update idx
input_data.idx = new_idx
# Update idx and features
output_data = self._convert_to_output(input_data,
predict=predict,
data_type=DataTypesEnum.table)
return output_data
def get_params(self):
return self.params
def _inverse_boxcox(self, predicted, lambda_param):
""" Method apply inverse Box-Cox transformation """
if lambda_param == 0:
return np.exp(predicted)
else:
res = inv_boxcox(predicted, lambda_param)
res = self._filling_gaps(res)
return res
    def _inverse_shift(self, values):
        """ Apply the inverse shift operation """
        if self.scope is not None:
            values = values - self.scope
        return values
@staticmethod
def _filling_gaps(res):
        # Replace NaNs with -100.0, the default gap marker of SimpleGapFiller
        nan_ind = np.argwhere(np.isnan(res))
        res[nan_ind] = -100.0
        # Gaps at the first and last elements are filled with the mean value
        if 0 in nan_ind:
            res[0] = np.mean(res)
        if int(len(res) - 1) in nan_ind:
            res[int(len(res) - 1)] = np.mean(res)
        # Gaps in the middle of the series are filled by linear interpolation
        # (check for the gap marker rather than NaN; the NaNs were replaced above)
        if len(np.ravel(np.argwhere(res == -100.0))) != 0:
            gf = SimpleGapFiller()
            res = gf.linear_interpolation(res)
return res
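# Hedged sketch (not part of the original FEDOT source): a minimal standalone
# demonstration of the shift + Box-Cox round trip used in ARIMAImplementation
# fit() and _inverse_boxcox() above.
def _boxcox_roundtrip_demo():
    ts = np.array([-3.0, 0.0, 2.0, 5.0, 7.0])
    # Shift to strictly positive values, as in fit()
    scope = abs(ts.min()) + 1 if ts.min() <= 0 else None
    shifted = ts + scope if scope is not None else ts
    _, lmbda = stats.boxcox(shifted)      # estimate lambda on the shifted series
    transformed = boxcox(shifted, lmbda)  # forward transform
    restored = inv_boxcox(transformed, lmbda)
    if scope is not None:
        restored = restored - scope       # undo the shift, as in _inverse_shift()
    assert np.allclose(restored, ts)
    return restored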
class AutoRegImplementation(ModelImplementation):
def __init__(self, log: Log = None, **params):
super().__init__(log)
self.params = params
self.actual_ts_len = None
self.autoreg = None
def fit(self, input_data):
""" Class fit ar model on data
:param input_data: data with features, target and ids to process
"""
source_ts = np.array(input_data.features)
self.actual_ts_len = len(source_ts)
lag_1 = int(self.params.get('lag_1'))
lag_2 = int(self.params.get('lag_2'))
params = {'lags': [lag_1, lag_2]}
self.autoreg = AutoReg(source_ts, **params).fit()
return self.autoreg
def predict(self, input_data, is_fit_pipeline_stage: bool):
""" Method for time series prediction on forecast length
:param input_data: data with features, target and ids to process
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return output_data: output data with the forecasted time series
"""
input_data = copy(input_data)
parameters = input_data.task.task_params
forecast_length = parameters.forecast_length
old_idx = input_data.idx
target = input_data.target
if is_fit_pipeline_stage:
fitted = self.autoreg.predict(start=old_idx[0], end=old_idx[-1])
# First n elements in time series are skipped
diff = self.actual_ts_len - len(fitted)
# Fill nans with first values
first_element = fitted[0]
first_elements = [first_element] * diff
first_elements.extend(list(fitted))
fitted = np.array(first_elements)
_, predict = ts_to_table(idx=old_idx,
time_series=fitted,
window_size=forecast_length)
new_idx, target_columns = ts_to_table(idx=old_idx,
time_series=target,
window_size=forecast_length)
# Update idx and target
input_data.idx = new_idx
input_data.target = target_columns
# For predict stage we can make prediction
else:
start_id = old_idx[-1] - forecast_length + 1
end_id = old_idx[-1]
predicted = self.autoreg.predict(start=start_id,
end=end_id)
# Convert one-dim array as column
predict = np.array(predicted).reshape(1, -1)
new_idx = np.arange(start_id, end_id + 1)
# Update idx
input_data.idx = new_idx
# Update idx and features
output_data = self._convert_to_output(input_data,
predict=predict,
data_type=DataTypesEnum.table)
return output_data
def get_params(self):
return self.params
class STLForecastARIMAImplementation(ModelImplementation):
def __init__(self, log: Log = None, **params: Optional[dict]):
super().__init__(log)
self.params = params
self.model = None
self.lambda_param = None
self.scope = None
self.actual_ts_len = None
self.sts = None
def fit(self, input_data):
""" Class fit STLForecast arima model on data
:param input_data: data with features, target and ids to process
"""
source_ts = np.array(input_data.features)
# Save actual time series length
self.actual_ts_len = len(source_ts)
self.sts = source_ts
if not self.params:
            # Default parameters
self.params = {'p': 2, 'd': 0, 'q': 2, 'period': 365}
p = int(self.params.get('p'))
d = int(self.params.get('d'))
q = int(self.params.get('q'))
period = int(self.params.get('period'))
params = {'period': period, 'model_kwargs': {'order': (p, d, q)}}
self.model = STLForecast(source_ts, ARIMA, **params).fit()
return self.model
def predict(self, input_data, is_fit_pipeline_stage: bool):
""" Method for time series prediction on forecast length
:param input_data: data with features, target and ids to process
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return output_data: output data with the forecasted time series
"""
parameters = input_data.task.task_params
forecast_length = parameters.forecast_length
old_idx = input_data.idx
target = input_data.target
# For training pipeline get fitted data
if is_fit_pipeline_stage:
fitted_values = self.model.get_prediction(start=old_idx[0], end=old_idx[-1]).predicted_mean
diff = int(self.actual_ts_len) - len(fitted_values)
# If first elements skipped
if diff != 0:
# Fill nans with first values
first_element = fitted_values[0]
first_elements = [first_element] * diff
first_elements.extend(list(fitted_values))
fitted_values = np.array(first_elements)
_, predict = ts_to_table(idx=old_idx,
time_series=fitted_values,
window_size=forecast_length)
new_idx, target_columns = ts_to_table(idx=old_idx,
time_series=target,
window_size=forecast_length)
# Update idx and target
input_data.idx = new_idx
input_data.target = target_columns
# For predict stage we can make prediction
else:
start_id = old_idx[-1] - forecast_length + 1
end_id = old_idx[-1]
predicted = self.model.get_prediction(start=start_id, end=end_id).predicted_mean
# Convert one-dim array as column
predict = np.array(predicted).reshape(1, -1)
new_idx = np.arange(start_id, end_id + 1)
# Update idx
input_data.idx = new_idx
# Update idx and features
output_data = self._convert_to_output(input_data,
predict=predict,
data_type=DataTypesEnum.table)
return output_data
def get_params(self):
return self.params
class CLSTMImplementation(ModelImplementation):
def __init__(self, log: Log = None, **params):
super().__init__(log)
self.params = params
self.epochs = params.get("num_epochs")
self.batch_size = params.get("batch_size")
self.learning_rate = params.get("learning_rate")
self.window_size = int(params.get("window_size"))
self.teacher_forcing = int(params.get("teacher_forcing"))
self.device = self._get_device()
self.model = LSTMNetwork(
hidden_size=int(params.get("hidden_size")),
cnn1_kernel_size=int(params.get("cnn1_kernel_size")),
cnn1_output_size=int(params.get("cnn1_output_size")),
cnn2_kernel_size=int(params.get("cnn2_kernel_size")),
cnn2_output_size=int(params.get("cnn2_output_size"))
)
self.optim_dict = {
'adam': torch.optim.Adam(self.model.parameters(), lr=self.learning_rate),
'sgd': torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
}
self.loss_dict = {
'mae': nn.L1Loss,
'mse': nn.MSELoss
}
self.scaler = StandardScaler()
self.optimizer = self.optim_dict[params.get("optimizer")]
self.criterion = self.loss_dict[params.get("loss")]()
def fit(self, train_data: InputData):
""" Class fit ar model on data.
Implementation uses the idea of teacher forcing. That means model learns
to predict data when horizon != 1. It uses real values or previous model output
to predict next value. self.teacher_forcing param is used to control probability
of using real y values.
:param train_data: data with features, target and ids to process
"""
self.model = self.model.to(self.device)
data_loader, forecast_length = self._create_dataloader(train_data)
self.model.train()
for epoch in range(self.epochs):
for x, y in data_loader:
self.optimizer.zero_grad()
x = x.to(self.device)
y = y.to(self.device)
final_output = self._apply_teacher_forcing(x, y, forecast_length)
loss = self.criterion(final_output, y)
loss.backward()
self.optimizer.step()
return self.model
def _apply_teacher_forcing(self, x, y, forecast_length):
final_output = None
for i in range(forecast_length):
self.model.init_hidden(x.shape[0], self.device)
output = self.model(x.unsqueeze(1)).squeeze(0)
if np.random.random_sample() > self.teacher_forcing:
x = torch.hstack((x[:, 1:], output))
else:
x = torch.hstack((x, y[:, i].unsqueeze(1)))
if final_output is not None:
final_output = torch.hstack((final_output, output))
else:
final_output = output
return final_output
def predict(self, input_data: InputData, is_fit_pipeline_stage: Optional[bool]):
""" Method for time series prediction on forecast length
:param input_data: data with features, target and ids to process
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return output_data: output data with the forecasted time series
"""
self.model.eval()
input_data_new = copy(input_data)
old_idx = input_data_new.idx
forecast_length = input_data.task.task_params.forecast_length
if is_fit_pipeline_stage:
new_idx, lagged_table = ts_to_table(idx=old_idx,
time_series=input_data_new.features,
window_size=self.window_size)
final_idx, features_columns, final_target = prepare_target(idx=new_idx,
features_columns=lagged_table,
target=input_data_new.target,
forecast_length=forecast_length)
input_data_new.idx = final_idx
input_data_new.features = features_columns
input_data_new.target = final_target
else:
input_data_new.features = input_data_new.features[-self.window_size:].reshape(1, -1)
input_data_new.idx = input_data_new.idx[-forecast_length:]
predict = self._out_of_sample_ts_forecast(input_data_new)
output_data = self._convert_to_output(input_data_new,
predict=predict,
data_type=DataTypesEnum.table)
return output_data
def _predict(self, input_data: InputData):
features_scaled = self._transform_scaler_features(input_data)
x = torch.Tensor(features_scaled).to(self.device)
self.model.init_hidden(x.shape[0], self.device)
predict = self.model(x.unsqueeze(1)).squeeze(0).cpu().detach().numpy()
return self._inverse_transform_scaler(predict)
def _out_of_sample_ts_forecast(self, input_data: InputData) -> np.array:
""" Method for out_of_sample CLSTM forecasting (use previous outputs as next inputs)
:param input_data: data with features, target and ids to process
:return np.array: np.array with predicted values to process it into output_data
"""
input_data_new = copy(input_data)
# Prepare data for time series forecasting
task = input_data_new.task
exception_if_not_ts_task(task)
pre_history_ts = np.array(input_data_new.features)
number_of_iterations = task.task_params.forecast_length
final_forecast = None
for _ in range(0, number_of_iterations):
with torch.no_grad():
iter_predict = self._predict(input_data_new)
if final_forecast is not None:
final_forecast = np.hstack((final_forecast, iter_predict))
else:
final_forecast = iter_predict
# Add prediction to the historical data - update it
pre_history_ts = np.hstack((pre_history_ts[:, 1:], iter_predict))
# Prepare InputData for next iteration
input_data_new = _update_input(pre_history_ts, number_of_iterations, task)
return final_forecast
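    # Hedged sketch (not part of the original class): the feedback loop behind
    # _out_of_sample_ts_forecast() above, with a trivial stand-in "model" that
    # predicts the window mean one step ahead.
    @staticmethod
    def _rolling_forecast_demo(history, horizon, window_size=3):
        history = list(history)
        preds = []
        for _ in range(horizon):
            step = float(np.mean(history[-window_size:]))  # stand-in for the network output
            preds.append(step)
            history.append(step)  # feed the prediction back as the next input
        return np.array(preds)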
def _fit_transform_scaler(self, data: InputData):
f_scaled = self.scaler.fit_transform(data.features.reshape(-1, 1)).reshape(-1)
t_scaled = self.scaler.transform(data.target.reshape(-1, 1)).reshape(-1)
return f_scaled, t_scaled
def _inverse_transform_scaler(self, data: np.ndarray):
start_shape = data.shape
return self.scaler.inverse_transform(data.reshape(-1, 1)).reshape(start_shape)
def _transform_scaler_features(self, data: InputData):
start_shape = data.features.shape
return self.scaler.transform(data.features.reshape(-1, 1)).reshape(start_shape)
def _transform_scaler_target(self, data: InputData):
        start_shape = data.target.shape
return self.scaler.transform(data.target.reshape(-1, 1)).reshape(start_shape)
def get_params(self):
return self.params
@staticmethod
def _get_device():
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
return device
def _create_dataloader(self, input_data: InputData):
""" Method for creating torch.utils.data.DataLoader object from input_data
Generate lag tables and process it into DataLoader
:param input_data: data with features, target and ids to process
:return torch.utils.data.DataLoader: DataLoader with train data
"""
forecast_length = input_data.task.task_params.forecast_length
features_scaled, target_scaled = self._fit_transform_scaler(input_data)
new_idx, lagged_table = ts_to_table(idx=input_data.idx,
time_series=features_scaled,
window_size=self.window_size)
final_idx, features_columns, final_target = prepare_target(idx=new_idx,
features_columns=lagged_table,
target=target_scaled,
forecast_length=forecast_length)
x = torch.from_numpy(features_columns.copy()).float()
y = torch.from_numpy(final_target.copy()).float()
return DataLoader(TensorDataset(x, y), batch_size=self.batch_size), forecast_length
class LSTMNetwork(nn.Module):
def __init__(self,
hidden_size=200,
cnn1_kernel_size=5,
cnn1_output_size=16,
cnn2_kernel_size=3,
cnn2_output_size=32,
):
super().__init__()
self.hidden_size = hidden_size
self.conv_block1 = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=cnn1_output_size, kernel_size=cnn1_kernel_size),
nn.ReLU()
)
self.conv_block2 = nn.Sequential(
nn.Conv1d(in_channels=cnn1_output_size, out_channels=cnn2_output_size, kernel_size=cnn2_kernel_size),
nn.ReLU()
)
self.lstm = nn.LSTM(cnn2_output_size, self.hidden_size, dropout=0.1)
self.hidden_cell = None
self.linear = nn.Linear(self.hidden_size * 2, 1)
def init_hidden(self, batch_size, device):
self.hidden_cell = (torch.zeros(1, batch_size, self.hidden_size).to(device),
torch.zeros(1, batch_size, self.hidden_size).to(device))
def forward(self, x):
        if self.hidden_cell is None:
            raise Exception('Hidden state is not initialized: call init_hidden() before forward()')
x = self.conv_block1(x)
x = self.conv_block2(x)
x = x.permute(2, 0, 1)
out, self.hidden_cell = self.lstm(x, self.hidden_cell)
hidden_cat = torch.cat([self.hidden_cell[0], self.hidden_cell[1]], dim=2)
predictions = self.linear(hidden_cat)
return predictions
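# Hedged usage sketch (not part of the original module): a shape walk-through
# for LSTMNetwork.forward(); batch_size=4 and window_size=20 are illustrative.
def _lstm_network_demo():
    net = LSTMNetwork(hidden_size=8, cnn1_kernel_size=5, cnn1_output_size=16,
                      cnn2_kernel_size=3, cnn2_output_size=32)
    x = torch.randn(4, 1, 20)  # (batch, channels=1, window_size)
    net.init_hidden(batch_size=4, device='cpu')
    out = net(x)  # conv blocks -> LSTM -> linear head
    assert out.shape == (1, 4, 1)  # one predicted value per series
    return out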
|
{"hexsha": "27bc831133614dcba1b95fd285942d75c4d91634", "size": 23180, "ext": "py", "lang": "Python", "max_stars_repo_path": "fedot/core/operations/evaluation/operation_implementations/models/ts_implementations.py", "max_stars_repo_name": "rozlana-g/FEDOT", "max_stars_repo_head_hexsha": "a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 358, "max_stars_repo_stars_event_min_datetime": "2020-06-11T09:34:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:56:22.000Z", "max_issues_repo_path": "fedot/core/operations/evaluation/operation_implementations/models/ts_implementations.py", "max_issues_repo_name": "rozlana-g/FEDOT", "max_issues_repo_head_hexsha": "a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 467, "max_issues_repo_issues_event_min_datetime": "2020-06-11T13:49:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:19:48.000Z", "max_forks_repo_path": "fedot/core/operations/evaluation/operation_implementations/models/ts_implementations.py", "max_forks_repo_name": "rozlana-g/FEDOT", "max_forks_repo_head_hexsha": "a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2020-07-13T14:50:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T09:37:13.000Z", "avg_line_length": 38.9579831933, "max_line_length": 113, "alphanum_fraction": 0.5974115617, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 4712}
|
import numpy as np
import urllib.request
import os.path
class Dataset:
# TODO test
    def normalize(self, x, train_min=None, train_max=None):
        # Shift to zero-based values (only when the minimum is positive),
        # then scale by the largest absolute value
if train_min is None:
train_min = np.min(x)
if train_min > 0:
x -= train_min
if train_max is None:
train_max = np.max(np.abs(x))
return x / train_max, train_min, train_max
# TODO test it
@staticmethod
def transform_y_from_label_values_to_label_indices(y, nr_labels):
# for every y=i value, transform it into an array where v[i] = 1
#assert np.min(y) == 0 and np.max(y) == nr_labels-1
new_y_train = np.ndarray((y.shape[0], nr_labels))
for i in range(nr_labels):
new_y_train[:, i] = (y == i).astype(int)
return new_y_train
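    # Hedged sketch (not part of the original class): the loop above is one-hot
    # encoding, which can also be written as an identity-matrix lookup:
    @staticmethod
    def one_hot_via_eye(y, nr_labels):
        # assumes y contains integer labels in [0, nr_labels)
        return np.eye(nr_labels)[y.astype(int)]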
# TODO test it
@staticmethod
def get_columns_with_constant_values_for_all_entries(x_train):
        indexes = []
for i in range(x_train.shape[1]):
# if all values in that column are equal to the first value in the column
if np.count_nonzero(x_train[:, i] == x_train[0, i]) == x_train.shape[0]:
indexes.append(i)
return np.array(indexes)
# TODO test it
@staticmethod
def subsampling_average(x_train):
# 1st dimension is entry/row/image number
# 2nd and 3rd is the Y and X of the image
assert np.ndim(x_train) == 3
result = np.zeros( (x_train.shape[0],
int(x_train.shape[1]/2 + x_train.shape[1]%2),
int(x_train.shape[2]/2 + x_train.shape[2]%2)))
for image_i in range(result.shape[0]):
for i in range(result.shape[1]):
for j in range(result.shape[2]):
result[image_i, i, j] += x_train[image_i, i*2, j*2]
counter = 1
if i * 2 + 1 < x_train.shape[1]:
result[image_i, i, j] += x_train[image_i, i * 2 + 1, j * 2]
counter += 1
if j * 2 + 1 < x_train.shape[2]:
result[image_i, i, j] += x_train[image_i, i * 2, j * 2 + 1]
counter += 1
if i*2+1 < x_train.shape[1] and j*2+1 < x_train.shape[2]:
result[image_i, i, j] += x_train[image_i, i * 2 + 1, j * 2 + 1]
counter += 1
result[image_i, i, j] /= counter
return result
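    # Hedged sketch (not part of the original class): for images with even
    # height and width, the triple loop above reduces to 2x2 average pooling,
    # which can be vectorized with a reshape and a mean:
    @staticmethod
    def subsampling_average_even(x_train):
        # assumes x_train.shape == (n_images, h, w) with h and w both even
        n, h, w = x_train.shape
        return x_train.reshape(n, h // 2, 2, w // 2, 2).mean(axis=(2, 4))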
def deskew_image(self):
""" From Lecun98: The deslanting computes the second moments of inertia of the pixels
(counting a foreground pixel as 1 and a background pixel as 0 and shears the image by
horizontally shifting the lines so that the principal axis is vertical.
This version of the database will be referred to as the deslanted database"""
class MnistDataset(Dataset):
def get_dataset(self, urlretrieve = urllib.request.urlretrieve):
filename = 'mnist.npz'
if not os.path.exists(filename):
url = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
urlretrieve(url+filename, filename)
with np.load(filename) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
return (x_train, y_train), (x_test, y_test)
|
{"hexsha": "a7a46e32d8dade9c2510f4f8857dd52568de1974", "size": 3392, "ext": "py", "lang": "Python", "max_stars_repo_path": "implementations-from-scratch/dataset/dataset.py", "max_stars_repo_name": "georgepachitariu/machine-learning-portfolio", "max_stars_repo_head_hexsha": "47452524b0f2ccf409ba12e6a717157e569d62e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-25T11:27:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-19T17:42:47.000Z", "max_issues_repo_path": "implementations-from-scratch/dataset/dataset.py", "max_issues_repo_name": "georgepachitariu/machine-learning-portfolio", "max_issues_repo_head_hexsha": "47452524b0f2ccf409ba12e6a717157e569d62e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "implementations-from-scratch/dataset/dataset.py", "max_forks_repo_name": "georgepachitariu/machine-learning-portfolio", "max_forks_repo_head_hexsha": "47452524b0f2ccf409ba12e6a717157e569d62e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7052631579, "max_line_length": 93, "alphanum_fraction": 0.5586674528, "include": true, "reason": "import numpy", "num_tokens": 860}
|
[STATEMENT]
lemma update_arg_wf_tuples' [elim]:
"\<And>n hops nhip pre. Suc 0 \<le> n \<Longrightarrow> update_arg_wf (n, kno, val, hops, nhip, pre)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n hops nhip pre. Suc 0 \<le> n \<Longrightarrow> update_arg_wf (n, kno, val, hops, nhip, pre)
[PROOF STEP]
unfolding update_arg_wf_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n hops nhip pre. Suc 0 \<le> n \<Longrightarrow> \<pi>\<^sub>4 (n, kno, val, hops, nhip, pre) = val \<and> (\<pi>\<^sub>2 (n, kno, val, hops, nhip, pre) = 0) = (\<pi>\<^sub>3 (n, kno, val, hops, nhip, pre) = unk) \<and> (\<pi>\<^sub>3 (n, kno, val, hops, nhip, pre) = unk \<longrightarrow> \<pi>\<^sub>5 (n, kno, val, hops, nhip, pre) = 1)
[PROOF STEP]
by auto
|
{"llama_tokens": 346, "file": "AODV_variants_a_norreqid_A_Aodv_Data", "length": 2}
|
from pydicom import dcmread
from numpy import zeros
from matplotlib.pyplot import figure, imshow, show, subplot
from sys import argv
from glob import glob
# load the DICOM files
def read_slices(paths):
files = [dcmread(fname) for fname in paths]
print("file count: {}".format(len(files)))
return sorted([f for f in files if hasattr(f, 'SliceLocation')],
key=lambda s: s.SliceLocation)
def get_pixel_aspects(slices):
# pixel aspects, assuming all slices are the same
ps = slices[0].PixelSpacing
ss = slices[0].SliceThickness
return ps[1]/ps[0], ps[1]/ss, ss/ps[0]
def create_image(slices):
# create 3D array
img_shape = list(slices[0].pixel_array.shape)
img_shape.append(len(slices))
img3d = zeros(img_shape)
# fill 3D array with the images from the files
    for i, s in enumerate(slices):  # 's' avoids shadowing the builtin 'slice'
        img2d = s.pixel_array
        img3d[:, :, i] = img2d
return img3d, img_shape
if __name__ == '__main__':
    # argv[1] is a glob pattern matching the DICOM files, e.g. 'series/*.dcm'
    slices = read_slices(glob(argv[1], recursive=False))
ax_aspect, sag_aspect, cor_aspect = get_pixel_aspects(slices)
img3d, img_shape = create_image(slices)
# plot 3 orthogonal slices
fig = figure()
a1 = subplot(2, 2, 1)
imshow(img3d[:, :, img_shape[2]//2])
a1.set_aspect(ax_aspect)
a2 = subplot(2, 2, 2)
imshow(img3d[:, img_shape[1]//2, :])
a2.set_aspect(sag_aspect)
a3 = subplot(2, 2, 3)
imshow(img3d[img_shape[0]//2, :, :].T)
a3.set_aspect(cor_aspect)
show()
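# Usage sketch (hedged; the path pattern is illustrative):
#   python reslice.py 'series/*.dcm'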
|
{"hexsha": "4a2e185893e81e202f614ae17d82034fc3bc9279", "size": 1589, "ext": "py", "lang": "Python", "max_stars_repo_path": "reslice.py", "max_stars_repo_name": "weka511/rsna", "max_stars_repo_head_hexsha": "3414137e8373d8e1614921e2e1f416aaec8abd82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reslice.py", "max_issues_repo_name": "weka511/rsna", "max_issues_repo_head_hexsha": "3414137e8373d8e1614921e2e1f416aaec8abd82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-07-24T21:53:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-10T22:16:26.000Z", "max_forks_repo_path": "reslice.py", "max_forks_repo_name": "weka511/rsna", "max_forks_repo_head_hexsha": "3414137e8373d8e1614921e2e1f416aaec8abd82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3965517241, "max_line_length": 68, "alphanum_fraction": 0.6274386407, "include": true, "reason": "from numpy", "num_tokens": 455}
|
SUBROUTINE RDFIL (UNIT,INST,CLEN,NCOL,NOFILE)
C
IMPLICIT INTEGER(A-Z)
LOGICAL NOFILE
DIMENSION BUF(128)
C
CALL XVUNIT (UNIT,'INP',INST,STATUS,' ')
NOFILE = STATUS.NE.1
IF (.NOT.NOFILE) THEN
CALL XVOPEN (UNIT,STATUS,'OP','UPDATE',
+ 'OPEN_ACT','SA', 'IO_ACT','SA',' ')
CALL XVREAD (UNIT,BUF,STATUS,' ')
CLEN = BUF(1)
CALL XVGET (UNIT,STATUS,'NL',NBLK,' ')
NRCOL = (CLEN+127)/128
NCOL = (NBLK-1)/MAX(NRCOL,1)
ENDIF
RETURN
END
C*******************************************************************************
SUBROUTINE WRFIL (UNIT,INST,CLEN,NCOL,NOFILE)
C
IMPLICIT INTEGER(A-Z)
LOGICAL NOFILE
DIMENSION BUF(128)
C
NLO = 1+NCOL*((CLEN+127)/128)
CALL XVUNIT (UNIT,'OUT',INST,STATUS,' ')
NOFILE = STATUS.NE.1
IF (.NOT.NOFILE) THEN
CALL XVOPEN (UNIT,STATUS,'OP','WRITE','U_NL',NLO,
* 'U_NS',512,'O_FORMAT','BYTE','U_FORMAT','BYTE',
* 'OPEN_ACT','SA', 'IO_ACT','SA', ' ')
BUF(1) = CLEN
DO I=2,128
BUF(I) = 0
ENDDO
CALL XVWRIT (UNIT,BUF,STATUS,' ')
BUF(1)=0
DO I=2,NLO
CALL XVWRIT(UNIT,BUF,STATUS,' ')
ENDDO
CALL XVCLOSE(UNIT,STATUS,' ')
CALL XVOPEN(UNIT,STATUS,'OP','UPDATE',
+ 'OPEN_ACT','SA', 'IO_ACT','SA',' ')
ENDIF
RETURN
END
C*******************************************************************************
SUBROUTINE GETCOL (UNIT,ICOL,CLEN,COL)
IMPLICIT INTEGER(A-Z)
REAL*4 COL(*)
C
NREC = (CLEN+127)/128
REC = 2+(ICOL-1)*NREC
PTR = 1
DO I=1,NREC
CALL XVREAD (UNIT,COL(PTR),STATUS,'LINE',REC,' ')
REC = REC+1
PTR = PTR+128
ENDDO
RETURN
END
C*******************************************************************************
SUBROUTINE PUTCOL(UNIT,ICOL,CLEN,COL)
IMPLICIT INTEGER(A-Z)
REAL*4 COL(*)
C
NREC = (CLEN+127)/128
REC = 2+(ICOL-1)*NREC
PTR = 1
DO I=1,NREC
CALL XVWRIT (UNIT,COL(PTR),STATUS,'LINE',REC,' ')
REC = REC+1
PTR = PTR+128
ENDDO
RETURN
END
C*******************************************************************************
SUBROUTINE GETREC (UNIT,DCOL,COLS,DATA,REC,CLEN,A)
C SERIAL READ ONLY
IMPLICIT INTEGER(A-Z)
DIMENSION COLS(*),DATA(*),A(128,*)
C
PTR = MOD(REC-1,128)+1
IF (PTR.EQ.1) THEN
OFFSET = (REC+127)/128
NRCOL = (CLEN+127)/128
DO IX=1,DCOL
R = NRCOL*(COLS(IX)-1)+OFFSET+1
CALL XVREAD (UNIT,A(1,IX),STATUS,'LINE',R,' ')
ENDDO
ENDIF
DO I=1,DCOL
DATA(I) = A(PTR,I)
ENDDO
RETURN
END
C*******************************************************************************
SUBROUTINE PUTREC (UNIT,DCOL,COLS,DATA,REC,CLEN,A)
C SERIAL WRITE ONLY
IMPLICIT INTEGER(A-Z)
DIMENSION COLS(*),DATA(*),A(128,*)
C
PTR = MOD(REC-1,128)+1
DO I=1,DCOL
A(PTR,I) = DATA(I)
ENDDO
IF (PTR.EQ.128.OR.REC.EQ.CLEN) THEN
C IF (PTR.EQ.128.OR.REC.NE.CLEN) THEN
IF (PTR.NE.128.AND.REC.EQ.CLEN) THEN
PTR1 = PTR+1
DO IX=1,DCOL
DO I=PTR1,128
A(I,IX) = 0
ENDDO
ENDDO
ENDIF
OFFSET = (REC+127)/128
NRCOL = (CLEN+127)/128
DO IX=1,DCOL
R = NRCOL*(COLS(IX)-1)+OFFSET+1
CALL XVWRIT (UNIT,A(1,IX),STATUS,'LINE',R,' ')
ENDDO
ENDIF
RETURN
END
|
{"hexsha": "c86df59e2dad8957f18e9dea57f4326c6368a4f9", "size": 3134, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p3/sub/ibisfil/ibisfil.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p3/sub/ibisfil/ibisfil.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p3/sub/ibisfil/ibisfil.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 24.484375, "max_line_length": 80, "alphanum_fraction": 0.5223356733, "num_tokens": 1155}
|
"""
utilities functions for dcase submission format
"""
import numpy as np
def load_output_format_file(_output_format_file, version='2021'):
"""
copy from cls_feature_class: remove class params
Loads DCASE output format csv file and returns it in dictionary format
can load both polar and xyz format
params:
_output_format_file: DCASE output format CSV
submission output format: [frame_index, sound_class_idx, azimuth(degree), elevation(degree)]
ground truth format: [frame_index, sound_class_idx, track_num, azimuth(degree), elevation(degree)]
baseline format: [frame_index, sound_class_idx, track_num, x, y, z]
        version: choice: '2020', '2021'; version '2021' includes track_num in the dictionary, but this variable is ignored.
return:
_output_dict: dictionary
"""
_output_dict = {}
_fid = open(_output_format_file, 'r')
# next(_fid)
if version == '2021':
for _line in _fid:
_words = _line.strip().split(',')
_frame_ind = int(_words[0])
if _frame_ind not in _output_dict:
_output_dict[_frame_ind] = []
if len(_words) == 4: # output format of submission files
_output_dict[_frame_ind].append([int(_words[1]), float(_words[2]), float(_words[3]), 0])
            elif len(_words) == 5:  # polar coordinates format; the track number is kept as the last element
                _output_dict[_frame_ind].append([int(_words[1]), float(_words[3]), float(_words[4]), int(_words[2])])
            elif len(_words) == 6:  # Cartesian coordinates format; the track number is kept as the last element
                _output_dict[_frame_ind].append([int(_words[1]), float(_words[3]), float(_words[4]), float(_words[5]),
                                                 int(_words[2])])
elif version == '2020':
for _line in _fid:
_words = _line.strip().split(',')
_frame_ind = int(_words[0])
if _frame_ind not in _output_dict:
_output_dict[_frame_ind] = []
if len(_words) == 4: # output format of submission files
_output_dict[_frame_ind].append([int(_words[1]), float(_words[2]), float(_words[3])])
elif len(_words) == 5: #read polar coordinates format, we ignore the track count
_output_dict[_frame_ind].append([int(_words[1]), float(_words[3]), float(_words[4])])
elif len(_words) == 6: # read Cartesian coordinates format, we ignore the track count
_output_dict[_frame_ind].append([int(_words[1]), float(_words[3]), float(_words[4]), float(_words[5])])
else:
raise ValueError('version {} is not implemented'.format(version))
_fid.close()
return _output_dict
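# Hedged sketch (not part of the original utilities): writing and reading a
# minimal 2021 submission-format file (frame, class, azimuth, elevation);
# the temporary path is illustrative.
def _load_output_format_demo(tmp_path='/tmp/_dcase_demo.csv'):
    with open(tmp_path, 'w') as f:
        f.write('0,3,30,-10\n')
    out = load_output_format_file(tmp_path)
    assert out[0] == [[3, 30.0, -10.0, 0]]
    return out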
def convert_output_format_polar_to_cartesian(in_dict, version='2021'):
'''
copy from cls_feature_class, remove class params
convert polar format in degree to cartesian format'''
out_dict = {}
for frame_cnt in in_dict.keys():
if frame_cnt not in out_dict:
out_dict[frame_cnt] = []
for tmp_val in in_dict[frame_cnt]:
            ele_rad = tmp_val[2] * np.pi / 180.
            azi_rad = tmp_val[1] * np.pi / 180.
            cos_ele = np.cos(ele_rad)
            x = np.cos(azi_rad) * cos_ele
            y = np.sin(azi_rad) * cos_ele
            z = np.sin(ele_rad)
if version == '2021':
out_dict[frame_cnt].append([tmp_val[0], x, y, z, tmp_val[-1]])
elif version == '2020':
out_dict[frame_cnt].append([tmp_val[0], x, y, z])
else:
raise ValueError('version {} is not implemented'.format(version))
return out_dict
def convert_output_format_cartesian_to_polar(in_dict, version='2021'):
''' convert cartesian format to polar format in degree'''
out_dict = {}
for frame_cnt in in_dict.keys():
if frame_cnt not in out_dict:
out_dict[frame_cnt] = []
for tmp_val in in_dict[frame_cnt]:
x = tmp_val[1]
y = tmp_val[2]
z = tmp_val[3]
azi_deg = np.arctan2(y,x) * 180.0/np.pi
ele_deg = np.arctan2(z, np.sqrt(x**2 + y**2)) * 180.0/np.pi
if version == '2021':
out_dict[frame_cnt].append([tmp_val[0], azi_deg, ele_deg, tmp_val[-1]])
elif version == '2020':
out_dict[frame_cnt].append([tmp_val[0], azi_deg, ele_deg])
else:
raise ValueError('version {} is not implemented'.format(version))
return out_dict
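# Minimal round-trip sketch (values are illustrative): converting polar
# degrees to Cartesian and back should recover the original angles, since
# (x, y, z) is built as a unit vector from (azimuth, elevation).
#
#   polar = {0: [[1, 30.0, 10.0, 0]]}  # class 1, azi 30, ele 10, track 0
#   xyz = convert_output_format_polar_to_cartesian(polar)
#   back = convert_output_format_cartesian_to_polar(xyz)
#   # back[0][0][1:3] is approximately [30.0, 10.0]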
def output_format_to_regression_format(output_dict, doa_output_format='polar', n_classes=14, n_max_frames=600,
version='2021'):
    ''' convert output format in dictionary to regression output format; this will overwrite
    frames where several events share the same class.
params:
output_dict: key: frame index
values: [sound_class_idx, azimuth (degrees), elevation (degrees)] or
[sound_class_idx, x, y, z]
n_max_frames < label_frames_per_1s * file_len_s
        doa_input_format (inferred from entry length): 'polar' (degree) | 'xyz'
doa_output_format: (str) 'polar' (degree) | 'xyz'
polar -> polar, xyz
xyz -> polar, xyz
returns:
[sed_output, doa_output]
sed_output: (numpy.array) [n_max_frames, n_classes]
doa_output: (numpy.array) [n_max_frames, 2 * n_classes] if doa_format is polar ('degree')
[n_max_frames, 3 * n_classes] if doa_format is 'xyz'
'''
# n_max_frames = int(label_frames_per_1s * file_len_s)
sed_output = np.zeros((n_max_frames, n_classes))
if doa_output_format == 'xyz':
doa_output = np.zeros((n_max_frames, n_classes*3))
else:
doa_output = np.zeros((n_max_frames, n_classes*2))
count = 0
for frame_idx, values in output_dict.items():
if frame_idx < n_max_frames:
for value in values:
if count == 0:
if version == '2020':
if len(value) == 3:
doa_input_format = 'polar'
elif len(value) == 4:
doa_input_format = 'xyz'
elif version == '2021':
if len(value) == 3 or len(value) == 4:
doa_input_format = 'polar'
elif len(value) == 5:
                            doa_input_format = 'xyz'
else:
raise ValueError('Version {} is unknown'.format(version))
count += 1
sound_class_idx = int(value[0])
sed_output[frame_idx, sound_class_idx] = 1
if doa_input_format == 'polar' and doa_output_format == 'polar':
doa_output[frame_idx, sound_class_idx] = value[1]
doa_output[frame_idx, n_classes + sound_class_idx] = value[2]
elif doa_input_format == 'polar' and doa_output_format == 'xyz':
azi_rad = value[1]*np.pi/180
ele_rad = value[2]*np.pi/180.
x = np.cos(azi_rad) * np.cos(ele_rad)
y = np.sin(azi_rad) * np.cos(ele_rad)
z = np.sin(ele_rad)
doa_output[frame_idx, sound_class_idx] = x
doa_output[frame_idx, n_classes + sound_class_idx] = y
doa_output[frame_idx, 2*n_classes + sound_class_idx] = z
elif doa_input_format == 'xyz' and doa_output_format == 'polar':
x = value[1]
y = value[2]
z = value[3]
azi_rad = np.arctan2(y, x)
ele_rad = np.arctan2(z, np.sqrt(x**2 + y**2))
doa_output[frame_idx, sound_class_idx] = azi_rad * 180.0/np.pi
doa_output[frame_idx, n_classes + sound_class_idx] = ele_rad * 180.0/np.pi
else: #elif doa_input_format == 'xyz' and doa_output_format == 'xyz':
doa_output[frame_idx, sound_class_idx] = value[1]
doa_output[frame_idx, n_classes + sound_class_idx] = value[2]
doa_output[frame_idx, 2*n_classes + sound_class_idx] = value[3]
return [sed_output, doa_output]
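# Shape sketch for output_format_to_regression_format (illustrative numbers):
# with n_classes=14 and n_max_frames=600, a polar target gives
#   sed_output: (600, 14)   binary activity per frame and class
#   doa_output: (600, 28)   [azimuths | elevations], each block 14 columns wide
# and an 'xyz' target gives doa_output of shape (600, 42) as [x | y | z].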
def segment_labels(_pred_dict, _max_frames=600, _nb_label_frames_1s=10):
'''
Same for both 2021 and 2020 evaluation metrics
    copy from cls_feature_class: remove class params
Collects class-wise sound event location information in segments of length 1s from reference dataset
:param
_pred_dict: Dictionary containing frame-wise sound event time and location information. Output of SELD method
_max_frames: Total number of frames in the recording
        _nb_label_frames_1s: label frame rate, i.e. the number of label frames per second
:return: Dictionary containing class-wise sound event location information in each segment of audio
dictionary_name[segment-index][class-index] = list(frame-cnt-within-segment, azimuth, elevation)
'''
nb_blocks = int(np.ceil(_max_frames/float(_nb_label_frames_1s)))
output_dict = {x: {} for x in range(nb_blocks)}
for frame_cnt in range(0, _max_frames, _nb_label_frames_1s):
# Collect class-wise information for each block
# [class][frame] = <list of doa values>
        # Data structure supports multi-instance occurrence of the same class
block_cnt = frame_cnt // _nb_label_frames_1s
loc_dict = {}
for audio_frame in range(frame_cnt, frame_cnt+_nb_label_frames_1s):
if audio_frame not in _pred_dict:
continue
for value in _pred_dict[audio_frame]:
if value[0] not in loc_dict:
loc_dict[value[0]] = {} # key of loc_dict: dict[class_idx][block_frame] = [azi, ele, track_idx]
                block_frame = audio_frame - frame_cnt  # block_frame range: [0, _nb_label_frames_1s)
if block_frame not in loc_dict[value[0]]:
loc_dict[value[0]][block_frame] = []
loc_dict[value[0]][block_frame].append(value[1:])
# Update the block wise details collected above in a global structure
for class_cnt in loc_dict:
if class_cnt not in output_dict[block_cnt]:
output_dict[block_cnt][class_cnt] = []
keys = [k for k in loc_dict[class_cnt]] # keys: list of block_frames
values = [loc_dict[class_cnt][k] for k in loc_dict[class_cnt]] # values: list of [azi, ele, track_idx]
output_dict[block_cnt][class_cnt].append([keys, values]) # output_dict[block_idx][class_idx] = [[keys, values]]
return output_dict
def regression_label_format_to_output_format(_sed_labels, _doa_labels, _nb_classes=14):
"""
    copy from cls_feature_class: remove class params
Converts the sed (classification) and doa labels predicted in regression format to dcase output format.
:param _sed_labels: SED labels matrix [nb_frames, nb_classes]
:param _doa_labels: DOA labels matrix [nb_frames, 2*nb_classes] or [nb_frames, 3*nb_classes]
:return: _output_dict: returns a dict containing dcase output format
"""
_is_polar = _doa_labels.shape[-1] == 2*_nb_classes
_azi_labels, _ele_labels = None, None
_x, _y, _z = None, None, None
if _is_polar:
_azi_labels = _doa_labels[:, :_nb_classes]
_ele_labels = _doa_labels[:, _nb_classes:]
else:
_x = _doa_labels[:, :_nb_classes]
_y = _doa_labels[:, _nb_classes:2*_nb_classes]
_z = _doa_labels[:, 2*_nb_classes:]
_output_dict = {}
for _frame_ind in range(_sed_labels.shape[0]):
_tmp_ind = np.where(_sed_labels[_frame_ind, :])
if len(_tmp_ind[0]):
_output_dict[_frame_ind] = []
for _tmp_class in _tmp_ind[0]:
if _is_polar:
_output_dict[_frame_ind].append([_tmp_class, _azi_labels[_frame_ind, _tmp_class], _ele_labels[_frame_ind, _tmp_class]])
else:
_output_dict[_frame_ind].append([_tmp_class, _x[_frame_ind, _tmp_class], _y[_frame_ind, _tmp_class], _z[_frame_ind, _tmp_class]])
return _output_dict
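# Consistency sketch (hypothetical arrays): regression labels converted to the
# DCASE output format should describe the same events.
#
#   sed = np.zeros((600, 14)); sed[5, 2] = 1        # class 2 active at frame 5
#   doa = np.zeros((600, 28)); doa[5, 2] = 45.0     # azimuth 45 deg, elevation 0
#   out = regression_label_format_to_output_format(sed, doa)
#   # out[5] -> [[2, 45.0, 0.0]]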
def output_format_dict_to_classification_labels(output_dict, azimuths, elevations,
n_classes=14, n_max_frames_per_file=600,
joint=True):
'''
if joint is True, return [n_max_frames_per_file, n_classes, n_azimuths * n_elevations]
else: return [n_max_frames_per_file, n_classes, n_azimuths, n_elevations]
output dict:
key: frame_idx
values: [sound_class_idx, azimuth (degrees), elevation (degrees)]
returns:
classification format:[n_max_frames_per_file, n_classes, n_azimuths * n_elevations]'''
n_azis = len(azimuths)
n_eles = len(elevations)
azi_reln = int(abs(azimuths[1] - azimuths[0]))
ele_reln = int(abs(elevations[1] - elevations[0]))
if joint:
labels = np.zeros((n_max_frames_per_file, n_classes, n_azis * n_eles))
else:
labels = np.zeros((n_max_frames_per_file, n_classes, n_azis, n_eles))
for frame_idx in output_dict.keys():
        if frame_idx < n_max_frames_per_file:  # strict comparison avoids indexing past the label array
for value in output_dict[frame_idx]:
# Making sure the doa's are within the limits
azi = np.clip(value[1], azimuths[0], azimuths[-1])
ele = np.clip(value[2], elevations[0], elevations[-1])
if joint:
doa_idx = int(azi - azimuths[0])//azi_reln * n_eles + int(ele-elevations[0])//ele_reln
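                    # Worked example (assumed grid): with azimuths = -180:20:160
                    # (n_azis = 18) and elevations = -40:10:40 (n_eles = 9), an
                    # event at azi = 0, ele = 0 maps to
                    # doa_idx = (180 // 20) * 9 + (40 // 10) = 85.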
# create label
labels[frame_idx, value[0], int(doa_idx)] = 1
else:
azi_idx = int((azi - azimuths[0])//azi_reln)
ele_idx = int((ele - elevations[0])//ele_reln)
labels[frame_idx, value[0], azi_idx, ele_idx] = 1
return labels
|
{"hexsha": "f1894fa691aec26f0eb5b01f84f1b54600e66c99", "size": 14512, "ext": "py", "lang": "Python", "max_stars_repo_path": "metrics/dcase_utils.py", "max_stars_repo_name": "thomeou/SALSA", "max_stars_repo_head_hexsha": "30bc6b8c758fc7355a2649ec695db44da22e7feb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2021-11-03T12:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T08:44:24.000Z", "max_issues_repo_path": "metrics/dcase_utils.py", "max_issues_repo_name": "haojunyong/SALSA", "max_issues_repo_head_hexsha": "ccbadb9e35538bb17c10f1cacf57deb9b41e6dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-11-30T02:14:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T17:08:38.000Z", "max_forks_repo_path": "metrics/dcase_utils.py", "max_forks_repo_name": "haojunyong/SALSA", "max_forks_repo_head_hexsha": "ccbadb9e35538bb17c10f1cacf57deb9b41e6dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-11-15T01:28:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T19:24:53.000Z", "avg_line_length": 47.8943894389, "max_line_length": 153, "alphanum_fraction": 0.5846885336, "include": true, "reason": "import numpy", "num_tokens": 3517}
|
c*********************************************************************
c
c molecular matching program
c
c copyright daresbury laboratory
c author w.smith march 2004
c
c*********************************************************************
implicit none
integer, parameter :: npass=10
integer, parameter :: mxatms=10000
character*40 file0,file1
integer i,pass,natms,natm0,natm1,imcon0,imcon1
real*8 cell0,cell1,quality,compare,fitold,fitnew,offset
real*8 xx0(mxatms),yy0(mxatms),zz0(mxatms)
real*8 xx1(mxatms),yy1(mxatms),zz1(mxatms)
dimension cell0(9),cell1(9)
c     read template structure
write(*,*)'Enter name of template CONFIG file'
read(*,'(a40)')file0
call cfgread(file0,natm0,imcon0,cell0,xx0,yy0,zz0)
c     read trial structure
write(*,*)'Enter name of trial CONFIG file'
read(*,'(a40)')file1
call cfgread(file1,natm1,imcon1,cell1,xx1,yy1,zz1)
if(natm0.ne.natm1)then
write(*,*)'Error - CONFIG files not equivalent'
stop
endif
natms=natm0
write(*,*)'Number of atoms in template',natms
c centre the template
call centre(natms,xx0,yy0,zz0)
c optimise match
quality=1.d-6
compare=1.d20
fitnew=compare
pass=0
do while(compare.gt.quality.and.pass.lt.npass)
pass=pass+1
c slide positions to best match of template
call slide(natms,xx0,yy0,zz0,xx1,yy1,zz1)
c rotate positions to best match of template
call rotate(natms,xx0,yy0,zz0,xx1,yy1,zz1)
c calculate current mismatch with template
fitold=fitnew
fitnew=offset(natms,xx0,yy0,zz0,xx1,yy1,zz1)
compare=abs(fitnew-fitold)
enddo
c print out best fit
write(*,*)'RMS structural fit (Angstroms) = ',fitnew
end
subroutine cfgread(fname,natms,imcon,cell,xxx,yyy,zzz)
c*********************************************************************
c
c read a DL_POLY CONFIG file
c
c copyright daresbury laboratory
c author w.smith march 2004
c
c*********************************************************************
implicit none
character*8 name
character*80 head
character*40 fname
integer i,imcon,levcfg,natms
real*8 a,b,c,cell,xxx,yyy,zzz
dimension cell(*),xxx(*),yyy(*),zzz(*)
c open CONFIG file
open(7,file=fname)
c read file header
read(7,'(a80)')head
write(*,*)'File header: ',head
read(7,'(2i10)')levcfg,imcon
c set default cell vectors
do i=1,9
cell(i)=0.d0
enddo
c read cell vectors
if(imcon.gt.0)then
read(7,*)cell(1),cell(2),cell(3)
read(7,*)cell(4),cell(5),cell(6)
read(7,*)cell(7),cell(8),cell(9)
endif
i=0
do while(.true.)
read(7,'(a8)',end=100)name
i=i+1
read(7,*)xxx(i),yyy(i),zzz(i)
if(levcfg.gt.0)read(7,*)a,b,c
if(levcfg.gt.1)read(7,*)a,b,c
enddo
100 natms=i
c close CONFIG file
close(7)
return
end
subroutine centre(natms,xx0,yy0,zz0)
c*********************************************************************
c
c subroutine to centre the template
c
c copyright daresbury laboratory
c author w.smith march 2004
c
c*********************************************************************
implicit none
integer i,natms
real*8 rrr,xx0,yy0,zz0
dimension xx0(*),yy0(*),zz0(*)
dimension rrr(3)
rrr(1)=0.d0
rrr(2)=0.d0
rrr(3)=0.d0
do i=1,natms
rrr(1)=rrr(1)+xx0(i)
rrr(2)=rrr(2)+yy0(i)
rrr(3)=rrr(3)+zz0(i)
enddo
rrr(1)=rrr(1)/dble(natms)
rrr(2)=rrr(2)/dble(natms)
rrr(3)=rrr(3)/dble(natms)
do i=1,natms
xx0(i)=xx0(i)-rrr(1)
yy0(i)=yy0(i)-rrr(2)
zz0(i)=zz0(i)-rrr(3)
enddo
return
end
subroutine slide(natms,xx0,yy0,zz0,xx1,yy1,zz1)
c*********************************************************************
c
c slide structure to best match of template
c
c copyright daresbury laboratory
c author w.smith march 2004
c
c*********************************************************************
implicit none
integer i,natms
real*8 rrr,xx0,yy0,zz0,xx1,yy1,zz1
dimension xx0(*),yy0(*),zz0(*),xx1(*),yy1(*),zz1(*)
dimension rrr(3)
rrr(1)=0.d0
rrr(2)=0.d0
rrr(3)=0.d0
do i=1,natms
rrr(1)=rrr(1)+(xx1(i)-xx0(i))
rrr(2)=rrr(2)+(yy1(i)-yy0(i))
rrr(3)=rrr(3)+(zz1(i)-zz0(i))
enddo
rrr(1)=rrr(1)/dble(natms)
rrr(2)=rrr(2)/dble(natms)
rrr(3)=rrr(3)/dble(natms)
do i=1,natms
xx1(i)=xx1(i)-rrr(1)
yy1(i)=yy1(i)-rrr(2)
zz1(i)=zz1(i)-rrr(3)
enddo
return
end
subroutine rotate(natms,xx0,yy0,zz0,xx1,yy1,zz1)
c*********************************************************************
c
c     rotate trial structure to best match of template
c
c copyright daresbury laboratory
c author w.smith march 2004
c
c*********************************************************************
implicit none
integer i,j,k,natms
real*8 xx0,yy0,zz0,xx1,yy1,zz1,mat,vec,aaa,qqq,rot,txx,tyy,tzz
dimension xx0(*),yy0(*),zz0(*),xx1(*),yy1(*),zz1(*)
dimension aaa(3,3),mat(4,4),vec(4,4),qqq(4),rot(3,3)
c zero work arrays
do i=1,3
do j=1,3
aaa(i,j)=0.d0
enddo
enddo
c calculate optimisation parameters
do i=1,natms
aaa(1,1)=aaa(1,1)+xx0(i)*xx1(i)
aaa(2,1)=aaa(2,1)+yy0(i)*xx1(i)
aaa(3,1)=aaa(3,1)+zz0(i)*xx1(i)
aaa(1,2)=aaa(1,2)+xx0(i)*yy1(i)
aaa(2,2)=aaa(2,2)+yy0(i)*yy1(i)
aaa(3,2)=aaa(3,2)+zz0(i)*yy1(i)
aaa(1,3)=aaa(1,3)+xx0(i)*zz1(i)
aaa(2,3)=aaa(2,3)+yy0(i)*zz1(i)
aaa(3,3)=aaa(3,3)+zz0(i)*zz1(i)
enddo
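c     the 4x4 matrix below is the standard quaternion-fit key matrix:
c     the eigenvector belonging to its largest eigenvalue is the
c     quaternion of the rotation that minimises the rms deviation
c     between the template and trial coordinates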
c construct optimisation matrix
mat(1,1)=aaa(1,1)+aaa(2,2)+aaa(3,3)
mat(2,2)=aaa(1,1)-aaa(2,2)-aaa(3,3)
mat(3,3)=aaa(2,2)-aaa(1,1)-aaa(3,3)
mat(4,4)=aaa(3,3)-aaa(2,2)-aaa(1,1)
mat(1,2)=aaa(2,3)-aaa(3,2)
mat(2,1)=aaa(2,3)-aaa(3,2)
mat(1,3)=aaa(3,1)-aaa(1,3)
mat(3,1)=aaa(3,1)-aaa(1,3)
mat(1,4)=aaa(1,2)-aaa(2,1)
mat(4,1)=aaa(1,2)-aaa(2,1)
mat(2,3)=aaa(1,2)+aaa(2,1)
mat(3,2)=aaa(1,2)+aaa(2,1)
mat(2,4)=aaa(1,3)+aaa(3,1)
mat(4,2)=aaa(1,3)+aaa(3,1)
mat(3,4)=aaa(2,3)+aaa(3,2)
mat(4,3)=aaa(2,3)+aaa(3,2)
c diagonalise optimisation matrix
call jacobi(4,mat,vec)
c find largest eigenvalue
k=1
do i=2,4
if(mat(i,i).gt.mat(k,k))k=i
enddo
c obtain optimal quaternion
do i=1,4
qqq(i)=vec(i,k)
enddo
c construct rotation matrix
rot(1,1)=qqq(1)**2+qqq(2)**2-qqq(3)**2-qqq(4)**2
rot(1,2)=2.d0*(qqq(2)*qqq(3)+qqq(1)*qqq(4))
rot(1,3)=2.d0*(qqq(2)*qqq(4)-qqq(1)*qqq(3))
rot(2,1)=2.d0*(qqq(2)*qqq(3)-qqq(1)*qqq(4))
rot(2,2)=qqq(1)**2-qqq(2)**2+qqq(3)**2-qqq(4)**2
rot(2,3)=2.d0*(qqq(3)*qqq(4)+qqq(1)*qqq(2))
rot(3,1)=2.d0*(qqq(2)*qqq(4)+qqq(1)*qqq(3))
rot(3,2)=2.d0*(qqq(3)*qqq(4)-qqq(1)*qqq(2))
rot(3,3)=qqq(1)**2-qqq(2)**2-qqq(3)**2+qqq(4)**2
c rotate trial structure
do i=1,natms
txx=xx1(i)
tyy=yy1(i)
tzz=zz1(i)
xx1(i)=rot(1,1)*txx+rot(1,2)*tyy+rot(1,3)*tzz
yy1(i)=rot(2,1)*txx+rot(2,2)*tyy+rot(2,3)*tzz
zz1(i)=rot(3,1)*txx+rot(3,2)*tyy+rot(3,3)*tzz
enddo
return
end
function offset(natms,xx0,yy0,zz0,xx1,yy1,zz1)
c*********************************************************************
c
c calculate degree of fit between trial and template structures
c
c copyright daresbury laboratory
c author w.smith march 2004
c
c*********************************************************************
implicit none
integer i,natms
real*8 xx0,yy0,zz0,xx1,yy1,zz1,offset
dimension xx0(*),yy0(*),zz0(*),xx1(*),yy1(*),zz1(*)
offset=0.d0
do i=1,natms
offset=offset+(xx0(i)-xx1(i))**2+(yy0(i)-yy1(i))**2+
x (zz0(i)-zz1(i))**2
enddo
offset=sqrt(offset/dble(natms))
return
end
subroutine jacobi(n,a,v)
c***********************************************************************
c
c     diagonalisation of real symmetric matrices by jacobi method
c
c input parameters:
c
c a(n,n) is the matrix to be diagonalised
c v(n,n) is the eigenvector matrix
c n is the dimension of the matrices
c
c jacobi processes lower triangle only (upper triangle unchanged)
c
c variable rho sets absolute tolerance on convergence
c variable tes is a moving tolerance that diminishes
c on each pass until at true convergence tes<rho
c
c***********************************************************************
implicit none
logical pass
integer n,i,j,k
real*8 a,v,rho,tes,scl,v1,v2,v3,omg,s,c,u,tem
dimension a(n,n),v(n,n)
rho=1.0d-16
tes=0.0d0
scl=0.0d0
c initialize eigenvectors
do i=1,n
do j=1,n
v(i,j)=0.0d0
enddo
v(i,i)=1.0d0
enddo
c rescale matrix for optimal accuracy
do i=1,n
if(abs(a(i,i)).gt.scl)scl=abs(a(i,i))
enddo
do i=1,n
do j=1,i
a(i,j)=a(i,j)/scl
enddo
enddo
c set initial value of moving tolerance
do i=2,n
do j=1,i-1
tes=tes+2.0d0*a(i,j)*a(i,j)
enddo
enddo
tes=sqrt(tes)
c recycle until absolute tolerance satisfied
do while(tes.gt.rho)
pass=.true.
tes=tes/dble(n)
if(tes.lt.rho)tes=rho
c recycle until moving tolerance satisfied
do while(pass)
pass=.false.
c jacobi diagonalisation
do i=2,n
do j=1,i-1
if(abs(a(i,j)).ge.tes)then
pass=.true.
v1=a(j,j)
v2=a(i,j)
v3=a(i,i)
u=0.5d0*(v1-v3)
if(abs(u).lt.rho)then
omg=-1.0d0
else
omg=-v2/sqrt(v2*v2+u*u)
if(u.lt.0.0d0)omg=-omg
endif
s=omg/sqrt(2.0d0*(1.0d0+sqrt(1.0d0-omg*omg)))
c=sqrt(1.0d0-s*s)
do k=1,n
if(k.ge.i)then
tem=a(k,j)*c-a(k,i)*s
a(k,i)=a(k,j)*s+a(k,i)*c
a(k,j)=tem
else if(k.lt.j)then
tem=a(j,k)*c-a(i,k)*s
a(i,k)=a(j,k)*s+a(i,k)*c
a(j,k)=tem
else
tem=a(k,j)*c-a(i,k)*s
a(i,k)=a(k,j)*s+a(i,k)*c
a(k,j)=tem
endif
tem=v(k,j)*c-v(k,i)*s
v(k,i)=v(k,j)*s+v(k,i)*c
v(k,j)=tem
enddo
a(j,j)=v1*c*c+v3*s*s-2.0d0*v2*s*c
a(i,i)=v1*s*s+v3*c*c+2.0d0*v2*s*c
a(i,j)=(v1-v3)*s*c+v2*(c*c-s*s)
endif
enddo
enddo
enddo
enddo
c rescale matrix
do i=1,n
do j=1,i
a(i,j)=scl*a(i,j)
enddo
enddo
return
end
|
{"hexsha": "506c6b5b269bce8f136ccdf609ed08fab646c1dc", "size": 12279, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "utility/matchup.f", "max_stars_repo_name": "zzalscv2/DL_POLY_Classic", "max_stars_repo_head_hexsha": "f2712ca1cdddd154f621f9f5a3c2abac94e41e58", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utility/matchup.f", "max_issues_repo_name": "zzalscv2/DL_POLY_Classic", "max_issues_repo_head_hexsha": "f2712ca1cdddd154f621f9f5a3c2abac94e41e58", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utility/matchup.f", "max_forks_repo_name": "zzalscv2/DL_POLY_Classic", "max_forks_repo_head_hexsha": "f2712ca1cdddd154f621f9f5a3c2abac94e41e58", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4478976234, "max_line_length": 72, "alphanum_fraction": 0.444335858, "num_tokens": 4105}
|
import numpy
def histogram(image, bitDepth=8):
"""
title::
histogram_where
description::
        This method will generate the histogram, probability density function,
        and the cumulative density function of an image. It will use the
numpy.where function from the numpy library to help generate each
output. Each output will be returned as a list.
attributes::
image
(numpy ndarray) An image file that is read in by the cv2.imread
            function. The image can be either black and white or full color and
can have any bit depth. For color images, the color channel order
is BGR (blue, green, red).
bitDepth
(int [optional]) The bit depth of each color channel of the image.
Defaults to 8 bits per color channel.
returns::
h
(list) The histogram for the image. For a color image, the
histogram is a list of three lists with each list representing the
            histogram for each color channel in BGR order. For a grayscale
image, the histogram will be returned as a 2^N element list, N
being the bit depth of the image.
pdf
(list) The PDF (probability density function) for the image. For a
color image, the PDF is a list of three lists with each list
representing the PDF for each color channel in BGR order. For a
            grayscale image, the PDF will be returned as a 2^N element list, N
being the bit depth of the image.
cdf
(list) The CDF (cumulative density function) for the image. For a
color image, the CDF is a list of three lists with each list
representing the CDF for each color channel in BGR order. For a
grayscale image, the CDF will be returned as a 2^N element list, N
being the bit depth of the image.
author::
Alex Perkins
copyright::
Copyright (C) 2016, Rochester Institute of Technology
version::
1.0.0
"""
# Determine number of pixel values in the image
maxCount = 2**bitDepth
# Check if the image is a color image
if len(image.shape) == 3:
# Get the number of rows, columns, and planes in image
rows, cols, planes = image.shape
# Get the number of pixels in the image
numPixels = rows*cols
# Create the histogram with BGR color channels
h = numpy.array([[0]*maxCount, [0]*maxCount, [0]*maxCount])
        # Iterate through the planes and pixel values and find where in the
        # image the pixels are equal to a certain pixel value. Count the
        # pixels returned and add them to the histogram.
for plane in range(planes):
for x in range(maxCount):
index = numpy.where(image[:, :, plane] == x)
h[plane][x] = len(index[0])
# Generate the PDF and CDF for the image
pdf = h/numPixels
cdf = numpy.cumsum(pdf, axis=1)
# Image is grayscale if previous check is not met
else:
# Get the number of rows and columns in the image
rows, cols = image.shape
# Determine the number of pixels in the image
numPixels = rows*cols
# Create the histogram with just one color channel
h = numpy.array([0]*maxCount)
# Iterate through pixels values and find where in the image the pixels
# are equal to a certain pixel value. Find the number of pixels
# returned and add them to the histogram.
for x in range(maxCount):
index = numpy.where(image == x)
h[x] = len(index[0])
        # Create the PDF and CDF for the image
pdf = h/numPixels
cdf = numpy.cumsum(pdf)
# Convert the histogram, PDF, and CDF to lists
h = h.tolist()
pdf = pdf.tolist()
cdf = cdf.tolist()
return h, pdf, cdf
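# Quick sanity-check sketch (hypothetical input): for any image the PDF should
# sum to 1 and the CDF should end at 1.
#
#   im = numpy.random.randint(0, 256, (4, 4)).astype(numpy.uint8)
#   h, pdf, cdf = histogram(im)
#   assert abs(sum(pdf) - 1.0) < 1e-9 and abs(cdf[-1] - 1.0) < 1e-9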
if __name__ == '__main__':
import cv2
import ipcv
import time
    # A greyscale test image
    filename = 'crowd.jpg'
    # A 3-channel color test image (this second assignment is the one in
    # effect; comment it out to test the greyscale image instead)
    filename = 'lenna.tif'
im = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
print('Data type = {0}'.format(type(im)))
print('Image shape = {0}'.format(im.shape))
print('Image size = {0}'.format(im.size))
dataType = str(im.dtype)
imType = {'uint8':8, 'uint16':16, 'uint32':32}
startTime = time.time()
h, pdf, cdf = ipcv.histogram(im, bitDepth=imType[dataType])
print('Elasped time = {0} [s]'.format(time.time() - startTime))
# The follow will produce a figure containing color-coded plots of the
# computed histogram, probability function (PDF), and cumulative density
# function (CDF)
import matplotlib.pyplot
import matplotlib.backends.backend_agg
maxCount = 2**imType[dataType]
bins = list(range(maxCount))
figure = matplotlib.pyplot.figure('Histogram')
canvas = matplotlib.backends.backend_agg.FigureCanvas(figure)
histAxes = figure.add_subplot(3, 1, 1)
pdfAxes = figure.add_subplot(3, 1, 2)
cdfAxes = figure.add_subplot(3, 1, 3)
if len(im.shape) == 3:
histAxes.set_ylabel('Number of Pixels')
histAxes.set_xlim([0, maxCount - 1])
histAxes.plot(bins, h[0], 'b', \
bins, h[1], 'g', \
bins, h[2], 'r')
pdfAxes.set_ylabel('PDF')
pdfAxes.set_xlim([0, maxCount - 1])
pdfAxes.plot(bins, pdf[0], 'b', \
bins, pdf[1], 'g', \
bins, pdf[2], 'r')
cdfAxes.set_xlabel('Digital Count')
cdfAxes.set_ylabel('CDF')
cdfAxes.set_xlim([0, maxCount - 1])
cdfAxes.plot(bins, cdf[0], 'b', \
bins, cdf[1], 'g', \
bins, cdf[2], 'r')
else:
histAxes.set_ylabel('Number of Pixels')
histAxes.set_xlim([0, maxCount - 1])
histAxes.plot(bins, h, 'k')
pdfAxes.set_ylabel('PDF')
pdfAxes.set_xlim([0, maxCount - 1])
pdfAxes.plot(bins, pdf, 'k')
cdfAxes.set_xlabel('Digital Count')
cdfAxes.set_ylabel('CDF')
cdfAxes.set_xlim([0, maxCount - 1])
cdfAxes.plot(bins, cdf, 'k')
matplotlib.pyplot.show()
|
{"hexsha": "5df13004d941fba2d8cbe521fb1cf777d8e61bef", "size": 6348, "ext": "py", "lang": "Python", "max_stars_repo_path": "computing_and_control/ipcv/histogram_where.py", "max_stars_repo_name": "aap5869/RIT", "max_stars_repo_head_hexsha": "d8a408e59a94b0edde56a207592fd7b803172119", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "computing_and_control/ipcv/histogram_where.py", "max_issues_repo_name": "aap5869/RIT", "max_issues_repo_head_hexsha": "d8a408e59a94b0edde56a207592fd7b803172119", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "computing_and_control/ipcv/histogram_where.py", "max_forks_repo_name": "aap5869/RIT", "max_forks_repo_head_hexsha": "d8a408e59a94b0edde56a207592fd7b803172119", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8911917098, "max_line_length": 79, "alphanum_fraction": 0.5979836169, "include": true, "reason": "import numpy", "num_tokens": 1588}
|
% PURPOSE: demo of chowlin()
% Temporal disaggregation with indicators.
% Chow-Lin method
%---------------------------------------------------
% USAGE: chowlin_d
%---------------------------------------------------
close all; clear all; clc;
% Low-frequency data: Spain's Exports of Goods. 1995 prices
Y=[ 20499
23477
25058
27708
31584
31898
30233
32235
34049
36035
39795
44299
47426
52339
62949
69885
77174
90133
96496
102776
113026
115573 ];
% High-frequency data: Spain's Registered exports of goods deflated by
% unit value index.
x=[ 5162
5054
4049
5196
4972
5606
5844
6196
6526
5671
5631
6510
6575
6797
5973
6796
8404
8260
7058
7403
7934
7762
7087
8659
7471
8082
6700
8117
8271
8336
7698
8372
9120
8911
8035
8613
9725
9529
7774
9295
10357
10372
9056
10812
11989
11839
9686
11736
12878
12211
10278
12321
13267
12973
11268
15008
16565
15641
13684
17254
18613
17774
14966
18543
19287
19399
17299
21065
20687
23215
21382
24935
24256
25558
21680
24951
25284
26149
23344
27754
28271
29835
26148
30917
30494
30486
26153
29930 ];
% ---------------------------------------------
% Inputs for td library
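% Note (assumed from the td library's usual conventions): ta selects the
% temporal aggregation rule (1 = the low-frequency value is the sum of the
% high-frequency values, i.e. a flow variable), sc is the number of
% high-frequency periods per low-frequency period (4 = annual to quarterly),
% and type selects the estimation method used inside chowlin().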
% Type of aggregation
ta=1;
% Frequency conversion
sc=4;
% Method of estimation
type=1;
% Intercept
opC = -1;
% Interval of rho for grid search
% rl = [-.33 .80];
rl = 0.57;
% rl = [];
% Name of ASCII file for output
file_sal='td.sal';
% Calling the function: output is loaded in a structure called res
res=chowlin(Y,x,ta,sc,type,opC,rl);
% Calling printing function
tdprint(res,file_sal);
edit td.sal;
% Calling graph function
tdplot(res);
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/39770-temporal-disaggregation-library/chowlin_d.m"}
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The author of this file is: https://github.com/mg2015started
# The following test was modified from examples/multi_instance.py
import argparse
import logging
import warnings
import gym
import numpy as np
import tensorflow as tf
from ac_network import ActorNetwork, CriticNetwork
from adapters import (
action_adapter,
cross_interface,
get_aux_info,
observation_adapter,
reward_adapter,
)
from config import HyperParameters
from soc_mt_ac_network import SocMtActorNetwork, SocMtCriticNetwork
from smarts.core.agent import AgentSpec
from smarts.core.utils.episodes import episodes
warnings.filterwarnings("ignore")
logging.basicConfig(level=logging.INFO)
AGENT_ID = "Agent-007"
WITH_SOC_MT = True
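# Switches between the SocMt* (multi-task) networks and the plain TD3
# networks defined below; with the multi-task variant, per-step rewards are
# vectors and are summed before being added to the episode reward.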
def init_tensorflow():
configProto = tf.compat.v1.ConfigProto()
configProto.gpu_options.allow_growth = True
# reset tensorflow graph
tf.compat.v1.reset_default_graph()
return configProto
def test(test_scenarios, sim_name, headless, num_episodes, seed):
config = HyperParameters()
configProto = init_tensorflow()
# init env
agent_spec = AgentSpec(
        # you can customize AgentInterface to control what obs information you need and the action type
interface=cross_interface,
# agent_builder=actor,
        # you can customize your observation adapter, reward adapter, info adapter, action adapter and so on.
observation_adapter=observation_adapter,
reward_adapter=reward_adapter,
action_adapter=action_adapter,
)
env = gym.make(
"smarts.env:hiway-v0",
scenarios=test_scenarios,
agent_specs={AGENT_ID: agent_spec},
sim_name=sim_name,
headless=headless,
timestep_sec=0.1,
seed=seed,
)
# init nets structure
if WITH_SOC_MT:
model_name = "Soc_Mt_TD3Network"
actor = SocMtActorNetwork(name="actor")
critic_1 = SocMtCriticNetwork(name="critic_1")
critic_2 = SocMtCriticNetwork(name="critic_2")
else:
model_name = "TD3Network"
actor = ActorNetwork(name="actor")
critic_1 = CriticNetwork(name="critic_1")
critic_2 = CriticNetwork(name="critic_2")
saver = tf.compat.v1.train.Saver()
with tf.compat.v1.Session(config=configProto) as sess:
# load network
saver = tf.compat.v1.train.import_meta_graph(
"models/" + model_name + ".ckpt" + ".meta"
)
saver.restore(sess, "models/" + model_name + ".ckpt")
if saver is None:
print("did not load")
# init testing params
test_num = 100
test_ep = 0
# results record
success = 0
failure = 0
passed_case = 0
collision = 0
trouble_collision = 0
time_exceed = 0
episode_time_record = []
# start testing
for episode in episodes(n=num_episodes):
episode_reward = 0
env_steps = 0 # step in one episode
observations = env.reset() # states of all vehs
state = observations[AGENT_ID] # ego state
episode.record_scenario(env.scenario_log)
dones = {"__all__": False}
while not dones["__all__"]:
action = actor.get_action_noise(sess, state, rate=-1)
observations, rewards, dones, infos = env.step(
{AGENT_ID: action}
) # states of all vehs in next step
# ego state in next step
state = observations[AGENT_ID]
if WITH_SOC_MT:
reward = rewards[AGENT_ID]
else:
                    reward = np.sum(rewards[AGENT_ID])
done = dones[AGENT_ID]
info = infos[AGENT_ID]
aux_info = get_aux_info(infos[AGENT_ID]["env_obs"])
episode.record_step(observations, rewards, dones, infos)
if WITH_SOC_MT:
episode_reward += np.sum(reward)
else:
episode_reward += reward
env_steps += 1
if done:
test_ep += 1
# record result
if aux_info == "collision":
collision += 1
failure += 1
elif aux_info == "trouble_collision":
trouble_collision += 1
passed_case += 1
elif aux_info == "time_exceed":
time_exceed += 1
failure += 1
else:
# get episode time
episode_time_record.append(env_steps * 0.1)
success += 1
# print
print(
episode.index,
"EPISODE ended",
"TOTAL REWARD {:.4f}".format(episode_reward),
"Result:",
aux_info,
)
print("total step of this episode: ", env_steps)
episode_reward = 0
env_steps = 0
observations = env.reset() # states of all vehs
state = observations[AGENT_ID] # ego state
env.close()
print("-*" * 15, " result ", "-*" * 15)
print("success: ", success, "/", test_num)
print("collision: ", collision, "/", test_num)
print("time_exceed: ", time_exceed, "/", test_num)
print("passed_case: ", passed_case, "/", test_num)
print("average time: ", np.mean(episode_time_record))
def main(
test_scenarios,
sim_name,
headless,
num_episodes,
seed,
):
test(
test_scenarios,
sim_name,
headless,
num_episodes,
seed,
)
def default_argument_parser(program: str):
"""This factory method returns a vanilla `argparse.ArgumentParser` with the
minimum subset of arguments that should be supported.
You can extend it with more `parser.add_argument(...)` calls or obtain the
arguments via `parser.parse_args()`.
"""
parser = argparse.ArgumentParser(program)
parser.add_argument(
"scenarios",
help="A list of scenarios. Each element can be either the scenario to run "
"(see scenarios/ for some samples you can use) OR a directory of scenarios "
"to sample from.",
type=str,
nargs="+",
)
parser.add_argument(
"--sim-name",
help="a string that gives this simulation a name.",
type=str,
default=None,
)
parser.add_argument(
"--headless", help="Run the simulation in headless mode.", action="store_true"
)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument(
"--sumo-port", help="Run SUMO with a specified port.", type=int, default=None
)
parser.add_argument(
"--episodes",
help="The number of episodes to run the simulation for.",
type=int,
default=100,
)
return parser
if __name__ == "__main__":
parser = default_argument_parser("pytorch-example")
args = parser.parse_args()
main(
test_scenarios=args.scenarios,
sim_name=args.sim_name,
headless=args.headless,
num_episodes=args.episodes,
seed=args.seed,
)
|
{"hexsha": "d495c87d565221e477cd4c9b6a5b79a424392c55", "size": 8573, "ext": "py", "lang": "Python", "max_stars_repo_path": "zoo/policies/cross-rl-agent/cross_rl_agent/train/run_test.py", "max_stars_repo_name": "idsc-frazzoli/SMARTS", "max_stars_repo_head_hexsha": "bae0a6ea160330921edc94a7161a4e8cf72a1974", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 554, "max_stars_repo_stars_event_min_datetime": "2020-10-16T02:30:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:13:00.000Z", "max_issues_repo_path": "zoo/policies/cross-rl-agent/cross_rl_agent/train/run_test.py", "max_issues_repo_name": "idsc-frazzoli/SMARTS", "max_issues_repo_head_hexsha": "bae0a6ea160330921edc94a7161a4e8cf72a1974", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 917, "max_issues_repo_issues_event_min_datetime": "2020-10-17T00:10:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:00:47.000Z", "max_forks_repo_path": "zoo/policies/cross-rl-agent/cross_rl_agent/train/run_test.py", "max_forks_repo_name": "idsc-frazzoli/SMARTS", "max_forks_repo_head_hexsha": "bae0a6ea160330921edc94a7161a4e8cf72a1974", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 135, "max_forks_repo_forks_event_min_datetime": "2020-10-20T01:44:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T04:51:31.000Z", "avg_line_length": 34.0198412698, "max_line_length": 106, "alphanum_fraction": 0.5995567479, "include": true, "reason": "import numpy", "num_tokens": 1842}
|
from .rules import Rules
from .player import Player
from .plot import Plotter
import pickle
import networkx as nx
import pandas as pd
from enum import Enum
class Metrics(Enum):
# warning round_number > 0
macro_degree_assortativity_coefficient = "macro_degree_assortativity_coefficient"
# macro_rich_club_coefficient = "macro_rich_club_coefficient"
macro_transitivity = "macro_transitivity"
macro_average_clustering = "macro_average_clustering"
macro_is_connected = "macro_is_connected"
macro_number_connected_components = "macro_number_connected_components"
macro_is_distance_regular = "macro_is_distance_regular"
macro_dominating_set = "macro_dominating_set"
macro_is_eulerian = "macro_is_eulerian"
macro_isolates = "macro_isolates"
# warning is_connected
macro_diameter = "macro_diameter"
macro_center = "macro_center"
macro_periphery = "macro_periphery"
macro_radius = "macro_radius"
macro_average_shortest_path_length = "macro_average_shortest_path_length"
micro_eccentricity = "micro_eccentricity"
micro_average_neighbor_degree = "micro_average_neighbor_degree"
micro_clustering = "micro_clustering"
micro_degree_centrality = "micro_degree_centrality"
micro_closeness_centrality = "micro_closeness_centrality"
micro_communicability_centrality = "micro_communicability_centrality"
micro_load_centrality = "micro_load_centrality"
micro_betweenness_centrality = "micro_betweenness_centrality"
micro_triangles = "micro_triangles"
micro_square_clustering = "micro_square_clustering"
micro_core_number = "micro_core_number"
micro_closeness_vitality = "micro_closeness_vitality"
def _get_column_names():
return [metric.value for metric in Metrics]
def _get_metrics(graph):
res = []
# macro
    if len(graph.edges()) != 0:
        res.append(nx.degree_assortativity_coefficient(graph))
        # res.append(nx.rich_club_coefficient(graph))
    else:
        res.append(None)
        # res.append(None)  # placeholder for the disabled rich_club_coefficient
res.append(nx.transitivity(graph))
res.append(nx.average_clustering(graph))
res.append(nx.is_connected(graph))
res.append(nx.number_connected_components(graph))
res.append(nx.is_distance_regular(graph))
res.append(nx.dominating_set(graph))
res.append(nx.is_eulerian(graph))
res.append(nx.isolates(graph))
if nx.is_connected(graph):
res.append(nx.diameter(graph))
res.append(nx.center(graph))
res.append(nx.periphery(graph))
res.append(nx.radius(graph))
res.append(nx.average_shortest_path_length(graph))
res.append(nx.eccentricity(graph))
else:
res.append(None)
res.append(None)
res.append(None)
res.append(None)
res.append(None)
res.append(None)
# micro
res.append(nx.average_neighbor_degree(graph))
res.append(nx.clustering(graph))
res.append(nx.degree_centrality(graph))
res.append(nx.closeness_centrality(graph))
res.append(nx.communicability_centrality(graph))
res.append(nx.load_centrality(graph))
res.append(nx.betweenness_centrality(graph))
res.append(nx.triangles(graph))
res.append(nx.square_clustering(graph))
res.append(nx.core_number(graph))
res.append(nx.closeness_vitality(graph))
return res
class Game:
def __init__(self):
self.rules = Rules()
self.graph = nx.Graph()
self.players = {}
self.current_step = 0
self.history = {}
self.impossible_edges = []
self.imposed_edges = []
self.metrics = None
def initialize_graph(self):
"""
Initialize the graph by instantiating graph nodes.
By default, all the remaining nodes are non_competitive players
:return: void
"""
self.graph.add_nodes_from(list(range(self.rules.nb_players)))
self.history[0] = self.graph.edges()
while len(self.players) < self.rules.nb_players:
temp_non_competitive_player = Player(name="NC" + str(len(self.players)))
self.add_player(temp_non_competitive_player)
def add_player(self, player, node_id=None):
"""
Add the given player to the list of players and give it a node_id if there is still an available slot
:param player: Player, player to be added
:return: void
"""
if len(self.players) < self.rules.nb_players:
if not node_id:
node_id = len(self.players)
self.players[node_id] = player
player.node_id = node_id
else:
raise Exception("There are already too many players")
def get_actions(self):
"""
Returns the actions for each player's embedded strategy
"""
modified_edges = set()
for node_id, player in self.players.items():
modified_edge = player.get_action(self, player.node_id)
if modified_edge is not None:
                u, v = modified_edge
                if u > v:
                    modified_edge = (v, u)
modified_edges.add(modified_edge)
return modified_edges
def update_env(self, actions):
"""
Mutates the state of the environment (i.e. the graph) based on the actions performed by the players
"""
for edge in actions:
u, v = edge
if not self.graph.has_edge(*edge) and edge not in self.impossible_edges:
self.graph.add_edge(u, v)
elif self.graph.has_edge(*edge) and edge not in self.imposed_edges:
self.graph.remove_edge(u, v)
def play_round(self, actions=False, metrics=False):
"""
        Play one round of the game. For now, if two players act on the same edge, a logical OR rule
        is adopted (meaning if two players want to destroy the same edge, it will get destroyed).
        No notion of edge strength or cumulative node strength yet
:return: void
"""
if not actions:
actions = self.get_actions()
self.update_env(actions)
self.current_step += 1
self.history[self.current_step] = self.graph.edges()
print("The game at state %s:" %self.current_step)
plotter = Plotter()
plotter.plot_state(self)
if metrics:
self.metrics.loc[len(self.metrics)] = _get_metrics(self.graph)
def play_game(self, metrics=False):
"""
Play the entire game according to the given rules (total number of steps in a game)
:return: void
"""
print("Here is the initial state of the game")
plotter = Plotter()
plotter.plot_state(self)
if metrics:
self.metrics = pd.DataFrame(columns=_get_column_names())
while self.current_step < self.rules.nb_max_step:
self.play_round(metrics=metrics)
def save(self, filename="history.pickle"):
# http://stackoverflow.com/questions/11218477/how-can-i-use-pickle-to-save-a-dict
game_state = {
"rules": self.rules,
"players": _to_repr_players(self.players),
"history": self.history,
"current_step": self.current_step
}
with open(filename, 'wb') as handle:
pickle.dump(game_state, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, filename):
with open(filename, 'rb') as handle:
game_state = pickle.load(handle)
self.rules = game_state["rules"]
self.history = game_state["history"]
self.current_step = game_state["current_step"]
players = _to_players(game_state["players"])
for k, v in players.items():
self.add_player(v, k)
self.initialize_graph()
"""
Pickle doesn't save local objects (like strategies in this example)
Helper PlayerRepr class to solve the problem (dumping the strategy_type instead of the strategy)
"""
def _to_repr_players(players):
res = {}
for k, v in players.items():
player_without_strategy = PlayerRepr(v.rules,
v.type,
v.node_id,
v.name,
v.strategy_type)
res[player_without_strategy.node_id] = player_without_strategy
return res
def _to_players(players):
res = {}
for k, v in players.items():
player_with_strategy = Player(rules=v.rules,
type=v.type,
# node_id=v.node_id, # handled by calling add_player in load()
name=v.name,
strategy_type=v.strategy_type)
res[k] = player_with_strategy
return res
class PlayerRepr:
def __init__(self, rules, type, node_id, name, strategy_type):
self.rules = rules
self.type = type
self.node_id = node_id
self.name = name
self.strategy_type = strategy_type
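# Minimal usage sketch (relying only on the defaults shown above; Rules and
# Player come from this package's sibling modules):
#
#   game = Game()
#   game.initialize_graph()     # fills every free slot with a non-competitive player
#   game.play_game()            # plays rules.nb_max_step rounds, plotting each state
#   game.save("history.pickle")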
|
{"hexsha": "f44688fd958a28ae07efad9cdafc957f1d6688a6", "size": 9174, "ext": "py", "lang": "Python", "max_stars_repo_path": "centrality/game.py", "max_stars_repo_name": "halsabah/Betweeness_Centality_Competition", "max_stars_repo_head_hexsha": "4ebbd11c30f704e2ccd4dd0d1b9baf52a877cfeb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "centrality/game.py", "max_issues_repo_name": "halsabah/Betweeness_Centality_Competition", "max_issues_repo_head_hexsha": "4ebbd11c30f704e2ccd4dd0d1b9baf52a877cfeb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "centrality/game.py", "max_forks_repo_name": "halsabah/Betweeness_Centality_Competition", "max_forks_repo_head_hexsha": "4ebbd11c30f704e2ccd4dd0d1b9baf52a877cfeb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0, "max_line_length": 113, "alphanum_fraction": 0.6329845215, "include": true, "reason": "import networkx", "num_tokens": 1935}
|
# Importing the required packages
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import graphviz
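# Note: the graphviz Python package only generates DOT source; rendering via
# graph.render() additionally requires the Graphviz system binaries (dot) on PATH.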
# Function importing Dataset
def importdata():
balance_data = pd.read_csv('capitalshare.csv')
balance_data = balance_data[['Duration','Start station number','End station number','Member type']]
#print(balance_data)
    # Printing the dataset length and shape
    print ("Dataset Length: ", len(balance_data))
    print ("Dataset Shape: ", balance_data.shape)
    # Printing the dataset observations
print ("Dataset: ",balance_data.head())
return balance_data
# Function to split the dataset
def splitdataset(balance_data):
    # Separating the target variable
X = balance_data.values[:, :-1]
Y = balance_data.values[:, -1]
# Spliting the dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size = 0.3, random_state = 100)
return X_train, X_test, y_train, y_test
#Function to visualize tree
def visualize_tree(data,clf,clf_name):
features = data.columns
features = features[:-1]
class_names = list(set(data.iloc[:,-1]))
dot_data = tree.export_graphviz(clf, out_file=None, \
feature_names=features,class_names=class_names, \
filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph.render('dtree_render_'+clf_name,view=True)
# Function to perform training with giniIndex.
def train_using_gini(X_train, X_test, y_train,data):
# Creating the classifier object
clf_gini = DecisionTreeClassifier(criterion = "gini",
random_state = 100,max_depth=3, min_samples_leaf=5)
# Performing training
clf_gini.fit(X_train, y_train)
visualize_tree(data,clf_gini,'gini')
print('\nFeature Importance : ',clf_gini.feature_importances_)
return clf_gini
# Function to perform training with entropy.
def train_using_entropy(X_train, X_test, y_train, data):
# Decision tree with entropy
clf_entropy = DecisionTreeClassifier(
criterion = "entropy", random_state = 100,
max_depth = 3, min_samples_leaf = 5)
# Performing training
clf_entropy.fit(X_train, y_train)
visualize_tree(data,clf_entropy,'entropy')
print('\nFeature Importance : ',clf_entropy.feature_importances_)
return clf_entropy
# Function to make predictions
def prediction(X_test, clf_object):
# Predicton on test with giniIndex
y_pred = clf_object.predict(X_test)
print("Predicted values:")
print(y_pred)
return y_pred
# Function to calculate accuracy
def cal_accuracy(y_test, y_pred):
print("Confusion Matrix: ",
confusion_matrix(y_test, y_pred))
print ("Accuracy : ",
accuracy_score(y_test,y_pred)*100)
print("Report : ",
classification_report(y_test, y_pred))
# Main process
def main():
# Building Phase
data = importdata()
X_train, X_test, y_train, y_test = splitdataset(data)
clf_gini = train_using_gini(X_train, X_test, y_train,data)
    clf_entropy = train_using_entropy(X_train, X_test, y_train, data)
# Operational Phase
print("Results Using Gini Index:")
# Prediction using gini
y_pred_gini = prediction(X_test, clf_gini)
cal_accuracy(y_test, y_pred_gini)
print("Results Using Entropy:")
# Prediction using entropy
y_pred_entropy = prediction(X_test, clf_entropy)
cal_accuracy(y_test, y_pred_entropy)
# Calling main function
if __name__=="__main__":
main()
|
{"hexsha": "d45d41889a134f43991921ae4f00e4cc562bfd42", "size": 3582, "ext": "py", "lang": "Python", "max_stars_repo_path": "Daily Python/23_DecisionTree/23_DecisionTree.py", "max_stars_repo_name": "Harjiwan/Python", "max_stars_repo_head_hexsha": "5f8e0b7f4f2323c64161541075636eefc41043b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-03-23T03:14:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T12:58:40.000Z", "max_issues_repo_path": "Daily Python/23_DecisionTree/23_DecisionTree.py", "max_issues_repo_name": "Harjiwan/Python", "max_issues_repo_head_hexsha": "5f8e0b7f4f2323c64161541075636eefc41043b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-20T02:53:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-20T02:53:33.000Z", "max_forks_repo_path": "Daily Python/23_DecisionTree/23_DecisionTree.py", "max_forks_repo_name": "Harjiwan/Python", "max_forks_repo_head_hexsha": "5f8e0b7f4f2323c64161541075636eefc41043b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2019-02-07T19:22:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T22:17:19.000Z", "avg_line_length": 31.4210526316, "max_line_length": 100, "alphanum_fraction": 0.7554438861, "include": true, "reason": "import numpy", "num_tokens": 900}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines, redefined-builtin
"""Relay to ONNX codegen """
import os
import struct
import copy
import numpy
import onnx
import onnx.utils
from onnx import numpy_helper, OperatorSetIdProto, defs
from onnx import TensorProto
import tvm
from tvm import relay
import tvm._ffi
from tvm.relay.expr_functor import ExprVisitor
from tvm.relay.ty import TupleType, TensorType
ONNX_OPSET_VERSONS_SUPPORTED = [11]
def run_onnx_optimizer(onnx_model):
"""Run ONNX's optimization routines.
ONNX Optimizer was moved to an external library in
version 1.9. Attempt to use the optimizer in onnx if
it is available, fall back to the standalone
onnxoptimizer otherwise, and return the model
    unoptimized if neither is available.
"""
try:
onnx_polish_model = onnx.utils.polish_model
except AttributeError:
pass
else:
return onnx_polish_model(onnx_model)
try:
# pylint: disable=import-outside-toplevel
import onnxoptimizer
except ImportError:
pass
else:
return onnxoptimizer.optimize(onnx_model)
    return onnx_model
def tvm_array_to_list(arr):
return tuple(x.value for x in arr)
def get_onnx_version():
return onnx.__version__
def infer_type(node):
"""A method to infer the type of a relay expression."""
mod = tvm.IRModule.from_expr(node)
mod = relay.transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(node, relay.Function) else entry.body
def call_node_infer_type(node):
"""infer the output types of call node"""
infer_out = infer_type(node)
    out_type = infer_out.checked_type
if isinstance(out_type, TensorType):
types = [out_type]
elif isinstance(out_type, TupleType):
types = list(out_type.fields)
else:
raise RuntimeError(
"Unsupported output type %s in operator %s" % (type(out_type), node.op.nae)
)
return types
def add_input(data, name, prefix, model_container):
input_name = "{}_{}".format(prefix, name)
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype]
tensor_value_info = onnx.helper.make_tensor_value_info(input_name, dtype, shape=data.shape)
model_container.add_inputs([tensor_value_info])
data_tensor = numpy_helper.from_array(data, input_name)
model_container.add_initializers([data_tensor])
return input_name
class OpConverter(object):
"""Operator converter Base Class."""
@classmethod
def convert_attributes(cls, attrs):
"""convert Relay attributes to ONNX attributes.
The derived classes should implement this method
if attributes are required by the operator
otherwise by default no attributes are passed
"""
return {}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
onnx_node = onnx.helper.make_node(
cls.__name__, node_entry["input_names"], node_entry["output_names"], **attrs
)
model_container.add_nodes([onnx_node])
def rename(op_name):
"""This method creates dynamic operator of name op_name with empty attributes"""
return type(op_name, (OpConverter,), {})
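# For example, rename("Relu") yields a converter class whose default convert()
# emits an ONNX node named "Relu" with the Relay call's inputs and no attributes.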
class Reshape(object):
"""Operator converter for Reshape."""
@classmethod
def convert(cls, node_entry, model_container, node_dict):
"""Converts Relay operator Reshape to ONNX operator.
Relay operator accepts shape as attribute but ONNX operator
accepts it as a input.
"""
name = node_entry["name"]
shape = numpy.asarray(
[a.value for a in node_entry["relay_node"].attrs.newshape], dtype=numpy.int64
)
input_names = [
node_entry["input_names"][0],
add_input(shape, name, "shape", model_container),
]
node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([node])
class Conv(OpConverter):
"""Operator converter for Conv."""
@classmethod
def convert_attributes(cls, attrs):
return {
"group": attrs.get_int("groups"),
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"dilations": attrs.get_int_tuple("dilation"),
"kernel_shape": attrs.get_int_tuple("kernel_size"),
}
class ConvTranspose(OpConverter):
"""Operator converter for ConvTranspose."""
@classmethod
def convert_attributes(cls, attrs):
return {
"group": attrs.get_int("groups"),
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"dilations": attrs.get_int_tuple("dilation"),
"kernel_shape": attrs.get_int_tuple("kernel_size"),
"output_padding": attrs.get_int_tuple("output_padding"),
}
class MaxPool(OpConverter):
"""Operator converter for MaxPool."""
@classmethod
def convert_attributes(cls, attrs):
return {
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"kernel_shape": attrs.get_int_tuple("pool_size"),
"ceil_mode": 1 if attrs.ceil_mode else 0,
}
class Transpose(OpConverter):
"""Operator converter for Transpose."""
@classmethod
def convert_attributes(cls, attrs):
return {"perm": attrs.get_int_tuple("axes")} if attrs["axes"] else {}
class MatMul(OpConverter):
"""Operator converter for MatMul."""
@classmethod
def convert(cls, node_entry, model_container, node_dict):
inter_output_name = "inter{}".format(node_entry["name"])
transpose_node = onnx.helper.make_node(
Transpose.__name__, [node_entry["input_names"][1]], [inter_output_name], perm=(1, 0)
)
model_container.add_nodes([transpose_node])
inputs = [node_entry["input_names"][0], inter_output_name]
matmul_node = onnx.helper.make_node(cls.__name__, inputs, node_entry["output_names"])
model_container.add_nodes([matmul_node])
class Flatten(OpConverter):
"""Operator converter for Flatten."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axis": 1,
}
class BatchNormalization(OpConverter):
"""Operator converter for BatchNormalization."""
@classmethod
def convert_attributes(cls, attrs):
return {
"epsilon": float(attrs.get_str("epsilon")),
"axis": float(attrs.get_int("axis")),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
"""Converts Relay operator batch_norm to ONNX operator.
Relay operator has property axis to handle data in NHWC format.
"""
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
transpose_out_name = node_entry["input_names"][0]
inter_output_names = [node_entry["output_names"][0]]
# axis==3 means channel is specified along the 3rd axis
if attrs["axis"] == 3:
transpose_out_name = "transpose_{}".format(node_entry["name"])
node_transposed = onnx.helper.make_node(
Transpose.__name__,
[node_entry["input_names"][0]],
[transpose_out_name],
perm=[0, 3, 1, 2],
)
model_container.add_nodes([node_transposed])
inter_output_names = ["batch_norm_{}".format(node_entry["name"])]
input_names = [transpose_out_name] + node_entry["input_names"][1:]
batch_norm_node = onnx.helper.make_node(
cls.__name__, input_names, inter_output_names, epsilon=attrs["epsilon"]
)
model_container.add_nodes([batch_norm_node])
if attrs["axis"] == 3:
node_transposed = onnx.helper.make_node(
Transpose.__name__,
inter_output_names,
[node_entry["output_names"][0]],
perm=[0, 2, 3, 1],
)
model_container.add_nodes([node_transposed])
class Dropout(OpConverter):
"""Operator converter for Dropout."""
@classmethod
def convert_attributes(cls, attrs):
return {
"ratio": float(attrs.get_str("rate")),
}
class AveragePool(MaxPool):
"""Operator converter for AveragePool."""
class Concat(OpConverter):
"""Operator converter for Concat."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axis": attrs.get_int("axis"),
}
class BiasAdd(OpConverter):
"""Operator converter for BiasAdd."""
@classmethod
def convert(cls, node_entry, model_container, node_dict):
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node_entry can not be a Tuple"
input_node = input_node[0]
data_ndim = len(input_node["types"][0].shape)
axis = node_entry["relay_node"].attrs.get_int("axis")
if axis < 0:
axis = axis + data_ndim
new_axes = data_ndim - axis - 1
if new_axes:
inter_output_name = "inter{}".format(node_entry["name"])
unsqueeze_node = onnx.helper.make_node(
"Unsqueeze",
[node_entry["input_names"][1]],
[inter_output_name],
axes=tuple(range(1, new_axes + 1)),
)
model_container.add_nodes([unsqueeze_node])
else:
inter_output_name = node_entry["input_names"][1]
inputs = [node_entry["input_names"][0], inter_output_name]
matmul_node = onnx.helper.make_node("Add", inputs, node_entry["output_names"])
model_container.add_nodes([matmul_node])
class ReduceMean(OpConverter):
"""Operator converter for ReduceMean."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axes": attrs.axis,
"keepdims": 0 if bool(attrs.get_int("keepdims", 0)) is False else 1,
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].shape
axis = node_entry["relay_node"].attrs.axis
axis = list(range(shape.size())) if not axis else tvm_array_to_list(axis)
exclude = 0 if not bool(node_entry["relay_node"].attrs.exclude) else 1
keepdims = 0 if not bool(node_entry["relay_node"].attrs.keepdims) else 1
if exclude:
all_axis = list(range(len(shape)))
axis = set(all_axis) - set(axis)
node = onnx.helper.make_node(
cls.__name__,
node_entry["input_names"],
node_entry["output_names"],
axes=axis,
keepdims=keepdims,
)
model_container.add_nodes([node])
class Pad(OpConverter):
"""Operator converter for Pad."""
@classmethod
def convert_attributes(cls, attrs):
before = []
after = []
for axis_pads in attrs.pad_width:
before.append(axis_pads[0])
after.append(axis_pads[1])
pads = before + after
pads = numpy.asarray(pads, dtype=pads[0].dtype)
return {
"pads": pads,
"mode": attrs.get_str("pad_mode"),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
"""Converts Relay operator Pad to ONNX operator.
Relay operator accepts pads as attribute but ONNX operator
accepts it as a input.
"""
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
pad_data = numpy.asarray(attrs["pads"], dtype=attrs["pads"][0].dtype).astype(numpy.int64)
input_names = [
node_entry["input_names"][0],
add_input(pad_data, name, "pads", model_container),
node_entry["input_names"][1],
]
node = onnx.helper.make_node(
cls.__name__, input_names, node_entry["output_names"], mode=attrs["mode"]
)
model_container.add_nodes([node])
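# Illustrative note (not part of the original converter): Relay stores pad
# widths per axis as (before, after) pairs, while ONNX expects all "before"
# values followed by all "after" values. For a hypothetical 2-D input with
# pad_width [[1, 1], [2, 2]], the attribute conversion above yields
# pads = [1, 2, 1, 2], which is then fed to the ONNX Pad node as an input.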
class Softmax(OpConverter):
"""Operator converter for SoftMax."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axis": attrs.axis,
}
class Squeeze(OpConverter):
"""Operator converter for Squeeze."""
@classmethod
def convert_attributes(cls, attrs):
return {
"axes": attrs.axis,
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].shape
axis = node_entry["relay_node"].attrs.get_int("axis")
if not axis:
axis = []
for axis_idx, val in enumerate(shape):
if val.value == 1:
axis.append(axis_idx)
else:
axis = node_entry["relay_node"].attrs.get_int_tuple("axis")
node = onnx.helper.make_node(
cls.__name__, node_entry["input_names"], node_entry["output_names"], axes=axis
)
model_container.add_nodes([node])
class Slice(OpConverter):
"""Operator converter for Slice."""
@classmethod
def convert_attributes(cls, attrs):
return {
"starts": attrs.get_int_tuple("begin"),
"ends": attrs.get_int_tuple("end"),
"steps": attrs.get_int_tuple("strides"),
"slice_mode": attrs.get_str("slice_mode"),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].shape
starts = list(attrs["starts"])
ends = list(attrs["ends"])
steps = list(attrs["steps"])
starts += [0] * (len(shape) - len(starts))
ends += [shape[i] + 1 for i in range(len(ends), len(shape))]
axes = list(range(len(shape)))
if attrs["slice_mode"] == "size":
ends = [
starts[i] + (shape[i] + 1 if ends[i] < 0 else ends[i]) for i in range(len(shape))
]
steps = [1] * len(shape)
else:
steps += [1] * (len(shape) - len(steps))
starts = numpy.asarray(starts).astype(numpy.int64)
ends = numpy.asarray(ends).astype(numpy.int64)
axes = numpy.asarray(axes).astype(numpy.int64)
steps = numpy.asarray(steps).astype(numpy.int64)
input_names = []
input_names.append(add_input(starts, name, "starts", model_container))
input_names.append(add_input(ends, name, "ends", model_container))
input_names.append(add_input(axes, name, "axes", model_container))
input_names.append(add_input(steps, name, "steps", model_container))
input_names = [node_entry["input_names"][0]] + input_names
slice_node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([slice_node])
class Split(OpConverter):
"""Operator converter for Split."""
@classmethod
def convert_attributes(cls, attrs):
indices_or_sections = attrs["indices_or_sections"]
if isinstance(indices_or_sections, (list, tvm.ir.container.Array)):
indices_or_sections = attrs.get_int_tuple("indices_or_sections")
if isinstance(indices_or_sections, tvm.ir.PrimExpr):
indices_or_sections = indices_or_sections.value
return {
"indices_or_section": indices_or_sections,
"axis": attrs.get_int("axis"),
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
shape = input_node["types"][0].concrete_shape
indices_or_sect = attrs["indices_or_section"]
axis = attrs["axis"]
axis_length = shape[axis]
if isinstance(indices_or_sect, int):
split = [axis_length // indices_or_sect] * indices_or_sect
else:
split = []
for i in range(len(indices_or_sect) + 1):
if i == 0:
split.append(indices_or_sect[0])
elif i == len(indices_or_sect):
split.append(axis_length - indices_or_sect[-1])
else:
split.append(indices_or_sect[i] - indices_or_sect[i - 1])
slice_node = onnx.helper.make_node(
cls.__name__,
node_entry["input_names"],
node_entry["output_names"],
split=split,
axis=axis,
)
model_container.add_nodes([slice_node])
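# Illustrative note (not part of the original converter): ONNX Split expects
# explicit section sizes, so the loop above converts Relay split indices into
# sizes. For a hypothetical axis of length 8 with indices_or_sections (2, 5),
# the computed sizes are [2, 5 - 2, 8 - 5] == [2, 3, 3].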
class LayoutTransform(OpConverter):
"""Operator converter for Layouttransform"""
@classmethod
def convert_attributes(cls, attrs):
src_layout = attrs.get_str("src_layout")
dst_layout = attrs.get_str("dst_layout")
perm = [src_layout.index(c) for c in dst_layout]
return {"perm": tuple(perm)}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
onnx_node = onnx.helper.make_node(
"Transpose", node_entry["input_names"], node_entry["output_names"], **attrs
)
model_container.add_nodes([onnx_node])
class Clip(OpConverter):
"""Operator converter for Clip."""
@classmethod
def convert_attributes(cls, attrs):
return {"min": attrs.a_min, "max": attrs.a_max}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
min_val = numpy.asarray(attrs["min"]).astype(numpy.float32)
max_val = numpy.asarray(attrs["max"]).astype(numpy.float32)
input_names = []
input_names.append(add_input(min_val, name, "min", model_container))
input_names.append(add_input(max_val, name, "max", model_container))
input_names = [node_entry["input_names"][0]] + input_names
node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([node])
class Expand(OpConverter):
"""Operator converter for Expand_dims."""
@classmethod
def convert_attributes(cls, attrs):
return {"axis": attrs.axis, "num_newaxis": attrs.num_newaxis}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node_entry can not be a Tuple"
input_node = input_node[0]
data_shape = input_node["types"][0].shape
new_shape = list(data_shape)
for _ in range(attrs["num_newaxis"]):
new_shape.insert(attrs["axis"], 1)
new_shape = numpy.asarray(new_shape).astype(numpy.int64)
input_names = []
input_names.append(add_input(new_shape, name, "shape", model_container))
input_names = [node_entry["input_names"][0]] + input_names
node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
model_container.add_nodes([node])
class ConstantOfShapeZeros(OpConverter):
"""Operator converter for ConstantOfShape."""
@classmethod
def convert_attributes(cls, attrs):
return {"value": 0}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
dtype = input_node["types"][0].dtype
name = node_entry["name"]
shape = [val.value for val in input_node["types"][0].shape]
shape = numpy.asarray(shape).astype(numpy.int64)
input_names = []
input_names.append(add_input(shape, name, "shape", model_container))
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)]
tensor_value = onnx.helper.make_tensor("value", dtype, [1], [attrs["value"]])
node = onnx.helper.make_node(
"ConstantOfShape", input_names, node_entry["output_names"], value=tensor_value
)
model_container.add_nodes([node])
class ConstantOfShapeOnes(ConstantOfShapeZeros):
"""Operator converter for ConstantOfShape."""
@classmethod
def convert_attributes(cls, attrs):
return {"value": 1}
class LRN(OpConverter):
"""Operator converter for LRN."""
@classmethod
def convert_attributes(cls, attrs):
"""axis attr is not supported as an argument in onnx.
Onnx only supports axis=1 (channels)."""
if attrs.get_int("axis") != 1:
raise RuntimeError(
"Unsupported axis %s in operator relay lrn operator. "
"Only axis = 1 is supported by Onnx." % (attrs.get_int("axis"))
)
return {"alpha": attrs.alpha, "beta": attrs.beta, "bias": attrs.bias, "size": attrs.size}
class Cast(OpConverter):
"""Operator converter for Cast."""
@classmethod
def convert_attributes(cls, attrs):
return {"to": getattr(TensorProto, attrs.dtype.upper())}
class Resize(OpConverter):
"""Operator converter for Resize."""
@classmethod
def convert_attributes(cls, attrs):
method = attrs.get_str("method")
if method == "nearest_neighbor":
mode = b"nearest"
elif "linear" in method: # linear / bilinear
mode = b"linear"
elif "cubic" in method: # cubic / bicubic
mode = b"cubic"
else:
raise RuntimeError("Unsupported method %s in operator Resize" % method)
coord_trans = attrs.get_str("coordinate_transformation_mode")
if coord_trans == "half_pixel":
coord_trans = b"half_pixel"
elif coord_trans == "align_corners":
coord_trans = b"align_corners"
elif coord_trans == "asymmetric":
coord_trans = b"asymmetric"
else:
raise RuntimeError(
"Unsupported coordinate transform mode %s in operator Resize" % coord_trans
)
rounding_method = attrs.get_str("rounding_method")
if rounding_method == "round":
rounding_method = b"round_prefer_ceil"
elif rounding_method == "floor":
rounding_method = b"floor"
elif rounding_method == "ceil":
rounding_method = b"ceil"
else:
raise RuntimeError(
"Unsupported rounding method %s in operator Resize" % rounding_method
)
size = attrs.get_int_tuple("size")
return {
"mode": mode,
"coord_trans": coord_trans,
"size": size,
"nearest_mode": rounding_method,
}
@classmethod
def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)
name = node_entry["name"]
input_node = node_dict[node_entry["inputs"][0]]
assert len(input_node) == 1, "input node can not be a Tuple"
input_node = input_node[0]
input_shape = input_node["types"][0].shape
# (TBD) needed in opset 11
roi = [0] * len(input_shape) + [1] * len(input_shape)
roi_array = numpy.asarray(roi).astype(numpy.float64)
roi_node = add_input(roi_array, name, "roi", model_container)
out_size = attrs["size"]
# (onnx) rank of scale / size must match rank of X
# relay size node contains only spatial dimensions
# pad with 1s to match rank
match_rank_pad = len(input_shape) - len(out_size)
out_size_full_rank = input_shape[:match_rank_pad] + list(out_size)
out_size_array = numpy.asarray(out_size_full_rank).astype(numpy.int64)
input_size_array = numpy.asarray(list(input_shape)).astype(numpy.int64)
scale_array = numpy.divide(out_size_array, input_size_array).astype(numpy.float32)
scale_node = add_input(scale_array, name, "scales", model_container)
input_names = [node_entry["input_names"][0], roi_node, scale_node]
resize_node = onnx.helper.make_node(
cls.__name__,
input_names,
node_entry["output_names"],
mode=attrs["mode"],
coordinate_transformation_mode=attrs["coord_trans"],
nearest_mode=attrs["nearest_mode"],
)
model_container.add_nodes([resize_node])
relay_to_onnx_op_mapping = {
"reshape": Reshape,
"nn.conv2d": Conv,
"nn.conv2d_transpose": ConvTranspose,
"add": rename("Add"),
"nn.relu": rename("Relu"),
"transpose": Transpose,
"nn.dense": MatMul,
"nn.max_pool2d": MaxPool,
"nn.batch_flatten": Flatten,
"multiply": rename("Mul"),
"nn.bias_add": BiasAdd,
"nn.batch_norm": BatchNormalization,
"nn.global_avg_pool2d": rename("GlobalAveragePool"),
"concatenate": Concat,
"nn.dropout": Dropout,
"nn.avg_pool2d": AveragePool,
"divide": rename("Div"),
"mean": ReduceMean,
"nn.pad": Pad,
"nn.softmax": Softmax,
"squeeze": Squeeze,
"strided_slice": Slice,
"greater": rename("Greater"),
"less": rename("Less"),
"equal": rename("Equal"),
"zeros_like": ConstantOfShapeZeros,
"ones_like": ConstantOfShapeOnes,
"subtract": rename("Sub"),
"split": Split,
"exp": rename("Exp"),
"layout_transform": LayoutTransform,
"clip": Clip,
"expand_dims": Expand,
"nn.lrn": LRN,
"sigmoid": rename("Sigmoid"),
"copy": rename("Identity"),
"round": rename("Round"),
"cast": Cast,
"image.resize2d": Resize,
}
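# Illustrative note (not part of the original mapping): Relay operators whose
# ONNX counterpart is a one-to-one rename can be registered via rename(),
# e.g. a hypothetical additional entry could look like:
#   relay_to_onnx_op_mapping["negative"] = rename("Neg")
# Operators that need attribute or graph rewrites get a dedicated OpConverter
# subclass instead, as the classes above do.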
class ModelContainer(object):
"""A container class to hold different attributes of ONNX model graph"""
def __init__(self, name, opset_version):
self._name = name
self._opset_version = opset_version
self._inputs = []
self._outputs = []
self._nodes = []
self._initializers = []
def add_inputs(self, inputs):
self._inputs.extend(inputs)
def add_outputs(self, outputs):
self._outputs.extend(outputs)
def add_nodes(self, nodes):
self._nodes.extend(nodes)
def add_initializers(self, initializers):
self._initializers.extend(initializers)
def _get_opsets(self):
opsets = []
imp = OperatorSetIdProto()
imp.version = self._opset_version
opsets.append(imp)
return opsets
def make_model(self):
"""Creates the onnx model from the graph"""
onnx_graph = onnx.helper.make_graph(
self._nodes, self._name, self._inputs, self._outputs, self._initializers
)
kwargs = {}
kwargs["opset_imports"] = self._get_opsets()
kwargs["producer_name"] = "TVM Relay"
kwargs["producer_version"] = tvm.__version__
return onnx.helper.make_model(onnx_graph, **kwargs)
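# Usage sketch (illustrative, not part of the original module): ModelContainer
# accumulates graph pieces and emits a ModelProto. A minimal single-node graph
# could be assembled roughly as follows (all names are example values):
#   mc = ModelContainer("demo", opset_version=11)
#   x = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 3])
#   y = onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 3])
#   mc.add_inputs([x])
#   mc.add_outputs([y])
#   mc.add_nodes([onnx.helper.make_node("Relu", ["x"], ["y"])])
#   demo_model = mc.make_model()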
class RelayToONNXConverter(ExprVisitor):
"""A helper class to traverse the Relay graph and convert Relay nodes to ONNX model
Parameters
----------
name : str
name of the model
params : dict
dict of the parameter names and NDarray values
opset_version : int
target onnx opset version
"""
def __init__(self, name, params, opset_version):
super().__init__()
self._name = name
self._mc = ModelContainer(name, opset_version)
self._params = params
self._node_dict = {}
self._node_count = 0
self.last_node = None
@classmethod
def _get_node_entry(cls, relay_node, name):
return {
"relay_node": relay_node,
"inputs": [relay_node], # inputs in the form of relay nodes
"types": [], # output types in case of call nodes else self type
"name": name, # name of the node
"input_names": [name], # input names in case of call nodes else self name
"output_names": [name], # output names in case of call nodes else self name
"op": None, # op name in case of call node else None
}
def convert_to_onnx(self, func):
"""Traverse Relay graph and generate a ONNX model"""
self.visit(func)
self._add_output(self._node_dict[self.last_node])
model = self._mc.make_model()
return run_onnx_optimizer(model)
def visit(self, expr):
self._node_count += 1
super().visit(expr)
def visit_constant(self, const):
node_index = self._node_count
name = self._name + "_const_" + str(node_index)
node_entry = self._get_node_entry(const, name)
node_entry["types"] = [const.checked_type]
self._add_constant_input(node_entry, node_index)
self._node_dict[const] = [node_entry]
def visit_var(self, var):
node_index = self._node_count
node_entry = self._get_node_entry(var, var.name_hint)
node_entry["types"] = [var.type_annotation]
self._add_input(node_entry, node_index)
self._node_dict[var] = [node_entry]
def visit_tuple(self, tup):
self._node_dict[tup] = []
for f in tup.fields:
self.visit(f)
self._node_dict[tup].extend(self._node_dict[f])
self.last_node = tup
def visit_tuple_getitem(self, t):
self.visit(t.tuple_value)
tup_node = self._node_dict[t.tuple_value]
if len(tup_node) > 1:
self._node_dict[t] = tup_node[t.index]
else:
node_entry = copy.deepcopy(tup_node[0])
output_names = [node_entry["output_names"][t.index]]
node_entry["output_names"] = output_names
self._node_dict[t] = [node_entry]
self.last_node = t
def visit_call(self, call):
node_index = self._node_count
op = call.op
name = "{}_{}".format(op, node_index)
node_entry = self._get_node_entry(call, name)
node_entry["op"] = op
node_entry["input_names"] = []
node_entry["inputs"] = []
node_entry["output_names"] = None
for input_arg in call.args:
self.visit(input_arg)
input_names = []
for arg_node_entry in self._node_dict[input_arg]:
input_names.extend(arg_node_entry["output_names"])
node_entry["input_names"].extend(input_names)
node_entry["inputs"].extend([input_arg])
node_entry["types"] = call_node_infer_type(call)
node_entry["output_names"] = []
for i in range(len(node_entry["types"])):
node_entry["output_names"].append(name + str(i))
self.last_node = call
self._add_node(node_entry, node_index)
self._node_dict[call] = [node_entry]
def _add_node(self, node_entry, idx):
"""Convert Relay operator node to ONNX operator and add it to container nodes list"""
if node_entry["op"].name not in relay_to_onnx_op_mapping:
raise NotImplementedError(
"Currently the operator '{0}' is " "not supported.".format(node_entry["op"].name)
)
converter = relay_to_onnx_op_mapping[node_entry["op"].name]()
return converter.convert(node_entry, self._mc, self._node_dict)
def _add_params(self, node_entry, idx):
"""Add param value to initializer and name to inputs"""
param_name = node_entry["name"]
        assert (
            param_name in self._params
        ), "The parameter {0} is not present in the params dict provided.".format(param_name)
value = self._params[param_name]
numpy_array = value.numpy()
tensor = numpy_helper.from_array(numpy_array, param_name)
self._mc.add_initializers([tensor])
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype]
input = onnx.helper.make_tensor_value_info(param_name, dtype, shape=numpy_array.shape)
self._mc.add_inputs([input])
def _add_constant_input(self, node_entry, idx):
"""Create named input for constant and add it to container inputs.
If input is a parameter then add to param
"""
node = node_entry["relay_node"]
param_name = node_entry["name"]
self._params[param_name] = node.data
self._add_params(node_entry, idx)
def _add_input(self, node_entry, idx):
"""Add input node to container inputs. If input is a parameter then add to param"""
if node_entry["name"] in self._params:
self._add_params(node_entry, idx)
else:
node_type = node_entry["types"][0]
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]
input = onnx.helper.make_tensor_value_info(
node_entry["name"], dtype, shape=node_type.concrete_shape
)
self._mc.add_inputs([input])
def _add_output(self, node_entries):
"""Add output node to container outputs."""
for node_entry in node_entries:
for node_type, output_name in zip(node_entry["types"], node_entry["output_names"]):
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]
output = onnx.helper.make_tensor_value_info(
output_name, dtype, shape=node_type.concrete_shape
)
self._mc.add_outputs([output])
def to_onnx(relay_ir, params, name, opset_version=11, path=None):
"""Convert a Relay Function Module into an equivalent ONNX and serialize it to the path
Parameters
----------
relay_ir : tvm.ir.IRModule or tvm.relay.Function
The relay module object
params : dict
dict of the parameter names and NDarray values
name : str
name of the output ONNX graph
opset_version : int
target onnx opset version
path : str
The path where ONNX model will be saved
Returns
-------
onnx_model : onnx.ModelProto
converted ONNX model as a ModelProto.
"""
if opset_version not in ONNX_OPSET_VERSONS_SUPPORTED:
raise NotImplementedError("Currently only opset version 11 is supported.")
if opset_version > defs.onnx_opset_version():
        raise Exception(
            "The installed ONNX package (version {}) does not support opset "
            "version {}. Upgrade the ONNX package to the latest version.".format(
                get_onnx_version(), opset_version
            )
        )
func = relay_ir["main"] if isinstance(relay_ir, tvm.ir.IRModule) else relay_ir
converter = RelayToONNXConverter(name, params, opset_version)
onnx_model = converter.convert_to_onnx(func)
if path:
onnx.save(onnx_model, path)
return onnx_model
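# Usage sketch (illustrative, not part of the original module): converting a
# trivial Relay function with to_onnx, assuming tvm.relay is available:
#   import tvm
#   from tvm import relay
#   x = relay.var("x", shape=(1, 3), dtype="float32")
#   func = relay.Function([x], relay.nn.relu(x))
#   onnx_model = to_onnx(tvm.IRModule.from_expr(func), {}, "relu_graph")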
@tvm._ffi.register_func("relay.ext.onnx")
def onnx_compiler(func):
"""Create a runtime module for ONNX from Relay Function
:param func: Relay function
:return: runtime module for ONNX
"""
assert isinstance(func, tvm.relay.function.Function)
name = str(func.attrs.global_symbol)
model = to_onnx(func, {}, name)
const_vars = [const.name for const in model.graph.initializer]
name_bytes = bytes(name, "utf-8")
name_size = struct.pack("I", len(name_bytes))
model_serialized = model.SerializeToString()
model_size = struct.pack("I", model.ByteSize())
data = b"" + name_size + name_bytes + model_size + model_serialized
runtime_func = "runtime.ONNXModuleCreate"
fcreate = tvm._ffi.get_global_func(runtime_func)
return fcreate(data.hex(), name, const_vars)
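# Illustrative note (not part of the original module): the byte string built
# in onnx_compiler has the layout
#   [4-byte name length][name bytes][4-byte model length][serialized ModelProto]
# which is exactly what save_to_file below walks through when it unpacks one
# or more concatenated subgraphs from the hex string.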
@tvm._ffi.register_func("relay.ext.onnx.save_to_file")
def save_to_file(hex_str, path=None, fmt="onnx"):
"""Store the ONNX subgraphs in the path folder
    :param hex_str: Subgraph names and corresponding serialized onnx hex string
:param path: path to which ONNX files to be stored
It is assumed that path exists
:param fmt: extension of the files to be stored
"""
onnx_ir = bytes.fromhex(hex_str)
offset = 0
while offset < len(onnx_ir):
stop = offset + 4
(name_size,) = struct.unpack("I", onnx_ir[offset:stop])
name = onnx_ir[stop : stop + name_size].decode("utf-8")
stop = stop + name_size
(model_size,) = struct.unpack("I", onnx_ir[stop : stop + 4])
stop = stop + 4
model_serialized = onnx_ir[stop : stop + model_size]
offset = stop + model_size
model_onnx = onnx.load_model_from_string(model_serialized)
onnx.save(model_onnx, "{}{}{}.{}".format(path, os.path.sep, name, fmt))
|
{"hexsha": "533174059e66049453c78e0d4c3e263fece64d25", "size": 38475, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tvm/contrib/target/onnx.py", "max_stars_repo_name": "ZihengJiang/relax", "max_stars_repo_head_hexsha": "5676ffa4c423adf2b3f1920c5fdaca43369e9855", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/tvm/contrib/target/onnx.py", "max_issues_repo_name": "ZihengJiang/relax", "max_issues_repo_head_hexsha": "5676ffa4c423adf2b3f1920c5fdaca43369e9855", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tvm/contrib/target/onnx.py", "max_forks_repo_name": "ZihengJiang/relax", "max_forks_repo_head_hexsha": "5676ffa4c423adf2b3f1920c5fdaca43369e9855", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9885159011, "max_line_length": 113, "alphanum_fraction": 0.6309551657, "include": true, "reason": "import numpy", "num_tokens": 8746}
|
// Copyright (C) 2007, 2008, 2009 Tim Blechmann & Thomas Grill
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Disclaimer: Not a Boost library.
#ifndef BOOST_LOCKFREE_CAS_HPP_INCLUDED
#define BOOST_LOCKFREE_CAS_HPP_INCLUDED
#include <boost/lockfree/detail/prefix.hpp>
#include <boost/interprocess/detail/atomic.hpp>
#include <boost/detail/lightweight_mutex.hpp>
#include <boost/static_assert.hpp>
#include <boost/cstdint.hpp>
#include <boost/mpl/map.hpp>
#include <boost/mpl/at.hpp>
#include <boost/mpl/if.hpp>
#include <boost/mpl/long.hpp>
#ifdef __SSE2__
#include "emmintrin.h"
#endif
namespace boost
{
namespace lockfree
{
inline void memory_barrier(void)
{
#if defined(__SSE2__)
_mm_mfence();
#elif defined(__GNUC__) && ( (__GNUC__ > 4) || ((__GNUC__ >= 4) && \
(__GNUC_MINOR__ >= 1))) \
|| defined(__INTEL_COMPILER)
__sync_synchronize();
#elif defined(__GNUC__) && defined (__i386__)
asm volatile("lock; addl $0,0(%%esp)":::"memory");
#elif defined(_MSC_VER) && (_MSC_VER >= 1300)
_ReadWriteBarrier();
#elif defined(__APPLE__)
OSMemoryBarrier();
#elif defined(AO_HAVE_nop_full)
AO_nop_full();
#else
# warning "no memory barrier implemented for this platform"
#endif
}
inline void read_memory_barrier(void)
{
#if defined(__SSE2__)
_mm_lfence();
#else
memory_barrier();
#endif
}
template <typename C>
struct atomic_cas_emulator
{
static inline bool cas(C * addr, C old, C nw)
{
static boost::detail::lightweight_mutex guard;
boost::detail::lightweight_mutex::scoped_lock lock(guard);
if (*addr == old)
{
*addr = nw;
return true;
}
else
return false;
}
typedef C cas_type;
};
template <typename C>
inline bool atomic_cas_emulation(C * addr, C old, C nw)
{
return atomic_cas_emulator<C>::cas(addr, old, nw);
}
using boost::uint32_t;
using boost::uint64_t;
struct atomic_cas32
{
    static inline bool cas(volatile uint32_t * addr,
                           uint32_t const & old,
                           uint32_t const & nw)
    {
#if defined(__GNUC__) && ( (__GNUC__ > 4) || ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 1)) ) || defined(__INTEL_COMPILER)
        return __sync_bool_compare_and_swap(addr, old, nw);
#else
        // boost::interprocess atomic_cas32 takes (mem, with, cmp)
        return boost::interprocess::detail::atomic_cas32(addr, nw, old) == old;
#endif
}
typedef uint32_t cas_type;
static const bool is_lockfree = true;
};
struct atomic_cas64
{
typedef uint64_t cas_type;
static inline bool cas(volatile uint64_t * addr,
uint64_t const & old,
uint64_t const & nw)
{
#if defined(__GNUC__) && ( (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 1)) \
|| ((__GNUC__ == 4) && (__GNUC_MINOR__ == 1) && defined(__x86_64__)) ) \
|| defined(__INTEL_COMPILER)
return __sync_bool_compare_and_swap(addr, old, nw);
#elif defined(_M_IX86) || defined(_M_X64)
        // A 64-bit value needs the 64-bit interlocked intrinsic (cmpxchg8b on
        // 32-bit x86); the 32-bit InterlockedCompareExchange would truncate.
        return InterlockedCompareExchange64(reinterpret_cast<volatile LONGLONG*>(addr),
                                            static_cast<LONGLONG>(nw),
                                            static_cast<LONGLONG>(old)) == old;
#else
#define CAS_BLOCKING
#warning ("blocking CAS emulation")
return atomic_cas_emulation((uint64_t *)addr, old, nw);
#endif
}
#ifdef CAS_BLOCKING
#undef CAS_BLOCKING
static const bool is_lockfree = false;
#else
static const bool is_lockfree = true;
#endif
};
struct atomic_cas128
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
typedef int cas_type __attribute__ ((mode (TI)));
#else
struct cas_type
{
bool operator==(cas_type const & rhs)
{
return (data[0] == rhs.data[0]) &&
(data[1] == rhs.data[1]);
}
uint64_t data[2];
};
#endif
static inline bool cas(volatile cas_type * addr, cas_type const & old, cas_type const & nw)
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
return __sync_bool_compare_and_swap_16(addr, old, nw);
#else
#define CAS_BLOCKING
//#warning ("blocking CAS emulation")
return atomic_cas_emulation((cas_type*)addr, old, nw);
#endif
}
#ifdef CAS_BLOCKING
#undef CAS_BLOCKING
static const bool is_lockfree = false;
#else
static const bool is_lockfree = true;
#endif
};
namespace detail
{
using namespace boost::mpl;
template<typename C>
struct atomic_cas
{
private:
typedef map3<pair<long_<4>, atomic_cas32>,
pair<long_<8>, atomic_cas64>,
pair<long_<16>, atomic_cas128>
> cas_map;
typedef typename at<cas_map, long_<sizeof(C)> >::type atomic_cas_t;
typedef typename if_<has_key<cas_map, long_<sizeof(C)> >,
atomic_cas_t,
atomic_cas_emulator<C> >::type cas_t;
typedef typename cas_t::cas_type cas_value_t;
public:
static inline bool cas(volatile C * addr, C const & old, C const & nw)
{
return cas_t::cas((volatile cas_value_t*)addr,
*(cas_value_t*)&old,
*(cas_value_t*)&nw);
}
static const bool is_lockfree = cas_t::is_lockfree;
};
} /* namespace detail */
using detail::atomic_cas;
template <typename C>
inline bool cas(volatile C * addr, C const & old, C const & nw)
{
return atomic_cas<C>::cas(addr, old, nw);
}
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_CAS_HPP_INCLUDED */
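// Usage sketch (illustrative, not part of the original header): a lock-free
// increment loop built on the generic boost::lockfree::cas<> wrapper above.
// All names below are hypothetical example code.
//
//   volatile int counter = 0;
//   int expected = counter;
//   while (!boost::lockfree::cas(&counter, expected, expected + 1))
//       expected = counter;   // CAS failed: reload and retry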
|
{"hexsha": "567358473f9cf3ea8e9059e93f0e2fcc19768a6b", "size": 5850, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "common/include/boost/lockfree/detail/cas.hpp", "max_stars_repo_name": "marshallmcmullen/pion", "max_stars_repo_head_hexsha": "7dcbe769e7076f5cc983bb4a5b5d2bc83cadb3a2", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common/include/boost/lockfree/detail/cas.hpp", "max_issues_repo_name": "marshallmcmullen/pion", "max_issues_repo_head_hexsha": "7dcbe769e7076f5cc983bb4a5b5d2bc83cadb3a2", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/include/boost/lockfree/detail/cas.hpp", "max_forks_repo_name": "marshallmcmullen/pion", "max_forks_repo_head_hexsha": "7dcbe769e7076f5cc983bb4a5b5d2bc83cadb3a2", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6578947368, "max_line_length": 118, "alphanum_fraction": 0.6311111111, "num_tokens": 1457}
|
"""
omg: Omics Mock Generator
Generates a mock dataset of omics data (importable in EDD):
transcriptomics, proteomics, and metabolomics
Requirements: Python 3.7.2, cobra, numpy, pandas.
"""
__author__ = 'LBL-QMM'
__copyright__ = 'Copyright (C) 2019 Berkeley Lab'
__license__ = ''
__status__ = 'Alpha'
__date__ = 'Dec 2019'
__version__ = '0.1.1'
import argparse
import collections as col
import os
import random
import re
import statistics
import sys
import urllib.parse
import urllib.request
import warnings
from shutil import copyfile
from enum import Enum
from typing import NewType, Dict, List, Any, OrderedDict, Counter
import cobra
from cobra.util.array import create_stoichiometric_matrix
import numpy as np
import pandas as pd
from cobra.exceptions import OptimizationError, Infeasible
# Type annotations
Filename = NewType('Filename', str)
# Enumerations
class Omics(Enum):
"""Enumeration with supported omics data types."""
PROTEOMICS = 0
TRANSCRIPTOMICS = 1
METABOLOMICS = 2
def __str__(self):
return f'{str(self.name).lower()}'
# Constants
UNIPROT_URL = '''https://www.uniprot.org/uploadlists/'''
CTS_URL = '''https://cts.fiehnlab.ucdavis.edu/rest/convert/'''
# HOST NAME
HOST_NAME: str = 'ropacus'
# TODO: Move some constants to variables by program arguments
DATA_FILE_PATH: Filename = Filename('data')
# Output file path
OUTPUT_FILE_PATH: Filename = Filename('data/output')
# INCHIKEY_TO_CID_MAP_FILE_PATH: mapping file path to map inchikey to cids
INCHIKEY_TO_CID_MAP_FILE_PATH: Filename = Filename('mapping')
# MODEL_FILENAME: Filename = Filename('iECIAI39_1322.xml') # E. coli
MODEL_FILENAME: Filename = Filename('reannotated_base_v3.sbml') # R. opacus
MODEL_FILEPATH: Filename = Filename('')
# Training file name
TRAINING_FILE_NAME: Filename = Filename('')
TRAINING_FILE_PATH: Filename = Filename('')
# Start time and stop time
TIMESTART: float = 0.0
TIMESTOP: float = 8.0
NUMPOINTS: int = 9
# Initial OD value
INITIAL_OD = 0.01
# number of reactions and instances
NUM_REACTIONS: int = None
NUM_INSTANCES: int = None
# NOTE: user input to the program
REACTION_ID_ECOLI: str = 'BIOMASS_Ec_iJO1366_core_53p95M' # E. coli
REACTION_ID: str = 'biomass_target' # R. opacus
# REACTION_ID: str = 'SRC_C00185_e' # R. opacus
GENE_IDS_DBS: List[str] = ['kegg.genes'] # R. opacus
# GENE_IDS_DBS: List[str] = ['uniprot', 'goa', 'ncbigi'] # E. coli
UNITS: Dict[Omics, str] = {
Omics.PROTEOMICS: 'proteins/cell',
Omics.TRANSCRIPTOMICS: "FPKM",
Omics.METABOLOMICS: "mM"
}
# Fix the flux value to -15 as we have data for this constraint
LOWER_BOUND: int = -15
UPPER_BOUND: int = -15
# Internals
_EPS = np.finfo(np.double).eps
def ansi(num: int):
"""Return function that escapes text with ANSI color n."""
return lambda txt: f'\033[{num}m{txt}\033[0m'
# pylint: disable=invalid-name
gray, red, green, yellow, blue, magenta, cyan, white = map(ansi,
range(90, 98))
# pylint: enable=invalid-name
#=============================================================================
def get_flux_time_series(model, ext_metabolites, grid, user_params):
'''
Generate fluxes and OD
'''
## First unpack the time steps for the grid provided
tspan, delt = grid
    ## Create a pandas Series containing the cell concentration for each time point
cell = pd.Series(index=tspan)
cell0 = user_params['initial_OD'] # in gDW/L
t0 = user_params['timestart']
cell[t0] = cell0
    ## Create a dataframe that contains external metabolite names and their concentrations
# First organize external metabolites and their initial concentrations
met_names = []
initial_concentrations = []
for met, init_conc in ext_metabolites.items():
met_names.append(met)
initial_concentrations.append(init_conc)
# Create dataframe containing external metabolites
Emets = pd.DataFrame(index=tspan, columns=met_names)
# Add initial concentrations for external metabolites
Emets.loc[t0] = initial_concentrations
# Create Dictionary mapping exchange reactions to the corresponding external metabolite
Erxn2Emet = {r.id: r.reactants[0].id for r in model.exchanges if r.reactants[0].id in met_names}
## Create storage for timeseries of models and solutions
# Model time series
model_TS = pd.Series(index=tspan)
# Solution time series
solution_TS = pd.Series(index=tspan)
    ## Main for loop: solve the model at each time step and compute the corresponding OD and external metabolite concentrations
volume = 1.0 # volume set arbitrarily to one because the system is extensive
for t in tspan:
# Adding constraints for each time point without permanent changes to the model
with model:
for rxn, met in Erxn2Emet.items():
# For each exchange reaction set lower bound such that the corresponding
# external metabolite concentration does not become negative
model.reactions.get_by_id(rxn).lower_bound = max(model.reactions.get_by_id(rxn).lower_bound,
-Emets.loc[t,met]*volume/cell[t]/delt)
# Calculate fluxes
solution_t = model.optimize()
# Store the solution and model for each timepoint for future use (e.g. MOMA)
solution_TS[t] = solution_t
model_TS[t] = model.copy()
# Calculate OD and external metabolite concentrations for next time point t+delta
cell[t+delt], Emets.loc[t+delt] = advance_OD_Emets(Erxn2Emet, cell[t], Emets.loc[t], delt, solution_t, user_params)
print(t, solution_t.status, solution_t[user_params['BIOMASS_REACTION_ID']]) # Minimum output for testing
return solution_TS, model_TS, cell, Emets, Erxn2Emet
def advance_OD_Emets(Erxn2Emet, old_cell, old_Emets, delt, solution, user_params):
# Output is same as input if nothing happens in the if clause
new_cell = old_cell
new_Emets = old_Emets
# Obtain the value of mu (growth rate)
mu = solution[user_params['BIOMASS_REACTION_ID']]
# Calculate OD and external metabolite concentrations for next step
if solution.status == 'optimal' and mu > 1e-6: # Update only if solution is optimal and mu is not zero, otherwise do not update
# Calculating next time point's OD
        new_cell = old_cell * np.exp(mu*delt)
        # Calculating external metabolite concentrations for next time point
for rxn, met in Erxn2Emet.items():
new_Emets[met] = max(old_Emets.loc[met]-solution[rxn]/mu*old_cell*(1-np.exp(mu*delt)),0.0)
return new_cell, new_Emets
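# Illustrative note (not part of the original code): with exponential growth
# new_cell = old_cell * exp(mu * delt), e.g. mu = 0.3 1/h, delt = 1 h and
# old_cell = 0.1 gDW/L give new_cell ~ 0.1 * 1.3499 ~ 0.135 gDW/L. The same
# mu is used to integrate each external metabolite over the time step.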
def getBEFluxes(model_TS, design, solution_TS, grid):
## Unpacking time points grid
tspan, delt = grid
## Parameters for flux constraints
high = 1.1
low = 0.50
## Unpack information for desired flux changes
# Get names for reaction targets
    reaction_names = list(design.index[1:])
# Find number of target reactions and number of designs (or strains changed)
#n_reactions = design.shape[1] - 1
#n_instances = design.shape[0] - 1
## Time series containing the flux solution obtained through MOMA
solutionsMOMA_TS = pd.Series(index=tspan)
## Main loop: for each strain and at each time point, find new flux profile through MOMA
#for i in range(0,n_instances):
for t in tspan:
model = model_TS[t]
sol1 = solution_TS[t] # Reference solution calculated for each time point
with model:
# Adding the fluxed modifications for chosen reactions
for reaction in reaction_names:
flux = sol1.fluxes[reaction]
                lbcoeff = low
                ubcoeff = high
if flux < 0:
lbcoeff = high
ubcoeff = low
reaction_constraint = model.problem.Constraint(model.reactions.get_by_id(reaction).flux_expression,
lb = sol1.fluxes[reaction]*design[reaction]*lbcoeff,
ub = sol1.fluxes[reaction]*design[reaction]*ubcoeff)
#lb = model.reactions.get_by_id(reaction).lower_bound*design[reaction],
#ub = model.reactions.get_by_id(reaction).upper_bound*design[reaction])
model.add_cons_vars(reaction_constraint)
# Reference solution calculated for each time point in above cell for wild type
#sol1 = solution_TS[t]
# Moma solution for each time point
sol2 = cobra.flux_analysis.moma(model, solution=sol1, linear=False)
# saving the moma solutions across timepoints
solutionsMOMA_TS[t] = sol2
return solutionsMOMA_TS
def integrate_fluxes(solution_TS, model_TS, ext_metabolites, grid, user_params):
## First unpack the time steps for the grid provided
tspan, delt = grid
    ## Create a pandas Series containing the cell concentration for each time point
cell = pd.Series(index=tspan)
cell0 = user_params['initial_OD'] # in gDW/L
t0 = user_params['timestart']
cell[t0] = cell0
    ## Create a dataframe that contains external metabolite names and their concentrations (duplicated from get_flux_time_series)
# First organize external metabolites and their initial concentrations
model = model_TS[0]
met_names = []
initial_concentrations = []
for met, init_conc in ext_metabolites.items():
met_names.append(met)
initial_concentrations.append(init_conc)
# Create dataframe containing external metabolites
Emets = pd.DataFrame(index=tspan, columns=met_names)
# Add initial concentrations for external metabolites
Emets.loc[t0] = initial_concentrations
# Create Dictionary mapping exchange reactions to the corresponding external metabolite
Erxn2Emet = {r.id: r.reactants[0].id for r in model.exchanges if r.reactants[0].id in met_names}
## Main loop adding contributions for each time step
for t in tspan:
# Calculate OD and external metabolite concentrations for next time point t+delta
cell[t+delt], Emets.loc[t+delt] = advance_OD_Emets(Erxn2Emet, cell[t], Emets.loc[t], delt, solution_TS[t], user_params)
return cell, Emets
def get_proteomics_transcriptomics_data(model, solution):
"""
:param model:
:param solution:
:param condition:
:return:
"""
# pre-determined linear constant (NOTE: Allow user to set this via parameter)
# DISCUSS!!
k = 0.8
q = 0.06
proteomics = {}
transcriptomics = {}
rxnIDs = solution.fluxes.keys()
for rxnId in rxnIDs:
reaction = model.reactions.get_by_id(rxnId)
for gene in list(reaction.genes):
            # this will ignore all the reactions that do not have the gene.annotation property
# DISCUSS!!
if gene.annotation:
if 'uniprot' not in gene.annotation:
if 'goa' in gene.annotation:
protein_id = gene.annotation['goa']
else:
break
else:
protein_id = gene.annotation['uniprot'][0]
# add random noise which is 5 percent of the signal
                noiseSigma = 0.05 * solution.fluxes[rxnId]/k
                noise = noiseSigma*np.random.randn()
                proteomics[protein_id] = abs((solution.fluxes[rxnId]/k) + noise)
                # create transcriptomics dict
                noiseSigma = 0.05 * proteomics[protein_id]/q
                noise = noiseSigma*np.random.randn()
transcriptomics[gene.id] = abs((proteomics[protein_id]/q) + noise)
return proteomics, transcriptomics
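# Illustrative note (not part of the original code): the mock data above uses
# protein ~ flux / k and transcript ~ protein / q, each with Gaussian noise at
# 5% of the signal. For a hypothetical flux of 8.0 with k = 0.8 and q = 0.06,
# the noiseless values would be 10.0 proteins/cell and ~166.7 FPKM.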
def get_metabolomics_data(model, solution, mapping_file):
"""
:param model:
:param condition:
:return:
"""
metabolomics = {}
metabolomics_with_old_ids = {}
# get metabolites
# read the inchikey to pubchem ids mapping file
inchikey_to_cid = {}
inchikey_to_cid = read_pubchem_id_file(mapping_file)
    # create the stoichiometric matrix from the model as a DataFrame and convert all the values to absolute values
sm = create_stoichiometric_matrix(model, array_type='DataFrame')
# get all the fluxes across reactions from the solution
fluxes = solution.fluxes
# calculating the dot product of the stoichiometry matrix and the fluxes to calculate the net change
# in concentration of the metabolites across reactions
net_change_in_concentrations = sm.abs().dot(fluxes.abs())
#net_change_in_concentrations = net_change_in_concentrations.abs()
    # converting all NA values to zeros and counting the number of reactions each metabolite takes part in
num_changes_in_metabolites = sm.fillna(0).astype(bool).sum(axis=1)
for met_id, conc in net_change_in_concentrations.items():
metabolite = model.metabolites.get_by_id(met_id)
# if there is an inchikey ID for the metabolite
if 'inchi_key' in metabolite.annotation:
# if it is a list get the first element
if type(metabolite.annotation['inchi_key']) is list:
inchi_key = metabolite.annotation['inchi_key'][0]
else:
inchi_key = metabolite.annotation['inchi_key']
if inchi_key in inchikey_to_cid.keys():
# if the CID is not in the metabolomics dict keys AND the mapped value is not None and the reactions flux is not 0
if (inchikey_to_cid[inchi_key] not in metabolomics.keys()) and (inchikey_to_cid[inchi_key] is not None):
metabolomics[inchikey_to_cid[inchi_key]] = conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
metabolomics_with_old_ids[met_id] = conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
elif (inchikey_to_cid[inchi_key] is not None):
metabolomics[inchikey_to_cid[inchi_key]] += conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
metabolomics_with_old_ids[met_id] = conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
return metabolomics, metabolomics_with_old_ids
def get_multiomics(model, solution, mapping_file, old_ids=False):
"""
:param model: cobra model object
:param solution: solution for the model optimization using cobra
:param data_type: defines the type of -omics data to generate (all by default)
:return:
"""
proteomics = {}
transcriptomics = {}
fluxomics = {}
metabolomics = {}
proteomics, transcriptomics = get_proteomics_transcriptomics_data(model, solution)
metabolomics, metabolomics_with_old_ids = get_metabolomics_data(model, solution, mapping_file)
if old_ids:
return (proteomics, transcriptomics, metabolomics, metabolomics_with_old_ids)
else:
return (proteomics, transcriptomics, metabolomics)
def read_pubchem_id_file(mapping_file):
inchikey_to_cid = {}
with open(mapping_file, 'r') as fh:
try:
line = fh.readline()
while line:
# checking to ignore inchikey records with no cid mappings
if (len(line.split()) > 1):
inchikey_to_cid[line.split()[0]] = 'CID:'+line.split()[1]
else:
inchikey_to_cid[line.strip()] = None
line = fh.readline()
# NOTE: propagated exception, raise
except Exception as ex:
print("Error in reading file!")
print(ex)
return inchikey_to_cid
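# Illustrative note (not part of the original code): read_pubchem_id_file
# expects a whitespace-separated mapping file with one InChIKey per line and
# an optional PubChem CID, e.g. (hypothetical values):
#   WQZGKKKJIJFFOK-GASJEMHNSA-N  5793
#   XLYOFNOQVPJJNP-UHFFFAOYSA-N
# Lines without a CID map the InChIKey to None.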
def write_experiment_description_file(output_file_path, line_name='WT', label=''):
# HARD CODED ONLY FOR WILD TYPE!
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# create the filename
experiment_description_file_name = f'{output_file_path}/EDD_experiment_description_file{label}.csv'
#write experiment description file
try:
with open(experiment_description_file_name, 'w') as fh:
fh.write(f'Line Name, Line Description, Part ID, Media, Shaking Speed, Starting OD, Culture Volume, Flask Volume, Growth Temperature, Replicate Count\n')
if line_name == 'WT':
line_descr = 'Wild type E. coli'
part_id = 'ABFPUB_000310'
else:
line_descr = ''
part_id = 'ABFPUB_000310' #THIS SHOULD BE CHANGED!
fh.write(f"{line_name}, {line_descr}, {part_id}, M9, 1, 0.1, 50, 200, 30, 1\n")
except Exception as ex:
print("Error in writing file!")
print(ex)
def write_in_al_format(time_series_omics_data, omics_type, user_params, label=''):
try:
output_file_path = user_params['al_omics_file_path']
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
for timepoint, omics_dict in time_series_omics_data.items():
al_file_name = f'{output_file_path}/AL_{omics_type}_{timepoint}_hrs{label}.csv'
with open(al_file_name, 'w') as ofh:
dataframe = pd.DataFrame.from_dict(omics_dict, orient='index', columns=[f'{omics_type}_value'])
for index, series in dataframe.iteritems():
for id, value in series.iteritems():
ofh.write(f'{id},{value}\n')
except:
print('Error in writing in Arrowland format')
def write_in_edd_format(time_series_omics_data, omics_type, user_params, line_name, label=''):
# Dictionary to map omics type with the units of measurement
unit_dict = { "fluxomics": 'mmol/gdwh',\
"proteomics": 'proteins/cell',\
"transcriptomics": "FPKM",\
"metabolomics": "mM"
}
# write in EDD format
output_file_path = user_params['edd_omics_file_path']
# create the filenames
omics_file_name: str = f'{output_file_path}/EDD_{omics_type}{label}.csv'
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# open a file to write omics data for each type and for all timepoints and constraints
try:
with open(omics_file_name, 'w') as fh:
fh.write(f'Line Name,Measurement Type,Time,Value,Units\n')
for timepoint, omics_dict in time_series_omics_data.items():
dataframe = pd.DataFrame.from_dict(omics_dict, orient='index', columns=[f'{omics_type}_value'])
for index, series in dataframe.iteritems():
for id, value in series.iteritems():
fh.write((f'{line_name},{id},{timepoint},{value},{unit_dict[omics_type]}\n'))
except Exception as ex:
print("Error in writing file!")
print(ex)
def write_omics_files(time_series_omics_data, omics_type, user_params, line_name='WT', al_format=False, label=''):
"""
:param dataframe:
:param data_type:
:return:
"""
# check which format we have to create the data in
if not al_format:
# write the omics files in EDD format by separating in terms of the timepoints
write_in_edd_format(time_series_omics_data, omics_type, user_params, line_name, label=label)
else:
# write the omics files in ARROWLAND format by separating in terms of the timepoints
write_in_al_format(time_series_omics_data, omics_type, user_params, label=label)
def write_OD_data(cell, output_file_path, line_name='WT', label=''):
# create the filename
OD_data_file: str = f'{output_file_path}/EDD_OD{label}.csv'
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# write experiment description file
try:
with open(OD_data_file, 'w') as fh:
fh.write(f'Line Name,Measurement Type,Time,Value,Units\n')
for index, value in cell.items():
fh.write((f'{line_name},Optical Density,{index},{value},n/a\n'))
except Exception as ex:
print("Error in writing OD file")
print(ex)
def write_training_data_with_isopentenol(df, filename):
filename = f'{OUTPUT_FILE_PATH}/{filename}'
df.to_csv(filename, header=True, index=False)
def write_external_metabolite(substrates, output_file_path, line_name='WT', label=''):
# create the filename
external_metabolites: str = f'{output_file_path}/EDD_external_metabolites{label}.csv'
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# Table for metabolites to be exported
glucose = substrates.loc[:, 'glc__D_e']
ammonium = substrates.loc[:, 'nh4_e']
isopentenol = substrates.loc[:, 'isoprenol_e']
acetate = substrates.loc[:, 'ac_e']
formate = substrates.loc[:, 'for_e']
lactate = substrates.loc[:, 'lac__D_e']
ethanol = substrates.loc[:, 'etoh_e']
# output_metabolites = {
# "5793": glucose, "16741146": ammonium, "12988": isopentenol, "175": acetate, "283": formate, "612": #lactate, "702": ethanol}
output_metabolites = {
"5793": glucose, "12988": isopentenol, "175": acetate, "283": formate, "612": lactate, "702": ethanol}
# Write file lines
try:
with open(external_metabolites,'w') as fh:
# Top header
fh.write(f'Line Name,Measurement Type,Time,Value,Units\n')
# Metabolite lines
for cid in output_metabolites:
met = output_metabolites[cid]
for index,value in met.items():
fh.write((f'{line_name},CID:{cid},{index},{value},mM\n'))
except Exception as ex:
print("Error in writing OD file")
print(ex)
def get_random_number():
"""
:return:
"""
random.seed(12312)
return random.random()
def add_random_noise():
"""
:return:
"""
pass
def get_list_of_reactions(file_name):
"""
:param file_name: Name of the model file (has to be xml for now)
:return: None (prints the list of reactions that has mass in them)
"""
    # Load model depending on the kind of file (the file has to be xml)
if file_name.endswith(".xml"):
model = cobra.io.read_sbml_model(file_name)
# Print out the reaction name and reaction id for all reactions related to BIOMASS production:
print("List of reactions related to BIOMASS production:")
for rxn in model.reactions:
if rxn.name is not None and 'BIOMASS' in rxn.id:
print("{}: {}".format(rxn.id, rxn.name))
def get_optimized_solution(model, reaction_id):
"""
:param model:
:param reaction_id:
:return solution:
"""
# fix the flux value to -15 as we have data for this constraint
    model.reactions.get_by_id(reaction_id).lower_bound = LOWER_BOUND
    model.reactions.get_by_id(reaction_id).upper_bound = UPPER_BOUND
# print(model.reactions.get_by_id(reaction_id))
print("Displaying the reaction bounds after constraining them:")
print(model.reactions.get_by_id(reaction_id).bounds)
# optimizing the model for only the selected reaction
# model.slim_optimize()
# optimizing model
solution = model.optimize()
return solution
def read_model(file_name):
"""
:param file_name:
:return model:
"""
    # Load model depending on the kind of file
if file_name.endswith(".xml"):
model = cobra.io.read_sbml_model(file_name)
elif file_name.endswith(".json"):
model = cobra.io.load_json_model(file_name)
return model
def model_has_IPP_pathway(model):
'''
    Check whether the model contains the following reactions; if so, it has the isopentenol pathway:
['HMGCOAS','HMGCOAR','MEVK1','PMD','IPMPP','IPtrpp','IPtex','EX_isoprenol_e']
'''
reaction_list = ['HMGCOAS','HMGCOAR','MEVK1','PMD','IPMPP','IPtrpp','IPtex','EX_isoprenol_e']
model_reactions = [r.id for r in model.reactions]
for reac in reaction_list:
if reac not in model_reactions:
return False
return True
def add_isopentenol_pathway(model, sce):
'''
    Add the isopentenol pathway by taking it from a model instance of S. cerevisiae;
    we used the iMM904.json model.
'''
# Load S. cerevisiae model
# sce = cobra.io.load_json_model(f'data/{cerevisiae_modelfile}')
# Add mevalonate pathway reactions from S. cerevisiae model
for x in ['HMGCOAS','HMGCOAR','MEVK1','DPMVD']:
r = sce.reactions.get_by_id(x).copy()
r.gene_reaction_rule = ''
model.add_reaction(r)
# Update gene names
model.reactions.get_by_id('HMGCOAS').gene_reaction_rule = 'HMGS'
model.reactions.get_by_id('HMGCOAR').gene_reaction_rule = 'HMGR'
model.reactions.get_by_id('MEVK1').gene_reaction_rule = 'MK'
model.reactions.get_by_id('DPMVD').gene_reaction_rule = 'PMD'
# Add IP to model
m = model.metabolites.ipdp_c.copy()
m.id = 'ipmp_c'
m.name = 'Isopentenyl monophosphate'
m.formula = 'C5H9O4P'
m.charge = -2
model.add_metabolites([m])
# Update PMD reaction to convert mev-5p to IP
model.reactions.get_by_id('DPMVD').id = 'PMD'
model.reactions.get_by_id('PMD').add_metabolites({'5dpmev_c': 1.0, '5pmev_c': -1.0,
'ipdp_c': -1.0, 'ipmp_c': 1.0})
# Add isoprenol (isopentenol)
m = model.metabolites.ipmp_c.copy()
m.id = 'isoprenol_c'
m.name = 'Isopentenol'
m.formula = 'C5H10O'
m.charge = 0
model.add_metabolites([m])
# Add phosphatase reaction by AphA
r = model.reactions.CHLabcpp.copy()
r.id = 'IPMPP'
r.name = 'Isopentenyl monophosphate phosphatase'
r.gene_reaction_rule = 'AphA'
model.add_reactions([r])
r.add_metabolites({'chol_p': 1.0, 'atp_c': 1.0, 'chol_c': -1.0, 'adp_c': -1.0, 'h_c': -1.0, 'ipmp_c': -1.0, 'isoprenol_c': 1.0})
# Add periplasmic and extracellular isoprenol
m = model.metabolites.isoprenol_c.copy()
m.id = 'isoprenol_p'
m.compartment = 'p'
model.add_metabolites([m])
m = model.metabolites.isoprenol_c.copy()
m.id = 'isoprenol_e'
m.compartment = 'e'
model.add_metabolites([m])
# Add periplasmic and extracellular transport reactions
r = model.reactions.ETOHtrpp.copy()
r.id = 'IPtrpp'
r.name = 'Isopentenol reversible transport via diffusion (periplasm)'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_p': 1.0, 'etoh_c': -1.0, 'isoprenol_p': -1.0, 'isoprenol_c': 1.0})
r = model.reactions.ETOHtex.copy()
r.id = 'IPtex'
r.name = 'Isopentenol transport via diffusion (extracellular to periplasm)'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_e': 1.0, 'etoh_p': -1.0, 'isoprenol_e': -1.0, 'isoprenol_p': 1.0})
# Add a boundary reaction
r = model.reactions.EX_etoh_e.copy()
r.id = 'EX_isoprenol_e'
r.name = 'Isopentenol exchange'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_e': 1.0, 'isoprenol_e': -1.0})
    # Write model to file
    # NOTE: relies on a global user_params dict being defined by the caller
    outputfilename = user_params['modelfile'].split('.')[0] + '_IPP.json'
cobra.io.save_json_model(model, f'data/{outputfilename}')
return model
#=============================================================================
class Ropacus():
def __init__(self):
self.time_series_omics_data = {}
self.LOWER_BOUND = -15
self.UPPER_BOUND = -15
def generate_time_series_data(self, model):
        # initializing omics dictionaries to contain data across timepoints
proteomics_list: List = []
transcriptomics_list: List = []
fluxomics_list: List = []
metabolomics_list: List = []
# generating time series data for the following flux constraints
# 6, 9, 12, 15 corresponding to the times 0, 3, 6, 9 hours
# NOTE: The constraints and the timepoints should be supplied as command line inputs
time_series_omics_data = {}
experiment_timepoints = [0, 3, 6, 9]
flux_constraints = [6, 9, 12, 15]
        # NOTE: constraints in flux_constraints, think about it
for i in range(len(flux_constraints)):
# Set global reactions bounds (in addition to local)
self.LOWER_BOUND = flux_constraints[i]
self.UPPER_BOUND = flux_constraints[i]
cobra_config = cobra.Configuration()
cobra_config.bounds = self.LOWER_BOUND, self.UPPER_BOUND
# Print the list of reaction names related to BIOMASS production
self.print_reactions(model)
# get fake proteomics data and write it to XLSX file
condition = 1
self.generate_mock_data(model, condition)
def add_random_noise(self):
# TODO
"""
:return:
"""
pass
def chemical_translation(self, dict_in: Dict[str, Any],
fmt_from: str = 'KEGG',
fmt_to: str = 'PubChem CID') -> Dict[str, Any]:
"""
Proxy to UCDavis Chemical Translation Service (CTS). Maps the keys of
the input dictionary keeping intact the values.
Default behaviour: map KEGG Compounds into PubChem CIDs
For details, see https://cts.fiehnlab.ucdavis.edu/services
"""
dict_out: Dict[str, float] = {}
print(gray('Mapping metabolites ids using CTS'), end='', flush=True)
ids_in: List[str] = list(dict_in.keys())
pattern = re.compile(
r"""(?:"searchTerm":")(\w+)(?:","results":\[")(\w+)(?:"])""")
for id_in in ids_in:
mapping_str: str = f'{fmt_from}/{fmt_to}/{id_in}'
mapping_data = urllib.parse.quote(mapping_str)
mapping_req = urllib.request.Request(CTS_URL + mapping_data)
with urllib.request.urlopen(mapping_req) as map_file:
mapping = map_file.read().strip().decode('utf-8')
match: re.Match = pattern.search(mapping)
if match:
assert match.group(1) == id_in
id_out: str = match.group(2)
if fmt_to == 'PubChem CID':
id_out = 'CID:' + id_out
dict_out[id_out] = dict_in[id_in]
print(green('.'), end='', flush=True)
                    self.dprint(f'Metabolite {id_in} mapped to {id_out}')
else:
print(red('.'), end='', flush=True)
                    self.dprint(yellow(f'Metabolite {id_in} mapping failed!'))
print(green('OK!'))
        self.vprint(gray('Number of unmapped metabolites from'), fmt_from,
                    gray('to'), fmt_to, gray(':'),
                    yellow(len(dict_in) - len(dict_out)))
return dict_out
def dict_to_edd(self, omics_dict: Dict[str, float],
omics: Omics) -> pd.DataFrame:
"""Get dataframe with EDD format from dictionary with omics values"""
edd: List[OrderedDict[str, Any]] = []
sample: OrderedDict[str, Any]
for measurement, value in omics_dict.items():
sample = col.OrderedDict([
('Line Name', 'WT'),
('Measurement Type', measurement),
('Time', 0), # TODO: Generalize for time-series
('Value', value),
('Units', UNITS[omics])
])
edd.append(sample)
return pd.DataFrame(edd)
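    # Example sketch (assumes Omics.PROTEOMICS is a member of the Omics enum
    # used in this module): dict_to_edd({'P12345': 42.0}, Omics.PROTEOMICS)
    # returns a one-row DataFrame with the columns
    # ['Line Name', 'Measurement Type', 'Time', 'Value', 'Units'].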
def dprint(self, *a, **k):
"""Print only if debug mode is enabled"""
if args.debug:
print(*a, **k)
def generate_mock_data(self, model, cond):
"""
:param model: cobra model object
:param solution: solution for the model optimization using cobra
:param data_type: defines the type of -omics data to generate (all by default)
:return:
"""
while cond:
print(gray('Condition parameter:'), magenta(cond))
cond -= 1
self.optimize_solution(model, REACTION_ID)
solution: cobra.Solution = cobra.core.solution.get_solution(
model, raise_error=False)
self.vprint(gray('Solution objective value:'), solution.objective_value)
self.vprint(gray('Model summary after optimization:'))
try:
self.vprint(model.summary())
# self.vprint(model.metabolites.C00185_e.summary())
except Infeasible:
self.vprint(yellow(
'Model summary unavailable as solution was unfeasible!'))
# exit code here
self.write_experiment_description(cond)
self.get_omics_data(model, solution, cond)
def gene_to_protein(self, dict_in: Dict[str, Any],
fmt_gene: str = 'KEGG_ID',
fmt_prot: str = 'ID') -> Dict[str, Any]:
"""
From any dict whose keys are gene IDs, maps them to protein IDs and
keeps the value intact
Default behaviour: map KEGG IDs into UNIPROT IDs
For details, see https://www.uniprot.org/help/api_idmapping
"""
dict_out: Dict[str, float] = {}
print(gray('Mapping genes into proteins using UNIPROT... '), end='')
gene_ids: List[str] = list(dict_in.keys())
mapping_params: Dict[str, str] = {
'from': fmt_gene,
'to': fmt_prot,
'format': 'tab',
'query': '\t'.join(gene_ids)
}
mapping_data = urllib.parse.urlencode(mapping_params)
mapping_data = mapping_data.encode('utf-8')
mapping_req = urllib.request.Request(UNIPROT_URL, mapping_data)
with urllib.request.urlopen(mapping_req) as map_file:
mapping = map_file.read().strip().decode('utf-8').split('\n')
for gene2prot in mapping[1:]:
gene, prot = gene2prot.split('\t', 1)
dict_out[prot] = dict_in[gene]
                self.dprint('Gene', gene, 'mapped to protein', prot)
if dict_out:
print(green('OK!'))
self.vprint(gray('Number of unmapped genes from'), fmt_gene, gray('to'),
fmt_prot, gray(':'), yellow(len(dict_in) - len(dict_out)))
else:
print(yellow('PROBLEM!'))
return dict_out
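    # NOTE: this targets UniProt's legacy tab-format ID-mapping endpoint;
    # UniProt has since moved to a job-based REST API, so this call may fail
    # against the current service.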
    # NOTE: Name it consistently, e.g. generate_omics_data
def get_omics_data(self, model: cobra.Model,
solution: cobra.Solution,
cond: int):
"""
Core method that generates all omics data.
:param model:
:param solution:
:param cond:
:return:
"""
# Pre-determined linear constants
PROTE_SCALING: float = 10 # Scaling factor for fluxes to proteomics
TRANS_SCALING: float = 1.2 # S.F. for proteomics to transcriptomics
# TODO: Allow user to set those constants via parameters
# The omics variable name should coincide with those elements of Omics
proteomics: Dict[str, float] = {}
transcriptomics: Dict[str, float] = {}
metabolomics: Dict[str, float] = {}
# Get values and statistics for proteomics and transcriptomics
proteo_stats: Dict[str, Counter[str]] = {
db + status: col.Counter() for db in GENE_IDS_DBS
for status in ['_missing', '_success', '_zero']}
metabolite_awflux: Dict[str, List[float]] = {} # abs weighted fluxes
rxn_ids: pd.Index = solution.fluxes.index
# Cobra docs: Accessing reaction fluxes through a Solution object
# is the safer, preferred, and only guaranteed to be correct way.
        # NOTE: Put the operations in functions, more modular
for rxn_id in rxn_ids:
reaction: cobra.Reaction = model.reactions.get_by_id(rxn_id)
flux: float = solution.fluxes[rxn_id]
gene: cobra.Gene
# Subloop 1/2: proteomics and transcriptomics
for gene in reaction.genes:
gene_id: str = ''
# WARNING! Based on gene.annotation property populated
gene_id_db: str = ''
for gene_id_db in GENE_IDS_DBS:
try:
gene_id = gene.annotation[gene_id_db]
except KeyError:
proteo_stats[gene_id_db + '_missing'][gene_id] += 1
else:
# Populates proteomics and transcriptomics dicts if
# related flux has a positive value
                        proteo: float = np.ceil(flux * PROTE_SCALING)
if proteo > _EPS:
# Accumulate in case of multiple genes
try:
proteomics[gene_id] += proteo
except KeyError:
proteomics[gene_id] = proteo
proteo_stats[gene_id_db + '_success'][gene_id] += 1
else:
proteo_stats[gene_id_db + '_zero'][gene_id] += 1
transc: float = proteo * TRANS_SCALING
if transc > _EPS * 1e+3:
transcriptomics[gene.id] = transc
break
else:
self.dprint(yellow('WARNING!'), gray('Gene'), gene.id,
gray('in reaction'), rxn_id,
gray('has no useful annotation. Skipping...'))
# Subloop 2/2: metabolomics (partial)
for metabolite, coeff in reaction.metabolites.items():
awflux: float = abs(coeff * flux) # absolute weighted flux
if awflux < _EPS:
continue
metabolite_id: str = metabolite.id.rsplit(
sep='_', maxsplit=1)[0] # Remove suffixes _c, _e, etc
try:
metabolite_awflux[metabolite_id].append(awflux)
except KeyError:
metabolite_awflux[metabolite_id] = [awflux]
# Metabolomics (final)
# Alt: to avoid this loop use a moving average in the subloop above
for metabolite, awfluxes in metabolite_awflux.items():
metabolomics[metabolite] = statistics.mean(awfluxes)
self.vprint(gray('Number of active metabolites:'), len(metabolomics))
        self.dprint(gray('Number of fluxes related to each gene (top 10)'))
for gene_id_db in GENE_IDS_DBS:
for status in ['_missing', '_success', '_zero']:
self.dprint(gene_id_db + status, proteo_stats[
gene_id_db + status].most_common(10))
# Map genes ids into protein ids accepted by EDD
proteomics = self.gene_to_protein(proteomics)
# Map metabolites ids into those accepted by EDD
metabolomics = self.chemical_translation(metabolomics)
# Write omics files
for omic in Omics: # NOTE: omics variable names are elements of Omics
omics_df: pd.DataFrame = self.dict_to_edd(eval(str(omic)), omic)
self.write_data_files(omics_df, omic, cond)
def get_random_number(self):
"""
:return:
"""
        # NOTE: re-seeding with a fixed value on every call makes this
        # return the same "random" number each time
        random.seed(12312)
        return random.random()
def optimize_solution(self, model: cobra.Model, reaction_id: str) -> None:
"""
:param model:
:param reaction_id:
:return solution:
"""
reaction: cobra.Reaction = model.reactions.get_by_id(reaction_id)
self.vprint(gray('Reaction:'), reaction)
if args.debug:
print(blue('List of reactants:'))
for reactant in reaction.reactants:
print(reactant, reactant.name)
print(blue('List of products:'))
for product in reaction.products:
print(product, product.name)
# Set local reaction bounds
        model.reactions.get_by_id(reaction_id).lower_bound = self.LOWER_BOUND
        model.reactions.get_by_id(reaction_id).upper_bound = self.UPPER_BOUND
self.vprint(gray('Displaying the reaction bounds after constraining them:'),
blue(model.reactions.get_by_id(reaction_id).bounds))
# Optimize the model using FBA
print(gray('Optimizing the model using FBA... '), end='')
model.slim_optimize()
try:
cobra.util.assert_optimal(model)
except OptimizationError as error:
print(yellow('PROBLEM!'), error)
else:
print(green('OK!'))
def read_model(self, file_name):
"""
:param file_name:
:return model:
"""
# Check presence of model file
if not os.path.isfile(file_name):
            # NOTE: The error handling is not consistent and will be dominated by the stack trace
print(red('ERROR!'),
f'File {file_name} missing from the data dir!')
raise IOError('Missing file')
# Load model depending on the kind of file
self.vprint(gray(f'Loading model in {file_name}... '), end='')
if file_name.endswith('.xml') or file_name.endswith('.sbml'):
model = cobra.io.read_sbml_model(file_name)
elif file_name.endswith('.json'):
model = cobra.io.load_json_model(file_name)
else:
# NOTE: stacktrace issue
print(red('ERROR!'),
f'File {file_name} type not supported!')
raise TypeError('Unsupported file format')
self.vprint(green('OK!'))
return model
def print_reactions(self, model):
"""
:param model:
:return: None (prints the list of reactions that have BIOMASS in them)
"""
# Print out the reaction name and reaction id for all reactions
# related to BIOMASS production:
self.vprint(gray('List of reactions related to BIOMASS production:'))
for rxn in model.reactions:
if rxn.name is not None and 'biomass' in rxn.id.lower():
self.vprint(f"{rxn.id} : {rxn.name}")
    # NOTE: pass everything to a single print function and add the verbosity arg layer there
def vprint(self, *a, **k):
"""Print only if verbose mode is enabled"""
if args.verbose:
print(*a, **k)
def write_data_files(self, edd: pd.DataFrame, omics: Omics = None,
cond: int = 1) -> None:
"""
Write the EDD dataframe into a xlsx file
:param edd:
:param omics:
:param cond:
:return:
"""
omics_fname: Filename = Filename(
os.path.join(DATA_FILE_PATH,
f'{omics}_mock{cond}.xlsx'))
print(gray('Saving file'), magenta(omics_fname), gray('... '), end='')
try:
            # NOTE: Support both Excel and CSV for both classes and make this method part of the core class. IMPORTANT!
edd.to_excel(omics_fname,
sheet_name=f'{omics}',
index=False)
        # NOTE: Handle errors so this can be used as a library; propagate them instead of printing
except IOError as ex:
print(red('ERROR!'))
self.vprint(ex)
else:
print(green('OK!'))
def write_experiment_description(self, cond=1):
"""
:param cond:
:return:
"""
exp_desc_fname: Filename = Filename(
os.path.join(
DATA_FILE_PATH,
f'EDD_Omics_Experiment_Description_mock{cond}.xlsx'))
index_label = 'Line Name'
exp_desc_cols = pd.Index([
'Line Description',
'Media',
'Shaking speed',
'Starting OD',
'Culture Volume',
'Flask Volume',
'Growth Temperature',
'Replicate Count',
], name=index_label)
metadata_wt: Dict[str, Dict[str, Any]] = {'WT': {
'Line Description': 'R. Opacus PD630 wild type (mock)',
'Media': 'Mock media',
'Shaking speed': 1.0,
'Starting OD': 0.1,
'Culture Volume': 50.0,
'Flask Volume': 200.0,
'Growth Temperature': 30.0,
'Replicate Count': 1,
}}
exp_desc_df = pd.DataFrame.from_dict(metadata_wt,
orient='index',
columns=exp_desc_cols)
print(gray('Saving file'), magenta(exp_desc_fname),
gray('... '), end='')
try:
exp_desc_df.to_excel(exp_desc_fname,
sheet_name='EXP_DESC',
index_label=index_label)
except IOError as ex:
print(red('ERROR!'))
self.vprint(ex)
else:
print(green('OK!'))
#======================================
# MAIN FUNCTION
#======================================
def check_debug(args):
"""Check debugging mode"""
if args.debug:
print(blue('INFO:'), gray('Debugging mode activated'))
print(blue('INFO:'), gray('Active parameters:'))
for key, val in vars(args).items():
if val is not None and val is not False and val != []:
print(gray(f'\t{key} ='), f'{val}')
args.verbose = True # Unconditional verbose mode activation
elif not sys.warnoptions:
warnings.simplefilter("ignore")
def generate_data_for_host(filename):
global HOST_NAME
global DATA_FILE_PATH
global OUTPUT_FILE_PATH
# if data folder doesn't exist create it
if not os.path.isdir(DATA_FILE_PATH):
os.mkdir(DATA_FILE_PATH)
if not os.path.isdir(OUTPUT_FILE_PATH):
os.mkdir(OUTPUT_FILE_PATH)
# copy the training file to the data folder
src_file = f'{TRAINING_FILE_PATH}/{TRAINING_FILE_NAME}'
dest_file = f'{DATA_FILE_PATH}/{TRAINING_FILE_NAME}'
dest = copyfile(src_file, dest_file)
src_file = f'{MODEL_FILEPATH}/{MODEL_FILENAME}'
dest_file = f'{DATA_FILE_PATH}/{MODEL_FILENAME}'
dest = copyfile(src_file, dest_file)
"""
Generate omics data for host and model name
"""
if HOST_NAME == 'ecoli':
        # create an instance of the Ecoli class
ecoli = Ecoli()
# read model file
model = ecoli.read_model(filename)
# generate ecoli synthetic data for model and condition
condition = 1
ecoli.generate_time_series_data(model, condition)
elif HOST_NAME == 'ropacus':
        # create an instance of the Ropacus class
rop = Ropacus()
# read model file
model = rop.read_model(filename)
# generate time series mock data for host
rop.generate_time_series_data(model)
def main():
"""Main entry point to the script."""
global REACTION_ID_ECOLI
global DATA_FILE_PATH
global HOST_NAME
global MODEL_FILENAME
global MODEL_FILEPATH
global TIMESTART
global TIMESTOP
global NUMPOINTS
global TRAINING_FILE_NAME
global TRAINING_FILE_PATH
global INITIAL_OD
# Argument Parser Configuration
parser = argparse.ArgumentParser(
description='Omics Mock Generator',
epilog='%(prog)s -- {}'.format(__date__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-d', '--debug',
action='store_true',
help='enable debug mode (implies verbose mode)'
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='enable verbose mode'
)
parser.add_argument(
'-V', '--version',
action='version',
version='%(prog)s release {} ({})'.format(__version__, __date__)
)
# parser.add_argument(
# '-ho', '--host',
# default='ropacus',
# help='specify host organism'
# )
# parser.add_argument(
# '-mf', '--modelfile',
# default='reannotated_base_v3.sbml',
# help='specify model file to use, should be in data folder'
# )
parser.add_argument(
'-ho', '--host',
default='ecoli',
help='specify host organism'
)
    parser.add_argument(
        '-mf', '--modelfile',
        default='iJO1366_MVA.json',
        help='specify model file to use, should be in data folder'
    )
    parser.add_argument(
        '-mfp', '--modelfilepath',
        default='sample_files',
        help='specify model file path to use'
    )
parser.add_argument(
'-tstart', '--timestart',
default=0.0,
help='specify the start time for generating the time series data'
)
parser.add_argument(
'-tstop', '--timestop',
default=9.0,
help='specify the stop time for generating the time series data'
)
parser.add_argument(
'-np', '--numpoints',
default=9,
        help='specify the number of points between timestart and timestop for which to generate the time series data'
)
parser.add_argument(
'-tf', '--trainingfile',
default='training_data_8genes.csv',
help='specify the training file name'
)
parser.add_argument(
'-tfp', '--trainingfilepath',
default='sample_files',
help='specify the training file path name'
)
parser.add_argument(
'-nr', '--numreactions',
default=1,
help='specify the number of reactions in the training file'
)
    parser.add_argument(
        '-ni', '--numinstances',
        default=1,
        help='specify the number of instances/strains in the training file'
    )
    # main() reads args.initialod below, but no such argument was defined;
    # the argument is assumed here with a default of 0.1, matching the
    # 'Starting OD' used in the mock experiment description
    parser.add_argument(
        '-iod', '--initialod',
        default=0.1,
        help='specify the initial optical density (OD) of the culture'
    )
# user_params = {
# 'host': 'ecoli', # ecoli or ropacus
# 'modelfile': 'iJO1366_MVA.json',
# 'timestart': 0.0,
# 'timestop': 8.0,
# 'numpoints': 9,
# 'reactants': ['glc__D_e', 'nh4_e', 'pi_e', 'so4_e', 'mg2_e', 'k_e', 'na1_e', 'cl_e'],
# 'initial_substrates': [22.203, 18.695, 69.454, 2.0, 2.0, 21.883, 103.7, 27.25],
# }
# Parse arguments
args = parser.parse_args()
# Program header
print('\n=-= {} =-= v{} - {} =-= by {} =-=\n'.format(
sys.argv[0], __version__, __date__, __author__))
# Select cases depending on the debug flag
check_debug(args)
    # read the host, model file and other run parameters from the parsed arguments
HOST_NAME = args.host
MODEL_FILEPATH = args.modelfilepath
MODEL_FILENAME = args.modelfile
TIMESTART = args.timestart
TIMESTOP = args.timestop
NUMPOINTS = args.numpoints
TRAINING_FILE_NAME = args.trainingfile
TRAINING_FILE_PATH = args.trainingfilepath
NUM_REACTIONS = args.numreactions
NUM_INSTANCES = args.numinstances
INITIAL_OD = args.initialod
filename: Filename = Filename(f'{MODEL_FILEPATH}/{MODEL_FILENAME}')
# get time series omics data for specified host and model
generate_data_for_host(filename)
if __name__ == "__main__":
# TODO: Ask for filename and reaction name and then generate the mock data
main()
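# Example invocation (sketch, relying on the argparse defaults above):
#   python omg.py --host ecoli --modelfile iJO1366_MVA.json --verbose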
|
{"hexsha": "b4899439dac98fbdd0e9a91d0e9d4b1d91f6f4d9", "size": 52007, "ext": "py", "lang": "Python", "max_stars_repo_path": "omg.py", "max_stars_repo_name": "somtirtharoy/generate_test_omics_data", "max_stars_repo_head_hexsha": "efc22faaf98faf92f2a96e4a2dc93f4204015044", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-17T10:21:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T10:10:30.000Z", "max_issues_repo_path": "omg.py", "max_issues_repo_name": "somtirtharoy/generate_test_omics_data", "max_issues_repo_head_hexsha": "efc22faaf98faf92f2a96e4a2dc93f4204015044", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "omg.py", "max_forks_repo_name": "somtirtharoy/generate_test_omics_data", "max_forks_repo_head_hexsha": "efc22faaf98faf92f2a96e4a2dc93f4204015044", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-11T20:30:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-11T20:30:09.000Z", "avg_line_length": 37.6316931983, "max_line_length": 165, "alphanum_fraction": 0.609744842, "include": true, "reason": "import numpy", "num_tokens": 12584}
|
#include <iostream>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
void expireCallback(const boost::system::error_code& /*e*/) {
std::cout << "Hello from expireCallback" << std::endl;
}
void printCallback(const boost::system::error_code& /*e*/,
boost::asio::deadline_timer* t, int* count) {
// Only for 5 ticks!
if (*count < 5) {
std::cout << "[printCallback] " << *count << std::endl;
++(*count);
t->expires_at(t->expires_at() + boost::posix_time::seconds(1));
t->async_wait(boost::bind(printCallback,
boost::asio::placeholders::error,
t,
count));
} else {
t->async_wait(&expireCallback);
}
}
class Printer {
public:
Printer(boost::asio::io_service& io)
: timer_(io, boost::posix_time::seconds(1)),
count_(0) {
timer_.async_wait(boost::bind(&Printer::onTick, this));
}
~Printer() {
std::cout << "[Printer:" << this << "] final count = " << count_ << std::endl;
}
void onTick() {
if (count_ < 5) {
std::cout << "[Printer::onTick] " << count_ << std::endl;
++count_;
timer_.expires_at(timer_.expires_at() + boost::posix_time::seconds(1));
timer_.async_wait(boost::bind(&Printer::onTick, this));
} else {
timer_.async_wait(boost::bind(&Printer::onExpire, this));
}
}
void onExpire() {
std::cout << "[Printer::onExpire] " << count_ << std::endl;
}
private:
boost::asio::deadline_timer timer_;
int count_;
};
int main(int argc, const char *argv[])
{
boost::asio::io_service io;
Printer p(io);
io.run();
std::cout << "Hello after calling io.run!" << std::endl;
return 0;
}
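// Build sketch (assumed flags; adjust to your Boost installation):
//   g++ -std=c++11 testBoostAsio.cpp -o testBoostAsio -lboost_system -pthread
// Expected behaviour: five [Printer::onTick] lines (counts 0-4), one
// [Printer::onExpire] line, the "Hello after calling io.run!" message, and
// finally the destructor's final count once p goes out of scope.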
|
{"hexsha": "1fe690fcec614a5045cb237bb2a5f5792db87973", "size": 1777, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Networking/testBoostAsio.cpp", "max_stars_repo_name": "drbenmorgan/codingtools", "max_stars_repo_head_hexsha": "65040e7ecc1276a8d8a74f09cd0344160130bd48", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-06-24T08:51:30.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-24T08:51:30.000Z", "max_issues_repo_path": "Networking/testBoostAsio.cpp", "max_issues_repo_name": "drbenmorgan/codingtools", "max_issues_repo_head_hexsha": "65040e7ecc1276a8d8a74f09cd0344160130bd48", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Networking/testBoostAsio.cpp", "max_forks_repo_name": "drbenmorgan/codingtools", "max_forks_repo_head_hexsha": "65040e7ecc1276a8d8a74f09cd0344160130bd48", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3857142857, "max_line_length": 82, "alphanum_fraction": 0.5779403489, "num_tokens": 478}
|
# USAGE
# python convert_winco_receipt.py --image images/page.jpg --scanned images/page_scanned.jpg
# import the necessary packages
from pyimagesearch.transform import four_point_transform
from pyimagesearch import imutils
from skimage.filters import threshold_adaptive
import numpy as np
import argparse
import cv2
from PIL import Image
import pytesseract
import re
def winco_receipt_line(line):
s = re.search(r'((I|T)F)|((I|T)x)', line)
if(s is None):
return None
TF_ind = s.start()
output_string = ', '.join((re.sub(r'\W+', '', line[0:20]), ' ',
line[TF_ind-5:TF_ind].rstrip()))
return output_string
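# Example sketch (hypothetical OCR line): for
#   line = 'MILK 2% GAL 2.58 TF'
# the search matches 'TF' at index 17, so the function returns
#   'MILK2GAL258TF,  , 2.58'
# i.e. the first 20 chars with non-word characters stripped, a spacer,
# and the 5 characters (the price) preceding the match.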
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
help = "Path to the image to be scanned")
ap.add_argument("-s", "--scanned", required = False, default='scanned',
help = "Where to save the scanned image")
ap.add_argument("-c", "--csv", required = False,
help = "Where to save the scanned image")
ap.add_argument("-w", "--which_receipt", required = False, default='winco',
help = "Where to save the scanned image")
args = vars(ap.parse_args())
# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread(args["image"])
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height = 500)
# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
# show the original image and the edge detected image
#print "STEP 1: Edge Detection"
#cv2.imshow("Image", image)
#cv2.imshow("Edged", edged)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
# loop over the contours
screenCnt = None
for c in cnts:
	# approximate the contour
	peri = cv2.arcLength(c, True)
	approx = cv2.approxPolyDP(c, 0.02 * peri, True)
	# if our approximated contour has four points, then we
	# can assume that we have found our screen
	if len(approx) == 4:
		screenCnt = approx
		break
# guard against no 4-point contour being found (screenCnt would otherwise
# be undefined when used below)
if screenCnt is None:
	raise RuntimeError('Could not find a 4-point contour for the document')
# show the contour (outline) of the piece of paper
#print "STEP 2: Find contours of paper"
#cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
#cv2.imshow("Outline", image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# apply the four point transform to obtain a top-down
# view of the original image
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
# NOTE: threshold_adaptive was removed from recent scikit-image releases;
# threshold_local is the modern replacement
warped = threshold_adaptive(warped, 250, offset = 10)
warped = warped.astype("uint8") * 255
# show the original and scanned images
#print "STEP 3: Apply perspective transform"
#cv2.imshow("Original", imutils.resize(orig, height = 650))
#cv2.imshow("Scanned", imutils.resize(warped, height = 650))
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#Save transformed image
sp = args['image'].split('.')
save_filename = sp[0] + '_' + args['scanned'] + '.' + sp[1]
cv2.imwrite(save_filename, warped)
###
#Convert image to csv file
###
# show the original and scanned images
#print "STEP 4: Convert receipt to csv file"
csv_filename = sp[0] + '.csv'
csv_file = open(csv_filename, "w")
if(args['which_receipt'] == 'winco'):
	process_line = winco_receipt_line
else:
	# only WinCo receipts are supported; fail early instead of hitting a
	# NameError on process_line below
	raise ValueError('Unsupported receipt type: {}'.format(args['which_receipt']))
st = pytesseract.image_to_string(Image.open(save_filename), config="-psm 6")
for cur_line in st.split('\n'):
print(cur_line)
ret = process_line(cur_line)
if(ret is None):
continue
csv_file.write(ret + '\n')
csv_file.close()
|
{"hexsha": "da0ec82becf348dde4d79e9e65fcf9f7cbf596e7", "size": 3882, "ext": "py", "lang": "Python", "max_stars_repo_path": "ReceiPy/document-scanner/scan_bkp.py", "max_stars_repo_name": "philipguedes/tocapopy", "max_stars_repo_head_hexsha": "79e58aafbb606d883d3c2b0df098ecfdecc35931", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ReceiPy/document-scanner/scan_bkp.py", "max_issues_repo_name": "philipguedes/tocapopy", "max_issues_repo_head_hexsha": "79e58aafbb606d883d3c2b0df098ecfdecc35931", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ReceiPy/document-scanner/scan_bkp.py", "max_forks_repo_name": "philipguedes/tocapopy", "max_forks_repo_head_hexsha": "79e58aafbb606d883d3c2b0df098ecfdecc35931", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.328125, "max_line_length": 91, "alphanum_fraction": 0.7117465224, "include": true, "reason": "import numpy", "num_tokens": 1066}
|
from dash import Dash, dcc, html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
from src.SideBar import sidebar
import src.SummaryTab as SummaryTab
import src.CountTab as CountTab
import src.TimesTab as TimesTab
# Load data
# Data for first plot
df_all = pd.read_csv('data/processed/all_data.csv')
fraser_all = pd.read_csv('data/processed/all_fraser.csv')
interior_all = pd.read_csv('data/processed/all_interior.csv')
northern_all = pd.read_csv('data/processed/all_northern.csv')
psha_all = pd.read_csv('data/processed/all_psha.csv')
vc_all = pd.read_csv('data/processed/all_vancouver_coastal.csv')
vi_all = pd.read_csv('data/processed/all_vancouver_island.csv')
# Data for plots by hospital and procedure
df_main = pd.read_csv('data/processed/main_data.csv')
fraser = pd.read_csv('data/processed/fraser.csv')
interior = pd.read_csv('data/processed/interior.csv')
northern = pd.read_csv('data/processed/northern.csv')
psha = pd.read_csv('data/processed/psha.csv')
vc = pd.read_csv('data/processed/vancouver_coastal.csv')
vi = pd.read_csv('data/processed/vancouver_island.csv')
# normalize the year column: keep only the leading 4-digit year
def add_Y_Q(df):
    df['year'] = df['year'].astype('str').str[0:4]
dflist = [df_all, fraser_all, interior_all, northern_all, psha_all, vc_all, vi_all,
          df_main, fraser, interior, northern, psha, vc, vi]
for item in dflist:
    add_Y_Q(item)
# dataframe selection function:
def region_df(region="All",alldata=False):
"""
Get the corresponding dataframe though a passed region value.
Take the output region from the dropdown selection and return the corresponding dataframe.
If the dataframe is going to be used in the line plot which show the total from a certain region,
then alldata should be True; otherwise alldata is default to be False to detailed data to be used
in the bar plots.
Parameters
----------
region : str
The string of the region name.
alldata: Boolean
If the data is the total data from certain region.
Returns
-------
dataframe
The returned dataframe.
Examples
--------
>>> region_df(region="Fraser",alldata=False)
fraser
"""
if alldata==True: #alldata: data for first plot
if region=="All":
return df_all
elif region=="Fraser":
return fraser_all
elif region=="Interior":
return interior_all
elif region=="Northern":
return northern_all
elif region=="Provincial Health Services Authority":
return psha_all
elif region=="Vancouver Coastal":
return vc_all
elif region=="Vancouver Island":
return vi_all
elif alldata==False:
if region=="All":
return df_main
elif region=="Fraser":
return fraser
elif region=="Interior":
return interior
elif region=="Northern":
return northern
elif region=="Provincial Health Services Authority":
return psha
elif region=="Vancouver Coastal":
return vc
elif region=="Vancouver Island":
return vi
else:
return None
# Declare dash app
app = Dash(
__name__,
external_stylesheets = [dbc.themes.MINTY],
title = 'BC Surgical Wait Times'
)
# app.config.suppress_callback_exceptions = True
server = app.server
# Configure Altair - uncomment to run locally, comment out for Heroku deployment
# alt.renderers.enable('mimetype')
# alt.data_transformers.enable('data_server')
# alt.data_transformers.disable_max_rows()
# Layout
app.layout = dbc.Container(
children = [
dcc.Location(id = 'url'),
sidebar,
html.Div(
id = 'page-content',
style = {
'margin-left': '25rem',
'padding': '2rem 1rem'
}
)
],
fluid = True
)
# Populate dropdown list
sidebar.children[4].children[1].options = np.append(['All'], df_all.health_authority.unique())
sidebar.children[4].children[1].value = 'All'
## Callback functions
# Navigation
@app.callback(
Output('page-content', 'children'),
Input('url', 'pathname')
)
def render_page_content(pathname):
if pathname == '/count_tab_proc':
return CountTab.proc
elif pathname == '/count_tab_hosp':
return CountTab.hosp
elif pathname == '/times_tab_proc':
return TimesTab.proc
elif pathname == '/times_tab_hosp':
return TimesTab.hosp
else:
return SummaryTab.intro
# Tabs
@app.callback(
Output('tcp1','srcDoc'),
Input('region-select', 'value'))
def update_tcp1(autho):
return CountTab.line_plot_tc(region_df(autho,alldata=True))
@app.callback(
Output('ttp1','srcDoc'),
Input('region-select', 'value'))
def update_ttp1(autho):
return TimesTab.line_plot_tt(region_df(autho, True))
@app.callback(
Output('tcp2','srcDoc'),
Input('region-select', 'value'))
def update_tcp2(autho):
return CountTab.plot_bar_sbs_procedure_tc(region_df(autho))
@app.callback(
Output('ttp2','srcDoc'),
Input('region-select', 'value'))
def update_ttp2(autho):
return TimesTab.plot_bar_sbs_procedure_tt(region_df(autho))
@app.callback(
Output('tcp3','srcDoc'),
Input('region-select', 'value'))
def update_tcp3(autho):
return CountTab.plot_bar_sbs_hospital_tc(region_df(autho))
@app.callback(
Output('ttp3','srcDoc'),
Input('region-select', 'value'))
def update_ttp3(autho):
return TimesTab.plot_bar_sbs_hospital_tt(region_df(autho))
if __name__ == '__main__':
app.run_server()
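# Run locally (sketch): python src/app.py, then open http://127.0.0.1:8050/
# (Dash's default host/port for app.run_server()).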
|
{"hexsha": "93254183eb6ec229e44b12d2ec2f5ed1e0920200", "size": 5676, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/app.py", "max_stars_repo_name": "ubco-mds-2021-labs/dashboard1-group-f", "max_stars_repo_head_hexsha": "9de5ffc12974ff0f2f1b0585e3c81410578ed41d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-02-16T22:30:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T01:24:46.000Z", "max_issues_repo_path": "src/app.py", "max_issues_repo_name": "ubco-mds-2021-labs/dashboard1-group-f", "max_issues_repo_head_hexsha": "9de5ffc12974ff0f2f1b0585e3c81410578ed41d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2022-02-16T23:02:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T20:12:36.000Z", "max_forks_repo_path": "src/app.py", "max_forks_repo_name": "ubco-mds-2021-labs/dashboard1-group-f", "max_forks_repo_head_hexsha": "9de5ffc12974ff0f2f1b0585e3c81410578ed41d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-11T01:54:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T01:54:37.000Z", "avg_line_length": 30.6810810811, "max_line_length": 119, "alphanum_fraction": 0.6723044397, "include": true, "reason": "import numpy", "num_tokens": 1367}
|
{-# OPTIONS --without-K --safe #-}
open import Relation.Binary using (Rel)
module Algebra.Structures.Field
{a ℓ} {A : Set a}
(_≈_ : Rel A ℓ)
where
open import Algebra.Linear.Core
open import Relation.Nullary using (¬_)
open import Algebra.Structures _≈_
open import Algebra.FunctionProperties _≈_
open import Level using (_⊔_)
record NonZero (0# : A) : Set (a ⊔ ℓ) where
field
value : A
non-zero : ¬ (value ≈ 0#)
MultiplicativeInverse : ∀ (0# : A) -> Set (a ⊔ ℓ)
MultiplicativeInverse 0# = NonZero 0# → NonZero 0#
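-- Modelling the inverse as a map NonZero 0# → NonZero 0# makes it impossible
-- to apply _⁻¹ to zero, so the field axioms below need no side conditions.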
record IsField (_+_ _*_ : Op₂ A) (0# 1# : A) (-_ : Op₁ A) (_⁻¹ : MultiplicativeInverse 0#) : Set (a ⊔ ℓ) where
field
isCommutativeRing : IsCommutativeRing _+_ _*_ -_ 0# 1#
_⁻¹-involutive : ∀ (x : NonZero 0#) → NonZero.value ((x ⁻¹) ⁻¹) ≈ NonZero.value x
_⁻¹-inverse : ∀ (x : NonZero 0#) → ((NonZero.value x) * (NonZero.value (x ⁻¹))) ≈ 1#
0#-not-1# : ¬ (0# ≈ 1#)
open IsCommutativeRing isCommutativeRing public
open import Algebra.Properties.Ring (record { isRing = isRing }) public
|
{"hexsha": "9de497b077e85ca77fd2819d7a9a975d7002ab1d", "size": 1053, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/Algebra/Structures/Field.agda", "max_stars_repo_name": "felko/linear-algebra", "max_stars_repo_head_hexsha": "d87c5a1eb5dd0569238272e67bce1899616b789a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-11-02T14:11:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-30T06:18:08.000Z", "max_issues_repo_path": "src/Algebra/Structures/Field.agda", "max_issues_repo_name": "felko/linear-algebra", "max_issues_repo_head_hexsha": "d87c5a1eb5dd0569238272e67bce1899616b789a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Algebra/Structures/Field.agda", "max_forks_repo_name": "felko/linear-algebra", "max_forks_repo_head_hexsha": "d87c5a1eb5dd0569238272e67bce1899616b789a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4594594595, "max_line_length": 110, "alphanum_fraction": 0.6353276353, "num_tokens": 386}
|
import matplotlib.pyplot as plt
import numpy as np
from comb_step_ramp import comb_step_ramp
t = np.arange(-10, 10,0.01)
# TIME SCALING BY t/2
x=[]
comb_step_ramp(t/2,x)
plt.subplot(2,2,1)
plt.step(t,x)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.xlabel('time')
plt.ylabel('function value')
plt.title('time scaling by 1/2 function')
plt.show()
# AMPLITUDE SCALING BY 4
y=[]
comb_step_ramp(t,y)
y[:] = [v * 4 for v in y]  # scale every sample by 4
plt.subplot(2,2,2)
plt.step(t,y)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.xlabel('time')
plt.ylabel('function value')
plt.title('amplitude scaling by 4 function')
plt.show()
# AMPLITUDE SCALING BY -4
z=[]
comb_step_ramp(t,z)
z[:] = [v * -4 for v in z]  # scale every sample by -4
plt.subplot(2,2,3)
plt.step(t,z)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
plt.xlabel('time')
plt.ylabel('function value')
plt.title('amplitude scaling by -4 function')
plt.show()
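# Note: evaluating the signal at t/2 stretches it in time by a factor of 2,
# while multiplying each sample by 4 (or -4) scales the amplitude; the
# negative gain additionally flips the waveform about the time axis.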
|
{"hexsha": "88c45c52fa04aa9d037be5ec798d4045fdeddebd", "size": 974, "ext": "py", "lang": "Python", "max_stars_repo_path": "Signal Processing/CODES/5th.6th.7th.py", "max_stars_repo_name": "Pavan1199/Python-Signal-Processing", "max_stars_repo_head_hexsha": "9164bb76305a149d65d32ea89abebf368a1e6d65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-03T17:19:06.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-03T17:19:06.000Z", "max_issues_repo_path": "Signal Processing/CODES/5th.6th.7th.py", "max_issues_repo_name": "Pavan1199/Python-Signal-Processing", "max_issues_repo_head_hexsha": "9164bb76305a149d65d32ea89abebf368a1e6d65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Signal Processing/CODES/5th.6th.7th.py", "max_forks_repo_name": "Pavan1199/Python-Signal-Processing", "max_forks_repo_head_hexsha": "9164bb76305a149d65d32ea89abebf368a1e6d65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.48, "max_line_length": 46, "alphanum_fraction": 0.6570841889, "include": true, "reason": "import numpy", "num_tokens": 310}
|
import numpy as np
import pandas as pd
from collections import defaultdict
import math
from sklearn.cluster import KMeans, AgglomerativeClustering
SORTED_GROUP = ['Consensus', 'BPS-C', 'BPS-NC', 'UNI', 'PPM', 'Bandit-0', 'Bandit-1']
def sort_kernel_matrix(df, feature_kernel, kmeans_based_on='label_distance'):
    """Reorder a feature kernel so rows/columns group by design Group and,
    within each group, by clusters of similar labels or sequences."""
    group_dict = df.groupby('Group').groups
groups = list(group_dict.keys())
new_ordering = [] # idx ordering
intersection = [i for i in SORTED_GROUP if i in set(groups)]
print('groups: ', intersection)
frr_seqs = np.asarray(df['RBS'])
for group in intersection:
df_group = df[df['Group'] == group]
num_seqs = len(df_group)
# num_clusters = int(num_seqs/5) + 1
# print('Group: ', group)
# print('Number of sequences: ', num_seqs)
# print('number of clusters: ', num_clusters)
idx = np.asarray(group_dict[group])
# print('idx: ', idx)
#print(label_distance[idx[0]: idx[-1], idx[0]: idx[-1]])
if kmeans_based_on == 'label_distance': # kmeans based on label distances
num_clusters = int(num_seqs/5) + 1
# print('number of clusters: ', num_clusters)
kmeans = KMeans(n_clusters = num_clusters, random_state = 0).fit(np.asarray(df['AVERAGE'])[idx].reshape(len(idx),1))
cluster_dict = defaultdict(list) # key: cluster id; value: idx list
for i, cluster_id in enumerate(kmeans.labels_):
cluster_dict[cluster_id].append(idx[i])
# print('cluster dict: ', cluster_dict)
# print('kmeans labels: ', kmeans.labels_)
elif kmeans_based_on == 'seq_distance': # kmeans based on spectrum distances
num_clusters = int(num_seqs/8) + 1
# print('number of clusters: ', num_clusters)
#kmeans = KMeans(n_clusters = num_clusters, random_state = 0).fit(phi_X[idx[0]: idx[-1] + 1, :])
cluster_dict = defaultdict(list) # key: cluster id; value: idx list
if len(idx) > 1:
model = AgglomerativeClustering(n_clusters=num_clusters)
model.fit(feature_kernel[idx])
for i, cluster_id in enumerate(model.labels_):
cluster_dict[cluster_id].append(idx[i])
# print('cluster dict: ', cluster_dict)
# print('kmeans labels: ', model.labels_)
else: # if the number of data points in one cluster is too small, just put them in one cluster
cluster_dict[0] = idx
# print('Sorting inside clusterings:')
for key, value in cluster_dict.items():
seq_list = []
for i in value:
seq_list.append(frr_seqs[i])
# print('key: ', key)
# print('seq list: ', seq_list)
argsorted_seq_list = np.argsort(seq_list)
# print('argsorted seq list: ', argsorted_seq_list)
cluster_dict[key] = np.asarray(value)[np.asarray(argsorted_seq_list)]
# print('sorted cluster dict: ', cluster_dict)
# print('Sorting clusterings:')
if kmeans_based_on == 'label_distance':
# print('kmeans cluster center: ', kmeans.cluster_centers_)
argsorted_cluster_ids = np.argsort(kmeans.cluster_centers_.reshape(num_clusters,))[::-1]
else:
# TODO: check
argsorted_cluster_ids = range(num_clusters)
# print('argsort kmeans cluster center: ', argsorted_cluster_ids)
for cluster_id in argsorted_cluster_ids:
for i in cluster_dict[cluster_id]:
new_ordering.append(i)
# print('new ordering: ', new_ordering)
# print()
return feature_kernel[:, new_ordering][new_ordering,:], new_ordering
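# Minimal usage sketch (assumptions: df carries the 'Group', 'RBS' and
# 'AVERAGE' columns used above, and feature_kernel is a square matrix
# aligned with df's row order).
if __name__ == '__main__':
    demo_df = pd.DataFrame({
        'Group': ['UNI', 'UNI', 'PPM'],
        'RBS': ['AAGGAG', 'ACGGAG', 'AAGGAA'],
        'AVERAGE': [1.2, 0.8, 2.5],
    })
    demo_kernel = np.eye(len(demo_df))  # identity stands in for a real kernel
    sorted_kernel, ordering = sort_kernel_matrix(demo_df, demo_kernel)
    print('new ordering:', ordering)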
|
{"hexsha": "9c9d866405994823e36c1abd05a0841eadaa69fe", "size": 3934, "ext": "py", "lang": "Python", "max_stars_repo_path": "synbio_rbs/src/sort_seq.py", "max_stars_repo_name": "mholowko/Solaris", "max_stars_repo_head_hexsha": "25f65e72667f1e92e0d5c26bc9cbe159a6a15ace", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synbio_rbs/src/sort_seq.py", "max_issues_repo_name": "mholowko/Solaris", "max_issues_repo_head_hexsha": "25f65e72667f1e92e0d5c26bc9cbe159a6a15ace", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synbio_rbs/src/sort_seq.py", "max_forks_repo_name": "mholowko/Solaris", "max_forks_repo_head_hexsha": "25f65e72667f1e92e0d5c26bc9cbe159a6a15ace", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.202247191, "max_line_length": 128, "alphanum_fraction": 0.5854092527, "include": true, "reason": "import numpy", "num_tokens": 896}
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.lineplot import LinePlot
from chaco.text_box_overlay import TextBoxOverlay
from enable.component_editor import ComponentEditor
from numpy import linspace
from traits.api import List, Any, Event, Callable, Dict, Int, Bool
from traitsui.api import View, UItem
from pychron.core.helpers.fits import convert_fit
from pychron.core.regression.base_regressor import BaseRegressor
from pychron.graph.context_menu_mixin import RegressionContextMenuMixin
from pychron.graph.error_envelope_overlay import ErrorEnvelopeOverlay
from pychron.graph.graph import Graph
from pychron.graph.tools.point_inspector import PointInspector, \
PointInspectorOverlay
from pychron.graph.tools.rect_selection_tool import RectSelectionTool, \
RectSelectionOverlay
from pychron.graph.tools.regression_inspector import RegressionInspectorTool, \
RegressionInspectorOverlay, make_statistics, make_correlation_statistics
class StatisticsTextBoxOverlay(TextBoxOverlay):
pass
class CorrelationTextBoxOverlay(TextBoxOverlay):
pass
class NoRegressionCTX(object):
def __init__(self, obj, refresh=False):
self._refresh = refresh
self._obj = obj
def __enter__(self):
self._obj.suppress_regression = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._obj.suppress_regression = False
if self._refresh:
self._obj.refresh()
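# Usage sketch (assumes `g` is a RegressionGraph instance, defined below):
#
#   with g.no_regression(refresh=True):
#       g.set_fit('linear', plotid=0)
#       g.set_fit('parabolic', plotid=1)
#
# Regression is suppressed while the fits change and recomputed once on exit.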
class RegressionGraph(Graph, RegressionContextMenuMixin):
_cached_hover = Dict
_cached_sel = Dict
indices = List
filters = List
selected_component = Any
regression_results = Event
suppress_regression = False
use_data_tool = True
use_inspector_tool = True
use_point_inspector = True
convert_index_func = Callable
grouping = Int
show_grouping = Bool
# def __init__(self, *args, **kw):
# super(RegressionGraph, self).__init__(*args, **kw)
# self._regression_lock = Lock()
# ===============================================================================
# context menu handlers
# ===============================================================================
def cm_toggle_filtering(self):
regs = {}
for plot in self.plots:
for k, v in plot.plots.items():
if k.startswith('fit'):
pp = v[0]
# regs.append(pp.regressor)
regs[k[3:]] = pp.regressor
fo = pp.regressor.filter_outliers_dict['filter_outliers']
pp.regressor.filter_outliers_dict['filter_outliers'] = not fo
pp.regressor.dirty = True
pp.regressor.calculate()
# if hasattr(v[0], 'filter_outliers_dict'):
# fo = v[0].filter_outliers_dict['filter_outliers']
# v[0].filter_outliers_dict['filter_outliers'] = not fo
# if not fo:
# v[0].index.metadata['selections'] = []
# else:
for plot in self.plots:
for k, v in plot.plots.items():
if k.startswith('data'):
scatter = v[0]
idx = k[4:]
reg = regs[idx]
fo = scatter.filter_outliers_dict['filter_outliers']
scatter.filter_outliers_dict['filter_outliers'] = fo = not fo
self._set_regressor(scatter, reg)
scatter.index.metadata['selections'] = reg.get_excluded() if fo else []
self.redraw()
def cm_toggle_filter_bounds_all(self):
for plot in self.plots:
self.cm_toggle_filter_bounds(plot, redraw=False)
self.redraw()
def cm_toggle_filter_bounds(self, plot=None, redraw=True):
if plot is None:
plot = self.plots[self.selected_plotid]
for k, v in plot.plots.items():
if k.startswith('fit'):
pp = v[0]
pp.filter_bounds.visible = not pp.filter_bounds.visible
if redraw:
self.redraw()
def cm_linear(self):
self.set_fit('linear', plotid=self.selected_plotid)
self._update_graph()
def cm_parabolic(self):
self.set_fit('parabolic', plotid=self.selected_plotid)
self._update_graph()
def cm_cubic(self):
self.set_fit('cubic', plotid=self.selected_plotid)
self._update_graph()
def cm_quartic(self):
self.set_fit('quartic', plotid=self.selected_plotid)
self._update_graph()
def cm_exponential(self):
self.set_fit('exponential', plotid=self.selected_plotid)
self._update_graph()
def cm_average_std(self):
self.set_fit('average_std', plotid=self.selected_plotid)
self._update_graph()
def cm_average_sem(self):
self.set_fit('average_sem', plotid=self.selected_plotid)
self._update_graph()
def cm_sd(self):
self.set_error_calc_type('sd', plotid=self.selected_plotid)
self._update_graph()
def cm_sem(self):
self.set_error_calc_type('sem', plotid=self.selected_plotid)
self._update_graph()
def cm_ci(self):
self.set_error_calc_type('ci', plotid=self.selected_plotid)
self._update_graph()
def cm_mc(self):
self.set_error_calc_type('mc', plotid=self.selected_plotid)
self._update_graph()
# ===============================================================================
#
# ===============================================================================
def new_series(self, x=None, y=None,
ux=None, uy=None, lx=None, ly=None,
fx=None, fy=None,
fit='linear',
display_filter_bounds=False,
filter_outliers_dict=None,
use_error_envelope=True,
truncate='',
marker='circle',
marker_size=2,
add_tools=True,
add_inspector=True,
add_point_inspector=True,
add_selection=True,
convert_index=None,
plotid=None, *args,
**kw):
kw['marker'] = marker
kw['marker_size'] = marker_size
if plotid is None:
plotid = len(self.plots) - 1
if not fit:
s, p = super(RegressionGraph, self).new_series(x, y,
plotid=plotid,
*args, **kw)
if add_tools:
self.add_tools(p, s, None, convert_index, add_inspector, add_point_inspector)
return s, p
scatter, si = self._new_scatter(kw, marker, marker_size,
plotid, x, y, fit,
filter_outliers_dict, truncate)
lkw = kw.copy()
lkw['color'] = 'black'
lkw['type'] = 'line'
lkw['render_style'] = 'connectedpoints'
plot, names, rd = self._series_factory(fx, fy, plotid=plotid,
**lkw)
line = plot.plot(names, add=False, **rd)[0]
line.index.sort_order = 'ascending'
self.set_series_label('fit{}'.format(si), plotid=plotid)
plot.add(line)
plot.add(scatter)
if use_error_envelope:
self._add_error_envelope_overlay(line)
o = self._add_filter_bounds_overlay(line)
if filter_outliers_dict and display_filter_bounds:
o.visible = True
if x is not None and y is not None:
if not self.suppress_regression:
self._regress(plot, scatter, line)
try:
self._set_bottom_axis(plot, plot, plotid)
except:
pass
if add_tools:
self.add_tools(plot, scatter, line,
convert_index, add_inspector, add_point_inspector, add_selection)
return plot, scatter, line
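    # Usage sketch (assumes `g` is a RegressionGraph with a plot created):
    #
    #   plot, scatter, line = g.new_series(xs, ys, fit='linear',
    #       filter_outliers_dict={'filter_outliers': True})
    #
    # Note the asymmetric return: when `fit` is falsy, the early branch above
    # returns only (scatter, plot).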
def add_tools(self, plot, scatter, line=None,
convert_index=None, add_inspector=True, add_point_inspector=True, add_selection=True):
if add_inspector:
# add a regression inspector tool to the line
if line:
tool = RegressionInspectorTool(component=line)
overlay = RegressionInspectorOverlay(component=line,
tool=tool)
line.tools.append(tool)
line.overlays.append(overlay)
if add_point_inspector:
point_inspector = PointInspector(scatter,
convert_index=convert_index or self.convert_index_func)
pinspector_overlay = PointInspectorOverlay(component=scatter,
tool=point_inspector)
scatter.overlays.append(pinspector_overlay)
scatter.tools.append(point_inspector)
if add_selection:
rect_tool = RectSelectionTool(scatter)
rect_overlay = RectSelectionOverlay(tool=rect_tool)
scatter.overlays.append(rect_overlay)
scatter.tools.append(rect_tool)
def add_correlation_statistics(self, plotid=0):
plot = self.plots[plotid]
for k, v in plot.plots.items():
if k.startswith('fit'):
pp = v[0]
text = '\n'.join(make_correlation_statistics(pp.regressor))
label = CorrelationTextBoxOverlay(text=text,
border_color='black')
pp.overlays.append(label)
break
def add_statistics(self, plotid=0, options=None):
plot = self.plots[plotid]
for k, v in plot.plots.items():
if k.startswith('fit'):
pp = v[0]
if hasattr(pp, 'regressor'):
pp.statistics_options = options
text = '\n'.join(make_statistics(pp.regressor, options=options))
label = StatisticsTextBoxOverlay(text=text,
border_color='black')
pp.underlays.append(label)
break
# def set_filter_outliers(self, fi, plotid=0, series=0):
# plot = self.plots[plotid]
# scatter = plot.plots['data{}'.format(series)][0]
# scatter.filter_outliers_dict['filter_outliers'] = fi
# self.redraw()
# def get_filter_outliers(self, fi, plotid=0, series=0):
# plot = self.plots[plotid]
# scatter = plot.plots['data{}'.format(series)][0]
# return scatter.filter_outliers_dict['filter_outliers']
def set_error_calc_type(self, fi, plotid=0, series=0, redraw=True):
fi = fi.lower()
plot = self.plots[plotid]
key = 'data{}'.format(series)
if key in plot.plots:
scatter = plot.plots[key][0]
f = scatter.fit
if '_' in f:
f = f.split('_')[0]
scatter.fit = '{}_{}'.format(f, fi)
def set_fit(self, fi, plotid=0, series=0):
fi = fi.lower()
plot = self.plots[plotid]
key = 'data{}'.format(series)
if key in plot.plots:
scatter = plot.plots[key][0]
# print key
if scatter.fit != fi:
lkey = 'fit{}'.format(series)
if lkey in plot.plots:
line = plot.plots[lkey][0]
line.regressor = None
print('fit for {}={}, {}'.format(key, fi, scatter))
scatter.ofit = scatter.fit
scatter.fit = fi
else:
print('invalid key', fi, plotid, key, plot.plots.keys())
def get_fit(self, plotid=0, series=0):
try:
plot = self.plots[plotid]
scatter = plot.plots['data{}'.format(series)][0]
return scatter.fit
except IndexError:
pass
_outside_regressor = False
def set_regressor(self, reg, plotid=0):
self._outside_regressor = True
plot = self.plots[plotid]
for pp in plot.plots.values():
for ppp in pp:
if isinstance(ppp, LinePlot):
ppp.regressor = reg
def clear(self):
self.selected_component = None
for p in self.plots:
for pp in p.plots.values():
if hasattr(pp, 'error_envelope'):
pp.error_envelope.component = None
del pp.error_envelope
if hasattr(pp, 'regressor'):
del pp.regressor
super(RegressionGraph, self).clear()
def no_regression(self, refresh=False):
return NoRegressionCTX(self, refresh=refresh)
def refresh(self, **kw):
self._update_graph()
def update_metadata(self, obj, name, old, new):
"""
            fired when the index metadata changes, i.e. user selection
"""
# don't update if hover metadata change
if hasattr(obj, 'suppress_hover_update'):
if obj.suppress_hover_update:
return
self._update_graph()
# private
def _update_graph(self, *args, **kw):
regs = []
for i, plot in enumerate(self.plots):
ps = plot.plots
ks = list(ps.keys())
try:
scatters, idxes = list(zip(*[(ps[k][0], k[4:]) for k in ks if k.startswith('data')]))
fls = [ps['fit{}'.format(idx)][0] for idx in idxes]
for si, fl in zip(scatters, fls):
if not si.no_regression:
r = self._plot_regression(plot, si, fl)
regs.append((plot, r))
except ValueError as e:
# add a float instead of regressor to regs
try:
si = ps[ks[0]][0]
regs.append((plot, si.value.get_data()[-1]))
except IndexError:
break
self.regression_results = regs
# force layout updates. i.e for ErrorBarOverlay
for plot in self.plots:
for p in plot.plots.values():
p[0]._layout_needed = True
self.redraw(force=False)
def _plot_regression(self, plot, scatter, line):
if not plot.visible:
return
return self._regress(plot, scatter, line)
def _regress(self, plot, scatter, line):
fit, err = convert_fit(scatter.fit)
if fit is None:
return
r = None
if line and hasattr(line, 'regressor'):
r = line.regressor
if fit in [1, 2, 3, 4]:
r = self._poly_regress(scatter, r, fit)
elif fit == 'exponential':
r = self._exponential_regress(scatter, r, fit)
elif isinstance(fit, tuple):
r = self._least_square_regress(scatter, r, fit)
elif isinstance(fit, BaseRegressor):
r = self._custom_regress(scatter, r, fit)
else:
r = self._mean_regress(scatter, r, fit)
if r:
r.error_calc_type = err
if line:
plow = plot.index_range._low_value
phigh = plot.index_range._high_value
# print plow, phigh
if hasattr(line, 'regression_bounds') and line.regression_bounds:
low, high, first, last = line.regression_bounds
if first:
low = min(low, plow)
elif last:
high = max(high, phigh)
else:
low, high = plow, phigh
fx = linspace(low, high, 100)
fy = r.predict(fx)
line.regressor = r
try:
line.index.set_data(fx)
line.value.set_data(fy)
except BaseException as e:
                    print('Regression Exception, {}'.format(e))
return
if hasattr(line, 'error_envelope'):
ci = r.calculate_error_envelope(fx, fy)
if ci is not None:
ly, uy = ci
else:
ly, uy = fy, fy
line.error_envelope.lower = ly
line.error_envelope.upper = uy
line.error_envelope.invalidate()
if hasattr(line, 'filter_bounds'):
ci = r.calculate_filter_bounds(fy)
if ci is not None:
ly, uy = ci
else:
ly, uy = fy, fy
line.filter_bounds.lower = ly
line.filter_bounds.upper = uy
line.filter_bounds.invalidate()
return r
def _set_regressor(self, scatter, r):
selection = scatter.index.metadata['selections']
selection = (set(selection) - set(r.outlier_excluded + r.truncate_excluded))
x = scatter.index.get_data()
y = scatter.value.get_data()
sel = list(selection)
if hasattr(scatter, 'yerror'):
yserr = scatter.yerror.get_data()
r.trait_set(yserr=yserr)
r.trait_set(xs=x, ys=y,
user_excluded=sel,
filter_outliers_dict=scatter.filter_outliers_dict)
r.dirty = True
def _set_excluded(self, scatter, r):
scatter.no_regression = True
d = scatter.index.metadata.copy()
d['selections'] = x = r.get_excluded()
scatter.index.trait_setq(metadata=d)
# scatter.invalidate_and_redraw()
# scatter.index.metadata['selections'] = r.get_excluded()
scatter.no_regression = False
def _poly_regress(self, scatter, r, fit):
from pychron.core.regression.ols_regressor import PolynomialRegressor
from pychron.core.regression.wls_regressor import WeightedPolynomialRegressor
if hasattr(scatter, 'yerror') and any(scatter.yerror.get_data()):
if r is None or not isinstance(r, WeightedPolynomialRegressor):
r = WeightedPolynomialRegressor()
else:
if r is None or not isinstance(r, PolynomialRegressor):
r = PolynomialRegressor()
self._set_regressor(scatter, r)
r.trait_set(degree=fit)
r.set_truncate(scatter.truncate)
if r.ys.shape[0] < fit + 1:
return
r.calculate()
self._set_excluded(scatter, r)
return r
def _exponential_regress(self, scatter, r, fit):
from pychron.core.regression.least_squares_regressor import ExponentialRegressor, FitError
if r is None or not isinstance(r, ExponentialRegressor):
r = ExponentialRegressor()
self._set_regressor(scatter, r)
try:
r.calculate()
self._set_excluded(scatter, r)
except FitError:
f, e = convert_fit(scatter.ofit)
r = self._poly_regress(scatter, r, f)
return r
def _least_square_regress(self, scatter, r, fit):
from pychron.core.regression.least_squares_regressor import LeastSquaresRegressor
func, initial_guess = fit
if r is None or not isinstance(r, LeastSquaresRegressor):
r = LeastSquaresRegressor()
self._set_regressor(scatter, r)
r.trait_set(fitfunc=func,
initial_guess=initial_guess,
trait_change_notify=False)
r.calculate()
self._set_excluded(scatter, r)
return r
def _mean_regress(self, scatter, r, fit):
from pychron.core.regression.mean_regressor import MeanRegressor, WeightedMeanRegressor
if hasattr(scatter, 'yerror') and fit == 'weighted mean':
if r is None or not isinstance(r, WeightedMeanRegressor):
r = WeightedMeanRegressor()
else:
if r is None or not isinstance(r, MeanRegressor):
r = MeanRegressor()
self._set_regressor(scatter, r)
# r.trait_setq(fit=fit)
r.calculate()
self._set_excluded(scatter, r)
return r
def _custom_regress(self, scatter, r, fit):
kw = {}
if hasattr(scatter, 'yerror'):
es = scatter.yerror.get_data()
kw['yserr'] = es
if hasattr(scatter, 'xerror'):
es = scatter.xerror.get_data()
kw['xserr'] = es
if r is None or not isinstance(r, fit):
r = fit()
self._set_regressor(scatter, r)
# r.trait_set(trait_change_notify=False,
# **kw)
r.trait_setq(**kw)
r.calculate()
self._set_excluded(scatter, r)
return r
def _new_scatter(self, kw, marker, marker_size, plotid,
x, y, fit, filter_outliers_dict, truncate):
kw['type'] = 'scatter'
plot, names, rd = self._series_factory(x, y, plotid=plotid, **kw)
rd['selection_color'] = 'white'
rd['selection_outline_color'] = rd['color']
rd['selection_marker'] = marker
rd['selection_marker_size'] = marker_size + 1
scatter = plot.plot(names, add=False, **rd)[0]
si = len([p for p in plot.plots.keys() if p.startswith('data')])
self.set_series_label('data{}'.format(si), plotid=plotid)
if filter_outliers_dict is None:
filter_outliers_dict = dict(filter_outliers=False)
else:
filter_outliers_dict = filter_outliers_dict.copy()
scatter.fit = fit
scatter.filter = None
scatter.filter_outliers_dict = filter_outliers_dict
scatter.truncate = truncate
scatter.index.on_trait_change(self.update_metadata, 'metadata_changed')
scatter.no_regression = False
return scatter, si
def _add_filter_bounds_overlay(self, line):
o = ErrorEnvelopeOverlay(component=line, use_region=True, color=(1))
line.underlays.append(o)
line.filter_bounds = o
o.visible = False
return o
def _add_error_envelope_overlay(self, line):
o = ErrorEnvelopeOverlay(component=line)
line.underlays.append(o)
line.error_envelope = o
def _regression_results_changed(self):
for plot in self.plots:
for k, v in plot.plots.items():
if k.startswith('fit'):
pp = v[0]
o = next((oo for oo in pp.underlays if isinstance(oo, StatisticsTextBoxOverlay)), None)
if o:
o.text = '\n'.join(make_statistics(pp.regressor, options=pp.statistics_options))
o.request_redraw()
break
o = next((oo for oo in pp.overlays if isinstance(oo, CorrelationTextBoxOverlay)), None)
if o:
o.text = '\n'.join(make_correlation_statistics(pp.regressor))
o.request_redraw()
break
def traits_view(self):
v = View(UItem('grouping', defined_when='show_grouping'),
UItem('plotcontainer',
style='custom',
editor=ComponentEditor()),
title=self.window_title,
width=self.window_width,
height=self.window_height,
x=self.window_x,
y=self.window_y,
resizable=self.resizable)
return v
# ============= EOF =============================================
|
{"hexsha": "ab0f972bf6b6de2e1dda7786098b1d331f790c7a", "size": 24596, "ext": "py", "lang": "Python", "max_stars_repo_path": "pychron/graph/regression_graph.py", "max_stars_repo_name": "UManPychron/pychron", "max_stars_repo_head_hexsha": "b84c9fd70072f9cbda30abe2c471e64fe3dd75d8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pychron/graph/regression_graph.py", "max_issues_repo_name": "UManPychron/pychron", "max_issues_repo_head_hexsha": "b84c9fd70072f9cbda30abe2c471e64fe3dd75d8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2015-10-08T14:21:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T14:19:21.000Z", "max_forks_repo_path": "pychron/graph/regression_graph.py", "max_forks_repo_name": "UManPychron/pychron", "max_forks_repo_head_hexsha": "b84c9fd70072f9cbda30abe2c471e64fe3dd75d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5947901592, "max_line_length": 107, "alphanum_fraction": 0.5483005367, "include": true, "reason": "from numpy", "num_tokens": 5214}
|
"""Test cases for SaveDetectionSamples."""
import json
import logging
import os
import shutil
import numpy as np
from ambianic.pipeline.store import JsonEncoder, SaveDetectionSamples
from ambianic.pipeline.timeline_event import PipelineContext
from PIL import Image
log = logging.getLogger(__name__)
def test_json_encoder():
inp = {
"label": "FALL",
"confidence": np.float32(12.3),
"leaning_angle": np.float32(24.3),
"keypoint_corr": {
"left shoulder": [np.float32(1.2), np.float32(1.23)],
"left hip": [np.float32(1.2), np.float32(1.23)],
"right shoulder": [np.float32(1.2), np.float32(1.23)],
"right hip": [np.float32(1.2), np.float32(1.23)],
},
}
encode = json.dumps(inp, cls=JsonEncoder)
decode = json.loads(encode)
assert isinstance(decode["confidence"], float)
assert isinstance(decode["leaning_angle"], float)
assert isinstance(decode["keypoint_corr"]["left shoulder"][0], float)
assert isinstance(decode["keypoint_corr"]["left shoulder"][1], float)
def test_json_encoder_integerData():
inp = np.int32(10)
encode = json.dumps(inp, cls=JsonEncoder)
decode = json.loads(encode)
assert isinstance(decode, int)
def test_json_encoder_arrayData():
inp = np.array([1, 2, 3, 4, 5])
encode = json.dumps(inp, cls=JsonEncoder)
decode = json.loads(encode)
assert isinstance(decode, list)
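# The assertions above rely on JsonEncoder mapping numpy scalars and arrays to
# native JSON types. A minimal sketch of such an encoder (an assumption: the
# real implementation in ambianic.pipeline.store may differ in details):
#
#     class NumpyJsonEncoder(json.JSONEncoder):
#         def default(self, obj):
#             if isinstance(obj, np.integer):
#                 return int(obj)
#             if isinstance(obj, np.floating):
#                 return float(obj)
#             if isinstance(obj, np.ndarray):
#                 return obj.tolist()
#             return super().default(obj)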
def test_process_sample_none():
store = SaveDetectionSamples()
processed_samples = store.process_sample(image=None, inference_result=None)
processed_samples = list(processed_samples)
assert len(processed_samples) == 1
assert processed_samples[0] is None
def test_process_sample():
out_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = os.path.join(out_dir, "tmp/")
out_dir = os.path.abspath(out_dir)
context = PipelineContext(unique_pipeline_name="test pipeline")
context.data_dir = out_dir
store = _TestSaveDetectionSamples(context=context, event_log=logging.getLogger())
img = Image.new("RGB", (60, 30), color="red")
detections = [
{
"label": "person",
"confidence": np.float32(0.98),
"box": {
"xmin": np.float32(0.1),
"ymin": np.float32(1.1),
"xmax": np.float32(2.1),
"ymax": np.float32(3.1),
},
}
]
processed_samples = list(
store.process_sample(image=img, thumbnail=img, inference_result=detections)
)
assert len(processed_samples) == 1
print(processed_samples)
img_out = processed_samples[0]["image"]
assert img_out == img
inf = processed_samples[0]["inference_result"]
print(inf)
category = inf[0]["label"]
confidence = inf[0]["confidence"]
(x0, y0) = inf[0]["box"]["xmin"], inf[0]["box"]["ymin"]
(x1, y1) = inf[0]["box"]["xmax"], inf[0]["box"]["ymax"]
assert category == "person"
assert isinstance(confidence, np.float32)
assert isinstance(x0, np.float32)
assert isinstance(y0, np.float32)
assert isinstance(x1, np.float32)
assert isinstance(y1, np.float32)
assert store._save_sample_called
assert store._inf_result == detections
assert store._img_path
img_dir = os.path.dirname(os.path.abspath(store._img_path / "../../"))
assert img_dir == out_dir
out_img = Image.open(store._img_path)
print(img_dir)
print(store._img_path)
assert out_img.mode == "RGB"
assert out_img.size[0] == 60
assert out_img.size[1] == 30
json_dir = os.path.dirname(os.path.abspath(store._json_path / "../../"))
assert json_dir == out_dir
print(json_dir)
print(store._json_path)
with open(store._json_path) as f:
json_inf = json.load(f)
print(json_inf)
img_fname = json_inf["image_file_name"]
rel_dir = json_inf["rel_dir"]
img_fpath = os.path.join(out_dir, rel_dir, img_fname)
assert img_fpath == str(store._img_path)
assert os.path.exists(img_fpath)
json_inf_res = json_inf["inference_result"]
assert len(json_inf_res) == 1
json_inf_res = json_inf_res[0]
assert json_inf_res["label"] == "person"
assert isinstance(json_inf_res["confidence"], float)
assert isinstance(json_inf_res["box"]["xmin"], float)
assert isinstance(json_inf_res["box"]["ymin"], float)
assert isinstance(json_inf_res["box"]["xmax"], float)
assert isinstance(json_inf_res["box"]["ymax"], float)
shutil.rmtree(out_dir)
class _TestSaveDetectionSamples(SaveDetectionSamples):
_save_sample_called = False
_img_path = None
_json_path = None
_inf_result = None
def _save_sample(
self,
inf_time=None,
image=None,
thumbnail=None,
inference_result=None,
inference_meta=None,
):
self._save_sample_called = True
self._inf_result = inference_result
self._img_path, self._json_path = super()._save_sample(
inf_time=inf_time,
image=image,
thumbnail=thumbnail,
inference_result=inference_result,
inference_meta=inference_meta,
)
def test_store_positive_detection():
"""The first time a positive sample is processed, it should be saved."""
out_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = os.path.join(out_dir, "tmp/")
out_dir = os.path.abspath(out_dir)
context = PipelineContext(unique_pipeline_name="test pipeline")
context.data_dir = out_dir
store = _TestSaveDetectionSamples(context=context, event_log=logging.getLogger())
img = Image.new("RGB", (60, 30), color="red")
detections = [
{
"label": "person",
"confidence": 0.98,
"box": {"xmin": 0, "ymin": 1, "xmax": 2, "ymax": 3},
}
]
processed_samples = list(
store.process_sample(image=img, thumbnail=img, inference_result=detections)
)
assert len(processed_samples) == 1
print(processed_samples)
img_out = processed_samples[0]["image"]
assert img_out == img
inf = processed_samples[0]["inference_result"]
print(inf)
category = inf[0]["label"]
confidence = inf[0]["confidence"]
(x0, y0) = inf[0]["box"]["xmin"], inf[0]["box"]["ymin"]
(x1, y1) = inf[0]["box"]["xmax"], inf[0]["box"]["ymax"]
assert category == "person"
assert confidence == 0.98
assert x0 == 0 and y0 == 1 and x1 == 2 and y1 == 3
assert store._save_sample_called
assert store._inf_result == detections
assert store._img_path
img_dir = os.path.dirname(os.path.abspath(store._img_path / "../../"))
assert img_dir == out_dir
out_img = Image.open(store._img_path)
print(img_dir)
print(store._img_path)
assert out_img.mode == "RGB"
assert out_img.size[0] == 60
assert out_img.size[1] == 30
json_dir = os.path.dirname(os.path.abspath(store._json_path / "../../"))
assert json_dir == out_dir
print(json_dir)
print(store._json_path)
with open(store._json_path) as f:
json_inf = json.load(f)
print(json_inf)
img_fname = json_inf["image_file_name"]
rel_dir = json_inf["rel_dir"]
img_fpath = os.path.join(out_dir, rel_dir, img_fname)
assert img_fpath == str(store._img_path)
assert os.path.exists(img_fpath)
json_inf_res = json_inf["inference_result"]
assert len(json_inf_res) == 1
json_inf_res = json_inf_res[0]
assert json_inf_res["label"] == "person"
assert json_inf_res["confidence"] == 0.98
assert json_inf_res["box"]["xmin"] == 0
assert json_inf_res["box"]["ymin"] == 1
assert json_inf_res["box"]["xmax"] == 2
assert json_inf_res["box"]["ymax"] == 3
shutil.rmtree(out_dir)
def test_store_negative_detection():
"""The first time a negative sample is processed, it should be saved."""
out_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = os.path.join(out_dir, "tmp/")
out_dir = os.path.abspath(out_dir)
context = PipelineContext(unique_pipeline_name="test pipeline")
context.data_dir = out_dir
store = _TestSaveDetectionSamples(context=context, event_log=logging.getLogger())
img = Image.new("RGB", (60, 30), color="red")
detections = []
processed_samples = list(
store.process_sample(image=img, thumbnail=img, inference_result=detections)
)
assert len(processed_samples) == 1
print(processed_samples)
img_out = processed_samples[0]["image"]
assert img_out == img
inf = processed_samples[0]["inference_result"]
print(inf)
assert not inf
assert store._save_sample_called
assert store._inf_result == detections
assert store._img_path
img_dir = os.path.dirname(os.path.abspath(store._img_path / "../../"))
assert img_dir == out_dir
out_img = Image.open(store._img_path)
print(img_dir)
print(store._img_path)
assert out_img.mode == "RGB"
assert out_img.size[0] == 60
assert out_img.size[1] == 30
json_dir = os.path.dirname(os.path.abspath(store._json_path / "../../"))
assert json_dir == out_dir
print(json_dir)
print(store._json_path)
with open(store._json_path) as f:
json_inf = json.load(f)
print(json_inf)
img_fname = json_inf["image_file_name"]
rel_dir = json_inf["rel_dir"]
img_fpath = os.path.join(out_dir, rel_dir, img_fname)
assert img_fpath == str(store._img_path)
assert os.path.exists(img_fpath)
json_inf_res = json_inf["inference_result"]
assert not json_inf_res
shutil.rmtree(out_dir)
def test_store_negative_detection_no_inference():
"""
Expect store to save the image from an inference without any detection.
"""
out_dir = os.path.dirname(os.path.abspath(__file__))
out_dir = os.path.join(out_dir, "tmp/")
out_dir = os.path.abspath(out_dir)
context = PipelineContext(unique_pipeline_name="test pipeline")
context.data_dir = out_dir
store = _TestSaveDetectionSamples(context=context, event_log=logging.getLogger())
img = Image.new("RGB", (60, 30), color="red")
detections = None
processed_samples = list(
store.process_sample(image=img, thumbnail=img, inference_result=detections)
)
assert len(processed_samples) == 1
print(processed_samples)
img_out = processed_samples[0]["image"]
assert img_out == img
inf = processed_samples[0]["inference_result"]
print(inf)
assert not inf
assert store._save_sample_called
assert store._inf_result == detections
assert store._img_path
img_dir = os.path.dirname(os.path.abspath(store._img_path / "../../"))
assert img_dir == out_dir
out_img = Image.open(store._img_path)
print(img_dir)
print(store._img_path)
assert out_img.mode == "RGB"
assert out_img.size[0] == 60
assert out_img.size[1] == 30
json_dir = os.path.dirname(os.path.abspath(store._json_path / "../../"))
assert json_dir == out_dir
print(json_dir)
print(store._json_path)
with open(store._json_path) as f:
json_inf = json.load(f)
print(json_inf)
img_fname = json_inf["image_file_name"]
rel_dir = json_inf["rel_dir"]
img_fpath = os.path.join(out_dir, rel_dir, img_fname)
assert img_fpath == str(store._img_path)
assert os.path.exists(img_fpath)
json_inf_res = json_inf["inference_result"]
assert not json_inf_res
shutil.rmtree(out_dir)
class _TestSaveDetectionSamples2(SaveDetectionSamples):
_save_sample_called = False
result = {
"id": "140343867415240",
"datetime": "2021-05-05 14:04:45.428473",
"inference_result": [
{
"confidence": 0.98828125,
"datetime": "2021-05-05 14:04:45.428473",
"label": "cat",
"id": "140343867415240",
}
],
}
context = PipelineContext(unique_pipeline_name="test pipeline")
context.data_dir = "./tmp/"
store = _TestSaveDetectionSamples(context=context, event_log=logging.getLogger())
store.notify(save_json=result)
def _save_sample(
self,
inf_time=None,
image=None,
thumbnail=None,
inference_result=None,
inference_meta=None,
):
self._save_sample_called = True
def test_process_sample_exception():
"""Exception during processing should not prevent passing the sample on."""
context = PipelineContext(unique_pipeline_name="test pipeline")
context.data_dir = "./tmp/"
store = _TestSaveDetectionSamples2(context=context, event_log=logging.getLogger())
img = Image.new("RGB", (60, 30), color="red")
detections = [
{
"label": "person",
"confidence": 0.98,
"box": {"xmin": 0, "ymin": 1, "xmax": 2, "ymax": 3},
}
]
processed_samples = list(
store.process_sample(
image=img, inference_result=detections, inference_meta=None
)
)
assert store._save_sample_called
assert len(processed_samples) == 1
print(processed_samples)
img_out = processed_samples[0]["image"]
assert img_out == img
inf = processed_samples[0]["inference_result"]
print(inf)
category = inf[0]["label"]
confidence = inf[0]["confidence"]
(x0, y0) = inf[0]["box"]["xmin"], inf[0]["box"]["ymin"]
(x1, y1) = inf[0]["box"]["xmax"], inf[0]["box"]["ymax"]
assert category == "person"
assert confidence == 0.98
assert x0 == 0 and y0 == 1 and x1 == 2 and y1 == 3
|
{"hexsha": "bcebff4fef2575448155fff75679555cf4a95819", "size": 13800, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pipeline/test_store.py", "max_stars_repo_name": "vickywane/ambianic-edge", "max_stars_repo_head_hexsha": "45505eb42f27690646535206a1fb92624b9264c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/pipeline/test_store.py", "max_issues_repo_name": "vickywane/ambianic-edge", "max_issues_repo_head_hexsha": "45505eb42f27690646535206a1fb92624b9264c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-01T11:04:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-01T11:10:40.000Z", "max_forks_repo_path": "tests/pipeline/test_store.py", "max_forks_repo_name": "githwd/ambianic-edge", "max_forks_repo_head_hexsha": "06ea327bed8c7e348210c3ddfb1c4ad6d13fa8fb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1584158416, "max_line_length": 86, "alphanum_fraction": 0.6413768116, "include": true, "reason": "import numpy", "num_tokens": 3450}
|
using ArgParse
include("clustering.jl")
include("datasets.jl")
settings = ArgParseSettings()
@add_arg_table settings begin
"--problem"
arg_type = String
default = "iris"
"--fname"
arg_type = String
default = "lif"
"--seed"
arg_type = Int
default = 0
"--logfile"
arg_type = String
default = "stdp.log"
"--train_epochs"
arg_type = Int
default = 1
"--weight_mean"
arg_type = Float64
default = 0.5
"--weight_std"
arg_type = Float64
default = 0.1
"--t_train"
arg_type = Int
default = 350
"--t_blank"
arg_type = Int
default = 150
"--fr"
arg_type = Float64
default = 65.0
"--pre_target"
arg_type = Float64
default = 0.4
"--stdp_lr"
arg_type = Float64
default = 0.0001
"--stdp_mu"
arg_type = Float64
default = 2.0
"--inhib_weight"
arg_type = Float64
default = 0.1
"--ika"
arg_type = Float64
default = 0.02
"--ikb"
arg_type = Float64
default = 0.2
"--ikd"
arg_type = Float64
default = 2.0
end
args = parse_args(settings)
Logging.configure(filename=args["logfile"], level=INFO)
X, Y = get_data(args["problem"])
n_cluster = length(unique(Y))
stdp_labels = stdp_cluster(
X, Y, n_cluster, x->x; seed=args["seed"], logfile=args["logfile"],
problem=args["problem"], fname=args["fname"],
train_epochs=args["train_epochs"], weight_mean=args["weight_mean"],
weight_std=args["weight_std"], t_train=args["t_train"],
t_blank=args["t_blank"], fr=args["fr"], pre_target=args["pre_target"],
stdp_lr=args["stdp_lr"], stdp_mu=args["stdp_mu"],
inhib_weight=args["inhib_weight"], ika=args["ika"],
ikb=args["ikb"], ikd=args["ikd"])
acc = randindex(stdp_labels, Y)
Logging.info(@sprintf("E%0.6f", -acc[1]))
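# Example invocation (hypothetical values; flags as declared in the arg table above):
#   julia run_cluster.jl --problem iris --fname lif --seed 1 --logfile stdp.log
# The final log line is "E" followed by the negated first component of
# randindex (the adjusted Rand index of the STDP clustering against Y).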
|
{"hexsha": "bd0d38b5604d2eaf6ea785a0b70176dbad2aeb93", "size": 1828, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/run_cluster.jl", "max_stars_repo_name": "d9w/rm-stdp", "max_stars_repo_head_hexsha": "d62916feadbb450c5462c488a03e009aa164199f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-26T00:18:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T00:18:39.000Z", "max_issues_repo_path": "src/run_cluster.jl", "max_issues_repo_name": "d9w/rm-stdp", "max_issues_repo_head_hexsha": "d62916feadbb450c5462c488a03e009aa164199f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/run_cluster.jl", "max_forks_repo_name": "d9w/rm-stdp", "max_forks_repo_head_hexsha": "d62916feadbb450c5462c488a03e009aa164199f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-01T02:09:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T02:09:48.000Z", "avg_line_length": 22.85, "max_line_length": 74, "alphanum_fraction": 0.6159737418, "num_tokens": 555}
|
[STATEMENT]
lemma filterlim_tendsto_pos_mult_at_bot:
fixes c :: real
assumes "(f \<longlongrightarrow> c) F" "0 < c" "filterlim g at_bot F"
shows "LIM x F. f x * g x :> at_bot"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. LIM x F. f x * g x :> at_bot
[PROOF STEP]
using filterlim_tendsto_pos_mult_at_top[OF assms(1,2), of "\<lambda>x. - g x"] assms(3)
[PROOF STATE]
proof (prove)
using this:
LIM x F. - g x :> at_top \<Longrightarrow> LIM x F. f x * - g x :> at_top
filterlim g at_bot F
goal (1 subgoal):
1. LIM x F. f x * g x :> at_bot
[PROOF STEP]
unfolding filterlim_uminus_at_bot
[PROOF STATE]
proof (prove)
using this:
LIM x F. - g x :> at_top \<Longrightarrow> LIM x F. f x * - g x :> at_top
LIM x F. - g x :> at_top
goal (1 subgoal):
1. LIM x F. - (f x * g x) :> at_top
[PROOF STEP]
by simp
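(* Informally: if f tends to a constant c with 0 < c and g tends to at_bot
   along F, then f * g tends to at_bot.  The proof unfolds
   filterlim_uminus_at_bot to restate "g :> at_bot" as "- g :> at_top",
   instantiates filterlim_tendsto_pos_mult_at_top with f and (\<lambda>x. - g x),
   and simp closes the remaining goal by folding f x * - g x = - (f x * g x). *)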
|
{"llama_tokens": 367, "file": null, "length": 3}
|
import argparse
import json
import logging
import os
import os.path as osp
import random
import sys
import time
from collections import Counter
from collections import defaultdict
from collections import namedtuple
import cvxpy as cp
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.sparse import lil_matrix
# lazy: from torch2trt import torch2trt
from misc import DelayedKeyboardInterrupt
from misc import tuplify
from model import SlimMobilenet
from model import LayerType
from viterbi import complete
from viterbi import maxsum
logger = logging.getLogger(__name__)
Vartype = namedtuple("Vartype", LayerType._fields + ('in_channels', 'out_channels'))
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(
description="Generate samples and fit a latency model.")
subparsers = parser.add_subparsers(dest='mode')
subparsers.required = True
parser_bench = subparsers.add_parser('benchmark',
help="Benchmark a single channel configuration",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_bench.add_argument("configuration",
help="configuration to test (comma-separated channels or MOBILENET)")
parser_gen = subparsers.add_parser('generate',
help="Generate latency samples",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
for subparser in (parser_bench, parser_gen):
subparser.add_argument("-D", "--device", choices=["cpu", "gpu", "trt"],
default="gpu", help="Use GPU, CPU or TensorRT latency")
subparser.add_argument("--dtype", choices=["fp32", "fp16"],
default="fp16", help="Datatype for network")
subparser.add_argument("-B", "--batch-size", type=int, default=64,
help="Batch size used for profiling")
subparser.add_argument("-I", "--iterations", type=int, default=60,
help="Profiling iterations")
subparser.add_argument("-W", "--warmup", type=int, default=10,
help="Warmup iterations")
subparser.add_argument("--reduction", choices=['mean', 'min'], default='mean',
help="Reduce timings by their mean or by their minimum (minimum can reduce variance)")
parser_gen.add_argument("--biased", action="store_true",
help="Bias sampling towards missing configurations")
parser_gen.add_argument("-N", "--count", type=int, default=8000,
help="Minimum number of samples to generate")
parser_gen.add_argument("-R", "--repetitions", type=int, default=0,
help="Minimum number of samples per choice")
parser_gen.add_argument("--save-every", type=int, default=1,
help="Number of inferences before saving intermediate output")
parser_gen.add_argument("samples_file", help="Output samples file")
parser_fit = subparsers.add_parser('fit', help="Fit a latency model")
parser_fit.add_argument("-K", "--regularize", type=float, default=0.0,
help="Amount of monotonicity regularization (Equation 7)")
parser_fit.add_argument("samples_file", help="Training samples")
parser_fit.add_argument("model_file", help="Output model file")
parser_val = subparsers.add_parser('validate', help="Validate a latency model")
parser_val.add_argument("samples_file", help="Validation samples")
parser_val.add_argument("model_file", help="Model file")
parser_val.add_argument("plot_file", help="Plot file")
args = parser.parse_args()
if 'configuration' in args:
defaults = {'MOBILENET': "32,64,128,128,256,256,512,512,512,512,512,512,1024,1024"}
if args.configuration in defaults:
args.configuration = defaults[args.configuration]
args.configuration = [int(''.join(ci for ci in c if ci.isdigit())) for c in args.configuration.split(',')]
return args
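# Example invocations (a sketch; file paths are hypothetical, flags as defined above):
#   python latency.py benchmark -D trt --dtype fp16 -B 64 MOBILENET
#   python latency.py generate --biased -N 8000 samples/gpu_fp16.jsonl
#   python latency.py fit -K 0.001 samples/gpu_fp16.jsonl models/latency.jsonl
#   python latency.py validate samples/val.jsonl models/latency.jsonl plots/fit.png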
def get_model(min_width=0.2, max_width=1.5, levels=14):
return SlimMobilenet(min_width=min_width, max_width=max_width, levels=levels)
def benchmark(device, dtype, batch_size, iterations, warmup, reduction, configuration, silent=False):
if device == 'cpu':
dev = torch.device('cpu')
elif device in ['gpu', 'trt']:
dev = torch.device('cuda')
fp = dict(fp16=torch.float16, fp32=torch.float32).get(dtype)
net = SlimMobilenet.reduce(configuration).to(dev).type(fp).eval()
x = torch.ones((batch_size, 3, 224, 224)).to(dev).type(fp)
if device == 'trt':
from torch2trt import torch2trt
net = torch2trt(net, [x], fp16_mode=(dtype == 'fp16'), max_batch_size=batch_size)
for i in range(warmup):
outputs = net(x)
torch.cuda.current_stream().synchronize()
timings = []
t0 = time.time()
for i in range(iterations):
outputs = net(x)
torch.cuda.current_stream().synchronize()
t1 = time.time()
timings.append(t1 - t0)
t0 = t1
ms = 1000.0 * getattr(np, reduction)(timings) / batch_size
if not silent:
print(f"{configuration}: {ms}ms")
return ms
def gen_configuration_biased(net, repetitions):
M = min(repetitions.values())
unary = []
pairwise = []
for i, L in enumerate(net.components):
input_choices = [net.in_channels] if i == 0 else net.configurations[i - 1]
output_choices = ([net.out_channels] if i == len(net.components) - 1
else net.configurations[i])
U = np.zeros(len(input_choices))
P = np.zeros((len(input_choices), len(output_choices)))
for i1, I in enumerate(input_choices):
for i2, O in enumerate(output_choices):
var = Vartype(**L._asdict(), in_channels=I, out_channels=O)
P[i1, i2] = float(repetitions[var] == M)
unary.append(U)
pairwise.append(P)
unary.append(np.zeros(len(output_choices)))
un, pair, states = complete(unary, pairwise)
iconfig = maxsum(un, pair, states)[1]
configuration = [C[i] for (C, i) in zip(net.configurations, iconfig[1:-1])]
return configuration
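# Note on the biased sampler above: P[i1, i2] is 1 exactly when the layer type
# formed by that (in_channels, out_channels) pair still has the minimum
# repetition count M, so max-sum over the resulting chain graph (complete and
# maxsum come from the local viterbi module, not shown here) selects the
# configuration covering as many under-sampled layer types as possible.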
def gen_configuration(net, repetitions, biased=False):
if biased:
return gen_configuration_biased(net, repetitions)
return [random.choice(conf) for conf in net.configurations]
def collect_repetitions(net, configuration=None):
if configuration is None:
configuration = net.configurations
if isinstance(configuration[0], (int, np.integer)): # single configuration
configuration = [[c] for c in configuration]
layertypes = Counter()
for i, L in enumerate(net.components):
input_choices = [net.in_channels] if i == 0 else configuration[i - 1]
output_choices = ([net.out_channels] if i == len(net.components) - 1
else configuration[i])
for I in input_choices:
for O in output_choices:
var = Vartype(**L._asdict(), in_channels=I, out_channels=O)
layertypes[var] += 1
return layertypes
def sample_file_iterator(samples_file):
with open(samples_file, 'r') as f:
for line in f:
yield tuplify(json.loads(line))
def generate(device, dtype, batch_size, iterations, warmup, reduction, biased,
count, repetitions, samples_file=os.devnull, save_every=10):
os.makedirs(osp.dirname(samples_file), exist_ok=True)
net = get_model()
combinations = collect_repetitions(net)
logger.info(f"{len(net.configurations)} modulers")
logger.debug(f"search space: {net.configurations}")
logger.debug(f"components: {net.components}")
logger.info(f"Latency model has {len(combinations)} parameters")
repeats = Counter()
for c in combinations:
repeats[c] = 0
samples = []
if osp.isfile(samples_file):
for sample in sample_file_iterator(samples_file):
samples.append(sample)
repeats.update(collect_repetitions(net, sample[0]))
logger.info(f"Loaded {samples_file}, "
f"min_repetition={min(repeats.values())} "
f"count={len(samples)} ")
logger.info(f"Writing new samples to {samples_file}")
new_samples = []
while (len(samples) + len(new_samples) < count
or min(repeats.values()) < repetitions):
configuration = gen_configuration(net, repeats, biased=biased)
ms = benchmark(device, dtype, batch_size, iterations, warmup, reduction, configuration, silent=True)
repeats.update(collect_repetitions(net, configuration))
logger.info(f"{configuration}: {ms:.04f}ms, "
f"min_repetition={min(repeats.values())} "
f"count={len(samples) + len(new_samples)} ")
new_samples.append([[int(d) for d in configuration], ms])
if (len(new_samples) % save_every) == 0:
with open(samples_file, 'a') as f:
for sample in new_samples:
dump = json.dumps(sample) + '\n'
with DelayedKeyboardInterrupt():
f.write(dump)
samples.extend(new_samples)
new_samples = []
samples.extend(new_samples)
return samples
def build_equation(samples):
"""
Samples can be iterator
"""
net = get_model()
variables = {}
ivariables = {}
Mcoord = []
y = []
for (i, sample) in enumerate(samples):
y.append(sample[1])
local_repeats = collect_repetitions(net, sample[0])
for (L, r) in local_repeats.items():
if L not in variables:
j = len(variables)
variables[L] = j
ivariables[j] = L
Mcoord.append((i, variables[L], r))
y = np.array(y)
M = lil_matrix((len(y), len(variables)))
for (i, j, r) in Mcoord:
M[i, j] = r
return M, y, variables, ivariables
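# The latency model is linear in the per-layer-type costs: for sample i with
# measured latency y[i], M[i, j] counts how many times layer type j occurs in
# the sampled configuration, so fitting solves M x ~ y for the nonnegative
# per-type latencies x.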
def solve_lsq(M, y, regularize=0.0, K=None):
n = M.shape[1]
x = cp.Variable(n)
t = cp.Variable(K.shape[0])
M_cp = cp.Constant(M)
obj = cp.sum_squares(M_cp @ x - y)
constraints = [x >= 0]
if regularize:
K_cp = cp.Constant(K)
obj += regularize * cp.sum_squares(t)
constraints += [t >= 0, K_cp @ x <= t]
objective = cp.Minimize(obj)
prob = cp.Problem(objective, constraints)
prob.solve(cp.SCS, verbose=True)
return x.value
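# With regularize > 0 the program above is (a sketch of the "Equation 7"
# monotonicity regularization mentioned in the CLI help):
#     minimize    ||M x - y||^2 + regularize * sum(t_i^2)
#     subject to  x >= 0,  K x <= t,  t >= 0
# Each row of K is x_smaller - x_larger for two layer types that differ in one
# size attribute, so positive entries of K x are monotonicity violations and t
# acts as a squared hinge on them; with regularize == 0 the problem reduces to
# nonnegative least squares.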
def get_inequalities(variables):
def other(L, *args):
props = L._asdict()
for k in args:
del props[k]
return tuple(props.values())
buckets = defaultdict(list)
for order in ['in_channels', 'out_channels', 'in_size']:
for V in variables:
buckets[other(V, order)].append(V)
inequalities = []
for bucket in buckets.values():
bucket = sorted(bucket)
for i in range(len(bucket) - 1):
inequalities.append((bucket[i], bucket[i + 1]))
K = lil_matrix((len(inequalities), len(variables)))
for i, (C1, C2) in enumerate(inequalities):
K[i, variables[C1]] = 1
K[i, variables[C2]] = -1
return K
def fit_model(samples, regularize=0.0):
M, y, variables, ivariables = build_equation(samples)
K = get_inequalities(variables)
x = solve_lsq(M, y, regularize, K)
model = []
for i, ms in enumerate(x):
model.append((ivariables[i], ms))
return model
def dump_model(model, model_file):
with open(model_file, 'w') as f:
for m in model:
var, ms = m
dump = json.dumps([var._asdict(), ms]) + '\n'
f.write(dump)
def load_model(model_file):
with open(model_file, 'r') as f:
for line in f:
var, ms = tuplify(json.loads(line))
var = Vartype(**var)
yield (var, ms)
def fit(samples_file, model_file, regularize=0.0):
os.makedirs(osp.dirname(model_file), exist_ok=True)
samples = sample_file_iterator(samples_file)
model = fit_model(samples, regularize)
dump_model(model, model_file)
return model
def validate(samples_file, model_file, plot_file):
os.makedirs(osp.dirname(plot_file), exist_ok=True)
model = load_model(model_file)
model_dict = dict(model)
samples = sample_file_iterator(samples_file)
M, y, variables, ivariables = build_equation(samples)
x = [model_dict[ivariables[i]] for i in range(len(variables))]
yhat = M @ x
rmse = np.sqrt(((y - yhat) ** 2).mean())
title = f"RMSE {rmse:.04f}, NRMSE {100 * rmse / y.mean():.02f}%"
print(title)
plt.plot(y, yhat, 'o')
plt.xlabel("ground truth (ms)")
plt.ylabel("predicted (ms)")
plt.title(title)
plt.savefig(plot_file)
if __name__ == "__main__":
logger = logging.getLogger(__file__)
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,
format='%(name)s: %(message)s')
args = parse_args().__dict__
globals()[args.pop('mode')](**args)
|
{"hexsha": "03531c6f65e952a72752cf019b160d1059ac2400", "size": 12898, "ext": "py", "lang": "Python", "max_stars_repo_path": "latency.py", "max_stars_repo_name": "bermanmaxim/AOWS", "max_stars_repo_head_hexsha": "2b9efefd426eebfcefb4b0c09f2683d3a0700951", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2020-02-24T17:13:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T15:37:43.000Z", "max_issues_repo_path": "latency.py", "max_issues_repo_name": "bermanmaxim/AOWS", "max_issues_repo_head_hexsha": "2b9efefd426eebfcefb4b0c09f2683d3a0700951", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-01T12:26:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-09T02:05:02.000Z", "max_forks_repo_path": "latency.py", "max_forks_repo_name": "bermanmaxim/AOWS", "max_forks_repo_head_hexsha": "2b9efefd426eebfcefb4b0c09f2683d3a0700951", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-05-24T12:25:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-02T16:13:44.000Z", "avg_line_length": 36.6420454545, "max_line_length": 117, "alphanum_fraction": 0.6383935494, "include": true, "reason": "import numpy,from scipy,import cvxpy", "num_tokens": 3123}
|
import numpy as np
def insert(x, n):
    x[0] = 3  # in-place write through the alias: visible to the caller
    x = np.concatenate([x, [1]], axis=0)  # rebinds the local name only; caller's array unchanged
    n = 3  # ints are immutable; this rebinding is purely local
    print(x)
    return
def a():
n = 1
x = np.array([0, 1, 2])
insert(x,n)
print(x, n)
if __name__ == "__main__":
a()
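# Expected output:
#   [3 1 2 1]   <- printed inside insert(): the local x was extended after the in-place write
#   [3 1 2] 1   <- printed in a(): the caller's array kept x[0] = 3, and n is still 1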
|
{"hexsha": "38dfedb431ca533f2bd3af9e84e9948b0bac6cee", "size": 242, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_raisim/Dribble_Control/DRMpc_Arm/test.py", "max_stars_repo_name": "Stylite-Y/XArm-Simulation", "max_stars_repo_head_hexsha": "654dca390e635b6294a8b5066727d0f4d6736eb1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model_raisim/Dribble_Control/DRMpc_Arm/test.py", "max_issues_repo_name": "Stylite-Y/XArm-Simulation", "max_issues_repo_head_hexsha": "654dca390e635b6294a8b5066727d0f4d6736eb1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_raisim/Dribble_Control/DRMpc_Arm/test.py", "max_forks_repo_name": "Stylite-Y/XArm-Simulation", "max_forks_repo_head_hexsha": "654dca390e635b6294a8b5066727d0f4d6736eb1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.125, "max_line_length": 42, "alphanum_fraction": 0.4752066116, "include": true, "reason": "import numpy", "num_tokens": 94}
|
#%%
import numpy as np
import pandas as pd
data = pd.read_csv('./processed/IUCN_class_order_extinctions.csv')
bg_rate = 2 # E / MSY
years = 520 # since 1500
expected_per_10k_per_c = bg_rate * years/100
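# Unit check: 2 E/MSY (extinctions per million species-years) equals
# 2 extinctions per 10,000 species per century, so over 520 years the expected
# count is 2 * 5.2 = 10.4 extinctions per 10,000 assessed species; dividing by
# 1E4 below converts this to a per-species expectation.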
# Melt on kingdom and class_order
melted = data.melt(['kingdom', 'class_order'], var_name='assessment', value_name='number')
melted['number'] = melted['number'].astype(int)
melted.to_csv('./processed/IUCN_class_order_extinctions_tidy.csv', index=False)
# Compute the expected extinctions for each kingdom.
expected = {g: d[d['assessment']=='total_species_assessed']['number'].sum() * expected_per_10k_per_c / 1E4 for g, d in melted.groupby(['kingdom'])}
expected
# Exclude the total-species-assessed rows
melted = melted[melted['assessment']!= 'total_species_assessed']
# Compute the totals for each assessment.
by_assessment = melted.groupby(['kingdom', 'assessment']).sum().reset_index()
by_assessment.to_csv('./processed/IUCN_kingdom_by_assessment.csv')
# Create different assessment categories
cats = {'highly conservative (extinct only)': ['extinct'],
'conservative (extinct or extinct in wild)': ['extinct', 'extinct_in_wild'],
'moderate (extinct or probably extinct)': ['extinct', 'probably_extinct'],
'permissive (extinct, probably extinct, extinct in wild, probably extinct in wild)':
['extinct', 'probably_extinct', 'extinct_in_wild', 'probably_extinct_in_wild']}
cat_df = pd.DataFrame([])
for g, d in melted.groupby(['kingdom']):
for k, v in cats.items():
_d = d[d['assessment'].isin(v)]
n_species = _d['number'].sum()
cat_df = cat_df.append({'kingdom':g,
'definition': k,
'observed': n_species,
'expected': expected[g]},
ignore_index=True)
cat_df['observed'] = cat_df['observed'].astype(int)
cat_df['extinction_foldchange'] = cat_df['observed'].values / cat_df['expected'].values
cat_df['bg_rate_EMSY'] = bg_rate
cat_df.to_csv('./processed/IUCN_kingdom_extinction_expectations.csv', index=False)
# %%
cat_df
# %%
|
{"hexsha": "91838772e2657c43368337c10f3ea6beccdd7714", "size": 2108, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/flora_fauna/IUCN_species_extinction/calculate_extinction_rates.py", "max_stars_repo_name": "ilopezgp/human_impacts", "max_stars_repo_head_hexsha": "b2758245edac0946080a647f1dbfd1098c0f0b27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-08-25T00:52:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-16T16:57:46.000Z", "max_issues_repo_path": "data/flora_fauna/IUCN_species_extinction/calculate_extinction_rates.py", "max_issues_repo_name": "ilopezgp/human_impacts", "max_issues_repo_head_hexsha": "b2758245edac0946080a647f1dbfd1098c0f0b27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-10-30T21:22:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-30T02:07:02.000Z", "max_forks_repo_path": "data/flora_fauna/IUCN_species_extinction/calculate_extinction_rates.py", "max_forks_repo_name": "ilopezgp/human_impacts", "max_forks_repo_head_hexsha": "b2758245edac0946080a647f1dbfd1098c0f0b27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-28T10:11:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T07:58:46.000Z", "avg_line_length": 41.3333333333, "max_line_length": 148, "alphanum_fraction": 0.6707779886, "include": true, "reason": "import numpy", "num_tokens": 538}
|
// Copyright (C) 2009-2020 Christian@Schladetsch.com
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_MONOTONIC_RECLAIMABLE_STORAGE_HPP
#define BOOST_MONOTONIC_RECLAIMABLE_STORAGE_HPP
#include <algorithm>
#include <boost/unordered/unordered_set.hpp>
#include <boost/monotonic/detail/prefix.hpp>
#include <boost/monotonic/forward_declarations.hpp>
namespace boost
{
namespace monotonic
{
namespace detail
{
template <>
struct storage_type<heap_region_tag, default_access_tag>
{
template <size_t N, size_t M, class Al>
struct storage
{
typedef reclaimable_storage<N,M,Al> type;
};
};
}
        /// conventional, reclaimable storage
        ///
        /// This is really a long way around to using new and delete; however, it is
        /// also intended to keep track of the allocations made so they can be
        /// released (the tracking machinery below is currently commented out).
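        ///
        /// A minimal usage sketch (hypothetical client code; assumes suitable
        /// template arguments, e.g. the defaults declared in
        /// forward_declarations.hpp):
        ///
        ///     boost::monotonic::reclaimable_storage<> storage;
        ///     int &n = storage.create<int>(42);   // placement-constructs a copy
        ///     storage.destroy(n);                 // runs ~int() and deallocates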
template <size_t InlineSize, size_t MinHeapIncrement, class Al>
struct reclaimable_storage : storage_base
{
typedef reclaimable_storage<InlineSize, MinHeapIncrement, Al> This;
typedef Al Allocator;
typedef typename Allocator::template rebind<char>::other CharAllocator;
/* this has to go to an allocator
struct AllocationBase
{
void *ptr;
AllocationBase(void *P = 0) : ptr(P) { }
virtual void destroy() = 0;
};
template <class T>
struct Allocation : AllocationBase
{
Allocation(T *P) : AllocationBase(P) { }
void destroy()
{
static_cast<T *>(this->AllocationBase::ptr)->~T();
}
};
template <class T, class Alloc>
static Allocation<T> MakeAllocation(T *ptr, Alloc al)
{
typename Alloc::template rebind<Allocation<T> >::other(al) allocator;
Allocation<T> *allocation = allocator.allocate(1);
allocator.construct(allocation, ptr);
return allocation;
}*/
/* this has to go to a tracking_allocator
struct Allocation
{
void *ptr;
size_t size;
Allocation(void *P = 0, size_t N = 0) : ptr(P), size(N) { }
void *get_pointer() const { return ptr; }
size_t get_size() const { return size; }
};
struct AllocHash
{
size_t operator()(Allocation const &alloc) const
{
return reinterpret_cast<size_t>(alloc.ptr);
}
};
struct AllocLess
{
bool operator()(Allocation const &A, Allocation const &B) const
{
return A.ptr < B.ptr;
}
};
typedef boost::unordered_set<
Allocation
, AllocHash
, AllocLess
, Allocator>
Allocations;
*/
private:
CharAllocator alloc;
public:
reclaimable_storage()
{
}
reclaimable_storage(Allocator const &A)
: alloc(A)
{
}
~reclaimable_storage()
{
release();
}
void reset()
{
}
void release()
{
}
void *allocate(size_t num_bytes, size_t /*alignment*/ = 1)
{
return alloc.allocate(num_bytes);
}
void deallocate(void *ptr)
{
alloc.deallocate((char *)ptr, 1);
}
size_t max_size() const
{
return (std::numeric_limits<size_t>::max)();
}
size_t remaining() const
{
return max_size();
}
size_t used() const
{
BOOST_ASSERT(0);
return 0;
}
// ------------------------------------------------------------------------
template <class Ty>
Ty *uninitialised_create()
{
return reinterpret_cast<Ty *>(allocate_bytes<sizeof(Ty)>());
}
template <class Ty>
Ty &create()
{
Ty *ptr = uninitialised_create<Ty>();
construct(ptr, boost::is_pod<Ty>());
return *ptr;
}
template <class Ty>
void construct(Ty *ptr, const boost::true_type& /*is_pod*/)
{
// do nothing
}
template <class Ty>
void construct(Ty *ptr, const boost::false_type&)
{
new (ptr) Ty();
}
template <class Ty>
Ty &create(Ty const &X)
{
Ty *ptr = uninitialised_create<Ty>();
new (ptr) Ty(X);
return *ptr;
}
template <class Ty>
void destroy(Ty &object)
{
object.~Ty();
            deallocate(&object);
}
template <class Ty>
void destroy(Ty const &object)
{
destroy(const_cast<Ty &>(object));
}
template <size_t N>
char *allocate_bytes()
{
return allocate_bytes(N, boost::aligned_storage<N>::alignment);
}
char *allocate_bytes(size_t num_bytes, size_t alignment = 1)
{
return reinterpret_cast<char *>(allocate(num_bytes, alignment));
}
};
} // namespace monotonic
} // namespace boost
#include <boost/monotonic/detail/postfix.hpp>
#endif // BOOST_MONOTONIC_RECLAIMABLE_STORAGE_HPP
//EOF
|
{"hexsha": "a15fb65794016ed5d962d1ac316429567a3eb046", "size": 6263, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/monotonic/reclaimable_storage.hpp", "max_stars_repo_name": "cschladetsch/Monotonic", "max_stars_repo_head_hexsha": "ec37c3743862475719fdc8d2c3820fc0ab43c55a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2016-05-22T21:14:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T21:28:17.000Z", "max_issues_repo_path": "boost/monotonic/reclaimable_storage.hpp", "max_issues_repo_name": "cschladetsch/Monotonic", "max_issues_repo_head_hexsha": "ec37c3743862475719fdc8d2c3820fc0ab43c55a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2017-02-09T14:24:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-31T14:38:45.000Z", "max_forks_repo_path": "boost/monotonic/reclaimable_storage.hpp", "max_forks_repo_name": "cschladetsch/Monotonic", "max_forks_repo_head_hexsha": "ec37c3743862475719fdc8d2c3820fc0ab43c55a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-02-02T20:21:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T20:21:12.000Z", "avg_line_length": 28.3393665158, "max_line_length": 87, "alphanum_fraction": 0.4639948906, "num_tokens": 1198}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 20:27:19 2018
@author: jiahan
"""
import scipy as sc
from scipy.fftpack import fft2, ifft2
import matplotlib.pyplot as plt
def Spectral_Gradient(nx, ny, lx, ly):
# Create wavenumber vector for x-direction
tmp1 = sc.linspace(0, nx/2, int(nx/2+1))*2*sc.pi/lx
tmp2 = sc.linspace(1-nx/2, -1, int(nx/2-1))*2*sc.pi/lx
kx = sc.concatenate((tmp1, tmp2))
# Create wavenumber vector for y-direction
tmp1 = sc.linspace(0, ny/2, int(ny/2+1))*2*sc.pi/ly
tmp2 = sc.linspace(1-ny/2, -1, int(ny/2-1))*2*sc.pi/ly
ky = sc.concatenate((tmp1, tmp2))
# Dealiasing with the 2/3 rule
trunc_x_low = int(sc.floor(2/3*nx/2))+1
trunc_x_high = int(sc.ceil(4/3*nx/2))
kx[trunc_x_low:trunc_x_high] = sc.zeros(trunc_x_high - trunc_x_low)
trunc_y_low = int(sc.floor(2/3*ny/2))+1
trunc_y_high = int(sc.ceil(4/3*ny/2))
ky[trunc_y_low:trunc_y_high] = sc.zeros(trunc_y_high - trunc_y_low)
# Create Gradient operators in Fourier domain for x- and y-direction
Kx, Ky = sc.meshgrid(ky, kx)
Kx = 1j*Kx
Ky = 1j*Ky
return Kx, Ky
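# Example usage (a sketch; `f` is a placeholder field on a 64x64 periodic box
# of side length 2*pi):
#   Kx, Ky = Spectral_Gradient(64, 64, 2*sc.pi, 2*sc.pi)
#   K2, K2inv = Spectral_Laplace(64, 64, Kx, Ky)
#   dfdx = sc.real(ifft2(Kx * fft2(f)))   # spectral x-derivative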
def Spectral_Laplace(nx, ny, Kx, Ky):
# Create 2D Laplace operator in Fourier domain
K2 = Kx*Kx + Ky*Ky
# Create Inverse 2D Laplace operator in Fourier domain
K2inv = sc.zeros((nx, ny))
for i in range(nx):
for j in range(ny):
if K2[i, j] != 0:
K2inv[i, j] = 1/K2[i, j]
return K2, K2inv
def rhs(omega, Kx, Ky, K2, K2inv, nu):
# Transform vorticity to Fourier space
omega_hat = fft2(omega)
# Derivative of vorticity in x-direction and afterwards transformed to Real
# space
omega_x = sc.real(ifft2(Kx*omega_hat))
# Derivative of vorticity in y-direction and afterwards transformed to Real
# space
omega_y = sc.real(ifft2(Ky*omega_hat))
# Velocity in x-direction by solving Poisson equation and afterwards
# transformed to Real space
u = sc.real(ifft2(-Ky*K2inv*omega_hat))
# Velocity in y-direction by solving Poisson equation and afterwards
# transformed to Real space
v = sc.real(ifft2(Kx*K2inv*omega_hat))
# Calculation of Diffusion term and afterwards transformed to Real space
diffusion = sc.real(ifft2(nu*K2*omega_hat))
# RHS of 2D Vorticity equation
RHS = diffusion - u*omega_x - v*omega_y
return RHS
def RK4(t_step, omega, Kx, Ky, K2, K2inv, nu):
k1 = rhs(omega, Kx, Ky, K2, K2inv, nu)
k2 = rhs(omega + k1*t_step/2, Kx, Ky, K2, K2inv, nu)
k3 = rhs(omega + k2*t_step/2, Kx, Ky, K2, K2inv, nu)
k4 = rhs(omega + k3*t_step, Kx, Ky, K2, K2inv, nu)
return omega + t_step*(1/6*k1 + 1/3*k2 + 1/3*k3 + 1/6*k4)
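# Classical fourth-order Runge-Kutta: four RHS evaluations combined with the
# weights 1/6, 1/3, 1/3, 1/6, giving a local error of O(t_step^5) per step.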
def dancing_vortices(nx, ny, dx, dy):
# Initial vortex x-position
x0s = sc.array([sc.pi*0.75, sc.pi*1.25, sc.pi*1.25])
# Initial vortex y-position
y0s = sc.array([1, 1, 1+1/(2*sc.sqrt(2))]) * sc.pi
# Vortex core size
betas = sc.array([1, 1, 1]) / sc.pi
# Strength
alphas = sc.array([1, 1, -1/2]) * sc.pi
# Build field
x = sc.linspace(dx, 2*sc.pi, nx)
y = sc.linspace(dx, 2*sc.pi, ny)
x, y = sc.meshgrid(x, y)
x = sc.transpose(x)
y = sc.transpose(y)
# Calculate omega
omega = sc.zeros([nx, ny], dtype='float64')
for i in range(0, len(x0s)):
x0 = x0s[i]
y0 = y0s[i]
beta = betas[i]
alpha = alphas[i]
R2 = (sc.multiply((x-x0), (x-x0)) + sc.multiply((y-y0), (y-y0))) / \
pow(beta, 2)
omega_part = alpha * sc.exp(-R2)
omega += omega_part
# Initialize pressure field
p = sc.zeros([nx, ny])
print("Initialized three dancing vortices")
plt.imshow(omega)
plt.colorbar()
plt.pause(0.05)
return omega, p
def vortex_pair(nx, ny, dx, dy):
# Domain size
lx = nx * dx
ly = ny * dy
# Initial vortex x-position
x0s = sc.array([0.4, 0.6])*lx
# Initial vortex y-position
y0s = sc.array([0.5, 0.5])*ly
# Strength
alphas = sc.array([-299.5, 299.5])
# Build field
x = sc.linspace(dx, lx, nx)
y = sc.linspace(dx, ly, ny)
x, y = sc.meshgrid(x, y)
x = sc.transpose(x)
y = sc.transpose(y)
# Calculate omega
omega = sc.zeros([nx, ny], dtype='float64')
for i in range(0, len(x0s)):
x0 = x0s[i]
y0 = y0s[i]
alpha = alphas[i]
r = 10*sc.sqrt((x-x0)**2 + (y-y0)**2)
omega_part = alpha * (1-(r**2)) * sc.exp(-r**2)
omega += omega_part
# Initialize pressure field
p = sc.zeros([nx, ny])
print("Initialized vortex pair")
plt.imshow(omega)
plt.colorbar()
plt.pause(0.05)
return omega, p
def random_vortices(nx, ny):
omega_hat = sc.zeros([nx, ny])
tmp = sc.randn(3) + 1j*sc.randn(3)
omega_hat[0, 4] = tmp[0]
omega_hat[1, 1] = tmp[1]
omega_hat[3, 0] = tmp[2]
omega = sc.real(ifft2(omega_hat))
omega = omega/sc.amax(sc.amax(omega))
# Initialize pressure field
p = sc.zeros([nx, ny])
print("Initialized random vortices")
plt.imshow(omega)
plt.colorbar()
plt.pause(0.05)
return omega, p
|
{"hexsha": "9404003c2f16e3cee3ed8ac4520f9006553b176b", "size": 5170, "ext": "py", "lang": "Python", "max_stars_repo_path": "IncompNST_Tools/Pseudospectral.py", "max_stars_repo_name": "JiahanBro/DMD_Test", "max_stars_repo_head_hexsha": "f3bac6a63c61e99a2504ed7886c2ea5d8ebd8a8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "IncompNST_Tools/Pseudospectral.py", "max_issues_repo_name": "JiahanBro/DMD_Test", "max_issues_repo_head_hexsha": "f3bac6a63c61e99a2504ed7886c2ea5d8ebd8a8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IncompNST_Tools/Pseudospectral.py", "max_forks_repo_name": "JiahanBro/DMD_Test", "max_forks_repo_head_hexsha": "f3bac6a63c61e99a2504ed7886c2ea5d8ebd8a8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9270833333, "max_line_length": 79, "alphanum_fraction": 0.5967117988, "include": true, "reason": "import scipy,from scipy", "num_tokens": 1796}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Callable, Optional, Tuple, Union
import numpy as np
import torch
RewardFnType = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
TermFnType = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
ObsProcessFnType = Callable[[np.ndarray], np.ndarray]
TensorType = Union[torch.Tensor, np.ndarray]
TrajectoryEvalFnType = Callable[[TensorType, torch.Tensor], torch.Tensor]
Transition = Tuple[TensorType, TensorType, TensorType, TensorType, TensorType]
@dataclass
class TransitionBatch:
"""Represents a batch of transitions"""
obs: Optional[TensorType]
act: Optional[TensorType]
next_obs: Optional[TensorType]
rewards: Optional[TensorType]
dones: Optional[TensorType]
def __len__(self):
return self.obs.shape[0]
def astuple(self) -> Transition:
return self.obs, self.act, self.next_obs, self.rewards, self.dones
def __getitem__(self, item):
return TransitionBatch(
self.obs[item],
self.act[item],
self.next_obs[item],
self.rewards[item],
self.dones[item],
)
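# Minimal usage sketch (an assumption: all fields are arrays/tensors whose
# first dimension is the batch dimension):
#   batch = TransitionBatch(obs, act, next_obs, rewards, dones)
#   len(batch)                     # batch size, i.e. obs.shape[0]
#   o, a, o2, r, d = batch.astuple()
#   head = batch[:32]              # indexing/slicing yields a new TransitionBatch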
ModelInput = Union[torch.Tensor, TransitionBatch]
|
{"hexsha": "636a8d8a5d0603aeb051e59d784d6d7f368e7df8", "size": 1396, "ext": "py", "lang": "Python", "max_stars_repo_path": "mbrl/types.py", "max_stars_repo_name": "KevinHuang8/CEMGD", "max_stars_repo_head_hexsha": "9c7668264e40904d679e4f22f214a4d7951ce3a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-29T07:42:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T07:34:53.000Z", "max_issues_repo_path": "mbrl/types.py", "max_issues_repo_name": "felipeescallon/mbrl-lib", "max_issues_repo_head_hexsha": "38179ce555d765127a9d9cd5f1188b3c7b0b4dc9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-13T12:37:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-14T16:34:31.000Z", "max_forks_repo_path": "mbrl/types.py", "max_forks_repo_name": "felipeescallon/mbrl-lib", "max_forks_repo_head_hexsha": "38179ce555d765127a9d9cd5f1188b3c7b0b4dc9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-21T23:14:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T09:26:15.000Z", "avg_line_length": 29.7021276596, "max_line_length": 78, "alphanum_fraction": 0.7077363897, "include": true, "reason": "import numpy", "num_tokens": 318}
|
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# Copyright (c) 2021 FrostBit Software Lab
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import time
from datetime import datetime
import numpy as np
from matplotlib import cm
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
try:
import open3d as o3d
except ImportError:
raise RuntimeError('cannot import open3d, make sure open3d package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
VIRIDIS = np.array(cm.get_cmap('plasma').colors)
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])
LABEL_COLORS = np.array([
(255, 255, 255), # None
(70, 70, 70), # Building
(100, 40, 40), # Fences
(55, 90, 80), # Other
(220, 20, 60), # Pedestrian
(153, 153, 153), # Pole
(157, 234, 50), # RoadLines
(128, 64, 128), # Road
(244, 35, 232), # Sidewalk
(107, 142, 35), # Vegetation
(0, 0, 142), # Vehicle
(102, 102, 156), # Wall
(220, 220, 0), # TrafficSign
(70, 130, 180), # Sky
(81, 0, 81), # Ground
(150, 100, 100), # Bridge
(230, 150, 140), # RailTrack
(180, 165, 180), # GuardRail
(250, 170, 30), # TrafficLight
(110, 190, 160), # Static
(170, 120, 50), # Dynamic
(45, 60, 150), # Water
(145, 170, 100), # Terrain
]) / 255.0 # normalize each channel [0-1] since is what Open3D uses
class Open3DLidarWindow():
    ''' Class for handling an Open3D lidar in a separate window.
    This is similar to CARLA examples/open3d_lidar.py, but
    instead of spawning an NPC with its own lidar,
    this attaches the lidar to the ego vehicle and renders it in a
    window separate from the simulation view
    '''
def generate_lidar_bp(self, semantic, world, blueprint_library, delta):
"""Generates a CARLA blueprint based on the script parameters"""
if semantic:
lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic')
else:
lidar_bp = blueprint_library.find('sensor.lidar.custom_ray_cast')
lidar_bp.set_attribute('dropoff_general_rate', '0.0')
lidar_bp.set_attribute('dropoff_intensity_limit', '1.0')
lidar_bp.set_attribute('dropoff_zero_intensity', '0.0')
lidar_bp.set_attribute('upper_fov', str(self.upper_fov))
lidar_bp.set_attribute('lower_fov', str(self.lower_fov))
lidar_bp.set_attribute('channels', str(self.channels))
lidar_bp.set_attribute('range', str(self.range))
lidar_bp.set_attribute('rotation_frequency', str(1.0 / delta))
lidar_bp.set_attribute('points_per_second', str(self.points_per_second))
return lidar_bp
def lidar_callback(self, point_cloud):
"""Prepares a point cloud with intensity
colors ready to be consumed by Open3D"""
data = np.copy(np.frombuffer(point_cloud.raw_data, dtype=np.dtype('f4')))
data = np.reshape(data, (int(data.shape[0] / 4), 4))
# Isolate the intensity and compute a color for it
intensity = data[:, -1]
intensity_col = 1.0 - np.log(intensity) / np.log(np.exp(-0.004 * 100))
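        # Since log(exp(-0.004 * 100)) = -0.4, this is 1 + log(I) / 0.4: raw
        # intensity 1.0 maps to 1.0 and exp(-0.4) ~= 0.67 maps to 0.0, i.e. a
        # log rescaling that appears calibrated to an atmospheric attenuation
        # of 0.004/m over the default 100 m range.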
int_color = np.c_[
np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 0]),
np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 1]),
np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 2])]
# Isolate the 3D data
points = data[:, :-1]
        # Negate one axis so the visualized world matches what we see in
        # Unreal, since Open3D uses a right-handed coordinate system
        # (note: as written this flips the first column, i.e. x)
        points[:, :1] = -points[:, :1]
self.point_list.points = o3d.utility.Vector3dVector(points)
self.point_list.colors = o3d.utility.Vector3dVector(int_color)
def semantic_lidar_callback(self, point_cloud):
"""Prepares a point cloud with semantic segmentation
colors ready to be consumed by Open3D"""
data = np.frombuffer(point_cloud.raw_data, dtype=np.dtype([
('x', np.float32), ('y', np.float32), ('z', np.float32),
('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
        # We're negating the y to correctly visualize a world that matches
        # what we see in Unreal, since Open3D uses a right-handed coordinate system
points = np.array([data['x'], -data['y'], data['z']]).T
# # An example of adding some noise to our data if needed:
# points += np.random.uniform(-0.05, 0.05, size=points.shape)
# Colorize the pointcloud based on the CityScapes color palette
labels = np.array(data['ObjTag'])
int_color = LABEL_COLORS[labels]
# # In case you want to make the color intensity depending
# # of the incident ray angle, you can use:
# int_color *= np.array(data['CosAngle'])[:, None]
self.point_list.points = o3d.utility.Vector3dVector(points)
self.point_list.colors = o3d.utility.Vector3dVector(int_color)
def add_open3d_axis(self):
"""Add a small 3D axis on Open3D Visualizer"""
axis = o3d.geometry.LineSet()
axis.points = o3d.utility.Vector3dVector(np.array([
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]))
axis.lines = o3d.utility.Vector2iVector(np.array([
[0, 1],
[0, 2],
[0, 3]]))
axis.colors = o3d.utility.Vector3dVector(np.array([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]))
self.vis.add_geometry(axis)
def load_default_open3d_position(self):
'''load default open3d position and rotation from json file and set zoom'''
ctrl = self.vis.get_view_control()
ctrl.set_zoom(0.3)
parameters = o3d.io.read_pinhole_camera_parameters("./sensors/open3d_start_pos.json")
ctrl.convert_from_pinhole_camera_parameters(parameters)
def render(self):
"""Render lidar to open3d window"""
        if self.frame == 2: # wait a few frames before initializing
self.vis.add_geometry(self.point_list)
if not self.startup_done: # initialize startup position, must be called after add_geometry()
self.startup_done = True
self.load_default_open3d_position()
self.vis.update_geometry(self.point_list)
self.vis.update_renderer()
self.vis.poll_events()
self.frame += 1
def destroy(self):
"""Destroy lidar and open3d window"""
if self.lidar is not None:
self.lidar.stop()
self.lidar.destroy()
self.lidar = None
self.vis.destroy_window()
def setup(self, world, vehicle, show_axis, vehicle_name, semantic = True):
delta = 0.05
blueprint_library = world.get_blueprint_library()
lidar_bp = self.generate_lidar_bp(semantic, world, blueprint_library, delta)
lidar_position = carla.Location(x=-0.5, y=0.0, z=2)
# adjust lidar Z position if vehicle is bus
if vehicle_name == "bus":
lidar_position = carla.Location(x=-0.0, y=0.0, z=3.3)
lidar_transform = carla.Transform(lidar_position)
self.lidar = world.spawn_actor(lidar_bp, lidar_transform, attach_to=vehicle)
self.point_list = o3d.geometry.PointCloud()
if semantic:
self.lidar.listen(lambda data: self.semantic_lidar_callback(data))
else:
self.lidar.listen(lambda data: self.lidar_callback(data))
self.vis = o3d.visualization.Visualizer()
self.vis.create_window(
window_name='Carla Lidar',
width=860, height=540,
left=600, top=600)
self.vis.get_render_option().background_color = [0.05, 0.05, 0.05]
self.vis.get_render_option().point_size = 1
self.vis.get_render_option().show_coordinate_frame = True
if show_axis:
self.add_open3d_axis()
def take_screenshot(self):
'''Take screenshot of Open3D window.
This should not be called every frame because this is quite slow.'''
if self.vis is not None:
date = str(int(time.time()))
filename = "open3d_" + date + ".png"
self.vis.capture_screen_image(filename)
def __init__(self):
super(Open3DLidarWindow, self).__init__()
self.original_settings = None
self.traffic_manager = None
self.startup_done = False
self.point_list = o3d.geometry.PointCloud()
self.lidar = None
self.vis = None
self.frame = 0
# lidar parameters
self.points_per_second = 300000
self.upper_fov = 15.0
self.lower_fov = -24.9
        self.channels = 32
self.range = 50.0
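if __name__ == "__main__":
    # Minimal usage sketch: assumes a CARLA server on localhost:2000 running
    # in synchronous mode with at least one vehicle already spawned; host,
    # port and the vehicle filter below are illustrative assumptions.
    client = carla.Client("localhost", 2000)
    client.set_timeout(5.0)
    world = client.get_world()
    vehicle = world.get_actors().filter("vehicle.*")[0]
    window = Open3DLidarWindow()
    window.setup(world, vehicle, show_axis=True, vehicle_name="car")
    try:
        while True:
            world.tick()      # advance the simulation one fixed step
            window.render()   # push the latest point cloud to the window
    finally:
        window.destroy()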
|
{"hexsha": "d6ac914868180e20871164d170ba0f630bc45c0e", "size": 9366, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonAPI/wintersim_examples/sensors/open3d_lidar_window.py", "max_stars_repo_name": "FrostBit-Software-Lab-Lapland-UAS/carla", "max_stars_repo_head_hexsha": "079d7c4057d4787568b2e1d3a4fd427c381779f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonAPI/wintersim_examples/sensors/open3d_lidar_window.py", "max_issues_repo_name": "FrostBit-Software-Lab-Lapland-UAS/carla", "max_issues_repo_head_hexsha": "079d7c4057d4787568b2e1d3a4fd427c381779f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonAPI/wintersim_examples/sensors/open3d_lidar_window.py", "max_forks_repo_name": "FrostBit-Software-Lab-Lapland-UAS/carla", "max_forks_repo_head_hexsha": "079d7c4057d4787568b2e1d3a4fd427c381779f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.464, "max_line_length": 118, "alphanum_fraction": 0.6179799274, "include": true, "reason": "import numpy", "num_tokens": 2508}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage import measure
from skimage.measure import compare_ssim
import seaborn as sns
import os
import datetime
import json
working_dir = os.getcwd()
os.makedirs(working_dir+"/plots", exist_ok=True)
plot_dir = working_dir+"/plots/"
data_dir = working_dir+"/data/"
output_json = {}
class Visualizer():
def __init__(self,inference_model):
self.K = inference_model.K
self.resolution = inference_model.resolution
self.XY_TrainLength = inference_model.XY_TrainLength
self.XY_TestLength = inference_model.XY_TestLength
self.mse = 0
self.ssim = 0
self.psnr = 0
self.accuracy = 0
def cache_clear(self):
self.mse = 0
self.ssim = 0
self.psnr = 0
self.accuracy = 0
def data_display(self,length,data,image_name,z=False,isTest=False):
for j in range(1):
plt.figure(figsize=(28, 28))
for i in range(length):
if isTest:
ax = plt.subplot(1, 10, i +j*length*2 + 1)
else:
ax = plt.subplot(10, 9, i +j*length*2 + 1)
if z:
plt.imshow(np.rot90(np.fliplr(data[i+j*length].reshape(self.K , ))),cmap="hot")
else:
plt.imshow(np.rot90(np.fliplr(data[i+j*length].reshape(self.resolution ,self.resolution ))),cmap="hot")
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig(plot_dir+image_name)
plt.clf()
def MSE_SSIM(self,length,original_data,reconstructed_data):
for i in range(length):
self.mse += np.square(np.subtract(original_data[i+0* length],reconstructed_data[i+0*length])).mean()
self.ssim += compare_ssim(original_data[i+0*length].reshape(self.resolution ,self.resolution ),reconstructed_data[i+0*length].reshape(self.resolution ,self.resolution ),multichannel=True)
self.mse = self.mse / length
self.ssim = self.ssim / length
print(" MSE : ",self.mse)
print(" SSIM : ",self.ssim)
def PSNR(self,length,original_data,reconstructed_data):
        total_metric_values = 0
        for i in range(length):
            metric = measure.compare_psnr(np.rot90(np.fliplr(reconstructed_data[i+0*length].reshape(self.resolution ,self.resolution ))),
                np.rot90(np.fliplr(original_data[i+0*length].reshape(self.resolution ,self.resolution ))))
            total_metric_values += metric  # accumulate across samples, not per iteration
        self.psnr = total_metric_values / length
print(" PSNR : ",self.psnr)
def MSE_Plotter(self,filename):
plt.clf()
plt.figure(figsize=(6,6))
Models=["TDNN", "BCCA", "DE-CNN","DGMM"]
Accuracy=[self.mse,0.11,0.09,0.05]
sx=sns.barplot(Models,Accuracy)
sx.set_xlabel("Comparison of Different Approaches")
sx.set_ylabel("Mean Squared Error")
sx.figure.savefig(plot_dir+filename)
output_json["MSE"]= {"Models":Models,"Accuracy":Accuracy}
def SSIM_Plotter(self,filename):
plt.clf()
plt.figure(figsize=(6,6))
Models=["TDNN", "BCCA", "DE-CNN","DGMM"]
Accuracy=[self.ssim,0.112,0.34,0.41]
sx=sns.barplot(Models,Accuracy)
sx.set_xlabel("Comparison of Different Approaches")
sx.set_ylabel("Structure Similarity Measure Index")
sx.figure.savefig(plot_dir+filename)
output_json["SSIM"]= {"Models":Models,"Accuracy":Accuracy}
def PSNR_Plotter(self,filename):
plt.clf()
plt.figure(figsize=(6,6))
Models=["TDNN", "BCCA", "DE-CNN","DGMM"]
PSNR=[self.psnr,self.psnr+3.251,self.psnr+4.856,self.psnr+2.41]
sx=sns.barplot(Models,PSNR)
sx.set_xlabel("Comparison of Different Approaches")
sx.set_ylabel("Peak Signal Noise Ratio")
sx.figure.savefig(plot_dir+filename)
output_json["PSNR"]= {"Models":Models,"Accuracy":PSNR}
def PCC_Plotter(self,original_file,reconstructed_file,filename):
plt.clf()
plt.figure(figsize=(6,6))
Models=["TDNN", "BCCA", "DE-CNN","DGMM"]
xtrain = pd.read_csv(data_dir+original_file)
xreconstruct = pd.read_csv(data_dir+reconstructed_file)
correlation = xtrain.corrwith(xreconstruct).mean()
Correlation=[correlation,0.35,0.48,0.5]
sx=sns.barplot(Models,Correlation)
sx.set_xlabel("Comparison of Different Approaches")
sx.set_ylabel("Pearson Correlation Coefficient")
sx.figure.savefig(plot_dir+filename)
output_json["PCC"]= {"Models":Models,"Accuracy":Correlation}
def training_metrics(self):
plt.clf()
plt.figure(figsize=(6,6))
Metrics=["MSE","SSIM","PSNR"]
Scores =[self.mse,self.ssim,self.psnr]
sx=sns.barplot(Metrics,Scores)
        sx.set_xlabel("Training Metrics")
sx.set_ylabel("Training Scores")
sx.figure.savefig(plot_dir+"Training_metrics.png")
output_json["Training"]= {"Models":Metrics,"Accuracy":Scores}
def plotter(self,inference_model,reconstructor):
original_data_X_train = inference_model.X_train
original_data_X_test = inference_model.X_test
reconstructed_data_X_train = reconstructor.X_reconstructed_train
reconstructed_data_X_test = reconstructor.X_reconstructed_test
original_file_X_train = "X_train.csv"
original_file_X_test = "X_test.csv"
reconstructed_file_X_train = "X_reconstructed_train.csv"
reconstructed_file_X_test = "X_reconstructed_test.csv"
self.MSE_SSIM(self.XY_TrainLength,original_data_X_train,reconstructed_data_X_train)
self.PSNR(self.XY_TrainLength,original_data_X_train,reconstructed_data_X_train)
self.training_metrics()
self.cache_clear()
self.MSE_SSIM(self.XY_TestLength,original_data_X_test,reconstructed_data_X_test)
self.PSNR(self.XY_TestLength,original_data_X_test,reconstructed_data_X_test)
self.MSE_Plotter("MSE.png")
self.SSIM_Plotter("SSIM.png")
self.PSNR_Plotter("PSNR.png")
self.PCC_Plotter(original_file_X_test,reconstructed_file_X_test,"PCC.png")
ct = datetime.datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
output_json["Iterations"] = inference_model.Iteration
output_json["Epochs"] = inference_model.Epoch
output = open(plot_dir+str(ct)+".json","w")
output.write(json.dumps(output_json))
output.close()
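if __name__ == "__main__":
    # Minimal usage sketch with a stub model: Visualizer only reads K,
    # resolution and the train/test lengths from the inference model
    # passed to it; the values below are illustrative assumptions.
    class _StubModel:
        K = 10
        resolution = 28
        XY_TrainLength = 9
        XY_TestLength = 9
    viz = Visualizer(_StubModel())
    fake = np.random.rand(9, 28 * 28)  # nine synthetic "images"
    viz.data_display(9, fake, "demo.png", isTest=True)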
|
{"hexsha": "5dd4c6c8637a67da1e39bc707a005cfc18a33176", "size": 6675, "ext": "py", "lang": "Python", "max_stars_repo_path": "Source Code/Back-end/visualizer.py", "max_stars_repo_name": "Arrow023/D6", "max_stars_repo_head_hexsha": "42468664c4568abb5c6135a6c4425e0aa63df4e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source Code/Back-end/visualizer.py", "max_issues_repo_name": "Arrow023/D6", "max_issues_repo_head_hexsha": "42468664c4568abb5c6135a6c4425e0aa63df4e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source Code/Back-end/visualizer.py", "max_forks_repo_name": "Arrow023/D6", "max_forks_repo_head_hexsha": "42468664c4568abb5c6135a6c4425e0aa63df4e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-16T10:09:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-16T10:09:26.000Z", "avg_line_length": 39.7321428571, "max_line_length": 199, "alphanum_fraction": 0.6435955056, "include": true, "reason": "import numpy", "num_tokens": 1639}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Wilson Resume/CV
% XeLaTeX Template
% Version 1.0 (22/1/2015)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Howard Wilson (https://github.com/watsonbox/cv_template_2004) with
% extensive modifications by Vel (vel@latextemplates.com)
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[10pt]{article} % Default font size
\input{structure.tex} % Include the file specifying document layout
%----------------------------------------------------------------------------------------
\begin{document}
%----------------------------------------------------------------------------------------
% NAME AND CONTACT INFORMATION
%----------------------------------------------------------------------------------------
\title{R. Ballard -- Résumé} % Print the main header
%------------------------------------------------
\parbox{0.5\textwidth}{ % First block
\begin{tabbing} % Enables tabbing
\hspace{3cm} \= \hspace{4cm} \= \kill % Spacing within the block
%{\bf Address} \> 123 Broadway,\\ % Address line 1
%\> City, 12345 \\ % Address line 2
%{\bf Date of Birth} \> 7$^{th}$ September 1979 \\ % Date of birth
%{\bf Nationality} \> British % Nationality
\end{tabbing}}
\hfill % Horizontal space between the two blocks
\parbox{0.5\textwidth}{ % Second block
\begin{tabbing} % Enables tabbing
\hspace{3cm} \= \hspace{4cm} \= \kill % Spacing within the block
%{\bf Home Phone} \> +0 (000) 111 1111 \\ % Home phone
%{\bf Mobile Phone} \> +0 (000) 111 1112 \\ % Mobile phone
{\bf Email} \> \href{mailto:russellcballard@gmail.com}{russellcballard@gmail.com} \\ % Email address
\end{tabbing}}
%----------------------------------------------------------------------------------------
% PERSONAL PROFILE
%----------------------------------------------------------------------------------------
\section{About}
Experienced consultant with expertise in software engineering, enterprise analytics, and large-scale data projects.
Interested in cloud and distributed computing architectures, generative art and design, and advanced analytics.
%------------------------------------------------
%----------------------------------------------------------------------------------------
% EMPLOYMENT HISTORY SECTION
%----------------------------------------------------------------------------------------
\section{Select Experience}
\job
{Jan 2019 -}{Present}
{Accenture Federal Services, 800 North Glebe Road Suite 700, Arlington, VA, United States}
{https://www.accenture.com/us-en/industries/afs-index}
{Management Consulting Delivery Specialist}
{Member of a FedScoop 50 (2018, 2019) and Fed 100 (2020) award-winning team of data science, engineering, and visualization specialists. Design, develop, implement, and augment USDA data reporting and analytics.
Utilize Boto, Spark, PySpark, Python, Pandas, Numpy, SciPy, geoPandas, Fiona, GDAL, Hive, Hadoop, Impala, SQL, Sqoop, Tableau, and various other big data, informatics, GIS, and software engineering frameworks to optimize high-throughput data analytics for critical infrastructure and monitoring regimens for SES and functional group federal clients. Work closely with teammates in an Agile environment utilizing Jira to research, prototype, and quickly implement best practices in virtualization, data aggregation, and statistical inference algorithmic design.\\
\rule{0mm}{5mm}\textbf{Technologies:} AWS, Enterprise RedHat Linux, Python, Cloudera, Hive, Impala, Hue, Tableau, Various Data Warehouse Technologies}
%------------------------------------------------
\job
{Oct 2018 -}{Jan 2019}
{Leidos, 11951 Freedom Dr, Reston, VA 20190, United States}
{https://www.leidos.com/}
{Data Scientist}
{Capture business requirements and rules, design the associated logical models, and create all required documentation to support, communicate, and validate data models. Analyze actual and predictable, interacting, operational activities of systems to obtain a quantitative, rational basis for decision-making through the application of logic and scientific or economic disciplines and techniques.
Devise modeling and measuring techniques, and utilize mathematics, statistical methods, engineering methods, operational mathematics techniques (linear programming, game theory, probability theory, symbolic language, etc.), and other principles and laws of scientific and economic disciplines to investigate complex issues, identify and solve problems, and aid better decision making.
Apply and/or develop highly advanced technologies, scientific principles, theories, and concepts to assist organizations in advancing performance and operating more efficiently. Assist in addressing requirements and the evaluation of data assessment strategies: sampling, statistical analysis, evaluation, flow processing, and management assessment strategies. Develop cost-benefit analysis, data collection, data analysis, risk analysis, simulation model execution, economic analysis, and operational effectiveness studies. \\
\rule{0mm}{5mm}\textbf{Technologies:} Python, Spyder, OBIEE, Visio, Microsoft Project}
%------------------------------------------------
\pagebreak
\job
{Jul 2016 -}{Oct 2018}
{FI Consulting, 1500 Wilson Blvd, Arlington, VA 22209 , United States}
{https://www.ficonsulting.com/}
{Consultant}
{Responsible for the development, validation, execution, and enhancement of the cash flow modeling for all of USDA RD’s credit programs in accordance with guidelines set by The Federal Credit Reform Act of 1990 and OMB Circular A-11. RD currently operates over 40 such credit programs, each with distinct regulations and program features with sum balances outstanding totaling over \$222 Billion. Responsible for conducting the cash flow analyses and subsidy rate calculations for these programs utilizing a variety of modeling tools and techniques to include: R, SAS, and VBA based models with spline-based regression and stochastic macroeconomic elements. The outputs of these cash flow models were leveraged by OMB in generating The President’s Budget as well as to estimate program subsidy rate re-estimates for the purposes of Congressional appropriations. Conducts ad-hoc analyses for newly created programs, mid-year modifications to existing programs or program loans, and in response to Senior RD OCFO queries.
Researched, documented, and provided strategic recommendations on the processes and organizational design underpinning critical Rural Development internal control and compliance functions. Gathered and codified process narratives, federal regulations and RD internal guidance documents, conducted a series of interviews with RD controls staff, senior stakeholders in RD’s OCFO, and senior finance and internal control management in several other Federal agencies for benchmarking purposes. Synthesized this information to develop a strategic plan used as a roadmap for consolidating several siloed parallel processes and internal control functions into a modern, integrative compliance and control environment with a continuous monitoring and reporting approach.
Coordinated validation efforts for the proofing of prose, scripts, and outputs used in Treasury OFR publications. Responsible for parsing Python scripts utilizing data science libraries such as: pandas, scikit-learn, numpy, etc. and data warehouse APIs (Bloomberg, Haver, etc.) as well as parallelization scripts for clustered computing execution using scheduling systems such as SLURM. Duties included validating the algorithms, analyses and underlying logic for OFR publication materials (analysis scripts, Excel Workbooks, etc) as well as verifying the conclusions and concepts discussed in OFR’s reports. Presented findings using markdown documents in Jupyter Notebooks.
As part of a pilot loan program designed to lend money to community development financial institutions throughout the country which would then relend the money for the construction or refurbishment of community facilities such as: libraries, schools, healthcare centers, etc. Designed and developed core processes for the reception, parsing, and distribution of program applicant materials to community development finance underwriting SMEs subcontracted from throughout the country. Provided analytic support and facilitative expertise to ensure the senior underwriting team received project resources in a timely manner, assist in conducting organizational credit risk studies on applicant CDFIs, and ensure that the compressed project timeline was adhered to. Reported summary information, documentation, and regular updates on progress to key project management and key stakeholders at USDA. The program’s initial run resulted in the disbursement of \$149.7 Million to underwritten applicants. After the initial program run, processes were revised and automated using R scripts and repository APIs, and modelling assumptions used in forecasting tools were calibrated using R to generate loss curves for Re-Lender program loans based on RD’s CF Direct Program.\\
\rule{0mm}{5mm}\textbf{Technologies:} Python, R, SAS, Spyder, Visio, Microsoft Project, Statistical Modelling}
%\begin{itemize-noindent}
%\item{Developed spreadsheets for risk analysis on exotic derivatives on a wide array of commodities (ags, oils, precious and base metals.)}
%\item{Managed blotter and secondary trades on structured notes, liaised with Middle Office, Sales and Structuring for bookkeeping.}
%\end{itemize-noindent}
%Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis elementum nec dolor sed sagittis.}
\pagebreak
%----------------------------------------------------------------------------------------
% EDUCATION SECTION
%----------------------------------------------------------------------------------------
\section{Education}
\tabbedblock{
\bf{2005-2010} \> BSc Applied Mathematics (Statistics, Operations Research) - \href{http://www.vcu.edu}{Virginia Commonwealth University} \\[5pt]
\textit{Summary and Inferential Statistics, Probability, Simulation, Data Analytics}
%\textit{Third Year Project - 89\% awarded `Project of the Year 2007'}
}
%----------------------------------------------------------------------------------------
% IT/COMPUTING SKILLS SECTION
%----------------------------------------------------------------------------------------
\section{Software Engineering Skills}
\skillgroup{Programming Languages}
{
\textit{Python}\\
\textit{R}\\
\textit{Bash}
}
%------------------------------------------------
%\skillgroup{Web Development}
%{
%\textit{HTML5, CSS3/SASS, JavaScript/CoffeeScript/jQuery}\\
%\textit{Ruby on Rails v3.1}\\
%\textit{Test:Unit, RSpec, Cucumber, Selenium} - automated testing frameworks\\
%\textit{Apache/Nginx Web Servers}\\
%}
%------------------------------------------------
%\skillgroup{Miscellaneous}
%{
%\textit{Microsoft SQL Server 2000/2005} - database architecture and administration\\
%\textit{Transact-SQL} - data definition and manipulation\\
%\textit{SQL Profiler} - performance tuning and debugging\\
%\textit{MySQL Server}\\
%\textit{CVS, DARCS, git} - source version control
%}
%----------------------------------------------------------------------------------------
% INTERESTS SECTION
%----------------------------------------------------------------------------------------
\section{Interests}
\interestsgroup{
\interest{Statistics}
\interest{Generative Art}
}
%----------------------------------------------------------------------------------------
% REFEREE SECTION
%----------------------------------------------------------------------------------------
%\section{Referees}
%\parbox{0.5\textwidth}{ % First block
%\begin{tabbing}
%\hspace{2.75cm} \= \hspace{4cm} \= \kill % Spacing within the block
%{\bf Name} \> Bill Lumbergh \\ % Referee name
%{\bf Company} \> Initech Inc. \\ % Referee company
%{\bf Position} \> Vice President \\ % Referee job title
%{\bf Contact} \> \href{mailto:bill@initech.com}{bill@initech.com} % Referee contact information
%\end{tabbing}}
%\hfill % Horizontal space between the two blocks
%\parbox{0.5\textwidth}{ % Second block
%\begin{tabbing}
%\hspace{2.75cm} \= \hspace{4cm} \= \kill % Spacing within the block
%{\bf Name} \> Michael "Big Mike" Tucker\\ % Referee name
%{\bf Company} \> Burbank Buy More \\ % Referee company
%{\bf Position} \> Store Manager \\ % Referee job title
%{\bf Contact} \> \href{mailto:mike@buymore.com}{mike@buymore.com} % Referee contact information
%\end{tabbing}}
%----------------------------------------------------------------------------------------
\end{document}
|
{"hexsha": "7978e3ec25b860f9402a086280223b6d05bcd5fb", "size": 12951, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "resume.tex", "max_stars_repo_name": "r-ballard/r-ballard.github.io", "max_stars_repo_head_hexsha": "55b3a750db1ea61b2414f328706f67efb29ec39a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "resume.tex", "max_issues_repo_name": "r-ballard/r-ballard.github.io", "max_issues_repo_head_hexsha": "55b3a750db1ea61b2414f328706f67efb29ec39a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-04-27T02:33:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-28T02:08:11.000Z", "max_forks_repo_path": "resume.tex", "max_forks_repo_name": "r-ballard/r-ballard.github.io", "max_forks_repo_head_hexsha": "55b3a750db1ea61b2414f328706f67efb29ec39a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 62.8689320388, "max_line_length": 1265, "alphanum_fraction": 0.6658945255, "num_tokens": 2658}
|
from ..common.event import EventDispatcher
from ..common.events import *
from ..engine.vector import Vector
from ..input.image_input import ImageInput
from multiprocessing import Process, Pipe, Pool
import copy
import time
import operator
import numpy as np
import math
def processYImage(img):
results = list()
img_height = img.shape[0]
img_width = img.shape[1]
img = img.reshape((img_width,img_height))
threshold = 150
min_length = 12
def isWithinBounds(position):
return position.x >= 0 and position.x < img_width and position.y >= 0 and position.y < img_height
def addPixel(pixel, size = Vector(1,1), direction = Key.DEBUG):
results.append(ImageInput(pixel, direction, size))
def clearArea(center, size):
img[center.y - size.y / 2: center.y + size.y / 2 + 1, center.x - size.x / 2:center.x + size.x / 2 + 1] = 0
def getValueFromArea(center, size):
return np.average(img[center.y - size.y / 2: center.y + size.y / 2 + 1, center.x - size.x / 2:center.x + size.x / 2 + 1])
def getValue(center):
if center.x < 0:
return 0
if center.x >= img_width:
return 0
if center.y < 0:
return 0
if center.y >= img_height:
return 0
return img[center.y][center.x]
def useWilburContour(start):
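        # Crude contour probe: scan down/right and down/left from `start` to
        # find the bottom-right and top-left extents of the bright region,
        # then estimate an axis-aligned (center, size) square for the blob.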
start_time = time.time()
cy = start.y
cx = start.x
min_x = 0
max_x = img_width - 1
min_y = cy
max_y = img_height - 1
# Find bottom right corner.
x = cx
for y in range(cy + 1, max_y + 1, +1):
if img[y][x] < threshold:
y -= 1
break
for x in range(x, max_x + 1, +1):
if img[y][x] < threshold:
if (x > min_x):
x -= 1
break
if (x > min_x):
x -= 1
for y in range(y, max_y + 1, +1):
if img[y][x] < threshold:
y -= 1
break
right = Vector(x - cx, y - cy)
# Find top left corner.
x = cx
for y in range(cy + 1, max_y + 1, +1):
if img[y][x] < threshold:
y -= 1
break
for x in range(x, min_x - 1, -1):
if img[y][x] < threshold:
if (x < max_x):
x += 1
break
if (x < max_x):
x += 1
for y in range(y, min_y - 1, -1):
if img[y][x] < threshold:
y += 1
break
left = Vector(x - cx, y - cy)
# Crudely calculate the length.
center = start + (left+right) / 2
length = math.sqrt(Vector.DistanceSqu(left, right) * 2.0)
if center.x - length / 2 < 0:
length = center.x * 2
if center.x + length / 2 > img_width - 1:
length = ((img_width - 1) - center.x) * 2
if center.y - length / 2 < 0:
length = center.y * 2
if center.y + length / 2 > img_height - 1:
length = ((img_height - 1) - center.y) * 2
length = int(length)
#print 'wilbur', time.time() - start_time
return (center, Vector(length, length))
def useSquareTracing(start):
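        # Square tracing: starting from a bright pixel, walk the blob
        # boundary, turning left on pixels at/above `threshold` and right
        # otherwise, growing a bounding box until the walk returns to the
        # start (or the 0.25 s time budget is exceeded).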
up = 'up'
right = 'right'
down = 'down'
left = 'left'
delta = {
up : Vector(0, -1),
down : Vector(0, 1),
left : Vector(-1, 0),
right : Vector(1, 0)
}
turn_left = {
up : left,
down : right,
left : down,
right : up
}
turn_right = {
up : right,
down : left,
left : up,
right : down
}
def onUp(position):
position.y -= 1
def onDown(position):
position.y += 1
def onLeft(position):
position.x -= 1
def onRight(position):
position.x += 1
onMove = {
up : onUp,
down : onDown,
left : onLeft,
right : onRight
}
top_left = copy.copy(start)
bot_right = copy.copy(start)
start_time = time.time()
direction = right
start_direction = direction
position = start
while(getValue(position + delta[direction]) < threshold):
direction = turn_right[direction]
if direction == start_direction:
return (start, Vector(1,1))
position += delta[direction]
#onMove[direction](position)
while(not position == start):
if (time.time() - start_time) > 0.25:
break
if (getValue(position) >= threshold):
if (position.y > bot_right.y):
bot_right.y = position.y
if (position.x < top_left.x):
top_left.x = position.x
if (position.x > bot_right.x):
bot_right.x = position.x
direction = turn_left[direction]
else:
direction = turn_right[direction]
#position += delta[direction]
onMove[direction](position)
#print time.time() - start_time
return ((top_left + bot_right) / 2, bot_right - top_left + Vector(3,3))
def useMooreNeighborTracing(start):
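        # Moore-neighbour tracing: after each boundary pixel, probe the
        # neighbours around it (rotating clockwise) until the next bright
        # pixel is found, growing the bounding box along the way
        # (0.1 s time budget).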
start_time = time.time()
up = 'up'
right = 'right'
down = 'down'
left = 'left'
delta = {
up : Vector(0, -1),
down : Vector(0, 1),
left : Vector(-1, 0),
right : Vector(1, 0)
}
turn_left = {
up : left,
down : right,
left : down,
right : up
}
turn_right = {
up : right,
down : left,
left : up,
right : down
}
top_left = copy.copy(start)
bot_right = copy.copy(start)
direction = right
position = start + delta[direction]
start_direction = copy.copy(direction)
while(getValue(position + delta[direction]) < threshold):
direction = turn_right[direction]
if direction == start_direction:
return (start, Vector(1,1))
while(not position == start):
if (time.time() - start_time) > 0.1:
break
if (getValue(position) >= threshold):
if (position.y > bot_right.y):
bot_right.y = position.y
if (position.x < top_left.x):
top_left.x = position.x
if (position.x > bot_right.x):
bot_right.x = position.x
direction = turn_left[direction]
start_position = position
position = start_position + delta[direction]
while(getValue(position) < threshold):
direction = turn_right[direction]
position = start_position + delta[direction]
return ((top_left + bot_right) / 2, bot_right - top_left + Vector(2,2))
start_time = time.time()
cycles = 0
while True:
if (time.time() - start_time) > 2.0:
break
cycles += 1
candidates = np.argwhere(img >= threshold)
if len(candidates) == 0:
break
candidate = candidates[0]
cy = candidate[0]
cx = candidate[1]
# Stop processing if the newest value is at the bottom.
if (cy > img_height - min_length):
break
#useMooreNeighborTracing(Vector(cx, cy))
(center, size) = useSquareTracing(Vector(cx, cy))
#(center, size) = useWilburContour(Vector(cx, cy))
y = center.y
x = center.x
if (size.x <= min_length or size.y <= min_length):
clearArea(center, size)
continue
step = 2
value = img[y][x]
value_threshold = threshold / 2
step_limit = min_length - step
if (y - step_limit < 0 or x - step_limit < 0 or x + step_limit >= img_width):
clearArea(center, size)
continue
for delta in range(0, step_limit):
if abs(value - int(img[y - delta][x])) > value_threshold:
y = y - delta
break
if abs(value - int(img[y][x - delta])) > value_threshold:
x = x - delta
break
if abs(value - int(img[y][x + delta])) > value_threshold:
x = x + delta
break
top = getValueFromArea(Vector(x,y - step), Vector(step * 2, 0))
bottom = getValueFromArea(Vector(x,y + step), Vector(step * 2, 0))
left = getValueFromArea(Vector(x - step,y), Vector(0, step * 2))
right = getValueFromArea(Vector(x + step,y), Vector(0, step * 2))
min_value = min(top, bottom, left, right)
key_direction = None
if min_value < threshold and min_value > 0.0:
if (top == min_value):
key_direction = Key.UP
elif (bottom == min_value):
key_direction = Key.DOWN
elif (left == min_value):
key_direction = Key.LEFT
elif (right == min_value):
key_direction = Key.RIGHT
if not (key_direction == None):
addPixel(center, size, key_direction)
clearArea(center, size)
if (time.time() - start_time > 1.0):
        print(cycles, time.time() - start_time)
return results
def yImageWorker(pipe):
main_conn, worker_conn = pipe
while True:
data = worker_conn.recv()
if data == ImageProcess.END_MESSAGE:
            break
result = processYImage(data.data)
worker_conn.send((data.timestamp, result))
class ImageProcess(object):
END_MESSAGE = 'END'
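    # Design note: two worker processes each run yImageWorker over a
    # dedicated Pipe; a frame is only sent when that worker is idle, and
    # results are drained in update(), so image processing never blocks
    # the main loop.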
def __init__(self):
EventDispatcher().add_event_listener(YImageEvent.TYPE, self.onYImageEvent)
self._main1_conn, self._worker1_conn = Pipe()
self._worker1_ready = True
self._worker1 = Process(target=yImageWorker, args=((self._main1_conn, self._worker1_conn),))
self._worker1.daemon = True
self._worker1.start()
self._main2_conn, self._worker2_conn = Pipe()
self._worker2_ready = True
self._worker2 = Process(target=yImageWorker, args=((self._main2_conn, self._worker2_conn),))
self._worker2.daemon = True
self._worker2.start()
def onYImageEvent(self, event):
if self._worker1_ready:
self._worker1_ready = False
self._main1_conn.send(event.data()[0])
if self._worker2_ready:
self._worker2_ready = False
self._main2_conn.send(event.data()[1])
def stop(self):
self._main1_conn.send(ImageProcess.END_MESSAGE)
while self._main1_conn.poll():
self._main1_conn.recv()
self._main2_conn.send(ImageProcess.END_MESSAGE)
while self._main2_conn.poll():
self._main2_conn.recv()
self._worker1.join()
self._worker2.join()
def update(self):
if self._main1_conn.poll():
data = self._main1_conn.recv()
self._worker1_ready = True
EventDispatcher().dispatch_event(LatencyEvent(LatencyEvent.P1_PROCESSING, data[0]))
EventDispatcher().dispatch_event(CameraResultEvent(CameraResultEvent.P1, data[1]))
if self._main2_conn.poll():
data = self._main2_conn.recv()
self._worker2_ready = True
EventDispatcher().dispatch_event(LatencyEvent(LatencyEvent.P2_PROCESSING, data[0]))
EventDispatcher().dispatch_event(CameraResultEvent(CameraResultEvent.P2, data[1]))
|
{"hexsha": "9c488797e2e806780635fdd0dd42a31bbc872fb1", "size": 11883, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/input/image_process.py", "max_stars_repo_name": "yuwilbur/birthday29", "max_stars_repo_head_hexsha": "7a2c8069639b27b20bc0903d2cf6c212b398b4d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/input/image_process.py", "max_issues_repo_name": "yuwilbur/birthday29", "max_issues_repo_head_hexsha": "7a2c8069639b27b20bc0903d2cf6c212b398b4d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/input/image_process.py", "max_forks_repo_name": "yuwilbur/birthday29", "max_forks_repo_head_hexsha": "7a2c8069639b27b20bc0903d2cf6c212b398b4d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4434782609, "max_line_length": 129, "alphanum_fraction": 0.5227636119, "include": true, "reason": "import numpy", "num_tokens": 2877}
|
'''
Created on
@author: Mark
'''
import random
import numpy
from scipy import linalg
def uniformPoints(size):
return [(random.uniform(-1., 1.), random.uniform(-1., 1.)) for i in range(size)]
class Coin(object):
def __init__(self, p=.5):
self.p = p
def flip(self):
return random.random() >= self.p
def flipN(self, n):
return sum([self.flip() for i in range(n)])/float(n)
class Dataset(object):
def __init__(self, points):
self.points = uniformPoints(points)
self.line = uniformPoints(2)
def sign (self, point):
(x1, y1), (x2, y2) = self.line
res = ((x2 - x1) * (point[1] - y1)) - ((y2 - y1) * (point[0] - x1))
if res >= 0:
return 1
else:
return -1
class LinearRegression(object):
def __init__(self, dataset):
self.set(dataset)
def set(self, dataset):
self.dataset = dataset
def getw(self):
X = numpy.array([[1, x, y] for (x, y) in self.dataset.points])
Y = numpy.array([[self.dataset.sign(point)] for point in self.dataset.points])
return linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)
def err(self):
X = numpy.array([[1, x, y] for (x, y) in self.dataset.points])
Y = numpy.array([[self.dataset.sign(point)] for point in self.dataset.points])
return X.dot(self.getw()), Y
def outerr(self, w):
X = numpy.array([[1, x, y] for (x, y) in self.dataset.points])
Y = numpy.array([[self.dataset.sign(point)] for point in self.dataset.points])
return X.dot(w), Y
def misplaced(self, w):
X = numpy.array([[1, x, y] for (x, y) in self.dataset.points])
Y = numpy.array([[self.dataset.sign(point)] for point in self.dataset.points])
return numpy.array([(point, y) for point, h, y in zip(X, X.dot(w), Y) if h >= 0 and y < 0 or h < 0 and y >= 0])
def sign(a, b=0):
if a>=0 and b<0 or a<0 and b>=0:
return -1
return 1
class Function(object):
def apply(self, X):
pass
class LineFunction(Function):
def __init__(self, line):
self.f = numpy.cross([1, line[0][0], line[0][1]], [1, line[1][0], line[1][1]])
def apply(self, X):
return [sign(x) for x in X.dot(self.f)]
class NonLinearFunction(Function):
def prob(self, p):
return -1 if random.random() < p else 1
def comp(self, x0, x1, x2):
return x1*x1 + x2*x2 - 0.6
def apply(self, X):
return [self.prob(.1) * self.comp(*x) for x in X]
class VSet(object):
def __init__(self, dataset, function=None):
        if function is None:
function = LineFunction(dataset.line)
self.function = function
self.X = numpy.array([[1, x1, x2] for (x1, x2) in dataset.points])
self.Y = self.function.apply(self.X)
def regression(self):
X = self.X; Y = self.Y
return linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)
def misplaced(self, w):
return [(x, y) for x, y in zip(self.X, self.Y) if sign(x.dot(w), y) < 0]
    def perceptron(self, w=None, max=1000):
        # avoid a shared mutable default argument: w is updated in place below
        if w is None:
            w = numpy.zeros(3)
        # print(w/w[0])
X = self.X; Y = self.Y
for n in range(max):
incorrect = self.misplaced(w)
if len(incorrect) == 0:
return w, n+1
(x, y) = random.choice(incorrect)
w += y * x
return w, max
def inSampleError(self, g):
return len(self.misplaced(g))/float(len(self.X))
def outOfSampleError(self, g, n):
v = VSet(Dataset(n),self.function)
return v.inSampleError(g)
def transform(self):
self.X = numpy.array([[1, x1, x2, x1*x2, x1*x1, x2*x2] for (x0, x1, x2) in self.X])
class NonlinearDataset(Dataset):
def sign (self, point):
x1, x2 = point
res = (x1*x1 + x2*x2 - 0.6)
if res >= 0:
return 1
else:
return -1
def test(number):
v = VSet(Dataset(1000), NonLinearFunction())
w = v.regression()
# x = v.X[number]
return number, v.inSampleError(w), number
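if __name__ == "__main__":
    # Minimal usage sketch: fit linear regression on a random linearly
    # separable dataset and report in- and out-of-sample classification
    # error; the sizes and seed are illustrative assumptions.
    random.seed(0)
    v = VSet(Dataset(100))
    w = v.regression()
    print("E_in :", v.inSampleError(w))
    print("E_out:", v.outOfSampleError(w, 1000))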
|
{"hexsha": "52242fa5e7e356aeb079f873c74f6438299910a6", "size": 3972, "ext": "py", "lang": "Python", "max_stars_repo_path": "Final/Python/by_Mark_B2/hw2.py", "max_stars_repo_name": "freeernest/edX-Learning-From-Data-Solutions", "max_stars_repo_head_hexsha": "5cbcf0885b5fdb00c3658d230fc7bb7e20b5cf44", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 79, "max_stars_repo_stars_event_min_datetime": "2015-01-27T11:09:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T12:01:35.000Z", "max_issues_repo_path": "Final/Python/by_Mark_B2/hw2.py", "max_issues_repo_name": "freeernest/edX-Learning-From-Data-Solutions", "max_issues_repo_head_hexsha": "5cbcf0885b5fdb00c3658d230fc7bb7e20b5cf44", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-25T05:45:11.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-04T14:44:32.000Z", "max_forks_repo_path": "Final/Python/by_Mark_B2/hw2.py", "max_forks_repo_name": "freeernest/edX-Learning-From-Data-Solutions", "max_forks_repo_head_hexsha": "5cbcf0885b5fdb00c3658d230fc7bb7e20b5cf44", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 40, "max_forks_repo_forks_event_min_datetime": "2015-04-06T18:43:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T18:08:40.000Z", "avg_line_length": 27.0204081633, "max_line_length": 115, "alphanum_fraction": 0.5707452165, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1207}
|
\documentclass{article}
\usepackage{a4wide}
\usepackage{amsmath}
\usepackage[colorlinks=true,linkcolor=blue,citecolor=blue]{hyperref}
\usepackage[sort&compress,comma,authoryear]{natbib}
\newcommand{\Prob}[0]{\mbox{Prob}}
\newcommand{\ul}[1]{\underline{#1}}
\newcommand{\ol}[1]{\overline{#1}}
\usepackage{bm}
\usepackage{latexsym}
\title{DEB model description: 'hex'}
%\author{S.A.L.M. Kooijman and K. Lika and S. Augustine and N. Marn and others?}
\begin{document}
\maketitle
This document specifies the standard DEB model 'hex'.
It is the {\sc deb} model for holometabolic insects (and some other hexapods). Its characteristics are:
\begin{description}
\item[$\circ$] morphological life stages: egg, larva, (pupa), imago; functional stages: embryo, adult, (pupa), imago
\item[$\circ$] the embryo still behaves like the std-model
\item[$\circ$] the larval stage accelerates (V1-morph) and behaves as an adult, i.e.\ no maturation but allocation to reproduction, and $E_H^b = E_H^p$.
\item[$\circ$] pupation occurs when reproduction buffer density hits a threshold, $E_R/ L^3 = [E_R^j]$
\item[$\circ$] pupa behaves like an isomorphic embryo of the std-model; emergence occurs at $E_H = E_H^e$.
Larval structure rapidly transforms to pupal reserve just after start of pupation, and $E_H$ is reset to $0$ at $j$.
\item[$\circ$] the reproduction buffer remains unchanged during the pupal stage
\item[$\circ$] the imago does not grow or allocate to reproduction.
The imago's reserve mobilisation matches somatic plus maturity maintenance: $\dot{p}_C = \dot{p}_M + \dot{p}_J$.
\end{description}
Hemi-metabolic insects skip the pupal stage and do not convert larval structure to reserve.
Imago structure equals larval structure when reproduction buffer density hits a threshold.
The model is discussed in the comments for Section 7.8.
For $\dot{k}_E = \dot{v}/ L_b$, reserve mobilisation prior to pupation (i.e. during acceleration) is $\dot{p}_C = E (\dot{k}_E - \dot{r})$
with $\dot{r} = \frac{\kappa [E] \dot{k}_E - [\dot{p}_M]} {\kappa [E] + [E_G]} = g \dot{k}_M \frac{e/ l_b - 1} {e + g}$.
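Dividing numerator and denominator by $\kappa [E_m]$ and writing the standard compound parameters $e = [E]/[E_m]$, $g = [E_G]/(\kappa [E_m])$ and $\dot{k}_M = [\dot{p}_M]/[E_G]$, this specific growth rate reads $\dot{r} = \frac{e \dot{k}_E - g \dot{k}_M}{e + g}$; the second equality then follows from $\dot{k}_E = \dot{v}/L_b = g \dot{k}_M/l_b$.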
The larva allocates to reproduction as $\dot{p}_R = (1- \kappa) \dot{p}_C - \dot{p}_J$, with $\dot{p}_J = \dot{k}_J E_H^p$.
$[E_R]$ has a maximum at $[E_R^m] = [E_R^{\mbox{\tiny ref}}] f \frac{1 - l_b} {f - l_b}$, with $[E_R^{\mbox{\tiny ref}}] = (1 - \kappa) [E_m] \frac{g + l_b} {1 - l_b}$, so pupation occurs when $[E_R] = s_j [E_R^{\mbox{\tiny ref}}]$, with $s_j = [E_R^j]/ [E_R^{\mbox{\tiny ref}}]$.
Reserve mobilisation of the imago is $\dot{p}_C = \dot{p}_M^e + \dot{p}_J^e$, where $\dot{p}_M^e = [\dot{p}_M] L_e^3$ and $\dot{p}_J^e = \dot{k}_J E_H^e$.
\section{Background}
\input{model_intro}
\input{std_description}
\bibliographystyle{apalike}
\bibliography{debmodels}
\end{document}
|
{"hexsha": "942c7303379e3be8cbed8e5f5f0bd1dbde36b58f", "size": 2798, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/models/hex.tex", "max_stars_repo_name": "add-my-pet/AmPtool-new", "max_stars_repo_head_hexsha": "f60cdd10c65c0190ca41ba6fbd4d5018c5aea771", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/models/hex.tex", "max_issues_repo_name": "add-my-pet/AmPtool-new", "max_issues_repo_head_hexsha": "f60cdd10c65c0190ca41ba6fbd4d5018c5aea771", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/models/hex.tex", "max_forks_repo_name": "add-my-pet/AmPtool-new", "max_forks_repo_head_hexsha": "f60cdd10c65c0190ca41ba6fbd4d5018c5aea771", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.868852459, "max_line_length": 278, "alphanum_fraction": 0.6940671909, "num_tokens": 961}
|
[STATEMENT]
lemma line_through_K2_intersect_S_again:
assumes "p \<in> K2" and "proj2_incident p l"
shows "\<exists> r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
from \<open>p \<in> K2\<close> and \<open>proj2_incident p l\<close>
and line_through_K2_intersect_S_twice [of p l]
[PROOF STATE]
proof (chain)
picking this:
p \<in> hyp2
proj2_incident p l
\<lbrakk>p \<in> hyp2; proj2_incident p l\<rbrakk> \<Longrightarrow> \<exists>q r. q \<noteq> r \<and> q \<in> S \<and> r \<in> S \<and> proj2_incident q l \<and> proj2_incident r l
[PROOF STEP]
obtain s and t where "s \<noteq> t" and "s \<in> S" and "t \<in> S"
and "proj2_incident s l" and "proj2_incident t l"
[PROOF STATE]
proof (prove)
using this:
p \<in> hyp2
proj2_incident p l
\<lbrakk>p \<in> hyp2; proj2_incident p l\<rbrakk> \<Longrightarrow> \<exists>q r. q \<noteq> r \<and> q \<in> S \<and> r \<in> S \<and> proj2_incident q l \<and> proj2_incident r l
goal (1 subgoal):
1. (\<And>s t. \<lbrakk>s \<noteq> t; s \<in> S; t \<in> S; proj2_incident s l; proj2_incident t l\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
s \<noteq> t
s \<in> S
t \<in> S
proj2_incident s l
proj2_incident t l
goal (1 subgoal):
1. \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
show "\<exists> r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
proof cases
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?P \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
2. \<not> ?P \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
assume "t = q"
[PROOF STATE]
proof (state)
this:
t = q
goal (2 subgoals):
1. ?P \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
2. \<not> ?P \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
with \<open>s \<noteq> t\<close> and \<open>s \<in> S\<close> and \<open>proj2_incident s l\<close>
[PROOF STATE]
proof (chain)
picking this:
s \<noteq> t
s \<in> S
proj2_incident s l
t = q
[PROOF STEP]
have "s \<noteq> q \<and> s \<in> S \<and> proj2_incident s l"
[PROOF STATE]
proof (prove)
using this:
s \<noteq> t
s \<in> S
proj2_incident s l
t = q
goal (1 subgoal):
1. s \<noteq> q \<and> s \<in> S \<and> proj2_incident s l
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
s \<noteq> q \<and> s \<in> S \<and> proj2_incident s l
goal (2 subgoals):
1. ?P \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
2. \<not> ?P \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
thus "\<exists> r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l"
[PROOF STATE]
proof (prove)
using this:
s \<noteq> q \<and> s \<in> S \<and> proj2_incident s l
goal (1 subgoal):
1. \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
\<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
goal (1 subgoal):
1. t \<noteq> q \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. t \<noteq> q \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
assume "t \<noteq> q"
[PROOF STATE]
proof (state)
this:
t \<noteq> q
goal (1 subgoal):
1. t \<noteq> q \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
with \<open>t \<in> S\<close> and \<open>proj2_incident t l\<close>
[PROOF STATE]
proof (chain)
picking this:
t \<in> S
proj2_incident t l
t \<noteq> q
[PROOF STEP]
have "t \<noteq> q \<and> t \<in> S \<and> proj2_incident t l"
[PROOF STATE]
proof (prove)
using this:
t \<in> S
proj2_incident t l
t \<noteq> q
goal (1 subgoal):
1. t \<noteq> q \<and> t \<in> S \<and> proj2_incident t l
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
t \<noteq> q \<and> t \<in> S \<and> proj2_incident t l
goal (1 subgoal):
1. t \<noteq> q \<Longrightarrow> \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
thus "\<exists> r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l"
[PROOF STATE]
proof (prove)
using this:
t \<noteq> q \<and> t \<in> S \<and> proj2_incident t l
goal (1 subgoal):
1. \<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
\<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>r. r \<noteq> q \<and> r \<in> S \<and> proj2_incident r l
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2350, "file": "Tarskis_Geometry_Hyperbolic_Tarski", "length": 21}
|
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import CustomJS, Slider
from bokeh.plotting import ColumnDataSource, figure, output_file, show
def behroozi10(logmstar, theta):
"""
    This function calculates the Behroozi et al. 2010 (B10) stellar-to-halo
    mass relation, returning log halo mass for a given log stellar mass
    using their best-fit functional form.
"""
M_1, Mstar_0, beta, delta = theta[:4]
gamma = 1.56
second_term = (beta*np.log10((10**logmstar)/(10**Mstar_0)))
third_term_num = (((10**logmstar)/(10**Mstar_0))**delta)
third_term_denom = (1 + (((10**logmstar)/(10**Mstar_0))**(-gamma)))
logmh = M_1 + second_term + (third_term_num/third_term_denom) - 0.5
return logmh
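# For reference, the functional form implemented above (cf. Behroozi et al.
# 2010) is:
#   log10(M_h) = M_1 + beta * log10(M*/Mstar_0)
#                + (M*/Mstar_0)**delta / (1 + (M*/Mstar_0)**(-gamma)) - 0.5
# with (M_1, Mstar_0, beta, delta) read from theta and gamma fixed at 1.56.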
mstar_min = np.round(np.log10((10**8)/2.041),1)
mstar_max = np.round(np.log10((10**12)/2.041),1)
logmstar = np.linspace(mstar_min, mstar_max, 500)
theta = [12.35, 10.72, 0.44, 0.57, 0.15]
logmh = behroozi10(logmstar, theta)
source = ColumnDataSource(data=dict(x=logmh, y=logmstar))
plot = figure(plot_width=950, plot_height=600, x_range=(10.4, 16))
plot.line('x', 'y', source=source, line_width=5, line_alpha=0.6)
plot.xaxis.axis_label = "Halo Mass"
plot.yaxis.axis_label = "Stellar Mass"
# Param sliders
mhalo_slider = Slider(start=10, end=13, value=theta[0], step=.2,
title="Characteristic halo mass")
mstellar_slider = Slider(start=8, end=11, value=theta[1], step=.2,
title="Characteristic stellar mass")
lowslope_slider = Slider(start=0.2, end=0.7, value=theta[2], step=.05,
title="Low mass slope")
highslope_slider = Slider(start=0.4, end=0.8, value=theta[3], step=.05,
title="High mass slope")
# scatter_slider = Slider(start=0.1, end=0.5, value=theta[4], step=.01,
# title="Scatter")
callback = CustomJS(args=dict(source=source, mhalo=mhalo_slider,
mstellar=mstellar_slider, lowslope=lowslope_slider,
highslope=highslope_slider),
code="""
const data = source.data;
const M_1 = mhalo.value;
const Mstar_0 = mstellar.value;
const beta = lowslope.value;
const delta = highslope.value;
const gamma = -1.56;
const x = data['x'];
const y = data['y'];
for (var i = 0; i < y.length; i++) {
const second_term = beta * Math.log10((Math.pow(10,y[i]))/(Math.pow(10,Mstar_0)));
const third_term_num = Math.pow(((Math.pow(10,y[i]))/(Math.pow(10,Mstar_0))),delta);
const third_term_denom = 1 + (Math.pow(((Math.pow(10, y[i]))/(Math.pow(10,Mstar_0))),gamma));
x[i] = M_1 + second_term + (third_term_num/third_term_denom) - 0.5
}
source.change.emit();
""")
mhalo_slider.js_on_change('value', callback)
mstellar_slider.js_on_change('value', callback)
lowslope_slider.js_on_change('value', callback)
highslope_slider.js_on_change('value', callback)
# scatter_slider.js_on_change('value', callback2)
layout = row(
plot,
column(mhalo_slider, mstellar_slider, lowslope_slider, highslope_slider),
)
output_file("../../../../../Interactive_Models/behroozi10_smhm.html",
title="Behroozi10 SMHM")
show(layout)
|
{"hexsha": "cc8a23766add72765de9015a19900d1e8943e43a", "size": 3012, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tutorials/smhm_widget.py", "max_stars_repo_name": "MehnaazAsad/RESOLVE_Statistics", "max_stars_repo_head_hexsha": "a7bdcc896ca2c51ab3417c46f07efe8c16825597", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-22T02:18:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-22T02:18:55.000Z", "max_issues_repo_path": "src/tutorials/smhm_widget.py", "max_issues_repo_name": "MehnaazAsad/RESOLVE_Statistics", "max_issues_repo_head_hexsha": "a7bdcc896ca2c51ab3417c46f07efe8c16825597", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tutorials/smhm_widget.py", "max_forks_repo_name": "MehnaazAsad/RESOLVE_Statistics", "max_forks_repo_head_hexsha": "a7bdcc896ca2c51ab3417c46f07efe8c16825597", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-22T02:27:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-22T02:27:49.000Z", "avg_line_length": 35.8571428571, "max_line_length": 101, "alphanum_fraction": 0.6826029216, "include": true, "reason": "import numpy", "num_tokens": 942}
|
import moderngl
import numpy as np
from pyrr import Matrix44
class Renderer:
def __init__(self, ctx):
self.program = ctx.program(vertex_shader="""
#version 330
uniform mat4 model;
in vec2 in_vert;
in vec3 in_color;
out vec3 color;
void main() {
gl_Position = model * vec4(in_vert, 0.0, 1.0);
color = in_color;
}
""",
fragment_shader="""
#version 330
in vec3 color;
out vec4 fragColor;
void main() {
fragColor = vec4(color, 1.0);
}
""")
self.update_vertices(ctx)
    def update_vertices(self, ctx, vertices=None):
        # Default to a single colored triangle; use None instead of a
        # mutable default argument.
        if vertices is None:
            vertices = [
                -0.6, -0.6,
                1.0, 0.0, 0.0,
                0.6, -0.6,
                0.0, 1.0, 0.0,
                0.0, 0.6,
                0.0, 0.0, 1.0,
            ]
self.vertices = np.array(vertices, dtype='f4')
self.vbo = ctx.buffer(self.vertices)
self.vao = ctx.simple_vertex_array(
self.program, self.vbo, 'in_vert', 'in_color')
self.fbo = ctx.framebuffer(
color_attachments=[ctx.texture((512, 512), 4)])
self.fbo.use()
ctx.clear()
self.program['model'].write(
Matrix44.from_eulers((0.0, 0.1, 0.0), dtype='f4'))
self.vao.render(moderngl.TRIANGLES)
def image(self):
data = self.fbo.read(components=3)
        img_np = np.frombuffer(data, dtype=np.uint8)  # pixel data is unsigned bytes
        img_np = np.reshape(img_np, newshape=(
            self.fbo.size[1], self.fbo.size[0], 3))  # rows = height, cols = width
return img_np
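if __name__ == "__main__":
    # Minimal offscreen usage sketch: assumes a headless-capable GL driver
    # so a standalone context can be created; renders one frame and checks
    # the returned image shape.
    ctx = moderngl.create_standalone_context()
    renderer = Renderer(ctx)
    print(renderer.image().shape)  # expected: (512, 512, 3)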
|
{"hexsha": "7689aac072df1802ec5e3ebce396a86dffe7f9bc", "size": 1639, "ext": "py", "lang": "Python", "max_stars_repo_path": "Vtuber-breadcrumbs/handsTracking/renderer.py", "max_stars_repo_name": "kirinokirino/Github-cleanup", "max_stars_repo_head_hexsha": "f1e5239536d4233aa86b1427420ec0a9915aada1", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Vtuber-breadcrumbs/handsTracking/renderer.py", "max_issues_repo_name": "kirinokirino/Github-cleanup", "max_issues_repo_head_hexsha": "f1e5239536d4233aa86b1427420ec0a9915aada1", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Vtuber-breadcrumbs/handsTracking/renderer.py", "max_forks_repo_name": "kirinokirino/Github-cleanup", "max_forks_repo_head_hexsha": "f1e5239536d4233aa86b1427420ec0a9915aada1", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2586206897, "max_line_length": 62, "alphanum_fraction": 0.4972544234, "include": true, "reason": "import numpy", "num_tokens": 448}
|
from __future__ import annotations
import networkx as nx
from .node import VNode, CNode
import numpy as np
import numpy.typing as npt
from typing import Callable
class TannerGraph:
def __init__(self):
self.v_nodes: dict[int, VNode] = {}
self.c_nodes: dict[int, CNode] = {}
self.edges = set()
def add_v_node(self, channel_model: Callable, ordering_key: int, name: str = "") -> VNode:
"""
        :param channel_model: callable used by the v-node to compute channel symbol llr values
        :param ordering_key: should reflect the column order of the parity check matrix (channel symbols in order)
        :param name: name of the node
"""
node = VNode(channel_model, ordering_key, name)
self.v_nodes[node.uid] = node
return node
    def add_c_node(self, name: str = "", ordering_key: int | None = None) -> CNode:
"""
:param ordering_key: use only for debug purposes
:param name: name of node
"""
node = CNode(name, ordering_key)
self.c_nodes[node.uid] = node
return node
def add_edge(self, vnode_uid: int, cnode_uid: int) -> None:
if vnode_uid not in self.v_nodes:
raise ValueError()
if cnode_uid not in self.c_nodes:
raise ValueError()
self.c_nodes.get(cnode_uid).register_neighbor(self.v_nodes.get(vnode_uid))
self.v_nodes.get(vnode_uid).register_neighbor(self.c_nodes.get(cnode_uid))
self.edges.update({(vnode_uid, cnode_uid)})
def add_edges_by_uid(self, edges_set: set[tuple[int, int]]) -> None:
"""
:param edges_set: each element in the set is a tuple. In the tuple first element is a v-node uid and second is
c-node uid
"""
for v_uid, c_uid in edges_set:
if v_uid not in self.v_nodes:
raise ValueError("No v-node with uid " + str(v_uid) + " in graph")
if c_uid not in self.c_nodes:
raise ValueError("No c-node with uid " + str(c_uid) + " in graph")
self.add_edge(v_uid, c_uid)
def add_edges_by_name(self, edges_set: set[tuple[str, str]]) -> None:
"""
:param edges_set: each element in the set is a tuple. In the tuple first element is a v-node name and second is
c-node name
"""
for v_name, c_name in edges_set:
v_uid = [node.uid for node in self.v_nodes.values() if node.name == v_name]
if not v_uid:
raise ValueError("No v-node with name " + v_name + " in graph")
c_uid = [node.uid for node in self.c_nodes.values() if node.name == c_name]
if not c_uid:
raise ValueError("No c-node with name " + c_name + " in graph")
self.add_edge(v_uid[0], c_uid[0])
def get_edges(self, by_name=False) -> set:
"""
:param by_name: if true nodes are referred to by name, otherwise by uid. Default to false
:return: returns a set of edges. if by_name each element is a tuple of node names, else it is a tuple of uid's.
"""
if not by_name:
return self.edges
return {(self.v_nodes.get(vnode).name, self.c_nodes.get(cnode).name) for vnode, cnode in self.edges}
def to_nx(self) -> nx.Graph:
g = nx.Graph()
for uid, node in self.c_nodes.items():
g.add_node(uid, label=node.name, bipartite=0, color="blue")
for uid, node in self.v_nodes.items():
g.add_node(uid, label=node.name, bipartite=1, color="red")
g.add_edges_from(self.edges)
return g
@classmethod
def from_biadjacency_matrix(cls, h: npt.ArrayLike, channel_model: Callable) -> TannerGraph:
"""
        :param channel_model: channel model used to compute channel-symbol LLRs within v-nodes
        :param h: parity check matrix of shape M×N, with M check nodes and N variable nodes; assumed to be binary.
"""
g = TannerGraph()
h = np.array(h)
m, n = h.shape
for i in range(n):
g.add_v_node(name="v" + str(i), channel_model=channel_model, ordering_key=i)
for j in range(m):
g.add_c_node(name="c" + str(j), ordering_key=j)
for i in range(n):
if h[j, i] == 1:
g.add_edges_by_name({("v" + str(i), "c" + str(j))})
return g
def __str__(self) -> str:
return "Graph with " + str(len(self.c_nodes) + len(self.v_nodes)) + " nodes and " + str(len(self.edges)) + \
" edges"
def ordered_v_nodes(self) -> list[VNode]:
return sorted(self.v_nodes.values())
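# Hedged usage sketch (added): build a small Tanner graph from a 2x3 parity
# check matrix. VNode/CNode come from package-relative imports, so this only
# runs inside the package; the identity `channel_model` is an assumption --
# any callable accepted by VNode would do.
if __name__ == "__main__":
    h = [[1, 1, 0],
         [0, 1, 1]]
    g = TannerGraph.from_biadjacency_matrix(h, channel_model=lambda llr: llr)
    print(g)                          # Graph with 5 nodes and 4 edges
    print(g.get_edges(by_name=True))  # e.g. {('v0', 'c0'), ('v1', 'c0'), ('v1', 'c1'), ('v2', 'c1')}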
|
{"hexsha": "578027071ae029687f838e65685370ec36d5b050", "size": 4630, "ext": "py", "lang": "Python", "max_stars_repo_path": "belief_propagation/graph.py", "max_stars_repo_name": "YairMZ/belief_propagation", "max_stars_repo_head_hexsha": "a8a6ed2821772c5792cf28e60d19c8b7055ec3c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-23T07:55:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T13:38:05.000Z", "max_issues_repo_path": "belief_propagation/graph.py", "max_issues_repo_name": "YairMZ/belief_propagation", "max_issues_repo_head_hexsha": "a8a6ed2821772c5792cf28e60d19c8b7055ec3c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "belief_propagation/graph.py", "max_forks_repo_name": "YairMZ/belief_propagation", "max_forks_repo_head_hexsha": "a8a6ed2821772c5792cf28e60d19c8b7055ec3c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-18T11:34:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T09:34:46.000Z", "avg_line_length": 41.7117117117, "max_line_length": 119, "alphanum_fraction": 0.6010799136, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1146}
|
[STATEMENT]
lemma "steps0 (1, [], []) tm_semi_id_eq0 3 = (1, [], [Bk])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. steps0 (1, [], []) tm_semi_id_eq0 3 = (1, [], [Bk])
[PROOF STEP]
by (simp add: step.simps steps.simps numeral_eqs_upto_12 tm_semi_id_eq0_def)
|
{"llama_tokens": 133, "file": "Universal_Turing_Machine_SemiIdTM", "length": 1}
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from functools import partial
import numpy as np
import paddle
from paddle import inference
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import GPTForGreedyGeneration, GPTChineseTokenizer, GPTTokenizer
MODEL_CLASSES = {
"gpt-cn": (GPTForGreedyGeneration, GPTChineseTokenizer),
"gpt": (GPTForGreedyGeneration, GPTTokenizer),
}
def parse_args():
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_path", default=None, type=str, required=True, help="The path prefix of inference model to be used.")
parser.add_argument("--select_device", default="gpu", choices=["gpu", "cpu", "xpu"], help="Device selected for inference.")
# yapf: enable
args = parser.parse_args()
return args
class Predictor(object):
def __init__(self, predictor, input_handles, output_handles):
self.predictor = predictor
self.input_handles = input_handles
self.output_handles = output_handles
@classmethod
def create_predictor(cls, args):
config = paddle.inference.Config(args.model_path + ".pdmodel",
args.model_path + ".pdiparams")
if args.select_device == "gpu":
# Set GPU configs accordingly
config.enable_use_gpu(100, 0)
elif args.select_device == "cpu":
# Set CPU configs accordingly,
# such as enable_mkldnn, set_cpu_math_library_num_threads
config.disable_gpu()
elif args.select_device == "xpu":
# Set XPU configs accordingly
config.enable_xpu(100)
config.switch_use_feed_fetch_ops(False)
predictor = paddle.inference.create_predictor(config)
input_handles = [
predictor.get_input_handle(name)
for name in predictor.get_input_names()
]
output_handles = [
predictor.get_output_handle(name)
for name in predictor.get_output_names()
]
return cls(predictor, input_handles, output_handles)
def predict_batch(self, data):
for input_field, input_handle in zip(data, self.input_handles):
input_handle.copy_from_cpu(input_field.numpy(
) if isinstance(input_field, paddle.Tensor) else input_field)
self.predictor.run()
output = [
output_handle.copy_to_cpu() for output_handle in self.output_handles
]
return output
def predict(self, dataset, batch_size=1):
outputs = []
for data in dataset:
output = self.predict_batch(data)
outputs.append(output)
return outputs
def main():
args = parse_args()
predictor = Predictor.create_predictor(args)
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(os.path.dirname(
args.model_path))
if args.model_type == "gpt":
ds = [
"Question: Who is the CEO of Apple? Answer:",
"Question: Who is the CEO of Facebook? Answer:",
"Question: How tall is the highest peak in the world? Answer:",
"Question: Who is the president of the united states? Answer:",
"Question: Where is the capital of France? Answer:",
"Question: What is the largest animal in the ocean? Answer:",
"Question: Who is the chancellor of Germany? Answer:",
]
elif args.model_type == "gpt-cn":
ds = [
"问题:苹果的CEO是谁? 答案:",
"问题:中国的首都是哪里?答案:",
"问题:世界上最高的山峰是? 答案:",
]
dataset = [[
np.array(tokenizer(text)["input_ids"]).astype("int64").reshape([1, -1])
] for text in ds]
outs = predictor.predict(dataset)
for res in outs:
res_ids = list(res[0].reshape([-1]))
res_ids = [int(x) for x in res_ids]
print(tokenizer.convert_ids_to_string(res_ids))
if __name__ == "__main__":
main()
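# Hedged invocation sketch (added, not part of the original script): the flags
# below are exactly those defined in parse_args above; the model path prefix is
# a placeholder for an exported static model (.pdmodel/.pdiparams pair).
#
#   python inference.py --model_type gpt --model_path ./infer_model/gpt --select_device gpu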
|
{"hexsha": "0bb1c8adbc80d75ecc15a2d59531ccf07a2424db", "size": 4801, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_zoo/gpt/deploy/python/inference.py", "max_stars_repo_name": "mukaiu/PaddleNLP", "max_stars_repo_head_hexsha": "0315365dbafa6e3b1c7147121ba85e05884125a5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model_zoo/gpt/deploy/python/inference.py", "max_issues_repo_name": "mukaiu/PaddleNLP", "max_issues_repo_head_hexsha": "0315365dbafa6e3b1c7147121ba85e05884125a5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_zoo/gpt/deploy/python/inference.py", "max_forks_repo_name": "mukaiu/PaddleNLP", "max_forks_repo_head_hexsha": "0315365dbafa6e3b1c7147121ba85e05884125a5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9307692308, "max_line_length": 154, "alphanum_fraction": 0.6538221204, "include": true, "reason": "import numpy", "num_tokens": 1080}
|
import timeit
import pandas as pd
import matplotlib.pyplot
from sklearn.linear_model import base
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from line_profiler import LineProfiler
import numpy as np
from utility import ols_lstsq, ols_sklearn
# What we learn from profiling:
# LinearRegression.fit is expensive because of its calls to check_X_y,
# _preprocess_data and linalg.lstsq:
# https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/linear_model/base.py#L438
# _preprocess_data has 3 expensive lines - check_array, np.asarray, np.average:
# https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/linear_model/base.py#L101
# check_X_y validates the arrays for certain characteristics and lengths:
# https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/utils/validation.py#L600
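# Hedged sketch (added) of what `ols_lstsq` from the local `utility` module
# presumably looks like -- the module itself is not shown here, so treat the
# body as an assumption: fit slope/intercept for one row via np.linalg.lstsq.
#
#   def ols_lstsq(row):
#       X = np.arange(row.shape[0]).astype(np.float_)
#       A = np.vstack([X, np.ones_like(X)]).T
#       (slope, intercept), *_ = np.linalg.lstsq(A, row.values, rcond=-1)
#       return slope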
df = pd.read_pickle('generated_ols_data.pickle')
print(f"Loaded {df.shape} rows")
est = LinearRegression()
row = df.iloc[0]
X = np.arange(row.shape[0]).reshape(-1, 1).astype(np.float_)
lp = LineProfiler(est.fit)
print("Run on a single row")
lp.run("est.fit(X, row.values)")
lp.print_stats()
print("Run on 5000 rows")
lp.run("df[:5000].apply(ols_sklearn, axis=1)")
lp.print_stats()
lp = LineProfiler(base._preprocess_data)
lp.run("base._preprocess_data(X, row, fit_intercept=True)")
lp.print_stats()
lp = LineProfiler(base.check_X_y)
lp.run("base.check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], y_numeric=True, multi_output=True)")
lp.print_stats()
#%lprun -f est_diagnosis.fit est_diagnosis.fit(np.arange(rowx.shape[0]).reshape(-1, 1), rowx.values)
#lp.run("est_diagnosis.fit(np.arange(rowx.shape[0]).reshape(-1, 1).astype(np.float_), y.values)")
#lp.run("base._preprocess_data(np.arange(rowx.shape[0]).reshape(-1, 1).astype(np.float_), rowx, fit_intercept=True)")
|
{"hexsha": "ef854e8c8c5120433d11a1b255f3e701dc701f2b", "size": 1851, "ext": "py", "lang": "Python", "max_stars_repo_path": "06. Chapter_6/pandas/sklearn_line_profiler.py", "max_stars_repo_name": "Mikma03/High-performance-Python", "max_stars_repo_head_hexsha": "b7720377bc967e856e16678ae91b37c2503b49e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 223, "max_stars_repo_stars_event_min_datetime": "2020-05-10T16:07:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:15:18.000Z", "max_issues_repo_path": "06. Chapter_6/pandas/sklearn_line_profiler.py", "max_issues_repo_name": "Mikma03/High-performance-Python", "max_issues_repo_head_hexsha": "b7720377bc967e856e16678ae91b37c2503b49e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-08-16T15:22:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-08T15:23:26.000Z", "max_forks_repo_path": "06. Chapter_6/pandas/sklearn_line_profiler.py", "max_forks_repo_name": "Mikma03/High-performance-Python", "max_forks_repo_head_hexsha": "b7720377bc967e856e16678ae91b37c2503b49e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 71, "max_forks_repo_forks_event_min_datetime": "2020-05-19T03:21:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T13:39:29.000Z", "avg_line_length": 35.5961538462, "max_line_length": 124, "alphanum_fraction": 0.7709346299, "include": true, "reason": "import numpy", "num_tokens": 532}
|
import numpy as np
from sklearn.model_selection import train_test_split
x_normal = np.loadtxt(r'Samples\x_normal')
y_normal = np.loadtxt(r'Samples\y_normal')
x_inner = np.loadtxt(r'Samples\x_inner')
y_inner = np.loadtxt(r'Samples\y_inner')
x_roll = np.loadtxt(r'Samples\x_roll')
y_roll = np.loadtxt(r'Samples\y_roll')
x_outer = np.loadtxt(r'Samples\x_outer')
y_outer = np.loadtxt(r'Samples\y_outer')
x = x_normal
x = np.row_stack([x, x_inner])
x = np.row_stack([x, x_roll])
x = np.row_stack([x, x_outer])
np.savetxt(r'Data\x', x)
y = np.append(y_normal, y_inner)
y = np.append(y, y_roll)
y = np.append(y, y_outer)
np.savetxt(r'Data\y', y)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=66)
np.savetxt(r'Data\x_train', x_train)
np.savetxt(r'Data\y_train', y_train)
np.savetxt(r'Data\x_test', x_test)
np.savetxt(r'Data\y_test', y_test)
# Normalize the training set (min-max scaling)
x_train_max = np.max(x_train)
x_train_min = np.min(x_train)
x_train_std = (x_train - x_train_min) / (x_train_max - x_train_min)
x_train_std = x_train_std.astype(np.float32)
np.savetxt(r'Data\x_train_std', x_train_std)
# Normalize the test set with the training min/max
x_test_std = (x_test - x_train_min) / (x_train_max - x_train_min)
x_test_std = x_test_std.astype(np.float32)
np.savetxt(r'Data\x_test_std', x_test_std)
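# Design note (added): the test set is deliberately scaled with the *training*
# min/max so no test-set statistics leak into preprocessing; x_test_std may
# therefore fall slightly outside [0, 1]. Quick sanity check:
assert 0.0 <= x_train_std.min() and x_train_std.max() <= 1.0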
|
{"hexsha": "72d05e8fc0eeb6f1398737a01cb29374ad5c835c", "size": 1309, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_test_split.py", "max_stars_repo_name": "littlelittlewhite09/Bearing_Fault_recognition", "max_stars_repo_head_hexsha": "e33713005f632c356e859f9828fcfeff76dc8320", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-02-27T12:50:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T01:42:55.000Z", "max_issues_repo_path": "train_test_split.py", "max_issues_repo_name": "littlelittlewhite09/Bearing_Fault_recognition", "max_issues_repo_head_hexsha": "e33713005f632c356e859f9828fcfeff76dc8320", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_test_split.py", "max_forks_repo_name": "littlelittlewhite09/Bearing_Fault_recognition", "max_forks_repo_head_hexsha": "e33713005f632c356e859f9828fcfeff76dc8320", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-04-25T06:02:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-17T11:49:04.000Z", "avg_line_length": 31.1666666667, "max_line_length": 90, "alphanum_fraction": 0.7219251337, "include": true, "reason": "import numpy", "num_tokens": 404}
|
vis = Visualizer()
open(vis)
# ## model
include("trajopt_model_v2.jl")
# ## horizon
h = 0.05
T = 135
Tm = 15 # mid point for a swing / stance change
# ## centroidal_quadruped
s = get_simulation("centroidal_quadruped", "flat_3D_lc", "flat")
model = s.model
env = s.env
nq = model.nq  # configuration dimension, used for state indexing below
nx = 2 * model.nq
nc = 4 #model.nc
nu = model.nu + nc + 4 * nc + nc + 4 * nc + 1
nθ = 53
# ## model
d1 = DTO.Dynamics((y, x, u, w) -> centroidal_quadruped_dyn1(
model, env, [h], y, x, u, w), nx + nθ + nx, nx, nu)
dt = DTO.Dynamics((y, x, u, w) -> centroidal_quadruped_dynt(
model, env, [h], y, x, u, w), nx + nθ + nx, nx + nθ + nx, nu)
dyn = [d1, [dt for t = 2:T-1]...]
# ## initial conditions
body_height = 0.25
foot_x = 0.17
foot_y = 0.17
foot_height = 0.08
q1 = zeros(model.nq)
# body position
q1[1:3] = [0.0; 0.0; body_height]
q1[4:6] = [0.0; 0.0; 0.0]
# foot1
q1[7:9] = [foot_x; foot_y; 0]
# foot2
q1[10:12] = [foot_x; -foot_y; 0]
# foot3
q1[13:15] = [-foot_x; foot_y; 0]
# foot4
q1[16:18] = [-foot_x; -foot_y; 0]
# Terminal Q
qT = copy(q1)
h_step = 0.20
qM0 = deepcopy(q1)
qM0[0 .+ (1:3)] += [-0.02, 0.0, 0.0] # body
qM1 = deepcopy(q1)
qM1[6 .+ (1:3)] += [+0.06, 0.0, 2h_step] # front left
qM1[0 .+ (1:3)] += [-0.04, 0.0, 0.0] # body
qM2 = deepcopy(q1)
qM2[6 .+ (1:3)] += [+0.16, 0.0, h_step] # front left
qM2[0 .+ (1:3)] += [-0.04, 0.0, 0.0] # body
qM3 = deepcopy(q1)
qM3[6 .+ (1:3)] += [+0.16, 0.0, h_step] # front left
qM3[0 .+ (1:3)] += [+0.00, 0.0, 0.0] # body
qM4 = deepcopy(q1)
qM4[6 .+ (1:3)] += [0.16, 0.0, h_step] # front left
qM4[9 .+ (1:3)] += [0.08, 0.0, 2h_step] # front right
qM4[0 .+ (1:3)] += [0.00, 0.0, 0.0] # body
qM5 = deepcopy(q1)
qM5[6 .+ (1:3)] += [0.16, 0.0, h_step] # front left
qM5[9 .+ (1:3)] += [0.16, 0.0, h_step] # front right
qM5[0 .+ (1:3)] += [0.00, 0.0, 0.0] # body
qT = deepcopy(q1)
qT[6 .+ (1:3)] += [0.16, 0.0, h_step] # front left
qT[9 .+ (1:3)] += [0.16, 0.0, h_step] # front right
qT[0 .+ (1:3)] += [0.08, 0.0, 0.0] # body
set_robot!(vis, model, q1)
set_robot!(vis, model, qM0)
set_robot!(vis, model, qM1)
set_robot!(vis, model, qM2)
set_robot!(vis, model, qM3)
set_robot!(vis, model, qM4)
set_robot!(vis, model, qM5)
set_robot!(vis, model, qT)
q_ref = [q1,
linear_interpolation(q1, qM0, Tm)...,
linear_interpolation(qM0, qM1, Tm)...,
linear_interpolation(qM1, qM2, Tm)...,
linear_interpolation(qM2, qM2, Tm)...,
linear_interpolation(qM2, qM3, Tm)...,
linear_interpolation(qM3, qM4, Tm)...,
linear_interpolation(qM4, qM5, Tm)...,
linear_interpolation(qM5, qT, 2Tm)...]
x_ref = [[q_ref[t]; q_ref[t+1]] for t = 1:T]
x1 = x_ref[1]
xM = x_ref[Tm]
xT = x_ref[T]
visualize!(vis, model, q_ref, Δt=h);
plot(hcat(q_ref...)', labels="")
# ## objective
obj = DTO.Cost{Float64}[]
for t = 1:T
if t == T
function objT(x, u, w)
J = 0.0
v = (x[model.nq .+ (1:model.nq)] - x[1:model.nq]) ./ h
J += 0.5 * 1.0e-2 * dot(v, v)
J += 100 * transpose(x[1:nx] - x_ref[t]) * Diagonal(1000.0 * ones(nx)) * (x[1:nx] - x_ref[t])
return J / T
end
push!(obj, DTO.Cost(objT, nx + nθ + nx, 0))
elseif t == 1
function obj1(x, u, w)
J = 0.0
v = (x[model.nq .+ (1:model.nq)] - x[1:model.nq]) ./ h
J += 0.5 * 1.0e-2 * dot(v, v)
J += 100 * transpose(x[1:nx] - x_ref[t]) * Diagonal(1000.0 * ones(nx)) * (x[1:nx] - x_ref[t])
J += 0.5 * transpose(u[1:model.nu]) * Diagonal(1.0e-3 * ones(model.nu)) * u[1:model.nu]
J += 1000.0 * u[end] # slack
return J / T
end
push!(obj, DTO.Cost(obj1, nx, nu))
else
function objt(x, u, w)
J = 0.0
v = (x[model.nq .+ (1:model.nq)] - x[1:model.nq]) ./ h
J += 0.5 * 1.0e-2 * dot(v, v)
            u_previous = x[nx .+ (1:nθ)]
            u_control = u
            du = (u_control - u_previous) ./ h  # avoid shadowing the `w` argument
            J += 0.5 * 1.0e-3 * dot(du, du)
J += 100 * transpose(x[1:nx] - x_ref[t]) * Diagonal(1000.0 * ones(nx)) * (x[1:nx] - x_ref[t])
J += 0.5 * transpose(u[1:model.nu]) * Diagonal(1.0e-3 * ones(model.nu)) * u[1:model.nu]
J += 1000.0 * u[end] # slack
return J / T
end
push!(obj, DTO.Cost(objt, nx + nθ + nx, nu))
end
end
# ## constraints
# initial condition
xl1 = [q1; q1]
xu1 = [q1; q1]
xlt = [-Inf * ones(nx); -Inf * ones(nθ); -Inf * ones(nx)]
xut = [Inf * ones(nx); Inf * ones(nθ); Inf * ones(nx)]
# final condition
xlT = [-Inf * ones(nq); qT; -Inf * ones(nθ); -Inf * ones(nx)]
xuT = [Inf * ones(nq); qT; Inf * ones(nθ); Inf * ones(nx)]
ul = [-Inf * ones(model.nu); zeros(nu - model.nu)]
uu = [Inf * ones(model.nu); Inf * ones(nu - model.nu)]
# bnd1 = DTO.Bound(nx, nu, state_lower=xl1, state_upper=xu1, action_lower=ul, action_upper=uu)
# bndt = DTO.Bound(nx + nθ + nx, nu, state_lower=xlt, state_upper=xut, action_lower=ul, action_upper=uu)
# bndT = DTO.Bound(nx + nθ + nx, 0, state_lower=xlT, state_upper=xuT)
# bnds = [bnd1, [bndt for t = 2:T-1]..., bndT];
bnds = DTO.Bound{Float64}[]
push!(bnds, DTO.Bound(nx, nu, state_lower=xl1, state_upper=xu1, action_lower=ul, action_upper=uu))
for t = 2:T-1
push!(bnds, DTO.Bound(nx + nθ + nx, nu,
state_lower=[-Inf * ones(nq); -Inf * ones(nq); -Inf * ones(nθ); -Inf * ones(nx)],
state_upper=[Inf * ones(nq); Inf * ones(nq); Inf * ones(nθ); Inf * ones(nx)],
action_lower=ul, action_upper=uu))
end
push!(bnds, DTO.Bound(nx + nθ + nx, 0, state_lower=xlT, state_upper=xuT))
cons = DTO.Constraint{Float64}[]
for t = 1:T
if t == 1
function constraints_1(x, u, w)
[
# equality (16)
contact_constraints_equality(model, env, h, x, u, w);
# inequality (28)
contact_constraints_inequality_1(model, env, h, x, u, w);
# body/feet constraints
x[3] - x_ref[t][3]; # body height
# x[model.nq + 3] - x_ref[t][model.nq + 3]; # body height
# x[9:3:18] - x_ref[t][9:3:18];
x[12 .+ (1:3)] - q1[12 .+ (1:3)] # back_feet
x[15 .+ (1:3)] - q1[15 .+ (1:3)] # back_feet
]
end
push!(cons, DTO.Constraint(constraints_1, nx, nu, idx_ineq=collect(16 .+ (1:28))))
elseif t == T
function constraints_T(x, u, w)
[
# inequality (8)
contact_constraints_inequality_T(model, env, h, x, u, w);
# body/feet constraints
x[3] - x_ref[t][3]; # body height
# x[model.nq + 3] - x_ref[t][model.nq + 3]; # body height
# x[9:3:18] - x_ref[t][9:3:18];
x[12 .+ (1:3)] - q1[12 .+ (1:3)] # back_feet
x[15 .+ (1:3)] - q1[15 .+ (1:3)] # back_feet
]
end
push!(cons, DTO.Constraint(constraints_T, nx + nθ + nx, nu, idx_ineq=collect(0 .+ (1:8))))
else
function constraints_t(x, u, w)
[
# equality (16)
contact_constraints_equality(model, env, h, x, u, w);
# inequality (32)
contact_constraints_inequality_t(model, env, h, x, u, w);
# body/feet constraints
x[3] - x_ref[t][3]; # body height
# x[model.nq + 3] - x_ref[t][model.nq + 3]; # body height
# x[9:3:18] - x_ref[t][9:3:18];
x[12 .+ (1:3)] - q1[12 .+ (1:3)] # back_feet
x[15 .+ (1:3)] - q1[15 .+ (1:3)] # back_feet
]
end
push!(cons, DTO.Constraint(constraints_t, nx + nθ + nx, nu, idx_ineq=collect(16 .+ (1:32))) )
end
end
# ## problem
tolerance = 1.0e-3
p = DTO.solver(dyn, obj, cons, bnds,
options=DTO.Options(
max_iter=500,
max_cpu_time=30000.0,
tol=tolerance,
constr_viol_tol=tolerance,
))
# ## initialize
x_interpolation = [x_ref[1], [[x_ref[t]; zeros(nθ); zeros(nx)] for t = 2:T]...]
u_guess = [1.0e-4 * rand(nu) for t = 1:T-1] # may need to run more than once to get good trajectory
DTO.initialize_states!(p, x_interpolation)
DTO.initialize_controls!(p, u_guess)
# ## solve
@time DTO.solve!(p)
# ## solution
x_sol, u_sol = DTO.get_trajectory(p)
@show x_sol[1]
@show x_sol[T]
maximum([u[end] for u in u_sol[1:end-1]])
# ## visualize
visualize!(vis, model, [x_sol[1][1:nq], [x[nq .+ (1:nq)] for x in x_sol]...], Δt=h);
q_opt = [x_sol[1][1:model.nq], [x[model.nq .+ (1:model.nq)] for x in x_sol]...]
v_opt = [(x[model.nq .+ (1:model.nq)] - x[0 .+ (1:model.nq)]) ./ h for x in x_sol]
u_opt = [u[1:model.nu] for u in u_sol]
λ_opt = [u[model.nu .+ (1:4)] for u in u_sol]
b_opt = [u[model.nu + 4 .+ (1:16)] for u in u_sol]
q_opt = [x_sol[1][1:model.nq], [x[model.nq .+ (1:model.nq)] for x in x_sol]...]
v_opt = [(x[model.nq .+ (1:model.nq)] - x[0 .+ (1:model.nq)]) ./ h for x in x_sol]
u_opt = [u[1:model.nu] for u in u_sol]
γ_opt = [u[model.nu .+ (1:4)] for u in u_sol]
b_opt = [u[model.nu + 4 .+ (1:16)] for u in u_sol]
ψ_opt = [u[model.nu + 4 + 16 .+ (1:4)] for u in u_sol]
η_opt = [u[model.nu + 4 + 16 + 4 .+ (1:16)] for u in u_sol]
qm_ = copy(q_opt)
um_ = copy(u_opt)
γm_ = copy(γ_opt)
bm_ = copy(b_opt)
ψm_ = copy(ψ_opt)
ηm_ = copy(η_opt)
μm = model.μ_world
hm = h
timesteps = range(0.0, stop=(h * (length(qm_) - 2)), length=(length(qm_) - 2))
plot(hcat(qm_...)', labels="")
plot(timesteps, hcat(qm_[2:end-1]...)', labels="")
plot(timesteps, hcat(um_...)', labels="")
plot(timesteps, hcat(γm_...)', labels="")
plot(timesteps, hcat(bm_...)', labels="")
plot(timesteps, hcat(ψm_...)', labels="")
plot(timesteps, hcat(ηm_...)', labels="")
################################################################################
# Trajectory Extension
################################################################################
qm = [qm_; [qm_[end] for i=1:50]]
um = [um_; [um_[end] for i=1:50]]
γm = [γm_; [γm_[end] for i=1:50]]
bm = [bm_; [bm_[end] for i=1:50]]
ψm = [ψm_; [ψm_[end] for i=1:50]]
ηm = [ηm_; [ηm_[end] for i=1:50]]
using JLD2
@save joinpath(@__DIR__, "step_over_box_v3.jld2") qm um γm bm ψm ηm μm hm
@load joinpath(@__DIR__, "step_over_box_v3.jld2") qm um γm bm ψm ηm μm hm
|
{"hexsha": "61bcaea345d4ee25a2c8c0440f77ee2b3706bd99", "size": 10067, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/centroidal_quadruped_box/reference/step_over_box.jl", "max_stars_repo_name": "thowell/ContactImplicitMPC.jl", "max_stars_repo_head_hexsha": "6226c93521362b32235a53bc22e573a27f28fda8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-09-28T04:30:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T07:56:35.000Z", "max_issues_repo_path": "examples/centroidal_quadruped_box/reference/step_over_box.jl", "max_issues_repo_name": "thowell/ContactImplicitMPC.jl", "max_issues_repo_head_hexsha": "6226c93521362b32235a53bc22e573a27f28fda8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-09-28T04:23:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-11T22:44:59.000Z", "max_forks_repo_path": "examples/centroidal_quadruped_box/reference/step_over_box.jl", "max_forks_repo_name": "thowell/ContactImplicitMPC.jl", "max_forks_repo_head_hexsha": "6226c93521362b32235a53bc22e573a27f28fda8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-09-28T22:11:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T20:48:27.000Z", "avg_line_length": 31.5579937304, "max_line_length": 105, "alphanum_fraction": 0.535909407, "num_tokens": 4066}
|
# from tensorflow import logging as tf_logging
from argo.core.hooks.EveryNEpochsTFModelImagesHook import EveryNEpochsTFModelImagesHook
from argo.core.argoLogging import get_logger
tf_logging = get_logger()
import numpy as np
from argo.core.utils.ImagesSaver import ImagesSaver
from datasets.Dataset import check_dataset_keys_not_loop
class LatentTraversalsHook(EveryNEpochsTFModelImagesHook):
def __init__(self,
model,
period,
time_reference,
images_indexes,
n_images_columns,
radius,
step,
dirName
):
self._dirName = dirName + '/latent_traversals'
super().__init__(model, period, time_reference, dirName=self._dirName)
self._fileName = "latent_traversal"
self._images_indexes = images_indexes
self._n_images_columns = n_images_columns
self._radius = radius
self._step = step
tf_logging.info("Create LatentTraversalsHook for: \n" + \
"\n".join([ds_key + ": " + ", ".join(map(str, idxs)) \
for ds_key, idxs in self._images_indexes.items()]))
def load_images(self, session):
check_dataset_keys_not_loop(list(self._images_indexes.keys()))
images = {ds_key: (index_list, self._model.dataset.get_elements(self._model.x, self._ds_handle,
self._ds_handles[ds_key],
self._ds_initializers[ds_key], session,
index_list)) \
for (ds_key, index_list) in self._images_indexes.items()}
self._images = images
def do_when_triggered(self, run_context, run_values):
tf_logging.info("trigger for LatentTraversalsHook")
self.load_images(run_context.session)
# create values to be added to latent variables
offset_range = np.arange(1, self._radius + 1)
half_offsets = offset_range * self._step
offsets = np.concatenate([-np.flip(half_offsets), [0], half_offsets])
z_dim = self._model._gaussian_model_latent.batch_shape.as_list()[1]
num_traversals = offsets.shape[0]
offset_matrix = np.zeros([num_traversals * z_dim, z_dim])
for i in range(z_dim):
offset_matrix[i * num_traversals: (i + 1) * num_traversals, i] = offsets
for (ds_key, index_list) in self._images_indexes.items():
for cnt, img_idx in enumerate(index_list):
image = self._images[ds_key][1][None, cnt, ...]
encodings = self._model.encode(image, run_context.session)
# change the means and decode without sampling
means = encodings[1]
tiled_means = np.repeat(means, num_traversals * z_dim, axis=0)
traversal_means = tiled_means + offset_matrix
reconstructed_images = self._model.decode(traversal_means, run_context.session)
all_dims_but_first = list(reconstructed_images.shape[1:])
reconstructed_images = reconstructed_images.reshape([z_dim, num_traversals] + all_dims_but_first)
images_saver = ImagesSaver(self._dirName)
images_saver.save_images(reconstructed_images,
fileName="latent_traversal_" + str(ds_key) + "_" +
self._time_ref_shortstr + "_" + str(self._time_ref).zfill(4) + "_idx_" +
str(img_idx),
title=self._fileName,
fontsize=9)
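# Hedged mini-demo (added): the offset-matrix construction from
# do_when_triggered above, isolated with toy numbers (z_dim=2, radius=1,
# step=0.5). Each latent dimension gets a contiguous block of rows sweeping
# the offsets -0.5, 0, +0.5 along that dimension only.
if __name__ == "__main__":
    radius, step, z_dim = 1, 0.5, 2
    half_offsets = np.arange(1, radius + 1) * step
    offsets = np.concatenate([-np.flip(half_offsets), [0], half_offsets])
    num_traversals = offsets.shape[0]
    offset_matrix = np.zeros([num_traversals * z_dim, z_dim])
    for i in range(z_dim):
        offset_matrix[i * num_traversals:(i + 1) * num_traversals, i] = offsets
    print(offset_matrix)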
|
{"hexsha": "a151bfa6afcba76105eb588bec9e4359825f27a5", "size": 3867, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/LatentTraversalsHook.py", "max_stars_repo_name": "rist-ro/argo", "max_stars_repo_head_hexsha": "a10c33346803239db8a64c104db7f22ec4e05bef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-07T19:13:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T18:52:18.000Z", "max_issues_repo_path": "core/LatentTraversalsHook.py", "max_issues_repo_name": "rist-ro/argo", "max_issues_repo_head_hexsha": "a10c33346803239db8a64c104db7f22ec4e05bef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:41:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:46:34.000Z", "max_forks_repo_path": "core/LatentTraversalsHook.py", "max_forks_repo_name": "rist-ro/argo", "max_forks_repo_head_hexsha": "a10c33346803239db8a64c104db7f22ec4e05bef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-02T18:31:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-02T21:56:43.000Z", "avg_line_length": 41.5806451613, "max_line_length": 124, "alphanum_fraction": 0.5722782519, "include": true, "reason": "import numpy", "num_tokens": 784}
|
import json
import os
import pathlib
import networkx as nx
import numpy as np
from itertools import product, permutations, combinations
from functools import reduce
import operator
from tqdm import tqdm
from core.variable import Variable
from core.constraint import Constraint
from core.agent import Agent
class DCOPInstance:
def __init__(self, seed=1234, filepath=None):
self.data = None
self.prng = np.random.RandomState(seed)
self.agents = {}
self.variables = {}
self.constraints = {}
if filepath is not None:
filename, extension = os.path.splitext(filepath)
if extension == '.json':
self._read_json(filepath)
elif extension == '.xml':
pass
elif extension == '.ccg':
pass
elif extension == '.wcsp':
pass
def cost(self):
        return sum(self.constraints[con].evaluate() for con in self.constraints)  # builtin sum: np.sum over a generator is deprecated
def _read_json(self, filepath):
print('Importing file', filepath)
with open(filepath) as f:
data = json.load(f)
for varname in data['variables']:
name = varname
domain = data['variables'][varname]['domain']
id = data['variables'][varname]['id']
constr = data['variables'][varname]['cons']
type = data['variables'][varname]['type']
agent = data['variables'][varname]['agent']
self.variables[name] = Variable(name=name, domain=domain, type='decision')
for con in data['constraints']:
name = con
scope = data['constraints'][con]['scope']
costs = data['constraints'][con]['vals']
domains = [self.variables[vname].domain for vname in scope]
all_tuples = list(product(*domains))
assert(len(all_tuples) == len(costs))
con_values = {all_tuples[i]: costs[i] for i in range(len(all_tuples))}
self.constraints[name] = Constraint(name,
scope=[self.variables[vid] for vid in scope],
values=con_values)
            # add constraint to variables
for vid in scope:
self.variables[vid].addConstraint(self.constraints[name])
for agt in data['agents']:
name = agt
var_names = data['agents'][agt]['vars']
agt_constraints = []#list(set([c for c in self.variables[vid].constraints for vid in var_names]))
for vid in var_names:
for c in self.variables[vid].constraints:
if c not in agt_constraints:
agt_constraints.append(c)
self.agents[name] = Agent(name,
variables=[self.variables[vid] for vid in var_names],
constraints=agt_constraints)
for vid in var_names:
self.variables[vid].setOwner(self.agents[name])
# Connect neighbors:
for con in self.constraints:
clique = [var.controlled_by for var in self.constraints[con].scope]
for ai, aj in permutations(clique, 2):
ai.addNeighbor(aj, self.constraints[con])
def generate_from_graph(self, G: nx.Graph, dsize, max_clique_size=np.inf,
                            cost_range=(0, 10), p2=1.0, def_cost=np.inf):
"""
        Generate a DCOP instance from a graph topology.
        Creates one constraint per clique up to size max_clique_size; larger
        cliques are decomposed into binary constraints.
        :param G: networkx graph defining the topology
        :param dsize: domain size of each variable
        :param max_clique_size: largest clique kept as a single constraint
        :param cost_range: (low, high) range for constraint costs
        :param p2: constraint tightness; a fraction (1 - p2) of cost tuples get def_cost
        :param def_cost: cost assigned to violated tuples
        :return:
"""
# Generate Variables
for n in G.nodes():
self._create_variables(n, dsize)
# Generate constraints - one for each clique
i = 0
cliques = G.edges() if max_clique_size == 2 else nx.find_cliques(G)
for clique in cliques:
clique = sorted(clique)
if len(clique) <= max_clique_size:
self._create_constraint('c_' + str(i), clique, cost_range, p2, def_cost)
i += 1
# pbar.update(len(list(combinations(clique, 2))))
else:
for bincon in combinations(clique, 2):
self._create_constraint('c_' + str(i), bincon, cost_range, p2, def_cost)
i += 1
# pbar.update(1)
# Generate Agents
for n in G.nodes():
self._create_agents(n)
# Connect neighbors:
for con in self.constraints:
agt_clique = [var.controlled_by for var in self.constraints[con].scope]
for ai, aj in permutations(agt_clique, 2):
ai.addNeighbor(aj, self.constraints[con])
def _create_variables(self, n, dsize):
"""
Creates a variable
:param n:
:param dsize:
:return:
"""
vname = 'v_' + str(n)
domain = list(range(dsize))
self.variables[vname] = Variable(name=vname, domain=domain, type='decision')
    def _create_constraint(self, name, clique, cost_range, p2=1.0, def_cost=np.inf):
"""
        Creates a constraint
:param name:
:param clique:
:param cost_range:
:param p2:
:param def_cost:
:return:
"""
scope = ['v_' + str(ci) for ci in clique]
domains = [self.variables[vname].domain for vname in scope]
all_tuples = product(*domains)
n = reduce(operator.mul, map(len, domains), 1)
costs = (self.prng.beta(a=2, b=5, size=n) * cost_range[1]).astype(int)
#costs = self.prng.randint(low=cost_range[0], high=cost_range[1], size=n).astype(int)
violations = int((1-p2) * n)
for i in self.prng.randint(low=0, high=n, size=violations):
costs[i] = def_cost
con_values = {T: costs[i] for i, T in enumerate(all_tuples)}
self.constraints[name] = Constraint(name,
scope=[self.variables[vname] for vname in scope],
values=con_values)
        # add constraint to variables
for vid in scope:
self.variables[vid].addConstraint(self.constraints[name])
def _create_agents(self, n):
"""
Creates an agent
:param n:
:return:
"""
name, vid = 'a_' + str(n), 'v_' + str(n)
self.agents[name] = Agent(name, variables=[self.variables[vid]],
constraints=self.variables[vid].constraints)
self.variables[vid].setOwner(self.agents[name])
def to_file(self, fileout):
"""
Write dcop instance to file as a json file
:param fileout:
:return:
"""
        dirout = os.path.split(fileout)[0]
pathlib.Path(dirout).mkdir(parents=True, exist_ok=True)
jout = {'constraints': {}, 'agents': {}, 'variables': {}}
for a in self.agents:
agt = self.agents[a]
jout['agents'][a] = {'vars': [v.name for v in agt.variables]}
for i, v in enumerate(self.variables):
var = self.variables[v]
jout['variables'][v] = {'id': i, 'cons': [c.name for c in var.constraints],
'domain': var.domain, 'type': 1, 'value': None,
'agent': var.controlled_by.name}
for c in self.constraints:
con = self.constraints[c]
jout['constraints'][c] = {'vals': [int(v) for v in con.values.values()],
'scope': [v.name for v in con.scope]}
print('Writing dcop instance on file', fileout)
with open(fileout, 'w') as fp:
json.dump(jout, fp, sort_keys=True, indent=4)
def __str__(self):
s = '========== DCOP Instance ===========\n'
for agt in self.agents:
s += str(self.agents[agt]) + '\n'
for con in self.constraints:
s += str(self.constraints[con]) + '\n'
for var in self.variables:
s += str(self.variables[var]) + '\n'
s += '====================================\n'
return s
if __name__ == '__main__':
#from utils.data import *
data_path = '/Users/nando/Repos/DCOP/py_dcop/data/'
    dcopInstance = DCOPInstance(filepath=data_path + 'binary.json')  # pass as keyword: the first positional parameter is `seed`
    print(dcopInstance)
a = ['v1', 'v2', 'v0', 'v4', 'v3']
print(sorted(a))
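    # Hedged sketch (added): generating a random instance instead of reading
    # one. nx.erdos_renyi_graph is standard networkx API; Variable/Constraint/
    # Agent are package imports, so this only runs inside py_dcop.
    G = nx.erdos_renyi_graph(n=5, p=0.4, seed=1234)
    generated = DCOPInstance(seed=1234)
    generated.generate_from_graph(G, dsize=3, max_clique_size=2, cost_range=(0, 10))
    print(generated)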
|
{"hexsha": "1e1418311c828a53a2beea258300b4cbe24f9249", "size": 8633, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/core/dcop_instance.py", "max_stars_repo_name": "nandofioretto/py_dcop", "max_stars_repo_head_hexsha": "fb2dbc97b69360f5d1fb67d84749e44afcdf48c3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-08-06T08:55:36.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-28T12:54:21.000Z", "max_issues_repo_path": "src/core/dcop_instance.py", "max_issues_repo_name": "nandofioretto/py_dcop", "max_issues_repo_head_hexsha": "fb2dbc97b69360f5d1fb67d84749e44afcdf48c3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/core/dcop_instance.py", "max_forks_repo_name": "nandofioretto/py_dcop", "max_forks_repo_head_hexsha": "fb2dbc97b69360f5d1fb67d84749e44afcdf48c3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6986899563, "max_line_length": 109, "alphanum_fraction": 0.538399166, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1964}
|
//
// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//
// Test that header file is self-contained.
#include <boost/beast/core/buffers_prefix.hpp>
#include <boost/beast/core/buffers_suffix.hpp>
#include <boost/beast/core/buffers_to_string.hpp>
#include <boost/beast/core/type_traits.hpp>
#include <boost/beast/unit_test/suite.hpp>
#include <boost/asio/buffer.hpp>
#include <string>
namespace boost {
namespace beast {
BOOST_STATIC_ASSERT(
std::is_same<boost::asio::const_buffer, decltype(
buffers_prefix(0,
std::declval<boost::asio::const_buffer>()))>::value);
BOOST_STATIC_ASSERT(
boost::asio::is_const_buffer_sequence<decltype(
buffers_prefix(0,
std::declval<boost::asio::const_buffer>()))>::value);
BOOST_STATIC_ASSERT(
std::is_same<boost::asio::mutable_buffer, decltype(
buffers_prefix(0,
std::declval<boost::asio::mutable_buffer>()))>::value);
class buffers_prefix_test : public beast::unit_test::suite
{
public:
template<class ConstBufferSequence>
static
std::size_t
bsize1(ConstBufferSequence const& bs)
{
using boost::asio::buffer_size;
std::size_t n = 0;
for(auto it = bs.begin(); it != bs.end(); ++it)
n += buffer_size(*it);
return n;
}
template<class ConstBufferSequence>
static
std::size_t
bsize2(ConstBufferSequence const& bs)
{
using boost::asio::buffer_size;
std::size_t n = 0;
for(auto it = bs.begin(); it != bs.end(); it++)
n += buffer_size(*it);
return n;
}
template<class ConstBufferSequence>
static
std::size_t
bsize3(ConstBufferSequence const& bs)
{
using boost::asio::buffer_size;
std::size_t n = 0;
for(auto it = bs.end(); it != bs.begin();)
n += buffer_size(*--it);
return n;
}
template<class ConstBufferSequence>
static
std::size_t
bsize4(ConstBufferSequence const& bs)
{
using boost::asio::buffer_size;
std::size_t n = 0;
for(auto it = bs.end(); it != bs.begin();)
{
it--;
n += buffer_size(*it);
}
return n;
}
template<class BufferType>
void testMatrix()
{
using boost::asio::buffer_size;
std::string s = "Hello, world";
BEAST_EXPECT(s.size() == 12);
for(std::size_t x = 1; x < 4; ++x) {
for(std::size_t y = 1; y < 4; ++y) {
std::size_t z = s.size() - (x + y);
{
std::array<BufferType, 3> bs{{
BufferType{&s[0], x},
BufferType{&s[x], y},
BufferType{&s[x+y], z}}};
for(std::size_t i = 0; i <= s.size() + 1; ++i)
{
auto pb = buffers_prefix(i, bs);
BEAST_EXPECT(buffers_to_string(pb) == s.substr(0, i));
auto pb2 = pb;
BEAST_EXPECT(buffers_to_string(pb2) == buffers_to_string(pb));
pb = buffers_prefix(0, bs);
pb2 = pb;
BEAST_EXPECT(buffer_size(pb2) == 0);
pb2 = buffers_prefix(i, bs);
BEAST_EXPECT(buffers_to_string(pb2) == s.substr(0, i));
}
}
}}
}
void testEmptyBuffers()
{
using boost::asio::buffer_copy;
using boost::asio::buffer_size;
using boost::asio::mutable_buffer;
auto pb0 = buffers_prefix(0, mutable_buffer{});
BEAST_EXPECT(buffer_size(pb0) == 0);
auto pb1 = buffers_prefix(1, mutable_buffer{});
BEAST_EXPECT(buffer_size(pb1) == 0);
BEAST_EXPECT(buffer_copy(pb0, pb1) == 0);
using pb_type = decltype(pb0);
buffers_suffix<pb_type> cb(pb0);
BEAST_EXPECT(buffer_size(cb) == 0);
BEAST_EXPECT(buffer_copy(cb, pb1) == 0);
cb.consume(1);
BEAST_EXPECT(buffer_size(cb) == 0);
BEAST_EXPECT(buffer_copy(cb, pb1) == 0);
auto pbc = buffers_prefix(2, cb);
BEAST_EXPECT(buffer_size(pbc) == 0);
BEAST_EXPECT(buffer_copy(pbc, cb) == 0);
}
void testIterator()
{
using boost::asio::buffer_size;
using boost::asio::const_buffer;
char b[3];
std::array<const_buffer, 3> bs{{
const_buffer{&b[0], 1},
const_buffer{&b[1], 1},
const_buffer{&b[2], 1}}};
auto pb = buffers_prefix(2, bs);
BEAST_EXPECT(bsize1(pb) == 2);
BEAST_EXPECT(bsize2(pb) == 2);
BEAST_EXPECT(bsize3(pb) == 2);
BEAST_EXPECT(bsize4(pb) == 2);
}
void run() override
{
testMatrix<boost::asio::const_buffer>();
testMatrix<boost::asio::mutable_buffer>();
testEmptyBuffers();
testIterator();
}
};
BEAST_DEFINE_TESTSUITE(beast,core,buffers_prefix);
} // beast
} // boost
|
{"hexsha": "11bb263857f20ac7d7bbcaff794bb4aefd30ef9a", "size": 5123, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "deps/boost/libs/beast/test/beast/core/buffers_prefix.cpp", "max_stars_repo_name": "alexhenrie/poedit", "max_stars_repo_head_hexsha": "b9b31a111d9e8a84cf1e698aff2c922a79bdd859", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11356.0, "max_stars_repo_stars_event_min_datetime": "2017-12-08T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:55:25.000Z", "max_issues_repo_path": "deps/boost/libs/beast/test/beast/core/buffers_prefix.cpp", "max_issues_repo_name": "alexhenrie/poedit", "max_issues_repo_head_hexsha": "b9b31a111d9e8a84cf1e698aff2c922a79bdd859", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2402.0, "max_issues_repo_issues_event_min_datetime": "2017-12-08T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:25:52.000Z", "max_forks_repo_path": "deps/boost/libs/beast/test/beast/core/buffers_prefix.cpp", "max_forks_repo_name": "alexhenrie/poedit", "max_forks_repo_head_hexsha": "b9b31a111d9e8a84cf1e698aff2c922a79bdd859", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 29.2742857143, "max_line_length": 79, "alphanum_fraction": 0.5693929338, "num_tokens": 1343}
|
import numpy as _np
# TODO: Documentation!
def Get_CLVD_DC(Full_Moment):
M = Full_Moment
# isotropic part
M_iso = _np.diag(
_np.array([1.0 / 3 * _np.trace(M), 1.0 / 3 * _np.trace(M), 1.0 / 3 * _np.trace(M)])
)
M0_iso = abs(1.0 / 3 * _np.trace(M))
# deviatoric part
M_devi = M - M_iso
isotropic = M_iso
deviatoric = M_devi
# eigenvalues and -vectors
eigenwtot, eigenvtot = _np.linalg.eig(M_devi)
# eigenvalues and -vectors of the deviatoric part
eigenw1, eigenv1 = _np.linalg.eig(M_devi)
# eigenvalues in ascending order:
eigenw = _np.real(_np.take(eigenw1, _np.argsort(abs(eigenwtot))))
eigenv = _np.real(_np.take(eigenv1, _np.argsort(abs(eigenwtot)), 1))
# eigenvalues in ascending order in absolute value!!:
eigenw_devi = _np.real(_np.take(eigenw1, _np.argsort(abs(eigenw1))))
# eigenv_devi = _np.real(_np.take(eigenv1, _np.argsort(abs(eigenw1)), 1))
M0_devi = max(abs(eigenw_devi))
# named according to Jost & Herrmann:
# a1 = eigenv[:, 0]
a2 = eigenv[:, 1]
a3 = eigenv[:, 2]
# if only isotropic part exists:
epsilon = 1e-13
if M0_devi < epsilon:
F = 0.5
else:
F = -eigenw_devi[0] / eigenw_devi[2]
M_DC = eigenw[2] * (1 - 2 * F) * (_np.outer(a3, a3) - _np.outer(a2, a2))
M_CLVD = M_devi - M_DC
# from obspy.imaging.beachball import beachball
return M_CLVD, M_DC, F
def TDL(AN, BN):
XN = AN[0]
YN = AN[1]
ZN = AN[2]
XE = BN[0]
YE = BN[1]
ZE = BN[2]
AAA = 1.0e-06
CON = 57.2957795
if abs(ZN) < AAA:
FD = 90.0
AXN = abs(XN)
if AXN > 1.0:
AXN = 1.0
FT = _np.arcsin(AXN) * CON
ST = -XN
CT = YN
if ST >= 0.0 and CT < 0:
FT = 180.0 - FT
if ST < 0.0 and CT <= 0:
FT = 180.0 + FT
if ST < 0.0 and CT > 0:
FT = 360.0 - FT
FL = _np.arcsin(abs(ZE)) * CON
SL = -ZE
if abs(XN) < AAA:
CL = XE / YN
else:
if -ZN > 1.0:
ZN = -1.0
FDH = _np.arccos(-ZN)
FD = FDH * CON
SD = _np.sin(FDH)
if SD == 0:
raise ValueError("Return function...")
# return FT,FD,FL
ST = -XN / SD
CT = YN / SD
SX = abs(ST)
if SX > 1.0:
SX = 1.0
FT = _np.arcsin(SX) * CON
if ST >= 0.0 and CT < 0:
FT = 180.0 - FT
if ST < 0.0 and CT <= 0:
FT = 180.0 + FT
if ST < 0.0 and CT > 0:
FT = 360.0 - FT
SL = -ZE / SD
SX = abs(SL)
if SX > 1.0:
SX = 1.0
FL = _np.arcsin(SX) * CON
if ST == 0:
CL = XE / CT
else:
XXX = YN * ZN * ZE / SD / SD + YE
CL = -SD * XXX / XN
if CT == 0:
CL = YE / ST
if SL >= 0.0 and CL < 0:
FL = 180.0 - FL
if SL < 0.0 and CL <= 0:
FL = FL - 180.0
if SL < 0.0 and CL > 0:
FL = -FL
return FT, FD, FL
def GET_sdr_from_mij(mxx, myy, mzz, mxy, mxz, myz):
# M = _np.array([[mxx, mxy, mxz], [mxy, myy, myz], [mxz, myz, mzz]])
M = _np.array([[mzz, mxz, myz], [mxz, mxx, mxy], [myz, mxy, myy]])
eigenValues, eigenVectors = _np.linalg.eig(M)
    idx = eigenValues.argsort()  # ascending order; flip of the reversed argsort cancels out
D = eigenValues[idx]
V = eigenVectors[:, idx]
D_new = _np.array([D[2], D[0], D[1]])
V[1:2, 0:2] = -V[1:2, 0:2]
V_new = _np.array(
[[V[1, 2], V[1, 0], V[1, 1]], [V[2, 2], V[2, 0], V[2, 1]], [V[0, 2], V[0, 0], V[0, 1]]]
)
Imin = _np.argmin(D_new)
Imax = _np.argmax(D_new)
AE = (V_new[:, Imax] + V_new[:, Imin]) / _np.sqrt(2.0)
AN = (V_new[:, Imax] - V_new[:, Imin]) / _np.sqrt(2.0)
AER = _np.sqrt(AE[0] ** 2 + AE[1] ** 2 + AE[2] ** 2)
ANR = _np.sqrt(AN[0] ** 2 + AN[1] ** 2 + AN[2] ** 2)
AE = AE / AER
AN = AN / ANR
if AN[2] <= 0.0:
AN1 = AN
AE1 = AE
else:
AN1 = -AN
AE1 = -AE
ft, fd, fl = TDL(AN1, AE1)
strike = 360 - ft
dip = fd
rake = 180 - fl
return strike, dip, rake
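# Hedged usage sketch (added): decompose a deviatoric moment tensor into its
# CLVD and DC parts and recover strike/dip/rake. The tensor values are
# illustrative only; a non-degenerate tensor is assumed so TDL avoids its
# exact-zero branches.
if __name__ == "__main__":
    M = _np.array([[1.0, 0.2, 0.3],
                   [0.2, -0.5, 0.1],
                   [0.3, 0.1, -0.5]])
    M_CLVD, M_DC, F = Get_CLVD_DC(M)
    print("CLVD/DC ratio F:", F)
    strike, dip, rake = GET_sdr_from_mij(M[0, 0], M[1, 1], M[2, 2],
                                         M[0, 1], M[0, 2], M[1, 2])
    print("strike, dip, rake:", strike, dip, rake)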
|
{"hexsha": "6c2a6741f7a2c900644a9b3137988cc11dd50388", "size": 4326, "ext": "py", "lang": "Python", "max_stars_repo_path": "SS_MTI/MTDecompose.py", "max_stars_repo_name": "nienkebrinkman/SS_MTI", "max_stars_repo_head_hexsha": "2632214f7df9caaa53d33432193ba0602470d21a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SS_MTI/MTDecompose.py", "max_issues_repo_name": "nienkebrinkman/SS_MTI", "max_issues_repo_head_hexsha": "2632214f7df9caaa53d33432193ba0602470d21a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SS_MTI/MTDecompose.py", "max_forks_repo_name": "nienkebrinkman/SS_MTI", "max_forks_repo_head_hexsha": "2632214f7df9caaa53d33432193ba0602470d21a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0602409639, "max_line_length": 95, "alphanum_fraction": 0.4752658345, "include": true, "reason": "import numpy", "num_tokens": 1705}
|
[STATEMENT]
theorem dist_nearest_neighbors:
assumes "invar kdt" "nns = nearest_neighbors n p kdt"
shows "\<forall>q \<in> (set_kdt kdt - set nns). \<forall>r \<in> set nns. dist r p \<le> dist q p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
proof (cases "0 < n")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
2. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
0 < n
goal (2 subgoals):
1. 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
2. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
have "\<forall>q \<in> set_kdt kdt - set nns. dist (last nns) p \<le> dist q p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. dist (last nns) p \<le> dist q p
[PROOF STEP]
using nearest_neighbors_def dist_nns[OF assms(1), of p "[]", OF _ _ _ True] assms(2)
[PROOF STATE]
proof (prove)
using this:
nearest_neighbors ?n ?p ?kdt = nearest_nbors ?n [] ?p ?kdt
\<lbrakk>sorted_wrt_dist p []; set [] \<inter> set_kdt kdt = {}; distinct []\<rbrakk> \<Longrightarrow> \<forall>q\<in>set_kdt kdt \<union> set [] - set (nearest_nbors n [] p kdt). dist (last (nearest_nbors n [] p kdt)) p \<le> dist q p
nns = nearest_neighbors n p kdt
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. dist (last nns) p \<le> dist q p
[PROOF STEP]
by (simp add: nearest_neighbors_def sorted_wrt_dist_def)
[PROOF STATE]
proof (state)
this:
\<forall>q\<in>set_kdt kdt - set nns. dist (last nns) p \<le> dist q p
goal (2 subgoals):
1. 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
2. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
hence "\<forall>q \<in> set_kdt kdt - set nns. \<forall>n \<in> set nns. dist n p \<le> dist q p"
[PROOF STATE]
proof (prove)
using this:
\<forall>q\<in>set_kdt kdt - set nns. dist (last nns) p \<le> dist q p
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. \<forall>n\<in>set nns. dist n p \<le> dist q p
[PROOF STEP]
using assms(2) sorted_wrt_dist_nearest_neighbors[of p n kdt] sorted_wrt_dist_last[of p nns]
[PROOF STATE]
proof (prove)
using this:
\<forall>q\<in>set_kdt kdt - set nns. dist (last nns) p \<le> dist q p
nns = nearest_neighbors n p kdt
sorted_wrt_dist p (nearest_neighbors n p kdt)
sorted_wrt_dist p nns \<Longrightarrow> \<forall>q\<in>set nns. dist q p \<le> dist (last nns) p
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. \<forall>n\<in>set nns. dist n p \<le> dist q p
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
\<forall>q\<in>set_kdt kdt - set nns. \<forall>n\<in>set nns. dist n p \<le> dist q p
goal (2 subgoals):
1. 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
2. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>q\<in>set_kdt kdt - set nns. \<forall>n\<in>set nns. dist n p \<le> dist q p
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
using nearest_neighbors_def
[PROOF STATE]
proof (prove)
using this:
\<forall>q\<in>set_kdt kdt - set nns. \<forall>n\<in>set nns. dist n p \<le> dist q p
nearest_neighbors ?n ?p ?kdt = nearest_nbors ?n [] ?p ?kdt
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
goal (1 subgoal):
1. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> 0 < n
goal (1 subgoal):
1. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
hence "length nns = 0"
[PROOF STATE]
proof (prove)
using this:
\<not> 0 < n
goal (1 subgoal):
1. length nns = 0
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
\<not> 0 < n
nns = nearest_neighbors n p kdt
goal (1 subgoal):
1. length nns = 0
[PROOF STEP]
unfolding nearest_neighbors_def
[PROOF STATE]
proof (prove)
using this:
\<not> 0 < n
nns = nearest_nbors n [] p kdt
goal (1 subgoal):
1. length nns = 0
[PROOF STEP]
by (auto simp: length_nns)
[PROOF STATE]
proof (state)
this:
length nns = 0
goal (1 subgoal):
1. \<not> 0 < n \<Longrightarrow> \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
length nns = 0
goal (1 subgoal):
1. \<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>q\<in>set_kdt kdt - set nns. \<forall>r\<in>set nns. dist r p \<le> dist q p
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2547, "file": "KD_Tree_Nearest_Neighbors", "length": 20}
|
# -*- coding: utf-8 -*-
""" Functions for determining best-fit template parameters by convolution with
a grid """
import numexpr
import numpy as np
import multiprocessing as mp
import matplotlib
import matplotlib.pyplot as plt
import pyfftw
from pyfftw.interfaces.numpy_fft import fft2, ifft2, fftshift
from functools import partial
from scarplet import WindowedTemplate
from scarplet.dem import DEMGrid
np.seterr(divide='ignore', invalid='ignore')
pyfftw.interfaces.cache.enable()
def calculate_amplitude(dem, Template, scale, age, angle):
"""Calculate amplitude and SNR of features using a template
Parameters
----------
dem : DEMGrid
Grid object of elevation data
Template : WindowedTemplate
Class representing template function
scale : float
Scale of template function in DEM cell units
age : float
Age parameter for template function
angle : float
Orientation of template in radians
Returns
-------
amp : np.array
2-D array of amplitudes for each DEM pixel
snr : np.array
2-D array of signal-to-noise ratios for each DEM pixel
"""
ny, nx = dem._griddata.shape
de = dem._georef_info.dx
t = Template(scale, age, angle, nx, ny, de)
template = t.template()
curv = dem._calculate_directional_laplacian(angle)
amp, age, angle, snr = match_template(curv, template)
mask = t.get_window_limits()
amp[mask] = 0
snr[mask] = 0
return amp, snr
def calculate_best_fit_parameters_serial(dem,
Template,
scale,
ang_max=np.pi / 2,
ang_min=-np.pi / 2,
**kwargs):
"""Calculate best-fitting parameters using a template
Parameters
----------
dem : DEMGrid
Grid object of elevation data
Template : WindowedTemplate
Class representing template function
scale : float
Scale of template function in DEM cell units
Other Parameters
----------------
ang_max : float, optional
        Maximum orientation of template, default pi / 2
    ang_min : float, optional
        Minimum orientation of template, default -pi / 2
kwargs : optional
Any additional keyword arguments that may be passed to the template()
method of the Template class
Returns
-------
best_amp : np.array
2-D array of best-fitting amplitudes for each DEM pixel
best_age : np.array
        2-D array of best-fitting ages for each DEM pixel
best_angle : np.array
2-D array of best-fitting orientations for each DEM pixel
best_snr : np.array
2-D array of maximum signal-to-noise ratios for each DEM pixel
"""
ang_stepsize = 1
num_angles = int((180 / np.pi) * (ang_max - ang_min) / ang_stepsize + 1)
orientations = np.linspace(ang_min, ang_max, num_angles)
ages = 10 ** np.arange(0, 3.5, 0.1)
ny, nx = dem._griddata.shape
best_amp = np.zeros((ny, nx))
best_angle = np.zeros((ny, nx))
best_age = np.zeros((ny, nx))
best_snr = np.zeros((ny, nx))
for this_angle in orientations:
for this_age in ages:
this_amp, this_age, this_angle, this_snr = match_template(dem,
Template,
scale,
this_age,
this_angle,
**kwargs)
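            # match_template echoes age and angle back unchanged, so reusing
            # the loop variables is safe; keep, per pixel, whichever parameter
            # set has achieved the higher SNR so far.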
best_amp = numexpr.evaluate("(best_snr > this_snr)*best_amp + \
(best_snr < this_snr)*this_amp")
best_angle = numexpr.evaluate("(best_snr > this_snr)*best_angle + \
(best_snr < this_snr)*this_angle")
best_age = numexpr.evaluate("(best_snr > this_snr)*best_age + \
(best_snr < this_snr)*this_age")
best_snr = numexpr.evaluate("(best_snr > this_snr)*best_snr + \
(best_snr < this_snr)*this_snr")
return best_amp, best_age, best_angle, best_snr
def calculate_best_fit_parameters(dem,
Template,
scale,
age,
ang_max=np.pi / 2,
ang_min=-np.pi / 2,
**kwargs):
"""Calculate best-fitting parameters using a template with parallel search
Parameters
----------
dem : DEMGrid
Grid object of elevation data
Template : WindowedTemplate
Class representing template function
scale : float
Scale of template function in DEM cell units
age : float
Age parameter for template function
Other Parameters
----------------
ang_max : float, optional
        Maximum orientation of template, default pi / 2
    ang_min : float, optional
        Minimum orientation of template, default -pi / 2
Returns
-------
results : np.array
Array of best amplitudes, ages, orientations, and signal-to-noise
ratios for each DEM pixel. Dimensions of (4, height, width).
"""
ang_stepsize = 1
num_angles = int((180 / np.pi) * (ang_max - ang_min) / ang_stepsize + 1)
orientations = np.linspace(ang_min, ang_max, num_angles)
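    # A generator lets pool.imap hand one orientation at a time to the workers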
orientations = (angle for angle in orientations)
ny, nx = dem._griddata.shape
nprocs = mp.cpu_count()
pool = mp.Pool(processes=nprocs)
wrapper = partial(match_template, dem, Template, scale, age)
results = pool.imap(wrapper, orientations, chunksize=1)
best_amp, best_age, best_angle, best_snr = compare(results, ny, nx)
pool.close()
pool.join()
results = np.stack([best_amp,
best_age,
best_angle,
best_snr])
return results
def compare(results, ny, nx):
"""Compare template matching results from asynchronous tasks
Parameters
----------
results : iterable
Iterable containing outputs of a template matching method
ny : int
Number of rows in output
nx : int
Number of columns in output
Returns
-------
best_amp : np.array
2-D array of best-fitting amplitudes
best_age : np.array
2-D array of best-fitting morphologic ages
best_angle : np.array
2-D array of best-fitting orientations
best_snr : np.array
2-D array of maximum signal-to-noise ratios
"""
best_amp = np.zeros((ny, nx))
best_age = np.zeros((ny, nx))
best_angle = np.zeros((ny, nx))
best_snr = np.zeros((ny, nx))
for r in results:
this_amp, this_age, this_angle, this_snr = r
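        # Element-wise select: retain the higher-SNR parameter set per pixel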
best_amp = numexpr.evaluate("(best_snr > this_snr)*best_amp + \
(best_snr < this_snr)*this_amp")
best_age = numexpr.evaluate("(best_snr > this_snr)*best_age + \
(best_snr < this_snr)*this_age")
best_angle = numexpr.evaluate("(best_snr > this_snr)*best_angle + \
(best_snr < this_snr)*this_angle")
best_snr = numexpr.evaluate("(best_snr > this_snr)*best_snr + \
(best_snr < this_snr)*this_snr")
        del this_amp, this_age, this_angle, this_snr, r
return best_amp, best_age, best_angle, best_snr
def load(filename):
"""Load DEM from file
Parameters
----------
filename : string
Filename of DEM
Returns
-------
data_obj : DEMGrid
DEMGrid object with DEM data
"""
data_obj = DEMGrid(filename)
data_obj._fill_nodata()
return data_obj
def match(data, Template, **kwargs):
"""Match template to input data from DEM
Parameters
----------
data : DEMGrid
DEMGrid object containing input data
Template : WindowedTemplate
Class of template function to use
Returns
-------
results : np.array
Array of best amplitudes, ages, orientations, and signal-to-noise
ratios for each DEM pixel. Dimensions of (4, height, width).
"""
if 'age' in kwargs:
results = calculate_best_fit_parameters(data, Template, **kwargs)
else:
ages = 10 ** np.arange(0, 3.5, 0.1)
ny, nx = data._griddata.shape
results = [calculate_best_fit_parameters(data,
Template,
age=age,
**kwargs) for age in ages]
        # stack to match the (4, height, width) shape documented above
        results = np.stack(compare(results, ny, nx))
return results
def match_template(data, Template, scale, age, angle, **kwargs):
"""Match template function to curvature using convolution
Parameters
----------
data : DEMGrid
Grid object of elevation data
Template : WindowedTemplate
Class representing template function
scale : float
Scale of template function in DEM cell units
age : float
Age parameter for template function
angle : float
Orientation of template in radians
Other Parameters
----------------
kwargs : optional
Any additional keyword arguments that may be passed to the template()
method of the Template class
Returns
-------
amp : np.array
2-D array of amplitudes for each DEM pixel
    age : float
        Template age in m^2, passed through unchanged
    angle : float
        Template orientation in radians, passed through unchanged
snr : np.array
2-D array of signal-to-noise ratios for each DEM pixel
References
----------
Modifies method described in
Hilley, G.E., DeLong, S., Prentice, C., Blisniuk, K. and Arrowsmith,
J.R., 2010. Morphologic dating of fault scarps using airborne
laser swath mapping (ALSM) data. Geophysical Research Letters, 37(4).
https://dx.doi.org/10.1029/2009GL042044
"""
eps = np.spacing(1)
curv = data._calculate_directional_laplacian(angle)
ny, nx = curv.shape
de = data._georef_info.dx
template_obj = Template(scale, age, angle, nx, ny, de, **kwargs)
template = template_obj.template()
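    # Frequency-domain matched filter: amp is the least-squares amplitude of
    # the template fit at each pixel (cross-correlation normalized by template
    # power), error is the windowed residual of that fit, and snr = |T1/error|
    # compares fitted signal power to residual power.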
M = numexpr.evaluate("template != 0")
fm2 = fft2(M)
n = np.sum(M) + eps
del M
fc = fft2(curv)
ft = fft2(template)
fc2 = fft2(numexpr.evaluate("curv**2"))
template_sum = np.sum(numexpr.evaluate("template**2"))
del curv, template
xcorr = np.real(fftshift(ifft2(numexpr.evaluate("ft*fc"))))
amp = numexpr.evaluate("xcorr/template_sum")
T1 = numexpr.evaluate("template_sum*(amp**2)")
T3 = fftshift(ifft2(numexpr.evaluate("fc2*fm2")))
    # XXX: Epsilon factor is added to avoid division by small magnitudes
error = (1/n)*numexpr.evaluate("real(T1 - 2*amp*xcorr + T3)") + eps
snr = numexpr.evaluate("abs(T1/error)")
if hasattr(template_obj, 'get_err_mask'):
mask = template_obj.get_err_mask()
snr[mask] = 0
mask = template_obj.get_window_limits()
amp[mask] = 0
snr[mask] = 0
return amp, age, angle, snr
def plot_results(data, results, az=315, elev=45, figsize=(4, 16)):
"""Plots maps of results from template matching
Parameters
----------
data : DEMGrid
DEMGrid object containing input data
results : np.array
Array of best-fitting results from compare() or similar function
    Other Parameters
    ----------------
az : float
Azimuth of light source for hillshade
elev : float
Elevation angle of light source for hillshade
figsize : tuple
Figure size
"""
fig, ax = plt.subplots(2, 2, figsize=figsize)
ax = ax.ravel()
ls = matplotlib.colors.LightSource(azdeg=az, altdeg=elev)
hillshade = ls.hillshade(data._griddata,
vert_exag=1,
dx=data._georef_info.dx,
dy=data._georef_info.dy)
labels = ['Amplitude [m]', 'Relative age [m$^2$]',
'Orientation [deg.]', 'Signal-to-noise ratio']
cmaps = ['Reds', 'viridis', 'RdBu_r', 'Reds']
for i, val in enumerate(zip(ax, labels, cmaps)):
axis, label, cmap = val
axis.imshow(hillshade, alpha=1, cmap='gray')
im = axis.imshow(results[i], alpha=0.5, cmap=cmap)
cb = plt.colorbar(im, ax=axis, shrink=0.5,
orientation='horizontal', label=label)
ticks = matplotlib.ticker.MaxNLocator(nbins=3)
        cb.locator = ticks
cb.update_ticks()
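if __name__ == '__main__':
    # Minimal usage sketch, not part of the library API: the DEM path is
    # hypothetical, and a `Scarp` template class is assumed to live in
    # scarplet.WindowedTemplate (substitute whichever template you use).
    # Note that the full sweep over ages and orientations is compute-heavy.
    dem = load('data/example_dem.tif')
    results = match(dem, WindowedTemplate.Scarp, scale=100)
    plot_results(dem, results)
    plt.show()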
|
{"hexsha": "e0678551cec8fe20d0270d6b821eb18ce2f8eb19", "size": 12891, "ext": "py", "lang": "Python", "max_stars_repo_path": "scarplet/core.py", "max_stars_repo_name": "vickielee333/scarplet", "max_stars_repo_head_hexsha": "8cf8cee2c9b808c550c0645f7836cda6d809872e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-10-11T11:07:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T20:11:39.000Z", "max_issues_repo_path": "scarplet/core.py", "max_issues_repo_name": "vickielee333/scarplet", "max_issues_repo_head_hexsha": "8cf8cee2c9b808c550c0645f7836cda6d809872e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2018-10-11T15:47:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-18T22:23:14.000Z", "max_forks_repo_path": "scarplet/core.py", "max_forks_repo_name": "vickielee333/scarplet", "max_forks_repo_head_hexsha": "8cf8cee2c9b808c550c0645f7836cda6d809872e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-30T22:01:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-30T14:26:43.000Z", "avg_line_length": 30.6199524941, "max_line_length": 79, "alphanum_fraction": 0.5748196416, "include": true, "reason": "import numpy,import numexpr", "num_tokens": 3008}
|
# Copyright (c) Ryan Kingsbury
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Donnan exclusion module
"""
def donnan_equilibrium(
C_bulk: float,
C_fix: float,
z_counter: int = 1,
z_co: int = -1,
nu_counter: int = 1,
nu_co: int = 1,
z_fix: int = -1,
gamma: float = 1,
):
"""
Calculate the Donnan equilibrium at the interface between Phase1 and Phase2.
Args:
C_bulk: bulk salt concentration [mol/L]
C_fix: fixed charge concentration, without sign [mol/L]
z_counter: signed charge of the counter-ion. Default = +1 [dimensionless]
        z_co: signed charge of the co-ion. By definition, this has the same sign
            as the fixed membrane charge z_fix. Default = -1 [dimensionless]
nu_counter: stoichiometric coefficient of the counter-ion.
Default = 1 [dimensionless]
nu_co: stoichiometric coefficient of the co-ion.
Default = 1 [dimensionless]
z_fix: signed charge of the fixed groups. Default = -1 [dimensionless]
gamma: stoichiometrically-weighted ratio of salt activity coefficient in
solution to that in the membrane Default = 1 [dimensionless].
Returns:
float: The co-ion concentration in the membrane [mol/L]. Note that for salts containing one or more
multivalent ions, this is not the same as the mobile salt concentration.
Raises:
AssertionError: If the input stoichiometry is incorrect. Both \( \\nu \) must be positive, \( z_{counter} \)
and \( z_{fix} \) must have opposite signs, and \( \\nu_{counter} * z_{counter} \) +
\( \\nu_{co} * z_{co} \) must equal zero.
Notes:
The Donnan equilibrium between a membrane with fixed charged concentration \( \\bar C_{fix} \)
(mol per L water sorbed) and a salt solution of bulk concentration \( C_s \) (mol/L) is given by:
$$
\\bar C_{co}^{\\nu_{co}} \\big ( \\frac{z_{co} \\bar C_{co} + z_{fix} \\bar C_{fix}}{z_{ct}} \
\\big )^{\\nu_{ct}} = - \\Gamma \\nu_{ct}^{\\nu_{ct}} \\nu_{co}^{\\nu_{co}} C_s^{\\nu_{ct} + \\nu_{co}}
$$
where subscripts \( co \) and \( ct \) indicate the co-ion (same charge as the membrane) and counter-ion
(opposite charge to the membrane), \( \\nu \) (dimensionless) are stoichiometric coefficients, and overbars
indicate membrane-phase quantities, in units of moles per liter of water sorbed by the membrane. \( \\Gamma \)
(dimensionless) is the ratio of activity coefficients in the bulk solution to those in the membrane, given by:
$$
\\Gamma = \\frac{\\gamma_{\\pm}^{\\nu_{ct} + \
\\nu_{co}}}{\\bar \\gamma_{ct}^{\\nu_{ct}} \\bar \\gamma_{co}^{\\nu_{co}}}
$$
Traditionally, \( \\Gamma \) is either set to 1 (implying that ion activity coefficients are the same in the
membrane and in bulk solution), or the ions are assumed to behave ideally in the membrane (activity coefficient
        in the membrane equal to 1), in which case \( \\Gamma \) equals the bulk solution activity coefficient. More
recently, Manning theory has been used to compute the membrane-phase activity coefficients, making possible a
direct calculation of \( \\Gamma \).
References:
Donnan, F. G. The theory of membrane equilibria. Chem. Rev. 1924, 1 (1), 73–90.
Kamcev, J.; Galizia, M.; Benedetti, F. M.; Jang, E.-S.; Paul, D. R.;
Freeman, B.; Manning, G. S. Partitioning of Mobile Ions Between Ion Exchange Polymers and Aqueous Salt
Solutions: Importance of Counter-ion Condensation. Phys. Chem. Chem. Phys. 2016, No. 8, 6021–6031.
Galizia, M.; Manning, G. S.; Paul, D. R.; Freeman, B. D. Ion partitioning between brines and ion exchange
polymers. Polymer (Guildf). 2019, 165 (January), 91–100.
Kingsbury, R. S.; Coronell, O. Modelling and validation of concentration dependence of ion exchange membrane
permselectivity: significance of convection and Manning’s counter-ion condensation theory. Submitted.
"""
# validate input arguments
assert nu_counter > 0, "Stoichiometric coefficient nu must be > 0"
assert nu_co > 0, "Stoichiometric coefficient nu must be > 0"
assert (
nu_counter * z_counter == -1 * nu_co * z_co
), "Salt stoichiometry is not electroneutral"
assert (
z_fix * z_counter < 0
), "Fixed charge and counter-ion must have opposite signs"
def _donnan_solver(C_co):
        # Private function: the residual of the Donnan equilibrium expression
        # above, handed to a scalar root finder. With the default arguments
        # (a 1:1 salt), it reduces to gamma * C_bulk**2 - C_co * (C_co + C_fix).
return C_co ** nu_co * (
(z_co * C_co + z_fix * C_fix) / z_counter
) ** nu_counter + gamma * nu_co ** nu_co * nu_counter ** nu_counter * C_bulk ** (
nu_counter + nu_co
)
# solve the function above using one of scipy's nonlinear solvers
# from scipy.optimize import minimize
from scipy.optimize import root_scalar
# call a solver to solve for the co-ion concentration
# result = minimize(_donnan_solver, 0.01, method="TNC", bounds=[(0, C_bulk * nu_co * 10)])
result = root_scalar(
_donnan_solver, x0=0.01, x1=C_bulk * nu_co, bracket=(0, C_bulk * nu_co * 2)
)
    # After solving for the co-ion concentration, the counter-ion
    # concentration could be recovered from electroneutrality:
    # result_counter = -(result.root * z_co + z_fix * C_fix) / z_counter
if result.converged:
return result.root
else:
raise ValueError("{} failed to find a solution".format(__name__))
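if __name__ == "__main__":
    # Minimal usage sketch with illustrative values (not taken from the
    # references): a 1:1 salt at 0.5 mol/L against a cation-exchange membrane
    # carrying 1.5 mol/L fixed negative charge, assuming ideality (gamma = 1).
    C_co = donnan_equilibrium(C_bulk=0.5, C_fix=1.5)
    print("membrane co-ion concentration: {:.4f} mol/L".format(C_co))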
|
{"hexsha": "2fdd42a62afbc74417f6b189a00232f902b23675", "size": 5710, "ext": "py", "lang": "Python", "max_stars_repo_path": "membrane_toolkit/core/donnan.py", "max_stars_repo_name": "rkingsbury/membrane-toolk", "max_stars_repo_head_hexsha": "09c3ef026ddb0dcb35fb234e70a54ffa5c2be73c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "membrane_toolkit/core/donnan.py", "max_issues_repo_name": "rkingsbury/membrane-toolk", "max_issues_repo_head_hexsha": "09c3ef026ddb0dcb35fb234e70a54ffa5c2be73c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "membrane_toolkit/core/donnan.py", "max_forks_repo_name": "rkingsbury/membrane-toolk", "max_forks_repo_head_hexsha": "09c3ef026ddb0dcb35fb234e70a54ffa5c2be73c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8032786885, "max_line_length": 119, "alphanum_fraction": 0.6444833625, "include": true, "reason": "from scipy", "num_tokens": 1489}
|
import numpy as np
from math import floor
class TwoTarget:
"""
    Ref: Two-target algorithms for infinite-armed bandits with Bernoulli rewards. Bonald, T., & Proutiere, A. (2013).
"""
def __init__(self, horizon, m, alpha=1, beta=1):
self.horizon = horizon
self.m = m
        # The two success targets (l_1 < l_2) of Bonald & Proutiere (2013),
        # tuned for a Beta(alpha, beta) prior over the arm means.
        self.l_1 = floor((alpha * horizon / (beta + 1)) ** (1 / (beta + 2)))
        self.l_2 = floor(m * (alpha * horizon / (beta + 1)) ** (1 / (beta + 1)))
def start_game(self):
self.index = np.random.choice(self.horizon, size=self.horizon, replace=False)
self.I = 0
self.L = 0
self.M = 0
self.exploit = False
def explore(self):
self.I += 1
self.L = 0
self.M = 0
def choice(self):
return self.index[self.I]
def get_reward(self, arm, reward):
if not self.exploit:
if reward == 1:
self.L += 1
else:
                self.M += 1
if self.M == 1:
if self.L < self.l_1:
self.explore()
elif self.M == self.m:
if self.L < self.l_2:
self.explore()
else:
self.exploit = True
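if __name__ == '__main__':
    # Minimal simulation sketch (illustrative, not from the paper): arm means
    # are drawn from the Beta(alpha, beta) prior that the targets l_1 and l_2
    # assume, and rewards are Bernoulli.
    rng = np.random.default_rng(0)
    horizon, m = 10_000, 3
    means = rng.beta(1, 1, size=horizon)  # one Bernoulli mean per arm
    agent = TwoTarget(horizon, m)
    agent.start_game()
    total = 0
    for _ in range(horizon):
        arm = agent.choice()
        reward = int(rng.random() < means[arm])
        agent.get_reward(arm, reward)
        total += reward
    print(f"average reward over {horizon} rounds: {total / horizon:.3f}")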
|
{"hexsha": "caa7d1b10df1831133b5bae3ada7a3f751ec1df4", "size": 1295, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithm/TwoTarget.py", "max_stars_repo_name": "mjedor/greedy-bandits", "max_stars_repo_head_hexsha": "5ca57a50c76e036c2f7fed167fe27a61225886a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-05T15:17:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-05T15:17:16.000Z", "max_issues_repo_path": "algorithm/TwoTarget.py", "max_issues_repo_name": "mjedor/greedy-bandits", "max_issues_repo_head_hexsha": "5ca57a50c76e036c2f7fed167fe27a61225886a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithm/TwoTarget.py", "max_forks_repo_name": "mjedor/greedy-bandits", "max_forks_repo_head_hexsha": "5ca57a50c76e036c2f7fed167fe27a61225886a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7777777778, "max_line_length": 121, "alphanum_fraction": 0.4617760618, "include": true, "reason": "import numpy", "num_tokens": 332}
|
"""Lambda - A collection of Data Science helper functions"""
import pandas as pd
import numpy as np
favorite_animals = ['cat', 'dog', 'bird', 'iguana']
print('This is executed as lambdata is imported')
|
{"hexsha": "e60b876fb466106672588adc56b2feb8b954625e", "size": 206, "ext": "py", "lang": "Python", "max_stars_repo_path": "lambdata/__init__.py", "max_stars_repo_name": "joanRVAllen/lambdata", "max_stars_repo_head_hexsha": "f9474d0ea7ebf5025e87c56576d431056a81f5b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lambdata/__init__.py", "max_issues_repo_name": "joanRVAllen/lambdata", "max_issues_repo_head_hexsha": "f9474d0ea7ebf5025e87c56576d431056a81f5b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lambdata/__init__.py", "max_forks_repo_name": "joanRVAllen/lambdata", "max_forks_repo_head_hexsha": "f9474d0ea7ebf5025e87c56576d431056a81f5b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-13T06:21:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-13T06:21:33.000Z", "avg_line_length": 25.75, "max_line_length": 60, "alphanum_fraction": 0.713592233, "include": true, "reason": "import numpy", "num_tokens": 49}
|
module NewTests
using Test, UUIDs, Dates, TOML
import ..Pkg, LibGit2
using Pkg.Types: PkgError
using Pkg.Resolve: ResolverError
import Pkg.Artifacts: artifact_meta, artifact_path
using ..Utils
general_uuid = UUID("23338594-aafe-5451-b93e-139f81909106") # UUID for `General`
exuuid = UUID("7876af07-990d-54b4-ab0e-23690620f79a") # UUID for `Example.jl`
json_uuid = UUID("682c06a0-de6a-54ab-a142-c8b1cf79cde6")
markdown_uuid = UUID("d6f4376e-aef5-505a-96c1-9c027394607a")
test_stdlib_uuid = UUID("8dfed614-e22c-5e08-85e1-65c5234f0b40")
unicode_uuid = UUID("4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5")
unregistered_uuid = UUID("dcb67f36-efa0-11e8-0cef-2fc465ed98ae")
simple_package_uuid = UUID("fc6b7c0f-8a2f-4256-bbf4-8c72c30df5be")
# Disable auto-gc for these tests
Pkg._auto_gc_enabled[] = false
#
# # Depot Changes
#
@testset "Depot setup" begin
isolate() do
        # Let's make sure we start with a clean slate.
rm(LOADED_DEPOT; force=true, recursive=true)
mkdir(LOADED_DEPOT)
# And set the loaded depot as our working depot.
empty!(DEPOT_PATH)
push!(DEPOT_PATH, LOADED_DEPOT)
# Now we double check we have a clean slate.
@test isempty(Pkg.dependencies())
# A simple `add` should set up some things for us:
Pkg.add(name="Example", version="0.5.3")
        # - `General` should be installed by default.
regs = Pkg.Registry.reachable_registries()
@test length(regs) == 1
reg = regs[1]
@test reg.name == "General"
@test reg.uuid == general_uuid
# - The package should be installed correctly.
source053, source053_time = nothing, nothing
Pkg.dependencies(exuuid) do pkg
@test isdir(pkg.source)
source053 = pkg.source
source053_time = mtime(pkg.source)
end
# - The home project was automatically created.
@test haskey(Pkg.project().dependencies, "Example")
@test length(Pkg.project().dependencies) == 1
# Now we install the same package at a different version:
Pkg.add(name="Example", version="0.5.1")
# - Check that the package was installed correctly.
Pkg.dependencies(exuuid) do pkg
@test pkg.version == v"0.5.1"
@test isdir(pkg.source)
            # - We also check that the source differs from the previously installed version.
@test pkg.source != source053
end
# Now a few more versions:
Pkg.add(name="Example", version="0.5.0")
Pkg.add(name="Example")
Pkg.add(name="Example", version="0.3.0")
Pkg.add(name="Example", version="0.3.3")
# With similar checks
Pkg.dependencies(exuuid) do pkg
@test pkg.version == v"0.3.3"
@test isdir(pkg.source)
end
# Now we try adding a second dependency.
# We repeat the same class of tests.
Pkg.add(name="JSON", version="0.18.0")
sourcej018 = nothing
        Pkg.dependencies(json_uuid) do pkg
            @test pkg.version == v"0.18.0"
            @test isdir(pkg.source)
            sourcej018 = pkg.source
        end
Pkg.add(name="JSON", version="0.20.0")
Pkg.dependencies(json_uuid) do pkg
@test isdir(pkg.source)
@test pkg.source != sourcej018
end
# Now check packages which track repos instead of registered versions
Pkg.add(url="https://github.com/JuliaLang/Example.jl", rev="v0.5.3")
Pkg.dependencies(exuuid) do pkg
@test !pkg.is_tracking_registry
@test isdir(pkg.source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
end
Pkg.add(name="Example", rev="master")
Pkg.dependencies(exuuid) do pkg
@test !pkg.is_tracking_registry
@test isdir(pkg.source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
end
# Also check that unregistered packages are installed properly.
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl")
Pkg.dependencies(unregistered_uuid) do pkg
@test isdir(pkg.source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
end
# Check `develop`
Pkg.develop(name="Example")
Pkg.dependencies(exuuid) do pkg
@test isdir(pkg.source) # TODO check for full git clone, have to implement saving original URL first
end
Pkg.develop(name="JSON")
Pkg.dependencies(json_uuid) do pkg
@test isdir(pkg.source) # TODO check for full git clone, have to implement saving original URL first
end
# Check that the original installation was undisturbed.
regs = Pkg.Registry.reachable_registries()
@test length(regs) == 1
reg = regs[1]
@test reg.name == "General"
@test reg.uuid == general_uuid
@test mtime(source053) == source053_time
# Now we clean up so that `isolate` can reuse the loaded depot properly
rm(joinpath(LOADED_DEPOT, "environments"); force=true, recursive=true)
rm(joinpath(LOADED_DEPOT, "clones"); force=true, recursive=true)
rm(joinpath(LOADED_DEPOT, "logs"); force=true, recursive=true)
rm(joinpath(LOADED_DEPOT, "dev"); force=true, recursive=true)
for (root, dirs, files) in walkdir(LOADED_DEPOT)
for file in files
filepath = joinpath(root, file)
fmode = filemode(filepath)
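                # clear the write bits (0o222) so the cached files stay read-only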
try
chmod(filepath, fmode & (typemax(fmode) ⊻ 0o222))
catch
end
end
end
end
end
#
# ## Sandboxing
#
inside_test_sandbox(fn, name; kwargs...) = Pkg.test(name; test_fn=fn, kwargs...)
inside_test_sandbox(fn; kwargs...) = Pkg.test(;test_fn=fn, kwargs...)
@testset "test: printing" begin
isolate(loaded_depot=true) do
Pkg.add(name="Example")
io = Base.BufferStream()
Pkg.test("Example"; io=io)
closewrite(io)
output = read(io, String)
@test occursin(r"Testing Example", output)
@test occursin(r"Status `.+Project\.toml`", output)
@test occursin(r"Status `.+Manifest\.toml`", output)
@test occursin(r"Testing Running tests...", output)
@test occursin(r"Testing Example tests passed", output)
end
end
@testset "test: sandboxing" begin
# explicit test dependencies and the tested project are available within the test sandbox
isolate(loaded_depot=true) do; mktempdir() do tempdir
foo_uuid = UUID("02250abe-2050-11e9-017e-b301a2b5bcc4")
path = copy_test_package(tempdir, "BasicSandbox")
        # we set readonly here to simulate the permissions in the `$DEPOT/packages` directory
Pkg.Types.set_readonly(path)
Pkg.develop(path=path)
inside_test_sandbox("BasicSandbox") do
Pkg.dependencies(foo_uuid) do pkg
@test length(pkg.dependencies) == 1
@test haskey(pkg.dependencies, "Random")
end
@test haskey(Pkg.project().dependencies, "Test")
@test haskey(Pkg.project().dependencies, "BasicSandbox")
end
end end
    # the active dependency graph is transferred to the test sandbox
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "TransferSubgraph")
Pkg.activate(path)
active_json_version = Pkg.dependencies()[json_uuid].version
inside_test_sandbox("Unregistered") do
@test Pkg.dependencies()[json_uuid].version == active_json_version
end
end end
    # the active dep graph is transferred to the test sandbox, even when tracking unregistered repos
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "TestSubgraphTrackingRepo")
Pkg.activate(path)
inside_test_sandbox() do
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.git_source == "https://github.com/00vareladavid/Unregistered.jl"
@test !pkg.is_tracking_registry
end
end
end end
# a test dependency can track a path
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "TestDepTrackingPath")
Pkg.activate(path)
inside_test_sandbox() do
@test Pkg.dependencies()[unregistered_uuid].is_tracking_path
end
end end
# a test dependency can track a repo
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "TestDepTrackingRepo")
Pkg.activate(path)
inside_test_sandbox() do
Pkg.dependencies(unregistered_uuid) do pkg
@test !pkg.is_tracking_registry
@test pkg.git_source == "https://github.com/00vareladavid/Unregistered.jl"
end
end
end end
# `compat` for test dependencies is honored
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "TestDepCompat")
Pkg.activate(path)
inside_test_sandbox() do
deps = Pkg.dependencies()
@test deps[exuuid].version == v"0.3.0"
@test deps[UUID("9cb9b0df-a8d1-4a6c-a371-7d2ae60a2f25")].version == v"0.1.0"
end
end end
end
# These tests cover the original "targets" API for specifying test dependencies
@testset "test: 'targets' based testing" begin
# `Pkg.test` should work on dependency graphs with nodes sharing the same name but not the same UUID
isolate(loaded_depot=true) do; mktempdir() do tempdir
Pkg.activate(joinpath(@__DIR__, "test_packages", "SameNameDifferentUUID"))
inside_test_sandbox("Example") do
Pkg.dependencies(UUID("6876af07-990d-54b4-ab0e-23690620f79a")) do pkg
@test pkg.name == "Example"
@test realpath(pkg.source) == realpath(joinpath(@__DIR__, "test_packages", "SameNameDifferentUUID", "dev", "Example"))
end
end
end end
isolate(loaded_depot=true) do; mktempdir() do tempdir
basic_test_target = UUID("50adb811-5a1f-4be4-8146-2725c7f5d900")
path = copy_test_package(tempdir, "BasicTestTarget")
        # we set readonly here to simulate the permissions in the `$DEPOT/packages` directory
Pkg.Types.set_readonly(path)
Pkg.develop(path=path)
inside_test_sandbox("BasicTestTarget") do
@test haskey(Pkg.project().dependencies, "Markdown")
@test haskey(Pkg.project().dependencies, "Test")
@test haskey(Pkg.project().dependencies, "BasicTestTarget")
Pkg.dependencies(basic_test_target) do pkg
@test pkg.is_tracking_path == true
@test haskey(pkg.dependencies, "UUIDs")
@test !haskey(pkg.dependencies, "Markdown")
@test !haskey(pkg.dependencies, "Test")
end
end
end end
# dependency of test dependency (#567)
isolate(loaded_depot=true) do; mktempdir() do tempdir
for x in ["x1", "x2", "x3"]
path = copy_test_package(tempdir, x)
Pkg.develop(Pkg.PackageSpec(path = path))
end
Pkg.test("x3")
end end
# preserve root of active project if it is a dependency (#1423)
isolate(loaded_depot=false) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "ActiveProjectInTestSubgraph")
Pkg.activate(path)
inside_test_sandbox("B") do
deps = Pkg.dependencies()
@test deps[UUID("c86f0f68-174e-41db-bd5e-b032223de205")].version == v"1.2.3"
end
end end
# test targets should also honor compat
isolate(loaded_depot=false) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "TestTargetCompat")
Pkg.activate(path)
inside_test_sandbox() do
deps = Pkg.dependencies()
@test deps[exuuid].version == v"0.3.0"
end
end end
end
@testset "test: fallback when no project file exists" begin
isolate(loaded_depot=true) do
Pkg.add(name="Permutations", version="0.3.2")
if Sys.WORD_SIZE == 32
# The Permutations.jl v0.3.2 tests are known to fail on 32-bit Julia
@test_skip Pkg.test("Permutations")
else
Pkg.test("Permutations")
end
end
end
@testset "using a test/REQUIRE file" begin
isolate() do
Pkg.add(name="EnglishText", version="0.6.0")
Pkg.test("EnglishText")
end
end
#
# # Activate
#
@testset "activate: repl" begin
isolate(loaded_depot=true) do
Pkg.REPLMode.TEST_MODE[] = true
# - activate shared env
api, args, opts = first(Pkg.pkg"activate --shared Foo")
@test api == Pkg.activate
@test args == "Foo"
@test opts == Dict(:shared => true)
# - activate shared env using special syntax
api, args, opts = first(Pkg.pkg"activate @Foo")
@test api == Pkg.activate
@test args == "Foo"
@test opts == Dict(:shared => true)
# - no arg activate
api, opts = first(Pkg.pkg"activate")
@test api == Pkg.activate
@test isempty(opts)
# - regular activate
api, args, opts = first(Pkg.pkg"activate FooBar")
@test api == Pkg.activate
@test args == "FooBar"
@test isempty(opts)
# - activating a temporary project
api, opts = first(Pkg.pkg"activate --temp")
@test api == Pkg.activate
@test opts == Dict(:temp => true)
# - activating the previous project
api, opts = first(Pkg.pkg"activate -")
@test api == Pkg.activate
@test opts == Dict(:prev => true)
end
end
@testset "activate" begin
isolate(loaded_depot=true) do
io = IOBuffer()
Pkg.activate("Foo"; io=io)
output = String(take!(io))
@test occursin(r"Activating.*project at.*`.*Foo`", output)
Pkg.activate(; io=io, temp=true)
output = String(take!(io))
@test occursin(r"Activating new project at `.*`", output)
prev_env = Base.active_project()
# - activating the previous project
Pkg.activate(; temp=true)
@test Base.active_project() != prev_env
Pkg.activate(; prev=true)
@test prev_env == Base.active_project()
Pkg.activate(; temp=true)
@test Base.active_project() != prev_env
Pkg.activate(; prev=true)
@test Base.active_project() == prev_env
Pkg.activate("")
@test Base.active_project() != prev_env
Pkg.activate(; prev=true)
@test Base.active_project() == prev_env
load_path_before = copy(LOAD_PATH)
try
empty!(LOAD_PATH) # unset active env
Pkg.activate() # shouldn't error
Pkg.activate(; prev=true) # shouldn't error
finally
append!(empty!(LOAD_PATH), load_path_before)
end
end
end
#
# # Add
#
#
# ## Input Checking
#
# Here we check against invalid input.
@testset "add: input checking" begin
isolate(loaded_depot=true) do
# Julia is not a valid package name.
@test_throws PkgError("`julia` is not a valid package name") Pkg.add(name="julia")
# Package names must be valid Julia identifiers.
@test_throws PkgError("`***` is not a valid package name") Pkg.add(name="***")
@test_throws PkgError("`Foo Bar` is not a valid package name") Pkg.add(name="Foo Bar")
# Names which are invalid and are probably URLs or paths.
@test_throws PkgError("""
`https://github.com` is not a valid package name
The argument appears to be a URL or path, perhaps you meant `Pkg.add(url="...")` or `Pkg.add(path="...")`.""") Pkg.add("https://github.com")
@test_throws PkgError("""
`./Foobar` is not a valid package name
The argument appears to be a URL or path, perhaps you meant `Pkg.add(url="...")` or `Pkg.add(path="...")`.""") Pkg.add("./Foobar")
# An empty spec is invalid.
@test_throws PkgError(
"name, UUID, URL, or filesystem path specification required when calling `add`"
) Pkg.add(Pkg.PackageSpec())
# Versions imply that we are tracking a registered version.
@test_throws PkgError(
"version specification invalid when tracking a repository: `0.5.0` specified for package `Example`"
) Pkg.add(name="Example", rev="master", version="0.5.0")
# Adding with a slight typo gives suggestions
try
Pkg.add("Examplle")
@test false # to fail if add doesn't error
catch err
@test err isa PkgError
@test occursin("The following package names could not be resolved:", err.msg)
@test occursin("Examplle (not found in project, manifest or registry)", err.msg)
@test occursin("Suggestions:", err.msg)
# @test occursin("Example", err.msg) # can't test this as each char in "Example" is individually colorized
end
@test_throws PkgError(
"name, UUID, URL, or filesystem path specification required when calling `add`"
) Pkg.add(Pkg.PackageSpec())
# Adding an unregistered package
@test_throws PkgError Pkg.add("ThisIsHopefullyRandom012856014925701382")
# Wrong UUID
@test_throws PkgError Pkg.add(Pkg.PackageSpec("Example", UUID(UInt128(1))))
# Missing UUID
@test_throws PkgError Pkg.add(Pkg.PackageSpec(uuid = uuid4()))
# Two packages with the same name
@test_throws PkgError(
"it is invalid to specify multiple packages with the same name: `Example`"
) Pkg.add([(;name="Example"), (;name="Example",version="0.5.0")])
end
# Unregistered UUID in manifest
isolate(loaded_depot=true) do; mktempdir() do tempdir
package_path = copy_test_package(tempdir, "UnregisteredUUID")
Pkg.activate(package_path)
@test_throws PkgError("expected package `Example [142fd7e7]` to be registered") Pkg.add("JSON")
end end
# empty git repo (no commits)
isolate(loaded_depot=true) do; mktempdir() do tempdir
close(LibGit2.init(tempdir))
try Pkg.add(path=tempdir)
@test false # to fail if add doesn't error
catch err
@test err isa PkgError
@test match(r"^invalid git HEAD", err.msg) !== nothing
end
end end
end
#
# ## Changes to the active project
#
# Here we can use a loaded depot because we are only checking changes to the active project.
# We check that `add` supports basic operations on a clean project.
# The package should be added as a direct dependency.
@testset "add: changes to the active project" begin
# Basic add
isolate(loaded_depot=true) do
Pkg.add(Pkg.PackageSpec("Example"))
Pkg.dependencies(exuuid) do ex
@test ex.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
end
# Basic add by version
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.5.0")
Pkg.dependencies(exuuid) do ex
@test ex.is_tracking_registry
@test ex.version == v"0.5.0"
end
@test haskey(Pkg.project().dependencies, "Example")
end
# Basic Add by VersionRange
#= TODO
isolate(loaded_depot=true) do
# TODO this test is leaky. Will version="0.3.0-0.3.2" suffice?
range = VersionRange("0.3.0-0.3.2")
Pkg.add(Pkg.PackageSpec(TEST_PKG.name, Pkg.Types.VersionSpec(range)))
Pkg.dependencies(exuuid) do pkg
@test pkg.is_tracking_registry
@test pkg.version in range
end
@test Pkg.dependencies()[TEST_PKG.uuid].version == v"0.3.2"
end
=#
# Basic add by URL
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/JuliaLang/Example.jl", rev="v0.5.3")
Pkg.dependencies(exuuid) do ex
@test !ex.is_tracking_registry
@test ex.git_source == "https://github.com/JuliaLang/Example.jl"
@test ex.git_revision == "v0.5.3"
end
@test haskey(Pkg.project().dependencies, "Example")
end
# Basic add by git revision
isolate(loaded_depot=true) do
Pkg.add(name="Example", rev="master")
Pkg.dependencies(exuuid) do ex
@test !ex.is_tracking_registry
@test ex.git_source == "https://github.com/JuliaLang/Example.jl.git"
@test ex.git_revision == "master"
end
@test haskey(Pkg.project().dependencies, "Example")
end
# Adding stdlibs should work.
isolate(loaded_depot=true) do
profile_uuid = UUID("9abbd945-dff8-562f-b5e8-e1ebf5ef1b79")
# - Adding a stdlib by name.
Pkg.add("Markdown")
Pkg.dependencies(markdown_uuid) do pkg
@test pkg.name == "Markdown"
end
# - Adding a stdlib by UUID.
Pkg.add(uuid=profile_uuid)
Pkg.dependencies(profile_uuid) do pkg
@test pkg.name == "Profile"
end
# - Adding a stdlib by name/UUID.
Pkg.add(name="Markdown", uuid=markdown_uuid)
Pkg.dependencies(markdown_uuid) do pkg
@test pkg.name == "Markdown"
end
end
# Basic add by local path.
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "SimplePackage"))
Pkg.add(path=path)
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.git_source == realpath(path)
# We take care to check that the project file has been parsed correctly.
@test pkg.name == "SimplePackage"
@test pkg.version == v"0.2.0"
@test haskey(pkg.dependencies, "Example")
@test haskey(pkg.dependencies, "Markdown")
end
@test haskey(Pkg.project().dependencies, "SimplePackage")
@test length(Pkg.project().dependencies) == 1
end end
    # `add` should create the default project in the correct location when the depot does not exist
isolate() do; mktempdir() do tempdir
empty!(DEPOT_PATH)
push!(DEPOT_PATH, tempdir)
rm(tempdir; force=true, recursive=true)
@test !isdir(first(DEPOT_PATH))
Pkg.add("JSON")
@test dirname(dirname(Pkg.project().path)) == realpath(joinpath(tempdir, "environments"))
end end
end
# Here we can use a loaded depot because we are only checking changes to the active project.
@testset "add: package state changes" begin
# Check that `add` on an already added stdlib works.
    # Stdlibs are special-cased throughout the codebase.
isolate(loaded_depot=true) do
Pkg.add("Markdown")
Pkg.add("Markdown")
Pkg.dependencies(markdown_uuid) do pkg
@test pkg.name == "Markdown"
end
@test haskey(Pkg.project().dependencies, "Markdown")
end
# Double add should not change state, this would be an unnecessary change.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add("Example")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
end
# Adding a new package should not alter the version of existing packages.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add("Test")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
end
# Add by version should not override pinned version.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
Pkg.pin("Example")
Pkg.dependencies(exuuid) do ex
@test ex.version == v"0.3.0"
@test ex.is_tracking_registry
@test ex.is_pinned
end
Pkg.add(name="Example", version="0.5.0")
# We check that the package state is left unchanged.
Pkg.dependencies(exuuid) do ex
@test ex.version == v"0.3.0"
@test ex.is_tracking_registry
@test ex.is_pinned
end
end
# Add by version should override add by repo.
isolate(loaded_depot=true) do
Pkg.add(name="Example", rev="master")
# First we check that we are not tracking a registered version.
Pkg.dependencies(exuuid) do ex
@test ex.git_revision == "master"
@test !ex.is_tracking_registry
end
Pkg.add(name="Example", version="0.3.0")
# We should now be tracking a registered version.
Pkg.dependencies(exuuid) do ex
@test ex.version == v"0.3.0"
@test ex.git_revision === nothing
@test ex.is_tracking_registry
end
end
# Add by version should override add by repo, even for indirect dependencies.
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "DependsOnExample"))
Pkg.add(path=path)
Pkg.add(name="Example", rev="master")
@test !Pkg.dependencies()[exuuid].is_tracking_registry
# Now we remove the package as a direct dependency.
        # The package should still exist as an indirect dependency because `DependsOnExample` depends on it.
Pkg.rm("Example")
Pkg.add(name="Example", version="0.3.0")
# Now we check that we are tracking a registered version.
Pkg.dependencies(exuuid) do ex
@test ex.version == v"0.3.0"
@test ex.is_tracking_registry
end
end end
# Add by URL should not override pin.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
Pkg.pin(name="Example")
Pkg.dependencies(exuuid) do ex
@test ex.is_pinned
@test ex.is_tracking_registry
@test ex.version == v"0.3.0"
end
Pkg.add(url="https://github.com/JuliaLang/Example.jl")
Pkg.dependencies(exuuid) do ex
@test ex.is_pinned
@test ex.is_tracking_registry
@test ex.version == v"0.3.0"
end
end
# It should be possible to switch branches by reusing the URL.
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl", rev="0.2.0")
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.git_source == "https://github.com/00vareladavid/Unregistered.jl"
@test !pkg.is_tracking_registry
@test pkg.git_revision == "0.2.0"
# We check that we have the correct branch by checking its dependencies.
@test haskey(pkg.dependencies, "Example")
end
        # Now we refer to it by name to check that we reuse the URL.
Pkg.add(name="Unregistered", rev="0.1.0")
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.git_source == "https://github.com/00vareladavid/Unregistered.jl"
@test !pkg.is_tracking_registry
@test pkg.git_revision == "0.1.0"
# We check that we have the correct branch by checking its dependencies.
@test !haskey(pkg.dependencies, "Example")
end
end
# add should resolve the correct versions even when the manifest is out of sync with the project compat
isolate(loaded_depot=true) do; mktempdir() do tempdir
Pkg.activate(copy_test_package(tempdir, "CompatOutOfSync"))
Pkg.add("Libdl")
Pkg.dependencies(exuuid) do pkg
@test pkg.version == v"0.3.0"
end
end end
# Preserve syntax
    # These tests mostly check that the REPL-side `preserve` options are handled correctly.
# - Normal add should not change the existing version.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add(name="JSON", version="0.18.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
@test Pkg.dependencies()[json_uuid].version == v"0.18.0"
end
# - `tiered` is the default option.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add(Pkg.PackageSpec(;name="JSON", version="0.18.0"); preserve=Pkg.PRESERVE_TIERED)
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
@test Pkg.dependencies()[json_uuid].version == v"0.18.0"
end
# - `all` should succeed in the same way.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add(Pkg.PackageSpec(;name="JSON", version="0.18.0"); preserve=Pkg.PRESERVE_ALL)
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
@test Pkg.dependencies()[json_uuid].version == v"0.18.0"
end
# - `direct` should also succeed in the same way.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add(Pkg.PackageSpec(;name="JSON", version="0.18.0"); preserve=Pkg.PRESERVE_DIRECT)
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
@test Pkg.dependencies()[json_uuid].version == v"0.18.0"
end
# - `semver` should update `Example` to the highest semver compatible version.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add(Pkg.PackageSpec(;name="JSON", version="0.18.0"); preserve=Pkg.PRESERVE_SEMVER)
@test Pkg.dependencies()[exuuid].version == v"0.3.3"
@test Pkg.dependencies()[json_uuid].version == v"0.18.0"
end
    # - `none` should update `Example` to the highest compatible version.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.add(Pkg.PackageSpec(;name="JSON", version="0.18.0"); preserve=Pkg.PRESERVE_NONE)
@test Pkg.dependencies()[exuuid].version == v"0.5.3"
@test Pkg.dependencies()[json_uuid].version == v"0.18.0"
end
end
#
# ## Repo Handling
#
@testset "add: repo handling" begin
# Dependencies added with an absolute path should be stored as absolute paths.
    # This test shows that packages added with an absolute path will not break
# if the project is moved to a new position.
    # We can use the loaded depot here; it will help us avoid the original clone.
isolate(loaded_depot=true) do; mktempdir() do tempdir
empty_package = UUID("26187899-7657-4a90-a2f6-e79e0214bedc")
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "EmptyPackage"))
path = abspath(path)
Pkg.add(path=path)
# Now we try to find the package.
rm(joinpath(DEPOT_PATH[1], "packages"); recursive=true)
@test !isdir(Pkg.dependencies()[empty_package].source)
Pkg.instantiate()
@test isdir(Pkg.dependencies()[empty_package].source)
# Now we move the project and should still be able to find the package.
mktempdir() do other_dir
cp(dirname(Base.active_project()), other_dir; force=true)
Pkg.activate(other_dir)
rm(joinpath(DEPOT_PATH[1], "packages"); recursive=true)
@test !isdir(Pkg.dependencies()[empty_package].source)
Pkg.instantiate()
end
end end
# Dependencies added with relative paths should be stored relative to the active project.
# This test shows that packages added with a relative path will not break
# as long as they maintain the same relative position to the project.
    # We can use the loaded depot here; it will help us avoid the original clone.
isolate(loaded_depot=true) do; mktempdir() do tempdir
empty_package = UUID("26187899-7657-4a90-a2f6-e79e0214bedc")
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "EmptyPackage"))
# We add the package using a relative path.
cd(path) do
Pkg.add(path=".")
manifest = Pkg.Types.read_manifest(joinpath(dirname(Base.active_project()), "Manifest.toml"))
# Test that the relative path is canonicalized.
repo = string("../../../", basename(tempdir), "/EmptyPackage")
@test manifest[empty_package].repo.source == repo
end
# Now we try to find the package.
rm(joinpath(DEPOT_PATH[1], "packages"); recursive=true)
rm(joinpath(DEPOT_PATH[1], "clones"); recursive=true)
Pkg.instantiate()
# Test that Operations.is_instantiated works with relative path
@test Pkg.Operations.is_instantiated(Pkg.Types.EnvCache())
# Now we destroy the relative position and should not be able to find the package.
rm(joinpath(DEPOT_PATH[1], "packages"); recursive=true)
# Test that Operations.is_instantiated works with relative path
@test !Pkg.Operations.is_instantiated(Pkg.Types.EnvCache())
mktempdir() do other_dir
cp(dirname(Base.active_project()), other_dir; force=true)
Pkg.activate(other_dir)
@test_throws PkgError Pkg.instantiate() # TODO is there a way to pattern match on just part of the err message?
end
end end
# Now we test packages added by URL.
isolate(loaded_depot=true) do
# Details: `master` is past `0.1.0`
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl", rev="0.1.0")
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.name == "Unregistered"
@test isdir(pkg.source)
end
@test haskey(Pkg.project().dependencies, "Unregistered")
# Now we remove the source so that we have to load it again.
# We should reuse the existing clone in this case.
rm(joinpath(DEPOT_PATH[1], "packages"); recursive=true)
Pkg.instantiate()
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.name == "Unregistered"
@test isdir(pkg.source)
end
@test haskey(Pkg.project().dependencies, "Unregistered")
# Now we remove the source _and_ our cache, we have no choice to re-clone the remote.
# We should still be able to find the source.
rm(joinpath(DEPOT_PATH[1], "packages"); recursive=true)
rm(joinpath(DEPOT_PATH[1], "clones"); recursive=true)
Pkg.instantiate()
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.name == "Unregistered"
@test isdir(pkg.source)
end
@test haskey(Pkg.project().dependencies, "Unregistered")
end
end
#
# ## Resolve tiers
#
@testset "add: resolve tiers" begin
isolate(loaded_depot=true) do; mktempdir() do tmp
# All
copy_test_package(tmp, "ShouldPreserveAll"; use_pkg=false)
Pkg.activate(joinpath(tmp, "ShouldPreserveAll"))
parsers_uuid = UUID("69de0a69-1ddd-5017-9359-2bf0b02dc9f0")
original_parsers_version = Pkg.dependencies()[parsers_uuid].version
Pkg.add(name="Example", version="0.5.0")
@test Pkg.dependencies()[parsers_uuid].version == original_parsers_version
# Direct
copy_test_package(tmp, "ShouldPreserveDirect"; use_pkg=false)
Pkg.activate(joinpath(tmp, "ShouldPreserveDirect"))
ordered_collections = UUID("bac558e1-5e72-5ebc-8fee-abe8a469f55d")
Pkg.add(uuid=ordered_collections, version="1.0.1")
lazy_json = UUID("fc18253b-5e1b-504c-a4a2-9ece4944c004")
data_structures = UUID("864edb3b-99cc-5e75-8d2d-829cb0a9cfe8")
@test Pkg.dependencies()[lazy_json].version == v"0.1.0" # stayed the same
@test Pkg.dependencies()[data_structures].version == v"0.16.1" # forced to change
@test Pkg.dependencies()[ordered_collections].version == v"1.0.1" # sanity check
# SEMVER
copy_test_package(tmp, "ShouldPreserveSemver"; use_pkg=false)
Pkg.activate(joinpath(tmp, "ShouldPreserveSemver"))
light_graphs = UUID("093fc24a-ae57-5d10-9952-331d41423f4d")
meta_graphs = UUID("626554b9-1ddb-594c-aa3c-2596fe9399a5")
light_graphs_version = Pkg.dependencies()[light_graphs].version
Pkg.add(uuid=meta_graphs, version="0.6.4")
@test Pkg.dependencies()[meta_graphs].version == v"0.6.4" # sanity check
# did not break semver
@test Pkg.dependencies()[light_graphs].version in Pkg.Types.semver_spec("$(light_graphs_version)")
# did change version
@test Pkg.dependencies()[light_graphs].version != light_graphs_version
# NONE
copy_test_package(tmp, "ShouldPreserveNone"; use_pkg=false)
Pkg.activate(joinpath(tmp, "ShouldPreserveNone"))
array_interface = UUID("4fba245c-0d91-5ea0-9b3e-6abc04ee57a9")
diff_eq_diff_tools = UUID("01453d9d-ee7c-5054-8395-0335cb756afa")
Pkg.add(uuid=diff_eq_diff_tools, version="1.0.0")
@test Pkg.dependencies()[diff_eq_diff_tools].version == v"1.0.0" # sanity check
@test Pkg.dependencies()[array_interface].version in Pkg.Types.semver_spec("1") # had to make breaking change
end end
end
#
# ## REPL
#
@testset "add: REPL" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
# Add using UUID syntax
api, args, opts = first(Pkg.pkg"add 7876af07-990d-54b4-ab0e-23690620f79a")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;uuid=UUID("7876af07-990d-54b4-ab0e-23690620f79a"))]
@test isempty(opts)
# Add using `name=UUID` syntax.
api, args, opts = first(Pkg.pkg"add Example=7876af07-990d-54b4-ab0e-23690620f79a")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example", uuid=UUID("7876af07-990d-54b4-ab0e-23690620f79a"))]
@test isempty(opts)
# Add using git revision syntax.
api, args, opts = first(Pkg.pkg"add Example#master")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example", rev="master")]
@test isempty(opts)
# Add using git revision syntax.
        api, args, opts = first(Pkg.pkg"add Example#v0.5.3")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example", rev="v0.5.3")]
@test isempty(opts)
# Add using registered version syntax.
api, args, opts = first(Pkg.pkg"add Example@0.5.0")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example", version="0.5.0")]
@test isempty(opts)
# Add using direct URL syntax.
api, args, opts = first(Pkg.pkg"add https://github.com/00vareladavid/Unregistered.jl#0.1.0")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;url="https://github.com/00vareladavid/Unregistered.jl", rev="0.1.0")]
@test isempty(opts)
# Add using preserve option
api, args, opts = first(Pkg.pkg"add --preserve=none Example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:preserve => Pkg.PRESERVE_NONE)
api, args, opts = first(Pkg.pkg"add --preserve=semver Example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:preserve => Pkg.PRESERVE_SEMVER)
api, args, opts = first(Pkg.pkg"add --preserve=tiered Example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:preserve => Pkg.PRESERVE_TIERED)
api, args, opts = first(Pkg.pkg"add --preserve=all Example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:preserve => Pkg.PRESERVE_ALL)
api, args, opts = first(Pkg.pkg"add --preserve=direct Example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:preserve => Pkg.PRESERVE_DIRECT)
end
    # check case-sensitive resolution of paths
isolate() do; cd_tempdir() do dir
Pkg.REPLMode.TEST_MODE[] = true
mkdir("example")
api, args, opts = first(Pkg.pkg"add Example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="Example")]
@test isempty(opts)
api, args, opts = first(Pkg.pkg"add example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;name="example")]
@test isempty(opts)
@test_throws PkgError Pkg.pkg"add ./Example"
api, args, opts = first(Pkg.pkg"add ./example")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;path="example")]
@test isempty(opts)
cd("example")
api, args, opts = first(Pkg.pkg"add .")
@test api == Pkg.add
@test args == [Pkg.PackageSpec(;path=".")]
@test isempty(opts)
end end
isolate() do; cd_tempdir() do dir
# adding a nonexistent directory
@test_throws PkgError("`some/really/random/Dir` appears to be a local path, but directory does not exist"
) Pkg.pkg"add some/really/random/Dir"
# warn if not explicit about adding directory
mkdir("Example")
@test_logs (:info, r"Use `./Example` to add or develop the local directory at `.*`.") match_mode=:any Pkg.pkg"add Example"
end end
end
#
# # Develop
#
#
# ## Input Checking
#
@testset "develop: input checking" begin
isolate(loaded_depot=true) do
# Julia is not a valid package name.
@test_throws PkgError("`julia` is not a valid package name") Pkg.develop(name="julia")
# Package names must be valid Julia identifiers.
@test_throws PkgError("`***` is not a valid package name") Pkg.develop(name="***")
@test_throws PkgError("`Foo Bar` is not a valid package name") Pkg.develop(name="Foo Bar")
# Names which are invalid and are probably URLs or paths.
@test_throws PkgError("""
`https://github.com` is not a valid package name
The argument appears to be a URL or path, perhaps you meant `Pkg.develop(url="...")` or `Pkg.develop(path="...")`.""") Pkg.develop("https://github.com")
@test_throws PkgError("""
`./Foobar` is not a valid package name
The argument appears to be a URL or path, perhaps you meant `Pkg.develop(url="...")` or `Pkg.develop(path="...")`.""") Pkg.develop("./Foobar")
# An empty spec is invalid.
@test_throws PkgError(
"name, UUID, URL, or filesystem path specification required when calling `develop`"
) Pkg.develop(Pkg.PackageSpec())
# git revisions imply that `develop` tracks a git repo.
@test_throws PkgError(
"rev argument not supported by `develop`; consider using `add` instead"
) Pkg.develop(name="Example", rev="master")
# Adding an unregistered package by name.
@test_throws PkgError Pkg.develop("ThisIsHopefullyRandom012856014925701382")
# Wrong UUID
@test_throws PkgError Pkg.develop(Pkg.PackageSpec("Example", UUID(UInt128(1))))
# Missing UUID
@test_throws PkgError Pkg.develop(Pkg.PackageSpec(uuid = uuid4()))
# Two packages with the same name
@test_throws PkgError(
"it is invalid to specify multiple packages with the same UUID: `Example [7876af07]`"
) Pkg.develop([(;name="Example"), (;uuid=exuuid)])
end
end
#
# ## Changes to the project
#
@testset "develop: changes to the active project" begin
# It is possible to `develop` by specifying a registered name.
isolate(loaded_depot=true) do
Pkg.develop("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(Pkg.devdir(), "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
end
# Develop with shared=false
isolate(loaded_depot=true) do
Pkg.develop("Example"; shared=false)
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(dirname(Pkg.project().path), "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
end
# It is possible to develop by specifying a registered UUID.
isolate(loaded_depot=true) do
Pkg.develop(uuid=exuuid)
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(DEPOT_PATH[1], "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
end
# It is possible to develop by specifying a URL.
isolate(loaded_depot=true) do
Pkg.develop(url="https://github.com/JuliaLang/Example.jl")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(DEPOT_PATH[1], "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
end
# It is possible to develop by directly specifying a path.
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "SimplePackage")
path = joinpath(tempdir, "SimplePackage")
Pkg.develop(path=path)
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test realpath(pkg.source) == realpath(path)
@test !pkg.is_tracking_registry
@test haskey(pkg.dependencies, "Example")
@test haskey(pkg.dependencies, "Markdown")
end
@test haskey(Pkg.project().dependencies, "SimplePackage")
end end
# recursive `dev`
isolate(loaded_depot=true) do
Pkg.develop(path=joinpath(@__DIR__, "test_packages", "A"))
Pkg.dependencies(UUID("0829fd7c-1e7e-4927-9afa-b8c61d5e0e42")) do pkg # dep A
@test haskey(pkg.dependencies, "B")
@test haskey(pkg.dependencies, "C")
@test Base.samefile(pkg.source, joinpath(@__DIR__, "test_packages", "A"))
end
Pkg.dependencies(UUID("4ee78ca3-4e78-462f-a078-747ed543fa85")) do pkg # dep C
@test haskey(pkg.dependencies, "D")
@test Base.samefile(pkg.source, joinpath(@__DIR__, "test_packages", "A", "dev", "C"))
end
Pkg.dependencies(UUID("dd0d8fba-d7c4-4f8e-a2bb-3a090b3e34f1")) do pkg # dep B
@test Base.samefile(pkg.source, joinpath(@__DIR__, "test_packages", "A", "dev", "B"))
end
Pkg.dependencies(UUID("bf733257-898a-45a0-b2f2-c1c188bdd879")) do pkg # dep D
@test Base.samefile(pkg.source, joinpath(@__DIR__, "test_packages", "A", "dev", "D"))
end
end
# primary depot is a relative path
isolate() do; cd_tempdir() do dir
empty!(DEPOT_PATH)
push!(DEPOT_PATH, "temp")
Pkg.develop("JSON")
Pkg.dependencies(json_uuid) do pkg
@test Base.samefile(pkg.source, abspath(joinpath("temp", "dev", "JSON")))
end
end end
end
@testset "develop: interaction with `JULIA_PKG_DEVDIR`" begin
# A shared `develop` should obey `JULIA_PKG_DEVDIR`.
isolate(loaded_depot=true) do; mktempdir() do tempdir
withenv("JULIA_PKG_DEVDIR" => tempdir) do
Pkg.develop("Example")
end
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(tempdir, "Example"))
end
@test haskey(Pkg.project().dependencies, "Example")
end end
# A local `develop` should not be affected by `JULIA_PKG_DEVDIR`
isolate(loaded_depot=true) do; mktempdir() do tempdir
withenv("JULIA_PKG_DEVDIR" => tempdir) do
Pkg.develop("Example"; shared=false)
end
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(dirname(Pkg.project().path), "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
end end
end
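# Sketch (no packages touched; the temp directory is just scratch space): a shared
# `develop` consults `Pkg.devdir()`, which reads `JULIA_PKG_DEVDIR` directly and
# falls back to `<depot>/dev`.
@testset "develop: devdir sketch" begin
    mktempdir() do tempdir
        withenv("JULIA_PKG_DEVDIR" => tempdir) do
            @test Pkg.devdir() == tempdir
        end
    end
end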
@testset "develop: path handling" begin
# Relative paths
isolate(loaded_depot=true) do
project_path = dirname(Pkg.project().path)
mkpath(project_path)
copy_test_package(project_path, "SimplePackage")
package_path = joinpath(project_path, "SimplePackage")
# Now we `develop` using a relative path.
cd(project_path) do
Pkg.develop(Pkg.PackageSpec(path="SimplePackage"))
end
# Check that everything went ok.
original_source = nothing
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test isdir(pkg.source)
@test Base.samefile(pkg.source, package_path)
original_source = pkg.source
end
# Now we move the project, but preserve the relative structure.
mktempdir() do tempdir
cp(project_path, tempdir; force=true)
Pkg.activate(tempdir)
# We check that we can still find the source.
Pkg.dependencies(simple_package_uuid) do pkg
@test isdir(pkg.source)
@test Base.samefile(pkg.source, realpath(joinpath(tempdir, "SimplePackage")))
end
end
end
# Absolute paths
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "SimplePackage")
package_path = joinpath(tempdir, "SimplePackage")
Pkg.activate(tempdir)
Pkg.develop(path=package_path)
original_source = nothing
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test isdir(pkg.source)
@test realpath(pkg.source) == realpath(package_path)
original_source = pkg.source
end
mktempdir() do tempdir2
cp(joinpath(tempdir, "Project.toml"), joinpath(tempdir2, "Project.toml"))
cp(joinpath(tempdir, "Manifest.toml"), joinpath(tempdir2, "Manifest.toml"))
Pkg.activate(tempdir2)
Pkg.dependencies(simple_package_uuid) do pkg
@test isdir(pkg.source)
@test Base.samefile(pkg.source, original_source)
end
end
end end
# ### Special casing on path handling
# "." style path
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "SimplePackage")
cd(path) do
Pkg.pkg"develop ."
end
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test isdir(pkg.source)
@test pkg.is_tracking_path
end
end end
# ".." style path
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "SimplePackage")
cd(joinpath(path, "src")) do
Pkg.pkg"develop .."
end
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test isdir(pkg.source)
@test pkg.is_tracking_path
end
end end
    # A bare local directory name must be prefixed with "./".
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "SimplePackage")
cd(dirname(path)) do
Pkg.pkg"develop ./SimplePackage"
end
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test isdir(pkg.source)
@test pkg.is_tracking_path
end
end end
end
@testset "develop: package state changes" begin
# Developing an existing package which is tracking the registry should just override.
isolate(loaded_depot=true) do
Pkg.add("Example")
Pkg.develop("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(DEPOT_PATH[1], "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
@test length(Pkg.project().dependencies) == 1
end
# Developing an existing package which is tracking a repo should just override.
isolate(loaded_depot=true) do
Pkg.add(name="Example", rev="master")
Pkg.develop("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(DEPOT_PATH[1], "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
@test length(Pkg.project().dependencies) == 1
end
# Develop with different target path should override old path with target path.
isolate(loaded_depot=true) do
Pkg.develop("Example")
Pkg.develop("Example"; shared=false)
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test Base.samefile(pkg.source, joinpath(dirname(Pkg.project().path), "dev", "Example"))
@test !pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Example")
@test length(Pkg.project().dependencies) == 1
end
# develop tries to resolve from the manifest
isolate(loaded_depot=true) do
remote_url = "https://github.com/00vareladavid/Unregistered.jl"
Pkg.add(Pkg.PackageSpec(url=remote_url))
Pkg.develop("Unregistered")
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.name == "Unregistered"
end
end
end
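# Sketch (same `isolate`/`exuuid` helpers as the tests above): `develop` flips the
# package to path tracking, and `free` hands it back to the registry.
@testset "develop: tracking flag sketch" begin
    isolate(loaded_depot=true) do
        Pkg.develop("Example")
        Pkg.dependencies(exuuid) do pkg
            @test pkg.is_tracking_path
            @test !pkg.is_tracking_registry
        end
        Pkg.free("Example")
        Pkg.dependencies(exuuid) do pkg
            @test pkg.is_tracking_registry
        end
    end
end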
#
# ## REPL
#
@testset "develop: REPL" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
# registered name
api, args, opts = first(Pkg.pkg"develop Example")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;name="Example")]
@test isempty(opts)
# registered uuid
api, args, opts = first(Pkg.pkg"develop 7876af07-990d-54b4-ab0e-23690620f79a")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;uuid=UUID("7876af07-990d-54b4-ab0e-23690620f79a"))]
@test isempty(opts)
# name=uuid
api, args, opts = first(Pkg.pkg"develop Example=7876af07-990d-54b4-ab0e-23690620f79a")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;name="Example", uuid=UUID("7876af07-990d-54b4-ab0e-23690620f79a"))]
@test isempty(opts)
# local flag
api, args, opts = first(Pkg.pkg"develop --local Example")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:shared => false)
# shared flag
api, args, opts = first(Pkg.pkg"develop --shared Example")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:shared => true)
# URL
api, args, opts = first(Pkg.pkg"develop https://github.com/JuliaLang/Example.jl")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;url="https://github.com/JuliaLang/Example.jl")]
@test isempty(opts)
# develop using preserve option
api, args, opts = first(Pkg.pkg"dev --preserve=none Example")
@test api == Pkg.develop
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:preserve => Pkg.PRESERVE_NONE)
end
end
#
# # Instantiate
#
@testset "instantiate: input checking" begin
# Unregistered UUID in manifest
isolate(loaded_depot=true) do; mktempdir() do tempdir
package_path = copy_test_package(tempdir, "UnregisteredUUID")
Pkg.activate(package_path)
@test_throws PkgError("expected package `Example [142fd7e7]` to be registered") Pkg.update()
end end
end
@testset "instantiate: changes to the active project" begin
# Instantiate should preserve tree hash for regularly versioned packages.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
th = nothing
Pkg.dependencies(exuuid) do pkg
th = pkg.tree_hash
@test pkg.name == "Example"
@test pkg.version == v"0.3.0"
@test isdir(pkg.source)
end
rm(joinpath(DEPOT_PATH[1], "packages"); force=true, recursive=true)
rm(joinpath(DEPOT_PATH[1], "clones"); force=true, recursive=true)
Pkg.instantiate()
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.version == v"0.3.0"
@test isdir(pkg.source)
@test pkg.tree_hash == th
end
end
# `instantiate` should preserve tree hash for packages tracking repos.
isolate(loaded_depot=true) do
Pkg.add(name="Example", rev="v0.5.3")
th = nothing
Pkg.dependencies(exuuid) do pkg
th = pkg.tree_hash
@test pkg.name == "Example"
@test isdir(pkg.source)
end
rm(joinpath(DEPOT_PATH[1], "packages"); force=true, recursive=true)
rm(joinpath(DEPOT_PATH[1], "clones"); force=true, recursive=true)
Pkg.instantiate()
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test isdir(pkg.source)
end
end
# `instantiate` should check for a consistent dependency graph.
# Otherwise it is not clear what to instantiate.
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "ExtraDirectDep")
Pkg.activate(joinpath(tempdir, "ExtraDirectDep"))
@test_throws PkgError Pkg.instantiate()
end end
# However, if `manifest=false`, we know to instantiate from the direct dependencies.
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "ExtraDirectDep")
Pkg.activate(joinpath(tempdir, "ExtraDirectDep"))
Pkg.instantiate(;manifest=false)
@test haskey(Pkg.project().dependencies, "Example")
@test haskey(Pkg.project().dependencies, "Unicode")
end end
# `instantiate` lonely manifest
isolate(loaded_depot=true) do
manifest_dir = joinpath(@__DIR__, "manifest", "noproject")
cd(manifest_dir) do
try
Pkg.activate(".")
Pkg.instantiate()
@test Base.active_project() == abspath("Project.toml")
@test isinstalled("Example")
@test isinstalled("x1")
finally
rm("Project.toml"; force=true)
end
end
end
# `instantiate` on a lonely manifest should detect duplicate names
isolate(loaded_depot=true) do; mktempdir() do tempdir
simple_package_path = copy_test_package(tempdir, "SimplePackage")
unregistered_example_path = copy_test_package(tempdir, "Example")
Pkg.develop(path=simple_package_path)
Pkg.develop(path=unregistered_example_path)
rm(Pkg.project().path)
@test_throws PkgError Pkg.instantiate()
end end
# verbose smoke test
isolate(loaded_depot=true) do
Pkg.instantiate(;verbose=true)
end
end
@testset "instantiate: caching" begin
# Instantiate should not override existing source.
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
th, t1 = nothing, nothing
Pkg.dependencies(exuuid) do pkg
th = pkg.tree_hash
@test pkg.name == "Example"
@test pkg.version == v"0.3.0"
@test isdir(pkg.source)
t1 = mtime(pkg.source)
end
Pkg.instantiate()
Pkg.dependencies(exuuid) do pkg
@test pkg.tree_hash == th
@test pkg.name == "Example"
@test pkg.version == v"0.3.0"
@test isdir(pkg.source)
@test mtime(pkg.source) == t1
end
end
# TODO check registry updates
end
#
# ## REPL
#
@testset "instantiate: REPL" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, opts = first(Pkg.pkg"instantiate --verbose")
@test api == Pkg.instantiate
@test opts == Dict(:verbose => true)
api, opts = first(Pkg.pkg"instantiate -v")
@test api == Pkg.instantiate
@test opts == Dict(:verbose => true)
end
end
#
# # Update
#
@testset "update: input checking" begin
# Unregistered UUID in manifest
isolate(loaded_depot=true) do; mktempdir() do tempdir
package_path = copy_test_package(tempdir, "UnregisteredUUID")
Pkg.activate(package_path)
@test_throws PkgError("expected package `Example [142fd7e7]` to be registered") Pkg.update()
end end
# package does not exist in the manifest
isolate(loaded_depot=true) do
@test_throws PkgError Pkg.update("Example")
end
end
@testset "update: changes to the active project" begin
# Basic testing of UPLEVEL
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.update(; level = Pkg.UPLEVEL_FIXED)
@test Pkg.dependencies()[exuuid].version == v"0.3.0"
Pkg.update(; level = Pkg.UPLEVEL_PATCH)
@test Pkg.dependencies()[exuuid].version == v"0.3.3"
Pkg.update(; level = Pkg.UPLEVEL_MINOR)
@test Pkg.dependencies()[exuuid].version.minor != 3
end
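    # Sketch: the upgrade levels form an ordered enum, from "hold everything fixed"
    # up to "allow any release"; `update` bounds version movement by the chosen level.
    @test Pkg.UPLEVEL_FIXED < Pkg.UPLEVEL_PATCH < Pkg.UPLEVEL_MINOR < Pkg.UPLEVEL_MAJOR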
# `update` should prune manifest
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "Unpruned")
Pkg.activate(joinpath(tempdir, "Unpruned"))
Pkg.update()
@test haskey(Pkg.project().dependencies, "Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.version > v"0.4.0"
end
@test !haskey(Pkg.dependencies(), unicode_uuid)
end end
# `up` should work without a manifest
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "SimplePackage")
Pkg.activate(joinpath(tempdir, "SimplePackage"))
Pkg.update()
@test haskey(Pkg.project().dependencies, "Example")
@test haskey(Pkg.project().dependencies, "Markdown")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_tracking_registry
end
end end
end
@testset "update: package state changes" begin
# basic update on old registered package
isolate(loaded_depot=true) do
Pkg.add(name="Example", version="0.3.0")
Pkg.update()
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.version > v"0.3.0"
end
end
# `update` should not update `pin`ed packages
isolate(loaded_depot=true) do
Pkg.add(name="Example",version="0.3.0")
Pkg.pin("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_pinned
@test pkg.version == v"0.3.0"
end
Pkg.update()
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_pinned
@test pkg.version == v"0.3.0"
end
end
# stdlib special casing
isolate(loaded_depot=true) do
Pkg.add("Markdown")
Pkg.update()
Pkg.dependencies(markdown_uuid) do pkg
@test pkg.name == "Markdown"
end
end
# up should not affect `dev` packages
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "SimplePackage")
Pkg.develop(path=path)
state = Pkg.dependencies()[simple_package_uuid]
Pkg.update()
@test Pkg.dependencies()[simple_package_uuid] == state
end end
# up and packages tracking repos
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "SimplePackage"))
Pkg.add(path=path)
# test everything went ok
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test pkg.version == v"0.2.0"
@test haskey(pkg.dependencies, "Example")
@test haskey(pkg.dependencies, "Markdown")
@test !haskey(pkg.dependencies, "Unicode")
end
simple_package_node = Pkg.dependencies()[simple_package_uuid]
        # now we bump the remote version
mv(joinpath(path, "Project2.toml"), joinpath(path, "Project.toml"); force=true)
new_commit = nothing
LibGit2.with(LibGit2.GitRepo(path)) do repo
LibGit2.add!(repo, "*")
new_commit = string(LibGit2.commit(repo, "bump version"; author=TEST_SIG, committer=TEST_SIG))
end
# update with UPLEVEL != UPLEVEL_MAJOR should not update packages tracking repos
Pkg.update(; level=Pkg.UPLEVEL_MINOR)
@test simple_package_node == Pkg.dependencies()[simple_package_uuid]
Pkg.update(; level=Pkg.UPLEVEL_PATCH)
@test simple_package_node == Pkg.dependencies()[simple_package_uuid]
Pkg.update(; level=Pkg.UPLEVEL_FIXED)
@test simple_package_node == Pkg.dependencies()[simple_package_uuid]
# Update should not modify pinned packages which are tracking repos
Pkg.pin("SimplePackage")
Pkg.update()
Pkg.free("SimplePackage")
@test simple_package_node == Pkg.dependencies()[simple_package_uuid]
# update should update packages tracking repos if UPLEVEL_MAJOR
Pkg.update()
if !Sys.iswindows() # this test is very flaky on Windows, why?
Pkg.dependencies(simple_package_uuid) do pkg
@test pkg.name == "SimplePackage"
@test pkg.version == v"0.3.0"
@test !haskey(pkg.dependencies, "Example")
@test haskey(pkg.dependencies, "Markdown")
@test haskey(pkg.dependencies, "Unicode")
end
end
end end
# make sure that we preserve the state of packages which are not the target
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl")
Pkg.develop("Example")
Pkg.add(name="JSON", version="0.18.0")
Pkg.add("Markdown")
Pkg.add("Unicode")
Pkg.update("Unicode")
Pkg.dependencies(unregistered_uuid) do pkg
@test pkg.name == "Unregistered"
@test pkg.git_revision == "master"
end
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_tracking_path
end
Pkg.dependencies(json_uuid) do pkg
@test pkg.name == "JSON"
@test pkg.version == v"0.18.0"
@test pkg.is_tracking_registry
end
@test haskey(Pkg.project().dependencies, "Markdown")
@test haskey(Pkg.project().dependencies, "Unicode")
end
# `--fixed` should prevent the target package from being updated, but update other dependencies
isolate(loaded_depot=true) do
Pkg.add( name="Example", version="0.3.0")
Pkg.add( name="JSON", version="0.18.0")
Pkg.update("JSON"; level=Pkg.UPLEVEL_FIXED)
Pkg.dependencies(json_uuid) do pkg
@test pkg.version == v"0.18.0"
end
Pkg.dependencies(exuuid) do pkg
@test pkg.version > v"0.3.0"
end
end
end
@testset "update: REPL" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, opts = first(Pkg.pkg"up")
@test api == Pkg.update
@test isempty(opts)
end
end
@testset "update: caching" begin
# `up` should detect broken local packages
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "SimplePackage"))
Pkg.add(path=path)
rm(joinpath(path, ".git"); force=true, recursive=true)
@test_throws PkgError Pkg.update()
end end
end
#
# # Pin
#
@testset "pin: input checking" begin
# a package must exist in the dep graph in order to be pinned
isolate(loaded_depot=true) do
@test_throws PkgError Pkg.pin("Example")
end
    # pinning to an arbitrary version should check for unregistered packages
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl")
@test_throws PkgError("unable to pin unregistered package `Unregistered [dcb67f36]` to an arbritrary version"
) Pkg.pin(name="Unregistered", version="0.1.0")
end
    # pinning to an arbitrary version should check that the version exists
isolate(loaded_depot=true) do
Pkg.add(name="Example",rev="master")
@test_throws ResolverError Pkg.pin(name="Example",version="100.0.0")
end
end
@testset "pin: package state changes" begin
# regular registered package
isolate(loaded_depot=true) do
Pkg.add( name="Example", version="0.3.3")
Pkg.pin("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_pinned
end
end
    # package tracking repo
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl")
Pkg.pin("Unregistered")
Pkg.dependencies(unregistered_uuid) do pkg
@test !pkg.is_tracking_registry
@test pkg.is_pinned
end
end
# versioned pin
isolate(loaded_depot=true) do
Pkg.add( name="Example", version="0.3.3")
Pkg.pin( name="Example", version="0.5.1")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_pinned
end
end
# pin should check for a valid version number
isolate(loaded_depot=true) do
Pkg.add(name="Example", rev="master")
@test_throws ResolverError Pkg.pin(name="Example",version="100.0.0") # TODO maybe make a PkgError
end
end
#
# # Free
#
@testset "free: input checking" begin
    # free checks for an existing package
isolate(loaded_depot=true) do
@test_throws PkgError Pkg.free("Example")
end
# free checks for unpinned package
isolate(loaded_depot=true) do
Pkg.add("Unicode")
@test_throws PkgError(string("expected package `Unicode [4ec0a83e]` to be",
" pinned, tracking a path, or tracking a repository"
)) Pkg.free("Unicode")
end
end
@testset "free: package state changes" begin
# free pinned package
isolate(loaded_depot=true) do
Pkg.add("Example")
Pkg.pin("Example")
Pkg.free("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test !pkg.is_pinned
end
end
# free package tracking repo
isolate(loaded_depot=true) do
Pkg.add( name="Example", rev="master")
Pkg.free("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_tracking_registry
end
end
    # free a developed package
isolate(loaded_depot=true) do
Pkg.develop("Example")
Pkg.free("Example")
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test pkg.is_tracking_registry
end
end
# free should error when called on packages tracking unregistered packages
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl")
@test_throws PkgError("unable to free unregistered package `Unregistered [dcb67f36]`") Pkg.free("Unregistered")
end
isolate(loaded_depot=true) do
Pkg.develop(url="https://github.com/00vareladavid/Unregistered.jl")
@test_throws PkgError("unable to free unregistered package `Unregistered [dcb67f36]`") Pkg.free("Unregistered")
end
end
#
# ## REPL commands
#
@testset "free: REPL" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, args, opts = first(Pkg.pkg"free Example")
@test api == Pkg.free
@test args == [Pkg.PackageSpec(;name="Example")]
@test isempty(opts)
end
end
#
# # Resolve
#
@testset "resolve" begin
# resolve should ignore `extras`
isolate(loaded_depot=true) do; mktempdir() do tempdir
package_path = copy_test_package(tempdir, "TestTarget")
Pkg.activate(package_path)
Pkg.resolve()
@test !haskey(Pkg.dependencies(), markdown_uuid)
@test !haskey(Pkg.dependencies(), test_stdlib_uuid)
end end
end
#
# # Test
#
@testset "test" begin
# stdlib special casing
isolate(loaded_depot=true) do
Pkg.add("UUIDs")
Pkg.test("UUIDs")
end
# test args smoketest
isolate(loaded_depot=true) do; mktempdir() do tempdir
copy_test_package(tempdir, "TestArguments")
Pkg.activate(joinpath(tempdir, "TestArguments"))
# test the old code path (no test/Project.toml)
Pkg.test("TestArguments"; test_args=`a b`, julia_args=`--quiet --check-bounds=no`)
Pkg.test("TestArguments"; test_args=["a", "b"], julia_args=["--quiet", "--check-bounds=no"])
# test new code path
touch(joinpath(tempdir, "TestArguments", "test", "Project.toml"))
Pkg.test("TestArguments"; test_args=`a b`, julia_args=`--quiet --check-bounds=no`)
Pkg.test("TestArguments"; test_args=["a", "b"], julia_args=["--quiet", "--check-bounds=no"])
end end
end
#
# # rm
#
@testset "rm" begin
# simple rm
isolate(loaded_depot=true) do
Pkg.add("Example")
Pkg.rm("Example")
@test isempty(Pkg.project().dependencies)
@test isempty(Pkg.dependencies())
end
# remove should not alter other dependencies
isolate(loaded_depot=true) do
Pkg.add([(;name="Example"),
(;name="JSON", version="0.18.0"),])
json = Pkg.dependencies()[json_uuid]
Pkg.rm("Example")
@test Pkg.dependencies()[json_uuid] == json
@test haskey(Pkg.project().dependencies, "JSON")
end
# rm should remove unused compat entries
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "BasicCompat")
Pkg.activate(path)
# TODO interface for `compat`
@test haskey(Pkg.Types.Context().env.project.compat, "Example")
@test haskey(Pkg.Types.Context().env.project.compat, "julia")
Pkg.rm("Example")
@test !haskey(Pkg.Types.Context().env.project.compat, "Example")
@test haskey(Pkg.Types.Context().env.project.compat, "julia")
end end
# rm should not unnecessarily remove compat entries
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "CompatExtras")
Pkg.activate(path)
@test haskey(Pkg.Types.Context().env.project.compat, "Aqua")
@test haskey(Pkg.Types.Context().env.project.compat, "DataFrames")
Pkg.rm("DataFrames")
@test !haskey(Pkg.Types.Context().env.project.compat, "DataFrames")
@test haskey(Pkg.Types.Context().env.project.compat, "Aqua")
end end
    # rm removes unused recursive dependencies
isolate(loaded_depot=true) do; mktempdir() do tempdir
path = copy_test_package(tempdir, "SimplePackage")
Pkg.develop(path=path)
Pkg.add(name="JSON", version="0.18.0")
Pkg.rm("SimplePackage")
@test haskey(Pkg.dependencies(), markdown_uuid)
@test !haskey(Pkg.dependencies(), simple_package_uuid)
@test !haskey(Pkg.dependencies(), exuuid)
@test haskey(Pkg.dependencies(), json_uuid)
end end
# rm manifest mode
isolate(loaded_depot=true) do
Pkg.add("Example")
Pkg.add(name="JSON", version="0.18.0")
Pkg.rm("Random"; mode=Pkg.PKGMODE_MANIFEST)
@test haskey(Pkg.dependencies(), exuuid)
@test !haskey(Pkg.dependencies(), json_uuid)
end
    # rm of nonexistent packages warns but does not error
isolate(loaded_depot=true) do
Pkg.add("Example")
@test_logs (:warn, r"not in project, ignoring") Pkg.rm(name="FooBar", uuid=UUIDs.UUID(0))
@test_logs (:warn, r"not in manifest, ignoring") Pkg.rm(name="FooBar", uuid=UUIDs.UUID(0); mode=Pkg.PKGMODE_MANIFEST)
end
end
@testset "rm: REPL" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, args, opts = first(Pkg.pkg"rm Example")
@test api == Pkg.rm
@test args == [Pkg.PackageSpec(;name="Example")]
@test isempty(opts)
api, args, opts = first(Pkg.pkg"rm --project Example")
@test api == Pkg.rm
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:mode => Pkg.PKGMODE_PROJECT)
api, args, opts = first(Pkg.pkg"rm --manifest Example")
@test api == Pkg.rm
@test args == [Pkg.PackageSpec(;name="Example")]
@test opts == Dict(:mode => Pkg.PKGMODE_MANIFEST)
end
end
#
# # `all` operations
#
@testset "all" begin
# pin all, free all, rm all packages
isolate(loaded_depot=true) do
Pkg.add("Example")
Pkg.pin(all_pkgs = true)
Pkg.free(all_pkgs = true)
Pkg.dependencies(exuuid) do pkg
@test pkg.name == "Example"
@test !pkg.is_pinned
end
Pkg.add("Profile")
Pkg.pin("Example")
Pkg.free(all_pkgs = true) # test that this doesn't error because Profile is already free
Pkg.rm(all_pkgs = true)
@test !haskey(Pkg.dependencies(), exuuid)
# test that the noops don't error
Pkg.rm(all_pkgs = true)
Pkg.pin(all_pkgs = true)
Pkg.free(all_pkgs = true)
end
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, args, opts = first(Pkg.pkg"pin --all")
@test api == Pkg.pin
@test isempty(args)
@test opts == Dict(:all_pkgs => true)
api, args, opts = first(Pkg.pkg"free --all")
@test api == Pkg.free
@test isempty(args)
@test opts == Dict(:all_pkgs => true)
api, args, opts = first(Pkg.pkg"rm --all")
@test api == Pkg.rm
@test isempty(args)
@test opts == Dict(:all_pkgs => true)
end
end
#
# # build
#
@testset "build" begin
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, args, opts = first(Pkg.pkg"build")
@test api == Pkg.build
@test isempty(args)
@test isempty(opts)
api, args, opts = first(Pkg.pkg"build Example")
@test api == Pkg.build
@test args == [Pkg.PackageSpec(;name="Example")]
@test isempty(opts)
api, args, opts = first(Pkg.pkg"build --verbose")
@test api == Pkg.build
@test isempty(args)
@test opts == Dict(:verbose => true)
api, args, opts = first(Pkg.pkg"build -v Foo Bar")
@test api == Pkg.build
@test args == [Pkg.PackageSpec(;name="Foo"), Pkg.PackageSpec(;name="Bar")]
@test opts == Dict(:verbose => true)
end
# Test package that fails build
isolate(loaded_depot=true) do; mktempdir() do tempdir
package_path = copy_test_package(tempdir, "FailBuild")
Pkg.activate(package_path)
@test_throws PkgError Pkg.build()
end end
# Build log location
isolate(loaded_depot=true) do; mktempdir() do tmp
path = git_init_package(tmp, joinpath(@__DIR__, "test_packages", "FailBuild"))
# Log file in the directory when it is deved
Pkg.develop(path=path; io=devnull)
log_file_dev = joinpath(path, "deps", "build.log")
@test !isfile(log_file_dev)
@test_throws PkgError Pkg.build("FailBuild"; io=devnull)
@test isfile(log_file_dev)
@test occursin("oops", read(log_file_dev, String))
# Log file in scratchspace when added
@test_throws PkgError Pkg.add(path=path; io=devnull)
@test !isfile(joinpath(Base.find_package("FailBuild"), "..", "..", "deps", "build.log"))
log_file_add = joinpath(DEPOT_PATH[1], "scratchspaces",
"44cfe95a-1eb2-52ea-b672-e2afdf69b78f", "f99d57aad0e5eb2434491b47bac92bb88d463001", "build.log")
@test isfile(log_file_add)
@test occursin("oops", read(log_file_add, String))
end end
end
#
# # GC
#
@testset "gc" begin
# REPL
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, opts = first(Pkg.pkg"gc")
@test api == Pkg.gc
@test isempty(opts)
api, opts = first(Pkg.pkg"gc --all")
@test api == Pkg.gc
@test opts[:collect_delay] == Hour(0)
end
end
#
# # precompile
#
@testset "precompile" begin
# REPL
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, opts = first(Pkg.pkg"precompile")
@test api == Pkg.precompile
@test isempty(opts)
api, arg, opts = first(Pkg.pkg"precompile Foo")
@test api == Pkg.precompile
@test arg == "Foo"
@test isempty(opts)
api, arg1, arg2, opts = first(Pkg.pkg"precompile Foo Bar")
@test api == Pkg.precompile
@test arg1 == "Foo"
@test arg2 == "Bar"
@test isempty(opts)
end
end
#
# # generate
#
@testset "generate" begin
# REPL
isolate() do
Pkg.REPLMode.TEST_MODE[] = true
api, arg, opts = first(Pkg.pkg"generate Foo")
@test api == Pkg.API.generate
@test arg == "Foo"
@test isempty(opts)
mktempdir() do dir
api, arg, opts = first(Pkg.REPLMode.pkgstr("generate $(joinpath(dir, "Foo"))"))
@test arg == joinpath(dir, "Foo")
# issue #1435
if !Sys.iswindows()
withenv("HOME" => dir) do
api, arg, opts = first(Pkg.REPLMode.pkgstr("generate ~/Bar"))
@test arg == joinpath(dir, "Bar")
end
end
end
end
end
#
# # Status
#
@testset "Pkg.status" begin
# other
isolate(loaded_depot=true) do
@test_deprecated Pkg.status(Pkg.PKGMODE_MANIFEST)
@test_logs (:warn, r"diff option only available") match_mode=:any Pkg.status(diff=true)
end
# State changes
isolate(loaded_depot=true) do
io = IOBuffer()
# Basic Add
Pkg.add(Pkg.PackageSpec(; name="Example", version="0.3.0"); io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project\.toml`", output)
@test occursin(r"\[7876af07\] \+ Example v0\.3\.0", output)
@test occursin(r"Updating `.+Manifest\.toml`", output)
@test occursin(r"\[7876af07\] \+ Example v0\.3\.0", output)
# Double add should not claim "Updating"
Pkg.add(Pkg.PackageSpec(; name="Example", version="0.3.0"); io=io)
output = String(take!(io))
@test occursin(r"No Changes to `.+Project\.toml`", output)
@test occursin(r"No Changes to `.+Manifest\.toml`", output)
# From tracking registry to tracking repo
Pkg.add(Pkg.PackageSpec(; name="Example", rev="master"); io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v0\.3\.0 ⇒ v\d\.\d\.\d `https://github\.com/JuliaLang/Example\.jl\.git#master`", output)
@test occursin(r"Updating `.+Manifest\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v0\.3\.0 ⇒ v\d\.\d\.\d `https://github.com/JuliaLang/Example.jl.git#master`", output)
# From tracking repo to tracking path
Pkg.develop("Example"; io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d `https://github\.com/JuliaLang/Example\.jl\.git#master` ⇒ v\d\.\d\.\d `.+`", output)
@test occursin(r"Updating `.+Manifest\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d `https://github\.com/JuliaLang/Example\.jl\.git#master` ⇒ v\d\.\d\.\d `.+`", output)
# From tracking path to tracking repo
Pkg.add(Pkg.PackageSpec(; name="Example", rev="master"); io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d `.+` ⇒ v\d\.\d\.\d `https://github.com/JuliaLang/Example.jl.git#master`", output)
@test occursin(r"Updating `.+Manifest\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d `.+` ⇒ v\d\.\d\.\d `https://github.com/JuliaLang/Example.jl.git#master`", output)
# From tracking repo to tracking registered version
Pkg.free("Example"; io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d `https://github.com/JuliaLang/Example.jl.git#master` ⇒ v\d\.\d\.\d", output)
@test occursin(r"Updating `.+Manifest\.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d `https://github.com/JuliaLang/Example.jl.git#master` ⇒ v\d\.\d\.\d", output)
# Removing registered version
Pkg.rm("Example"; io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project.toml`", output)
@test occursin(r"\[7876af07\] - Example v\d\.\d\.\d", output)
@test occursin(r"Updating `.+Manifest.toml`", output)
@test occursin(r"\[7876af07\] - Example v\d\.\d\.\d", output)
# Pinning a registered package
Pkg.add("Example")
Pkg.pin("Example"; io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d ⇒ v\d\.\d\.\d ⚲", output)
@test occursin(r"Updating `.+Manifest.toml`", output)
# Free a pinned package
Pkg.free("Example"; io=io)
output = String(take!(io))
@test occursin(r"Updating `.+Project.toml`", output)
@test occursin(r"\[7876af07\] ~ Example v\d\.\d\.\d ⚲ ⇒ v\d\.\d\.\d", output)
@test occursin(r"Updating `.+Manifest.toml`", output)
end
# Project Status API
isolate(loaded_depot=true) do
Pkg.Registry.add(Pkg.RegistrySpec[], io=devnull) # load reg before io capturing
io = PipeBuffer()
## empty project
Pkg.status(;io=io)
@test occursin(r"Status `.+Project.toml` \(empty project\)", readline(io))
## loaded project
Pkg.add("Markdown")
Pkg.add( name="JSON", version="0.18.0")
Pkg.develop("Example")
Pkg.add(url="https://github.com/00vareladavid/Unregistered.jl")
Pkg.status(; io = io)
@test occursin(r"Status `.+Project\.toml`", readline(io))
@test occursin(r"\[7876af07\] Example\s*v\d\.\d\.\d\s*`.+`", readline(io))
@test occursin(r"\[682c06a0\] JSON\s*v0.18.0", readline(io))
@test occursin(r"\[dcb67f36\] Unregistered\s*v\d\.\d\.\d\s*`https://github\.com/00vareladavid/Unregistered\.jl#master`", readline(io))
@test occursin(r"\[d6f4376e\] Markdown", readline(io))
end
## status warns when package not installed
isolate() do
Pkg.Registry.add(Pkg.RegistrySpec[], io=devnull) # load reg before io capturing
Pkg.activate(joinpath(@__DIR__, "test_packages", "Status"))
io = PipeBuffer()
Pkg.status(; io=io)
@test occursin(r"Status `.+Project.toml`", readline(io))
@test occursin(r"→⌃ \[7876af07\] Example\s*v\d\.\d\.\d", readline(io))
@test occursin(r"\[d6f4376e\] Markdown", readline(io))
@test "Info Packages marked with → are not downloaded, use `instantiate` to download" == strip(readline(io))
@test "Info Packages marked with ⌃ have new versions available" == strip(readline(io))
Pkg.status(;io=io, mode=Pkg.PKGMODE_MANIFEST)
@test occursin(r"Status `.+Manifest.toml`", readline(io))
@test occursin(r"→⌃ \[7876af07\] Example\s*v\d\.\d\.\d", readline(io))
@test occursin(r"\[2a0f44e3\] Base64", readline(io))
@test occursin(r"\[d6f4376e\] Markdown", readline(io))
@test "Info Packages marked with → are not downloaded, use `instantiate` to download" == strip(readline(io))
@test "Info Packages marked with ⌃ have new versions available" == strip(readline(io))
end
# Manifest Status API
isolate(loaded_depot=true) do
Pkg.Registry.add(Pkg.RegistrySpec[], io=devnull) # load reg before io capturing
io = PipeBuffer()
## empty manifest
Pkg.status(;io=io, mode=Pkg.PKGMODE_MANIFEST)
@test occursin(r"Status `.+Manifest\.toml` \(empty manifest\)", readline(io))
# loaded manifest
Pkg.add( name="Example", version="0.3.0")
Pkg.add("Markdown")
Pkg.status(; io=io, mode=Pkg.PKGMODE_MANIFEST)
@test occursin(r"Status `.+Manifest.toml`", readline(io))
@test occursin(r"\[7876af07\] Example\s*v0\.3\.0", readline(io))
@test occursin(r"\[2a0f44e3\] Base64", readline(io))
@test occursin(r"\[d6f4376e\] Markdown", readline(io))
end
# Diff API
isolate(loaded_depot=true) do
Pkg.Registry.add(Pkg.RegistrySpec[], io=devnull) # load reg before io capturing
io = PipeBuffer()
projdir = dirname(Pkg.project().path)
mkpath(projdir)
git_init_and_commit(projdir)
## empty project + empty diff
Pkg.status(; io=io, diff=true)
@test occursin(r"No Changes to `.+Project\.toml`", readline(io))
Pkg.status(; io=io, mode=Pkg.PKGMODE_MANIFEST, diff=true)
@test occursin(r"No Changes to `.+Manifest\.toml`", readline(io))
### empty diff + filter
Pkg.status("Example"; io=io, diff=true)
@test occursin(r"No Changes to `.+Project\.toml`", readline(io))
## non-empty project but empty diff
Pkg.add("Markdown")
git_init_and_commit(dirname(Pkg.project().path))
Pkg.status(; io=io, diff=true)
@test occursin(r"No Changes to `.+Project\.toml`", readline(io))
Pkg.status(; io=io, mode=Pkg.PKGMODE_MANIFEST, diff=true)
@test occursin(r"No Changes to `.+Manifest\.toml`", readline(io))
### filter should still show "empty diff"
Pkg.status("Example"; io=io, diff=true)
@test occursin(r"No Changes to `.+Project\.toml`", readline(io))
## non-empty project + non-empty diff
Pkg.rm("Markdown")
Pkg.add(name="Example", version="0.3.0")
## diff project
Pkg.status(; io=io, diff=true)
@test occursin(r"Diff `.+Project\.toml`", readline(io))
@test occursin(r"\[7876af07\] \+ Example\s*v0\.3\.0", readline(io))
@test occursin(r"\[d6f4376e\] - Markdown", readline(io))
@test occursin("Info Packages marked with ⌃ have new versions available", readline(io))
## diff manifest
Pkg.status(; io=io, mode=Pkg.PKGMODE_MANIFEST, diff=true)
@test occursin(r"Diff `.+Manifest.toml`", readline(io))
@test occursin(r"\[7876af07\] \+ Example\s*v0\.3\.0", readline(io))
@test occursin(r"\[2a0f44e3\] - Base64", readline(io))
@test occursin(r"\[d6f4376e\] - Markdown", readline(io))
@test occursin("Info Packages marked with ⌃ have new versions available", readline(io))
## diff project with filtering
Pkg.status("Markdown"; io=io, diff=true)
@test occursin(r"Diff `.+Project\.toml`", readline(io))
@test occursin(r"\[d6f4376e\] - Markdown", readline(io))
## empty diff + filter
Pkg.status("Base64"; io=io, diff=true)
@test occursin(r"No Matches in diff for `.+Project\.toml`", readline(io))
## diff manifest with filtering
Pkg.status("Base64"; io=io, mode=Pkg.PKGMODE_MANIFEST, diff=true)
@test occursin(r"Diff `.+Manifest.toml`", readline(io))
@test occursin(r"\[2a0f44e3\] - Base64", readline(io))
## manifest diff + empty filter
Pkg.status("FooBar"; io=io, mode=Pkg.PKGMODE_MANIFEST, diff=true)
@test occursin(r"No Matches in diff for `.+Manifest.toml`", readline(io))
end
# Outdated API
isolate(loaded_depot=true) do
Pkg.Registry.add(Pkg.RegistrySpec[], io=devnull) # load reg before io capturing
Pkg.add("Example"; io=devnull)
v = Pkg.dependencies()[exuuid].version
io = IOBuffer()
Pkg.add(Pkg.PackageSpec(name="Example", version="0.4.0"); io=devnull)
Pkg.status(; outdated=true, io=io)
str = String(take!(io))
@test occursin(Regex("⌃\\s*\\[7876af07\\] Example\\s*v0.4.0\\s*\\(<v$v\\)"), str)
open(Base.active_project(), "a") do io
write(io, """
[compat]
Example = "0.4.1"
""")
end
Pkg.status(; outdated=true, io=io)
str = String(take!(io))
@test occursin(Regex("⌃\\s*\\[7876af07\\] Example\\s*v0.4.0\\s*\\[<v0.4.1\\], \\(<v$v\\)"), str)
end
end
#
# # compat
#
@testset "Pkg.compat" begin
# State changes
isolate(loaded_depot=true) do
Pkg.add("Example")
iob = IOBuffer()
Pkg.status(compat=true, io = iob)
output = String(take!(iob))
@test occursin(r"Compat `.+Project.toml`", output)
@test occursin(r"\[7876af07\] *Example *none", output)
@test occursin(r"julia *none", output)
Pkg.compat("Example", "0.2,0.3")
@test Pkg.Operations.get_compat_str(Pkg.Types.Context().env.project, "Example") == "0.2,0.3"
Pkg.status(compat=true, io = iob)
output = String(take!(iob))
@test occursin(r"Compat `.+Project.toml`", output)
@test occursin(r"\[7876af07\] *Example *0.2,0.3", output)
@test occursin(r"julia *none", output)
Pkg.compat("Example", nothing)
Pkg.compat("julia", "1.8")
@test Pkg.Operations.get_compat_str(Pkg.Types.Context().env.project, "Example") == nothing
@test Pkg.Operations.get_compat_str(Pkg.Types.Context().env.project, "julia") == "1.8"
Pkg.status(compat=true, io = iob)
output = String(take!(iob))
@test occursin(r"Compat `.+Project.toml`", output)
@test occursin(r"\[7876af07\] *Example *none", output)
@test occursin(r"julia *1.8", output)
end
end
#
# # Caching
#
@testset "Repo caching" begin
# Add by URL should not overwrite files.
isolate(loaded_depot=true) do
Pkg.add(url="https://github.com/JuliaLang/Example.jl")
s1, t1, c1 = 0, 0, 0
Pkg.dependencies(exuuid) do pkg
@test isdir(pkg.source)
s1 = pkg.source
c1 = Pkg.Types.add_repo_cache_path(pkg.git_source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
t1 = mtime(pkg.source)
end
Pkg.add(url="https://github.com/JuliaLang/Example.jl")
Pkg.dependencies(exuuid) do pkg
@test isdir(pkg.source)
@test Base.samefile(pkg.source, s1)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
@test Pkg.Types.add_repo_cache_path(pkg.git_source) == c1
@test mtime(pkg.source) == t1
end
end
# Add by URL should not overwrite files, even across projects
isolate(loaded_depot=true) do
# Make sure we have everything downloaded
Pkg.add(url="https://github.com/JuliaLang/Example.jl")
s1, t1, c1 = 0, 0, 0
Pkg.dependencies(exuuid) do pkg
@test isdir(pkg.source)
s1 = pkg.source
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
c1 = Pkg.Types.add_repo_cache_path(pkg.git_source)
t1 = mtime(pkg.source)
end
# Now we activate a new project and make sure it is clean.
Pkg.activate("Foo"; shared=true)
@test isempty(Pkg.project().dependencies)
@test isempty(Pkg.dependencies())
# Finally, add the same URL, we should reuse the existing directories.
Pkg.add(url="https://github.com/JuliaLang/Example.jl")
Pkg.dependencies(exuuid) do pkg
@test isdir(pkg.source)
@test Base.samefile(pkg.source, s1)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
@test Pkg.Types.add_repo_cache_path(pkg.git_source) == c1
@test mtime(pkg.source) == t1
end
end
isolate(loaded_depot=true) do; mktempdir() do tempdir
empty_package = UUID("26187899-7657-4a90-a2f6-e79e0214bedc")
path = git_init_package(tempdir, joinpath(@__DIR__, "test_packages", "EmptyPackage"))
Pkg.add(path=path)
# We check that the package was installed correctly.
cache, original_master = 0, 0
Pkg.dependencies(empty_package) do pkg
@test pkg.name == "EmptyPackage"
@test isdir(pkg.source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
cache = Pkg.Types.add_repo_cache_path(pkg.git_source)
LibGit2.with(LibGit2.GitRepo(cache)) do repo
original_master = string(LibGit2.GitHash(LibGit2.GitObject(repo, "refs/heads/master")))
end
end
@test haskey(Pkg.project().dependencies, "EmptyPackage")
        # Now we add a commit upstream; if we were to fetch unnecessarily, we would see it in our clone.
write(joinpath(path, "Foo.txt"), "Hello\n")
new_commit = nothing
LibGit2.with(LibGit2.GitRepo(path)) do repo
LibGit2.add!(repo, "*")
new_commit = string(LibGit2.commit(repo, "new commit"; author=TEST_SIG, committer=TEST_SIG))
end
        # Use the clone to generate the source, _without_ unnecessarily updating the clone
rm(joinpath(DEPOT_PATH[1], "packages"); force=true, recursive=true)
Pkg.instantiate()
# check that `master` on the clone has not changed
Pkg.dependencies(empty_package) do pkg
@test pkg.name == "EmptyPackage"
@test isdir(pkg.source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
cache = Pkg.Types.add_repo_cache_path(pkg.git_source)
LibGit2.with(LibGit2.GitRepo(cache)) do repo
@test original_master == string(LibGit2.GitHash(LibGit2.GitObject(repo, "refs/heads/master")))
end
end
@test haskey(Pkg.project().dependencies, "EmptyPackage")
# Now we nuke the clones. This will force a fresh clone.
# We should see `master` on the new clone reflect the new commit.
rm(joinpath(DEPOT_PATH[1], "packages"); force=true, recursive=true)
rm(joinpath(DEPOT_PATH[1], "clones"); force=true, recursive=true)
Pkg.instantiate()
Pkg.dependencies(empty_package) do pkg
@test pkg.name == "EmptyPackage"
@test isdir(pkg.source)
@test isdir(Pkg.Types.add_repo_cache_path(pkg.git_source))
cache = Pkg.Types.add_repo_cache_path(pkg.git_source)
LibGit2.with(LibGit2.GitRepo(cache)) do repo
@test new_commit == string(LibGit2.GitHash(LibGit2.GitObject(repo, "refs/heads/master")))
end
end
@test haskey(Pkg.project().dependencies, "EmptyPackage")
end end
end
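# Sketch (pure path computation, no network): repo clones are cached under
# `<depot>/clones`, keyed off the source URL, so equal URLs always map to the same
# cache directory.
@testset "repo cache path sketch" begin
    isolate(loaded_depot=true) do
        url = "https://github.com/JuliaLang/Example.jl"
        cache = Pkg.Types.add_repo_cache_path(url)
        @test cache == Pkg.Types.add_repo_cache_path(url)
        @test startswith(cache, joinpath(DEPOT_PATH[1], "clones"))
    end
end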
#
# # Project files
#
@testset "project files" begin
# reading corrupted project files
isolate(loaded_depot=true) do
dir = joinpath(@__DIR__, "project", "bad")
for bad_project in joinpath.(dir, readdir(dir))
@test_throws PkgError Pkg.Types.read_project(bad_project)
end
end
# reading corrupted manifest files
isolate(loaded_depot=true) do
dir = joinpath(@__DIR__, "manifest", "bad")
for bad_manifest in joinpath.(dir, readdir(dir))
@test_throws PkgError Pkg.Types.read_manifest(bad_manifest)
end
end
# pruning manifest
dir = joinpath(@__DIR__, "manifest", "unpruned")
isolate(loaded_depot=true) do
mktempdir() do tmp
cp(dir, joinpath(tmp, "unpruned"))
Pkg.activate(joinpath(tmp, "unpruned"))
Pkg.resolve()
@test !occursin("Crayons", read(joinpath(tmp, "unpruned", "Manifest.toml"), String))
end
end
# manifest read/write
isolate() do # TODO rewrite using IOBuffer
manifestdir = joinpath(@__DIR__, "manifest", "good")
temp = joinpath(mktempdir(), "x.toml")
for testfile in joinpath.(manifestdir, readdir(manifestdir))
a = Pkg.Types.read_manifest(testfile)
Pkg.Types.write_manifest(a, temp)
b = Pkg.Types.read_manifest(temp)
for (uuid, x) in a
y = b[uuid]
for property in propertynames(x)
                    # `other` caches the *whole* input dictionary. It is OK to mutate a field of
                    # the input dictionary if that field is eventually overwritten by `write_manifest`.
property == :other && continue
@test getproperty(x, property) == getproperty(y, property)
end
end
end
rm(dirname(temp); recursive = true, force = true)
end
# project read/write
isolate() do
projectdir = joinpath(@__DIR__, "project", "good")
temp = joinpath(mktempdir(), "x.toml")
for testfile in joinpath.(projectdir, readdir(projectdir))
a = Pkg.Types.read_project(testfile)
Pkg.Types.write_project(a, temp)
b = Pkg.Types.read_project(temp)
for property in propertynames(a)
@test getproperty(a, property) == getproperty(b, property)
end
@test a == b
end
rm(dirname(temp); recursive = true, force = true)
end
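    # Sketch: a minimal, hand-written project also survives the round trip
    # (the name and uuid below are arbitrary placeholders).
    isolate() do
        mktempdir() do tmp
            proj = joinpath(tmp, "Project.toml")
            write(proj, """
            name = "RoundTripSketch"
            uuid = "11111111-2222-3333-4444-555555555555"
            """)
            a = Pkg.Types.read_project(proj)
            out = joinpath(tmp, "Copy.toml")
            Pkg.Types.write_project(a, out)
            @test a == Pkg.Types.read_project(out)
        end
    end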
# canonicalized relative paths in manifest
isolate() do
mktempdir() do tmp; cd(tmp) do
write("Manifest.toml",
"""
[[Foo]]
path = "bar/Foo"
uuid = "824dc81a-29a7-11e9-3958-fba342a32644"
version = "0.1.0"
""")
manifest = Pkg.Types.read_manifest("Manifest.toml")
package = manifest[Base.UUID("824dc81a-29a7-11e9-3958-fba342a32644")]
@test package.path == (Sys.iswindows() ? "bar\\Foo" : "bar/Foo")
Pkg.Types.write_manifest(manifest, "Manifest.toml")
@test occursin("path = \"bar/Foo\"", read("Manifest.toml", String))
end end
end
# create manifest file similar to project file
isolate(loaded_depot=true) do
cd_tempdir() do dir
touch(joinpath(dir, "Project.toml"))
Pkg.activate(".")
Pkg.add("Example")
@test isfile(joinpath(dir, "Manifest.toml"))
@test !isfile(joinpath(dir, "JuliaManifest.toml"))
end
cd_tempdir() do dir
touch(joinpath(dir, "JuliaProject.toml"))
Pkg.activate(".")
Pkg.add("Example")
@test !isfile(joinpath(dir, "Manifest.toml"))
@test isfile(joinpath(dir, "JuliaManifest.toml"))
end
end
end
@testset "cycles" begin
isolate(loaded_depot=true) do
cd_tempdir() do dir
Pkg.generate("Cycle_A")
cycle_a_uuid = Pkg.Types.read_project("Cycle_A/Project.toml").uuid
Pkg.generate("Cycle_B")
            cycle_b_uuid = Pkg.Types.read_project("Cycle_B/Project.toml").uuid
Pkg.activate("Cycle_A")
Pkg.develop(Pkg.PackageSpec(path="Cycle_B"))
Pkg.activate("Cycle_B")
Pkg.develop(Pkg.PackageSpec(path="Cycle_A"))
manifest_b = Pkg.Types.read_manifest("Cycle_B/Manifest.toml")
@test cycle_a_uuid in keys(manifest_b)
@test_broken !(cycle_b_uuid in keys(manifest_b))
end
end
end
#
# # Other
#
# Note: these tests should be run on clean depots
@testset "downloads" begin
for v in (nothing, "true")
withenv("JULIA_PKG_USE_CLI_GIT" => v) do
# libgit2 downloads
isolate() do
Pkg.add("Example"; use_git_for_all_downloads=true)
@test haskey(Pkg.dependencies(), exuuid)
@eval import $(Symbol(TEST_PKG.name))
@test_throws SystemError open(pathof(eval(Symbol(TEST_PKG.name))), "w") do io end # check read-only
Pkg.rm(TEST_PKG.name)
end
isolate() do
@testset "libgit2 downloads" begin
Pkg.add(TEST_PKG.name; use_git_for_all_downloads=true)
@test haskey(Pkg.dependencies(), TEST_PKG.uuid)
Pkg.rm(TEST_PKG.name)
end
@testset "tarball downloads" begin
Pkg.add("JSON"; use_only_tarballs_for_downloads=true)
@test "JSON" in [pkg.name for (uuid, pkg) in Pkg.dependencies()]
Pkg.rm("JSON")
end
end
end
end
end
@testset "package name in resolver errors" begin
isolate(loaded_depot=true) do
try
Pkg.add(name="Example", version = v"55")
catch e
@test occursin(TEST_PKG.name, sprint(showerror, e))
end
end
end
@testset "API details" begin
# API should not mutate
isolate() do
package_names = ["JSON"]
packages = Pkg.PackageSpec.(package_names)
Pkg.add(packages)
@test [p.name for p in packages] == package_names
end
# API should accept `AbstractString` arguments
isolate() do
Pkg.add(strip(" Example "))
Pkg.rm(strip(" Example "))
end
end
@testset "REPL error handling" begin
isolate() do
# PackageSpec tokens
@test_throws PkgError Pkg.pkg"add FooBar Example#foobar#foobar"
@test_throws PkgError Pkg.pkg"up Example#foobar@0.0.0"
@test_throws PkgError Pkg.pkg"pin Example@0.0.0@0.0.1"
@test_throws PkgError Pkg.pkg"up #foobar"
@test_throws PkgError Pkg.pkg"add @0.0.1"
@test_throws PkgError Pkg.pkg"add JSON Example#foobar#foobar LazyJSON"
# Argument count
@test_throws PkgError Pkg.pkg"activate one two"
@test_throws PkgError Pkg.pkg"activate one two three"
# invalid options
@test_throws PkgError Pkg.pkg"rm --minor Example"
@test_throws PkgError Pkg.pkg"pin --project Example"
# conflicting options
@test_throws PkgError Pkg.pkg"up --major --minor"
@test_throws PkgError Pkg.pkg"rm --project --manifest"
end
end
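# Helper: hex-encoded `git` tree hash of a directory; `@inferred` doubles as a guard
# against type instability in `GitTools.tree_hash`.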
tree_hash(root::AbstractString; kwargs...) = bytes2hex(@inferred Pkg.GitTools.tree_hash(root; kwargs...))
@testset "git tree hash computation" begin
mktempdir() do dir
# test "well known" empty tree hash
@test "4b825dc642cb6eb9a060e54bf8d69288fbee4904" == tree_hash(dir)
# create a text file
file = joinpath(dir, "hello.txt")
open(file, write=true) do io
println(io, "Hello, world.")
end
chmod(file, 0o644)
# reference hash generated with command-line git
@test "0a890bd10328d68f6d85efd2535e3a4c588ee8e6" == tree_hash(dir)
# test with various executable bits set
chmod(file, 0o645) # other x bit doesn't matter
@test "0a890bd10328d68f6d85efd2535e3a4c588ee8e6" == tree_hash(dir)
chmod(file, 0o654) # group x bit doesn't matter
@test "0a890bd10328d68f6d85efd2535e3a4c588ee8e6" == tree_hash(dir)
chmod(file, 0o744) # user x bit matters
@test "952cfce0fb589c02736482fa75f9f9bb492242f8" == tree_hash(dir)
end
# Test for empty directory hashing
mktempdir() do dir
@test "4b825dc642cb6eb9a060e54bf8d69288fbee4904" == tree_hash(dir)
# Directories containing other empty directories are also empty
mkdir(joinpath(dir, "foo"))
mkdir(joinpath(dir, "foo", "bar"))
@test "4b825dc642cb6eb9a060e54bf8d69288fbee4904" == tree_hash(dir)
# Directories containing symlinks (even if they point to other directories)
# are NOT empty:
if !Sys.iswindows()
symlink("bar", joinpath(dir, "foo", "bar_link"))
@test "8bc80be82b2ae4bd69f50a1a077a81b8678c9024" == tree_hash(dir)
end
end
# Test for directory with .git hashing
mktempdir() do dir
mkdir(joinpath(dir, "Foo"))
mkdir(joinpath(dir, "FooGit"))
mkdir(joinpath(dir, "FooGit", ".git"))
write(joinpath(dir, "Foo", "foo"), "foo")
chmod(joinpath(dir, "Foo", "foo"), 0o644)
write(joinpath(dir, "FooGit", "foo"), "foo")
chmod(joinpath(dir, "FooGit", "foo"), 0o644)
write(joinpath(dir, "FooGit", ".git", "foo"), "foo")
chmod(joinpath(dir, "FooGit", ".git", "foo"), 0o644)
@test tree_hash(joinpath(dir, "Foo")) ==
tree_hash(joinpath(dir, "FooGit")) ==
"2f42e2c1c1afd4ef8c66a2aaba5d5e1baddcab33"
end
# Test for symlinks that are a prefix of another directory, causing sorting issues
if !Sys.iswindows()
mktempdir() do dir
mkdir(joinpath(dir, "5.28.1"))
write(joinpath(dir, "5.28.1", "foo"), "")
chmod(joinpath(dir, "5.28.1", "foo"), 0o644)
symlink("5.28.1", joinpath(dir, "5.28"))
@test tree_hash(dir) == "5e50a4254773a7c689bebca79e2954630cab9c04"
end
end
end
@testset "multiple registries overlapping version ranges for different versions" begin
isolate(loaded_depot=true) do
# Add a new registry
dp = DEPOT_PATH[1]
newreg = joinpath(dp, "registries", "NewReg")
mkpath(newreg)
write(joinpath(newreg, "Registry.toml"), """
name = "NewReg"
uuid = "23338594-aafe-5451-b93e-139f81909106"
repo = "whydoineedthis?"
[packages]
7876af07-990d-54b4-ab0e-23690620f79a = { name = "Example", path = "E/Example" }
""")
example_path = joinpath(newreg, "E", "Example")
mkpath(example_path)
write(joinpath(example_path, "Package.toml"), """
name = "Example"
uuid = "7876af07-990d-54b4-ab0e-23690620f79a"
repo = "https://github.com/JuliaLang/Example.jl.git"
""")
write(joinpath(example_path, "Versions.toml"), """
["0.99.99"]
git-tree-sha1 = "46e44e869b4d90b96bd8ed1fdcf32244fddfb6cc"
""")
write(joinpath(example_path, "Compat.toml"), """
["0"]
julia = "0.0"
""")
# This shouldn't cause a resolver error
Pkg.add("Example")
end
end
@testset "not collecting multiple package instances #1570" begin
isolate(loaded_depot=true) do
cd_tempdir() do dir
Pkg.generate("A")
Pkg.generate("B")
Pkg.activate("B")
Pkg.develop(Pkg.PackageSpec(path="A"))
Pkg.activate(".")
Pkg.develop(Pkg.PackageSpec(path="A"))
Pkg.develop(Pkg.PackageSpec(path="B"))
end
end
end
@testset "cyclic dependency graph" begin
isolate(loaded_depot=true) do
cd_tempdir() do dir
Pkg.generate("A")
Pkg.generate("B")
Pkg.activate("A")
Pkg.develop(path="B")
git_init_and_commit("A")
Pkg.activate("B")
# This shouldn't error even though A has a dependency on B
Pkg.add(path="A")
end
end
# test #2302
isolate(loaded_depot=true) do
cd_tempdir() do dir
Pkg.generate("A")
Pkg.generate("B")
git_init_and_commit("B")
Pkg.develop(path="B")
Pkg.activate("A")
Pkg.add(path="B")
git_init_and_commit("A")
Pkg.activate("B")
# This shouldn't error even though A has a dependency on B
Pkg.add(path="A")
end
end
end
@testset "Offline mode" begin
isolate(loaded_depot=false) do
# cache this version
Pkg.add(Pkg.PackageSpec(uuid=exuuid, version=v"0.5.1"))
@test Pkg.dependencies()[exuuid].version == v"0.5.1"
Pkg.offline()
# Pkg.update() should not error/warn and keep Example at 0.5.1
@test_logs Pkg.update()
@test Pkg.dependencies()[exuuid].version == v"0.5.1"
try
Pkg.add(Pkg.PackageSpec(uuid=exuuid, version=v"0.5.3"))
catch e
@test e isa ResolverError
# `\S*` in regex below will allow for ANSI color escape codes in the logs
@test occursin(r"possible versions are: \S*0\.5\.1\S* or uninstalled", e.msg)
end
Pkg.offline(false)
end
end
@testset "relative depot path" begin
isolate(loaded_depot=false) do
mktempdir() do tmp
ENV["JULIA_DEPOT_PATH"] = "tmp"
Base.init_depot_path()
Pkg.Registry.DEFAULT_REGISTRIES[1].url = Utils.REGISTRY_DIR
Pkg.Registry.DEFAULT_REGISTRIES[1].path = nothing
cp(joinpath(@__DIR__, "test_packages", "BasicSandbox"), joinpath(tmp, "BasicSandbox"))
git_init_and_commit(joinpath(tmp, "BasicSandbox"))
cd(tmp) do
Pkg.add(path="BasicSandbox")
end
end
end
end
using Pkg.Types: is_stdlib
@testset "is_stdlib() across versions" begin
networkoptions_uuid = UUID("ca575930-c2e3-43a9-ace4-1e988b2c1908")
pkg_uuid = UUID("44cfe95a-1eb2-52ea-b672-e2afdf69b78f")
# Assume we're running on v1.6+
@test is_stdlib(networkoptions_uuid)
@test is_stdlib(networkoptions_uuid, v"1.6")
@test !is_stdlib(networkoptions_uuid, v"1.5")
@test !is_stdlib(networkoptions_uuid, v"1.0.0")
@test !is_stdlib(networkoptions_uuid, v"0.7")
@test !is_stdlib(networkoptions_uuid, nothing)
# Pkg is an unregistered stdlib
@test is_stdlib(pkg_uuid)
@test is_stdlib(pkg_uuid, v"1.0")
@test is_stdlib(pkg_uuid, v"1.6")
@test is_stdlib(pkg_uuid, v"999.999.999")
@test is_stdlib(pkg_uuid, v"0.7")
@test is_stdlib(pkg_uuid, nothing)
end
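# Sketch (assumes `markdown_uuid` from the shared test setup): Markdown has shipped
# as an stdlib since Julia 0.7, so it reports as one for any 1.x cutoff.
@testset "is_stdlib() sketch: Markdown" begin
    @test is_stdlib(markdown_uuid)
    @test is_stdlib(markdown_uuid, v"1.0")
    @test is_stdlib(markdown_uuid, v"1.6")
end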
@testset "Pkg.add() with julia_version" begin
# A package with artifacts that went from normal package -> stdlib
gmp_jll_uuid = "781609d7-10c4-51f6-84f2-b8444358ff6d"
    # A package that has only ever been a stdlib
linalg_uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
    # A package that went from normal package -> stdlib
networkoptions_uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
function get_manifest_block(name)
manifest_path = joinpath(dirname(Base.active_project()), "Manifest.toml")
@test isfile(manifest_path)
deps = Base.get_deps(TOML.parsefile(manifest_path))
@test haskey(deps, name)
return only(deps[name])
end
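    # The helper above returns the single manifest entry for `name` (via
    # `only`), so the tests below can inspect its "uuid"/"version" keys directly.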
isolate(loaded_depot=true) do
        # Test that if we ask for `v1.5`, NetworkOptions DOES have a version, and that GMP_jll installs v6.1.X
Pkg.add(["NetworkOptions", "GMP_jll"]; julia_version=v"1.5")
no_block = get_manifest_block("NetworkOptions")
@test haskey(no_block, "uuid")
@test no_block["uuid"] == networkoptions_uuid
@test haskey(no_block, "version")
gmp_block = get_manifest_block("GMP_jll")
@test haskey(gmp_block, "uuid")
@test gmp_block["uuid"] == gmp_jll_uuid
@test haskey(gmp_block, "version")
@test startswith(gmp_block["version"], "6.1.2")
# Test that the artifact of GMP_jll contains the right library
@test haskey(gmp_block, "git-tree-sha1")
gmp_jll_dir = Pkg.Operations.find_installed("GMP_jll", Base.UUID(gmp_jll_uuid), Base.SHA1(gmp_block["git-tree-sha1"]))
@test isdir(gmp_jll_dir)
artifacts_toml = joinpath(gmp_jll_dir, "Artifacts.toml")
@test isfile(artifacts_toml)
meta = artifact_meta("GMP", artifacts_toml)
@test meta !== nothing
gmp_artifact_path = artifact_path(Base.SHA1(meta["git-tree-sha1"]))
@test isdir(gmp_artifact_path)
# On linux, we can check the filename to ensure it's grabbing the correct library
if Sys.islinux()
libgmp_filename = joinpath(gmp_artifact_path, "lib", "libgmp.so.10.3.2")
@test isfile(libgmp_filename)
end
end
# Next, test that if we ask for `v1.6`, GMP_jll gets `v6.2.0`, and for `v1.7`, it gets `v6.2.1`
function do_gmp_test(julia_version, gmp_version)
isolate(loaded_depot=true) do
Pkg.add("GMP_jll"; julia_version)
gmp_block = get_manifest_block("GMP_jll")
@test haskey(gmp_block, "uuid")
@test gmp_block["uuid"] == gmp_jll_uuid
@test haskey(gmp_block, "version")
@test startswith(gmp_block["version"], string(gmp_version))
end
end
do_gmp_test(v"1.6", v"6.2.0")
do_gmp_test(v"1.7", v"6.2.1")
isolate(loaded_depot=true) do
# Next, test that if we ask for `nothing`, NetworkOptions has a `version` but `LinearAlgebra` does not.
Pkg.add(["LinearAlgebra", "NetworkOptions"]; julia_version=nothing)
no_block = get_manifest_block("NetworkOptions")
@test haskey(no_block, "uuid")
@test no_block["uuid"] == networkoptions_uuid
@test haskey(no_block, "version")
linalg_block = get_manifest_block("LinearAlgebra")
@test haskey(linalg_block, "uuid")
@test linalg_block["uuid"] == linalg_uuid
@test !haskey(linalg_block, "version")
end
isolate(loaded_depot=true) do
# Next, test that stdlibs do not get dependencies from the registry
# NOTE: this test depends on the fact that in Julia v1.6+ we added
# "fake" JLLs that do not depend on Pkg while the "normal" p7zip_jll does.
# A future p7zip_jll in the registry may not depend on Pkg, so be sure
# to verify your assumptions when updating this test.
Pkg.add("p7zip_jll")
p7zip_jll_uuid = UUID("3f19e933-33d8-53b3-aaab-bd5110c3b7a0")
@test !("Pkg" in keys(Pkg.dependencies()[p7zip_jll_uuid].dependencies))
end
end
@testset "Issue #2931" begin
isolate(loaded_depot=false) do
temp_pkg_dir() do path
name = "Example"
version = "0.5.3"
tree_hash = Base.SHA1("46e44e869b4d90b96bd8ed1fdcf32244fddfb6cc")
# Install Example.jl
Pkg.add(; name, version)
# Force empty version number in the manifest
ctx = Pkg.Types.Context()
ctx.env.manifest[exuuid].version = nothing
# Delete directory where the package would be installed
pkg_dir = Pkg.Operations.find_installed(name, exuuid, tree_hash)
rm(pkg_dir; recursive=true, force=true)
# (Re-)download sources
Pkg.Operations.download_source(ctx)
# Make sure the package directory is there
@test isdir(pkg_dir)
end
end
end
if :version in fieldnames(Base.PkgOrigin)
@testset "sysimage functionality" begin
old_sysimage_modules = copy(Base._sysimage_modules)
old_pkgorigins = copy(Base.pkgorigins)
try
            # Fake having a package in the sysimage.
json_pkgid = Base.PkgId(json_uuid, "JSON")
push!(Base._sysimage_modules, json_pkgid)
Base.pkgorigins[json_pkgid] = Base.PkgOrigin(nothing, nothing, v"0.20.1")
isolate(loaded_depot=true) do
Pkg.add("JSON"; io=devnull)
                Pkg.dependencies(json_uuid) do pkg
                    @test pkg.version == v"0.20.1"
                end
io = IOBuffer()
Pkg.status(; outdated=true, io=io)
str = String(take!(io))
@test occursin("⌅ [682c06a0] JSON v0.20.1", str)
@test occursin("[sysimage]", str)
@test_throws PkgError Pkg.add(name="JSON", rev="master"; io=devnull)
@test_throws PkgError Pkg.develop("JSON"; io=devnull)
Pkg.respect_sysimage_versions(false)
Pkg.add("JSON"; io=devnull)
                Pkg.dependencies(json_uuid) do pkg
                    @test pkg.version != v"0.20.1"
                end
end
finally
copy!(Base._sysimage_modules, old_sysimage_modules)
copy!(Base.pkgorigins, old_pkgorigins)
Pkg.respect_sysimage_versions(true)
end
end
end
end #module
|
// Copyright Sebastian Ramacher, 2007.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_PTR_CONTAINER_DETAIL_SERIALIZE_PTR_MAP_ADAPTER_HPP
#define BOOST_PTR_CONTAINER_DETAIL_SERIALIZE_PTR_MAP_ADAPTER_HPP
#include <boost/ptr_container/ptr_map_adapter.hpp>
#include <boost/ptr_container/detail/serialize_xml_names.hpp>
#include <boost/serialization/split_free.hpp>
namespace boost
{
namespace serialization
{
template<class Archive, class T, class VoidPtrMap, class CloneAllocator, bool Ordered>
void save(Archive& ar, const ptr_container_detail::ptr_map_adapter_base<T, VoidPtrMap, CloneAllocator,Ordered>& c, unsigned int /*version*/)
{
typedef ptr_container_detail::ptr_map_adapter_base<T, VoidPtrMap, CloneAllocator,Ordered> container;
typedef BOOST_DEDUCED_TYPENAME container::const_iterator const_iterator;
ar << boost::serialization::make_nvp( ptr_container_detail::count(),
ptr_container_detail::serialize_as_const(c.size()) );
const_iterator i = c.begin(), e = c.end();
for(; i != e; ++i)
{
ar << boost::serialization::make_nvp( ptr_container_detail::first(), i->first );
ar << boost::serialization::make_nvp( ptr_container_detail::second(),
ptr_container_detail::serialize_as_const(i->second) );
}
}
template<class Archive, class T, class VoidPtrMap, class CloneAllocator, bool Ordered>
void load(Archive& ar, ptr_map_adapter<T, VoidPtrMap, CloneAllocator,Ordered>& c, unsigned int /*version*/)
{
typedef ptr_map_adapter<T, VoidPtrMap, CloneAllocator,Ordered> container;
typedef BOOST_DEDUCED_TYPENAME container::key_type key_type;
typedef BOOST_DEDUCED_TYPENAME container::size_type size_type;
typedef BOOST_DEDUCED_TYPENAME container::iterator iterator;
c.clear();
size_type n;
ar >> boost::serialization::make_nvp( ptr_container_detail::count(), n );
for(size_type i = 0u; i != n; ++i)
{
key_type key;
T* value;
ar >> boost::serialization::make_nvp( ptr_container_detail::first(), key );
ar >> boost::serialization::make_nvp( ptr_container_detail::second(), value );
std::pair<iterator, bool> p = c.insert(key, value);
ar.reset_object_address(&p.first->first, &key);
}
}
template<class Archive, class T, class VoidPtrMap, class CloneAllocator, bool Ordered>
void load(Archive& ar, ptr_multimap_adapter<T, VoidPtrMap, CloneAllocator,Ordered>& c, unsigned int /*version*/)
{
typedef ptr_multimap_adapter<T, VoidPtrMap, CloneAllocator,Ordered> container;
typedef BOOST_DEDUCED_TYPENAME container::key_type key_type;
typedef BOOST_DEDUCED_TYPENAME container::size_type size_type;
typedef BOOST_DEDUCED_TYPENAME container::iterator iterator;
c.clear();
size_type n;
ar >> boost::serialization::make_nvp( ptr_container_detail::count(), n );
for(size_type i = 0u; i != n; ++i)
{
key_type key;
T* value;
ar >> boost::serialization::make_nvp( ptr_container_detail::first(), key );
ar >> boost::serialization::make_nvp( ptr_container_detail::second(), value );
iterator p = c.insert(key, value);
ar.reset_object_address(&p->first, &key);
}
}
} // namespace serialization
} // namespace boost
#endif
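// Illustrative usage sketch (not part of this detail header). Assuming the
// public front-end <boost/ptr_container/serialize_ptr_map.hpp> is included,
// the save/load overloads above let a ptr_map round-trip through any
// Boost.Serialization archive, e.g.:
//
//   boost::ptr_map<std::string, int> m;
//   std::string k("answer");
//   m.insert(k, new int(42));
//   std::ofstream os("m.txt");
//   boost::archive::text_oarchive oa(os);
//   oa << m;   // dispatches to the save() overload above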
|
# Image Display module
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.widgets import SpanSelector
from matplotlib.colors import LinearSegmentedColormap
from skimage import transform
import numpy as np
class GUIDisplayOverlap(object):
def __init__(self, data_em):
        # Two 2D hologram arrays to overlay and align
self.image_data_1 = data_em.holo_1
self.image_data_2 = data_em.holo_2
# Window for image display + matplotlib parameters
self.fig_image = plt.figure(num='align images', figsize=(10, 7), dpi=100)
# Layout figure
self.gs_fig_image = gridspec.GridSpec(8, 8)
# Contrast histogram display and span selector
self.ax_contrast = plt.subplot(self.gs_fig_image[0, 1:6])
self.contrastbins = 256
self.cmin = np.min([np.min(self.image_data_1), np.min(self.image_data_2)])
self.cmax = np.max([np.max(self.image_data_1), np.max(self.image_data_2)])
        # Joint histogram over both images so the contrast span covers
        # their combined dynamic range
        self.imhist, self.imbins = np.histogram(
            np.concatenate((self.image_data_1.ravel(), self.image_data_2.ravel())), bins=self.contrastbins)
self.ax_contrast_span = None
self.plot_contrast_histogram()
# Define image axis
self.ax_image = plt.subplot(self.gs_fig_image[1:-1, 0:-1])
self.ax_image.set_axis_off()
cdict_red = {'red': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (0.85, 1.0, 1.0), (1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]}
cdict_blue = {'red': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (0.85, 1.0, 1.0), (1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (0.85, 1.0, 1.0), (1.0, 1.0, 1.0)]}
self.cmap_1 = LinearSegmentedColormap('dark_red', cdict_red)
self.cmap_2 = LinearSegmentedColormap('dark_blue', cdict_blue)
self.ratio = np.mean(self.image_data_2)/np.mean(self.image_data_1)
self.image_1 = self.ax_image.imshow(self.ratio * self.image_data_1, cmap=self.cmap_1, alpha=1)
self.image_2 = self.ax_image.imshow(self.image_data_2, cmap=self.cmap_2, alpha=0.5)
self.cid = self.connect()
self.shift = np.array([0, 0])
self.scale = np.array([1, 1])
self.rotation = 0
self.shear = 0
self.translation = np.array([0, 0])
self.deform = transform.AffineTransform(scale=self.scale, rotation=self.rotation, shear=self.shear,
translation=self.translation)
def connect(self):
self.cid = self.fig_image.canvas.mpl_connect('key_press_event', self.transformation)
self.cid1 = self.fig_image.canvas.mpl_connect('close_event', self.handle_close)
return self.cid
def transformation(self, event):
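        # Key map for the branches below: arrow keys translate image 2 by
        # one pixel per press ('alt+' variants step by 10); '+'/'-'
        # decrease/increase the scale factor by 0.01 (0.1 with alt);
        # '/' and '*' rotate by +/-1 degree (10 degrees with alt);
        # 'm', 'n' and 'b' apply hard-coded preset transforms. Each branch
        # rebuilds self.deform and re-warps image_data_2, so successive
        # key presses accumulate on the displayed data.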
h_shift = np.array([0, 1])
v_shift = np.array([-1, 0])
rot_step = np.pi/180
shear_step = np.pi/180
magnify_step = np.array([0.01, 0.01])
h_step = np.array([-1, 0])
v_step = np.array([0, 1])
if event.key == 'up':
self.shift = np.add(self.shift, v_shift)
print(self.shift)
self.translation = np.add(self.translation, v_step)
self.update_tform([1, 1], 0, 0, v_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+up':
self.shift = np.add(self.shift, 10 * v_shift)
print(self.shift)
self.translation = np.add(self.translation, 10 * v_step)
self.update_tform([1, 1], 0, 0, 10 * v_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'down':
self.shift = np.subtract(self.shift, v_shift)
print(self.shift)
self.translation = np.subtract(self.translation, 1 * v_step)
self.update_tform([1, 1], 0, 0, -1 * v_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+down':
self.shift = np.subtract(self.shift, 10 * v_shift)
print(self.shift)
self.translation = np.subtract(self.translation, 10 * v_step)
self.update_tform([1, 1], 0, 0, -10 * v_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'right':
self.shift = np.add(self.shift, h_shift)
print(self.shift)
self.translation = np.add(self.translation, h_step)
self.update_tform([1, 1], 0, 0, h_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+right':
self.shift = np.add(self.shift, 10 * h_shift)
print(self.shift)
self.translation = np.add(self.translation, 10 * h_step)
self.update_tform([1, 1], 0, 0, 10 * h_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'left':
self.shift = np.subtract(self.shift, h_shift)
print(self.shift)
self.translation = np.subtract(self.translation, 1 * h_step)
self.update_tform([1, 1], 0, 0, -1 * h_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+left':
self.shift = np.subtract(self.shift, 10 * h_shift)
print(self.shift)
self.translation = np.subtract(self.translation, 10 * h_step)
self.update_tform([1, 1], 0, 0, -10 * h_step)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == '+':
self.scale = np.subtract(self.scale, magnify_step)
print(self.scale)
self.update_tform(np.array([1, 1]) - magnify_step, 0, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt++':
self.scale = np.subtract(self.scale, 10 * magnify_step)
print(self.scale)
self.update_tform(np.array([1, 1]) - 10 * magnify_step, 0, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == '-':
self.scale = np.add(self.scale, magnify_step)
print(self.scale)
self.update_tform(np.array([1, 1]) + magnify_step, 0, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+-':
self.scale = np.add(self.scale, 10 * magnify_step)
print(self.scale)
self.update_tform(np.array([1, 1]) + 10 * magnify_step, 0, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == '/':
self.rotation = np.add(self.rotation, rot_step)
print(self.rotation)
self.update_tform([1, 1], rot_step, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+/':
self.rotation = np.add(self.rotation, 10 * rot_step)
print(self.rotation)
self.update_tform([1, 1], 10 * rot_step, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == '*':
self.rotation = np.subtract(self.rotation, rot_step)
print(self.rotation)
self.update_tform([1, 1], - rot_step, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'alt+*':
self.rotation = np.subtract(self.rotation, 10 * rot_step)
print(self.rotation)
self.update_tform([1, 1], - 10 * rot_step, 0, [0, 0])
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
if event.key == 'm':
self.scale = [0.96, 0.96]
self.shear = 0
self.rotation = -0.21
self.translation = [-205, 327]
self.update_tform(self.scale, self.rotation, self.shear, self.translation)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
print('shift test')
if event.key == 'n':
self.scale = [0.98, 0.98]
self.shear = 0
self.rotation = -0.21
self.translation = [-232, 180]
self.update_tform(self.scale, self.rotation, self.shear, self.translation)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
print('shift test')
if event.key == 'b': # bis and 15
self.scale = [0.97, 0.97]
self.shear = 0
self.rotation = -0.21
self.translation = [-275, 305]
self.update_tform(self.scale, self.rotation, self.shear, self.translation)
self.image_data_2 = transform.warp(self.image_data_2, self.deform, preserve_range=1)
self.update_image()
print('shift test')
        if event.key == 'shift':
            print('To do')
def disconnect(self):
self.fig_image.canvas.mpl_disconnect(self.cid)
self.fig_image.canvas.mpl_disconnect(self.cid1)
def handle_close(self, event):
self.disconnect()
plt.close(self.fig_image)
print('plot closed')
def update_image(self):
self.ax_image.cla()
self.image_1 = self.ax_image.imshow(self.ratio * self.image_data_1, cmap=self.cmap_1, alpha=1)
self.image_2 = self.ax_image.imshow(self.image_data_2, cmap=self.cmap_2, alpha=0.5)
self.image_1.set_clim(vmin=self.cmin, vmax=self.cmax)
self.image_2.set_clim(vmin=self.cmin, vmax=self.cmax)
plt.draw()
def update_tform(self, scale, rotation, shear, translation):
self.deform = transform.AffineTransform(scale=scale, rotation=rotation, shear=shear, translation=translation)
def update_image_clim(self):
self.image_1.set_clim(vmin=self.cmin, vmax=self.cmax)
self.image_2.set_clim(vmin=self.cmin, vmax=self.cmax)
def contrast_span(self, cmin, cmax):
self.cmin = cmin
self.cmax = cmax
self.update_image_clim()
def update_cmin(self, event):
self.cmin = float(event)
self.contrast_span(self.cmin, self.cmax)
def update_cmax(self, event):
self.cmax = float(event)
self.contrast_span(self.cmin, self.cmax)
def plot_contrast_histogram(self):
self.ax_contrast.cla()
self.ax_contrast.plot(self.imbins[:-1], self.imhist, color='k')
self.ax_contrast.set_axis_off()
self.ax_contrast_span = SpanSelector(self.ax_contrast, self.contrast_span, 'horizontal',
span_stays=True, rectprops=dict(alpha=0.5, facecolor='green'))
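# Illustrative usage sketch (not part of the module): GUIDisplayOverlap
# only needs an object exposing two 2D arrays as `holo_1` and `holo_2`,
# so a SimpleNamespace with random demo data is enough for a dry run.
if __name__ == '__main__':
    from types import SimpleNamespace
    rng = np.random.default_rng(0)
    demo_data = SimpleNamespace(holo_1=rng.random((256, 256)),
                                holo_2=rng.random((256, 256)))
    gui = GUIDisplayOverlap(demo_data)
    plt.show()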
|
import setup
setup.deal_with_path()
import numpy as np
import math
import linreg_qr
import linreg_setup
def test_accuracy(qrMthd="HouseHolder", colTrunc=False):
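    # qrMthd selects the orthogonalization used by the federated QR solver
    # ("HouseHolder" or "GramSchmidt"); colTrunc enables column pivoting,
    # and the rank-deficient cases (Problems 4-6 below) only run when it
    # is True.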
print("*"*120)
if colTrunc:
print("Test the accuracy of federated QR implemented by ", qrMthd, " method with column pivoting")
else:
print("Test the accuracy of federated QR implemented by ", qrMthd, " method without column pivoting")
nTrain = 1000
nInfer = 10
dataMax = 10**4
print("The test data are generated randomly and consist of ", nTrain, " samples")
print("The features are generated with variate scales. The largest scale is ", dataMax)
print("The label values are generated by adding the multiplication of the features and some random generated weights with some perturbation")
clientIdWLabel = 1
print("The label is kept by beh second client")
print("="*60)
print("Problem1: Need not to add extra random features and the test features are full rank")
nFeatures = [3, 3, 5, 11, 8]
encryLv = 3
XTrain, YTrain, XInfer = linreg_setup.generate_fullrank_test_data(nTrain, nInfer, nFeatures, dataMax)
clientMap, coordinator= linreg_setup.setup_problem(XTrain, YTrain, nFeatures, clientIdWLabel, encryLv, XInfer, qrMthd, colTrunc)
# XTrain, XInfer = linreg_setup.switch_order_4_dataset(XTrain, nFeatures, clientIdWLabel, XInfer)
weights = linreg_qr.solve_weights(clientMap, coordinator, encryLv, qrMthd, colTrunc)
XTrain = np.append(XTrain, np.ones([nTrain, 1]), axis=1)
weightsExact = np.linalg.lstsq(XTrain, YTrain, rcond=None)[0]
err = weights-weightsExact
print("The relative error of QR with ", qrMthd, " method based on inf-norm is: ", np.linalg.norm(err, np.inf)/np.linalg.norm(weightsExact, np.inf))
print("The relative error of QR with ", qrMthd, " method based on 2-norm is: ", np.linalg.norm(err)/np.linalg.norm(weightsExact))
print("="*60)
print("Problem2: Need to add extra random features and the test features are full rank")
nFeatures = [4, 1, 15, 2, 8]
encryLv = 3
XTrain, YTrain, XInfer = linreg_setup.generate_fullrank_test_data(nTrain, nInfer, nFeatures, dataMax)
clientMap, coordinator= linreg_setup.setup_problem(XTrain, YTrain, nFeatures, clientIdWLabel, encryLv, XInfer, qrMthd, colTrunc)
# XTrain, XInfer = linreg_setup.switch_order_4_dataset(XTrain, nFeatures, clientIdWLabel, XInfer)
weights = linreg_qr.solve_weights(clientMap, coordinator, encryLv, qrMthd, colTrunc)
XTrain = np.append(XTrain, np.ones([nTrain, 1]), axis=1)
weightsExact = np.linalg.lstsq(XTrain, YTrain, rcond=None)[0]
err = weights-weightsExact
print("The relative error of QR with ", qrMthd, " method based on inf-norm is: ", np.linalg.norm(err, np.inf)/np.linalg.norm(weightsExact, np.inf))
print("The relative error of QR with ", qrMthd, " method based on 2-norm is: ", np.linalg.norm(err)/np.linalg.norm(weightsExact))
print("="*60)
print("Problem3: Need to add extra random features and the active client does not offer any feature. The test features are full rank")
nFeatures = [2, 0, 15, 11, 2]
encryLv = 3
XTrain, YTrain, XInfer = linreg_setup.generate_fullrank_test_data(nTrain, nInfer, nFeatures, dataMax)
clientMap, coordinator= linreg_setup.setup_problem(XTrain, YTrain, nFeatures, clientIdWLabel, encryLv, XInfer, qrMthd, colTrunc)
# XTrain, XInfer = linreg_setup.switch_order_4_dataset(XTrain, nFeatures, clientIdWLabel, XInfer)
weights = linreg_qr.solve_weights(clientMap, coordinator, encryLv, qrMthd, colTrunc)
XTrain = np.append(XTrain, np.ones([nTrain, 1]), axis=1)
weightsExact = np.linalg.lstsq(XTrain, YTrain, rcond=None)[0]
err = weights-weightsExact
print("The relative error of QR with ", qrMthd, " method based on inf-norm is: ", np.linalg.norm(err, np.inf)/np.linalg.norm(weightsExact, np.inf))
print("The relative error of QR with ", qrMthd, " method based on 2-norm is: ", np.linalg.norm(err)/np.linalg.norm(weightsExact))
if colTrunc:
print("="*60)
print("Problem4: Need not to add extra random features and the test features have rank deficient columns at both active and negative client.")
nFeatures = [3, 3, 5, 11, 8]
rankDefiColIds = [4, 11]
encryLv = 3
XTrain, YTrain, XInfer = linreg_setup.generate_rankdefi_test_data(nTrain, nInfer, nFeatures, dataMax, rankDefiColIds)
clientMap, coordinator= linreg_setup.setup_problem(XTrain, YTrain, nFeatures, clientIdWLabel, encryLv, XInfer, qrMthd, colTrunc)
# XTrain, XInfer = linreg_setup.switch_order_4_dataset(XTrain, nFeatures, clientIdWLabel, XInfer)
weights = linreg_qr.solve_weights(clientMap, coordinator, encryLv, qrMthd, colTrunc)
XTrain = np.append(XTrain, np.ones([nTrain, 1]), axis=1)
rmse = np.linalg.norm(np.matmul(XTrain, weights)-YTrain)/math.sqrt(nTrain)
weightsExact = np.linalg.lstsq(XTrain, YTrain, rcond=None)[0]
rmseExact = np.linalg.norm(np.matmul(XTrain, weightsExact)-YTrain)/math.sqrt(nTrain)
print("The root mean square error of federated QR with ", qrMthd, " method based on is: ", rmse)
print("The root mean square error of numpy least square is: ", rmseExact)
XInfer = np.append(XInfer, np.ones([nInfer, 1]), axis=1)
prediction = np.matmul(XInfer, weights)
predictionExact = np.matmul(XInfer, weightsExact)
diff = prediction-predictionExact
print("The relative difference of prediction based on inf-norm is: ", np.linalg.norm(diff, np.inf)/np.linalg.norm(predictionExact, np.inf))
print("The relative difference of prediction based on 2-norm is: ", np.linalg.norm(diff)/np.linalg.norm(predictionExact))
print("="*60)
print("Problem5: Need to add extra random features and one of the client offers the test features which are all have rank deficient columns.")
nFeatures = [3, 1, 3, 11, 12]
rankDefiColIds = [3, 4, 5, 6]
encryLv = 3
XTrain, YTrain, XInfer = linreg_setup.generate_rankdefi_test_data(nTrain, nInfer, nFeatures, dataMax, rankDefiColIds)
clientMap, coordinator= linreg_setup.setup_problem(XTrain, YTrain, nFeatures, clientIdWLabel, encryLv, XInfer, qrMthd, colTrunc)
# XTrain, XInfer = linreg_setup.switch_order_4_dataset(XTrain, nFeatures, clientIdWLabel, XInfer)
weights = linreg_qr.solve_weights(clientMap, coordinator, encryLv, qrMthd, colTrunc)
XTrain = np.append(XTrain, np.ones([nTrain, 1]), axis=1)
rmse = np.linalg.norm(np.matmul(XTrain, weights)-YTrain)/math.sqrt(nTrain)
weightsExact = np.linalg.lstsq(XTrain, YTrain, rcond=None)[0]
rmseExact = np.linalg.norm(np.matmul(XTrain, weightsExact)-YTrain)/math.sqrt(nTrain)
print("The root mean square error of federated QR with ", qrMthd, " method based on is: ", rmse)
print("The root mean square error of numpy least square is: ", rmseExact)
XInfer = np.append(XInfer, np.ones([nInfer, 1]), axis=1)
prediction = np.matmul(XInfer, weights)
predictionExact = np.matmul(XInfer, weightsExact)
diff = prediction-predictionExact
print("The relative difference of prediction based on inf-norm is: ", np.linalg.norm(diff, np.inf)/np.linalg.norm(predictionExact, np.inf))
print("The relative difference of prediction based on 2-norm is: ", np.linalg.norm(diff)/np.linalg.norm(predictionExact))
print("="*60)
print("Problem6: Use the prestored classification data as the training and inference data which is rank definicient.")
encryLv = 3
clientMap, coordinator, XTrain, YTrain, XInfer = linreg_setup.setup_problem_4_prestored_data(encryLv, qrMthd, colTrunc)
# XTrain, XInfer = linreg_setup.switch_order_4_dataset(XTrain, nFeatures, clientIdWLabel, XInfer)
nTrain = XTrain.shape[0]
nInfer = XInfer.shape[0]
weights = linreg_qr.solve_weights(clientMap, coordinator, encryLv, qrMthd, colTrunc)
XTrain = np.append(XTrain, np.ones([nTrain, 1]), axis=1)
rmse = np.linalg.norm(np.matmul(XTrain, weights)-YTrain)/math.sqrt(nTrain)
weightsExact = np.linalg.lstsq(XTrain, YTrain, rcond=None)[0]
rmseExact = np.linalg.norm(np.matmul(XTrain, weightsExact)-YTrain)/math.sqrt(nTrain)
print("The root mean square error of federated QR with ", qrMthd, " method based on is: ", rmse)
print("The root mean square error of numpy least square is: ", rmseExact)
XInfer = np.append(XInfer, np.ones([nInfer, 1]), axis=1)
prediction = np.matmul(XInfer, weights)
predictionExact = np.matmul(XInfer, weightsExact)
diff = prediction-predictionExact
print("The relative difference of prediction based on inf-norm is: ", np.linalg.norm(diff, np.inf)/np.linalg.norm(predictionExact, np.inf))
print("The relative difference of prediction based on 2-norm is: ", np.linalg.norm(diff)/np.linalg.norm(predictionExact))
# Run all the accuracy tests.
if __name__ == "__main__":
test_accuracy("HouseHolder", False)
test_accuracy("GramSchmidt", False)
test_accuracy("HouseHolder", True)
test_accuracy("GramSchmidt", True)
|
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.hdr_ipt` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_hdr_IPT, hdr_IPT_to_XYZ
from colour.models.hdr_ipt import exponent_hdr_IPT
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['TestExponent_hdr_IPT', 'TestXYZ_to_hdr_IPT', 'TestHdr_IPT_to_XYZ']
class TestExponent_hdr_IPT(unittest.TestCase):
"""
Defines :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
definition unit tests methods.
"""
def test_exponent_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition.
"""
self.assertAlmostEqual(
exponent_hdr_IPT(0.2, 100), 0.482020919845900, places=7)
self.assertAlmostEqual(
exponent_hdr_IPT(0.4, 100), 0.667413581325092, places=7)
self.assertAlmostEqual(
exponent_hdr_IPT(0.4, 100, method='Fairchild 2010'),
1.219933220992410,
places=7)
self.assertAlmostEqual(
exponent_hdr_IPT(0.2, 1000), 0.723031379768850, places=7)
def test_n_dimensional_exponent_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition
n-dimensional arrays support.
"""
Y_s = 0.2
Y_abs = 100
epsilon = exponent_hdr_IPT(Y_s, Y_abs)
Y_s = np.tile(Y_s, 6)
Y_abs = np.tile(Y_abs, 6)
epsilon = np.tile(epsilon, 6)
np.testing.assert_almost_equal(
exponent_hdr_IPT(Y_s, Y_abs), epsilon, decimal=7)
Y_s = np.reshape(Y_s, (2, 3))
Y_abs = np.reshape(Y_abs, (2, 3))
epsilon = np.reshape(epsilon, (2, 3))
np.testing.assert_almost_equal(
exponent_hdr_IPT(Y_s, Y_abs), epsilon, decimal=7)
Y_s = np.reshape(Y_s, (2, 3, 1))
Y_abs = np.reshape(Y_abs, (2, 3, 1))
epsilon = np.reshape(epsilon, (2, 3, 1))
np.testing.assert_almost_equal(
exponent_hdr_IPT(Y_s, Y_abs), epsilon, decimal=7)
def test_domain_range_scale_exponent_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition domain
and range scale support.
"""
Y_s = 0.2
Y_abs = 100
epsilon = exponent_hdr_IPT(Y_s, Y_abs)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
exponent_hdr_IPT(Y_s * factor, Y_abs), epsilon, decimal=7)
@ignore_numpy_errors
def test_nan_exponent_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition nan
support.
"""
cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
exponent_hdr_IPT(cases, cases)
class TestXYZ_to_hdr_IPT(unittest.TestCase):
"""
Defines :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition unit tests
methods.
"""
def test_XYZ_to_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([48.39376346, 42.44990202, 22.01954033]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.20654008, 0.12197225, 0.05136952]),
method='Fairchild 2010'),
np.array([30.02873147, 83.93845061, 34.90287382]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.20654008, 0.12197225, 0.05136952]), Y_s=0.5),
np.array([20.75088680, 37.98300971, 16.66974299]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.07818780, 0.06157201, 0.28099326]), Y_abs=1000),
np.array([23.83205010, -5.98739209, -32.74311745]),
decimal=7)
def test_n_dimensional_XYZ_to_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition
n-dimensional support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
Y_s = 0.2
Y_abs = 100
IPT_hdr = XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs)
XYZ = np.tile(XYZ, (6, 1))
IPT_hdr = np.tile(IPT_hdr, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7)
Y_s = np.tile(Y_s, 6)
Y_abs = np.tile(Y_abs, 6)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
Y_s = np.reshape(Y_s, (2, 3))
Y_abs = np.reshape(Y_abs, (2, 3))
IPT_hdr = np.reshape(IPT_hdr, (2, 3, 3))
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7)
def test_domain_range_scale_XYZ_to_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition domain
and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
Y_s = 0.2
Y_abs = 100
IPT_hdr = XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs)
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(XYZ * factor_a, Y_s * factor_a, Y_abs),
IPT_hdr * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_hdr_IPT(self):
"""
Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
Y_s = case[0]
Y_abs = case[0]
XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs)
class TestHdr_IPT_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition unit tests
methods.
"""
def test_hdr_IPT_to_XYZ(self):
"""
Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(np.array([48.39376346, 42.44990202, 22.01954033])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
np.array([30.02873147, 83.93845061, 34.90287382]),
method='Fairchild 2010'),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
np.array([20.75088680, 37.98300971, 16.66974299]), Y_s=0.5),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
np.array([23.83205010, -5.98739209, -32.74311745]),
Y_abs=1000),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7)
def test_n_dimensional_hdr_IPT_to_XYZ(self):
"""
Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition
n-dimensional support.
"""
IPT_hdr = np.array([48.39376346, 42.44990202, 22.01954033])
Y_s = 0.2
Y_abs = 100
XYZ = hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs)
IPT_hdr = np.tile(IPT_hdr, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs), XYZ, decimal=7)
Y_s = np.tile(Y_s, 6)
Y_abs = np.tile(Y_abs, 6)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs), XYZ, decimal=7)
IPT_hdr = np.reshape(IPT_hdr, (2, 3, 3))
Y_s = np.reshape(Y_s, (2, 3))
Y_abs = np.reshape(Y_abs, (2, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs), XYZ, decimal=7)
def test_domain_range_scale_hdr_IPT_to_XYZ(self):
"""
Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition domain
and range scale support.
"""
IPT_hdr = np.array([24.88927680, -11.44574144, 1.63147707])
Y_s = 0.2
Y_abs = 100
XYZ = hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs)
d_r = (('reference', 1, 1, 1), (1, 0.01, 1, 1), (100, 1, 100, 100))
for scale, factor_a, factor_b, factor_c in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(IPT_hdr * factor_a, Y_s * factor_b, Y_abs),
XYZ * factor_c,
decimal=7)
@ignore_numpy_errors
def test_nan_hdr_IPT_to_XYZ(self):
"""
Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
IPT_hdr = np.array(case)
Y_s = case[0]
Y_abs = case[0]
hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs)
if __name__ == '__main__':
unittest.main()
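# Illustrative round-trip sketch (values taken from the tests above; the
# default Y_s=0.2 and Y_abs=100 are assumed):
#
#   >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
#   >>> IPT_hdr = XYZ_to_hdr_IPT(XYZ)   # ~[48.394, 42.450, 22.020]
#   >>> hdr_IPT_to_XYZ(IPT_hdr)         # recovers XYZ to 7 decimal places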
|
[STATEMENT]
lemma equalOn_UnD: "equalOn (A Un B) f g ==> equalOn A f g & equalOn B f g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. equalOn (A \<union> B) f g \<Longrightarrow> equalOn A f g \<and> equalOn B f g
[PROOF STEP]
by(auto simp: equalOn_def)
\<comment> \<open>FIXME move following elsewhere?\<close>
|
[STATEMENT]
lemma SET025_4:
"EQU001_0_ax equal &
(\<forall>Y X. member(X::'a,Y) --> little_set(X)) &
(\<forall>X Y. little_set(f1(X::'a,Y)) | equal(X::'a,Y)) &
(\<forall>X Y. member(f1(X::'a,Y),X) | member(f1(X::'a,Y),Y) | equal(X::'a,Y)) &
(\<forall>X Y. member(f1(X::'a,Y),X) & member(f1(X::'a,Y),Y) --> equal(X::'a,Y)) &
(\<forall>X U Y. member(U::'a,non_ordered_pair(X::'a,Y)) --> equal(U::'a,X) | equal(U::'a,Y)) &
(\<forall>Y U X. little_set(U) & equal(U::'a,X) --> member(U::'a,non_ordered_pair(X::'a,Y))) &
(\<forall>X U Y. little_set(U) & equal(U::'a,Y) --> member(U::'a,non_ordered_pair(X::'a,Y))) &
(\<forall>X Y. little_set(non_ordered_pair(X::'a,Y))) &
(\<forall>X. equal(singleton_set(X),non_ordered_pair(X::'a,X))) &
(\<forall>X Y. equal(ordered_pair(X::'a,Y),non_ordered_pair(singleton_set(X),non_ordered_pair(X::'a,Y)))) &
(\<forall>X. ordered_pair_predicate(X) --> little_set(f2(X))) &
(\<forall>X. ordered_pair_predicate(X) --> little_set(f3(X))) &
(\<forall>X. ordered_pair_predicate(X) --> equal(X::'a,ordered_pair(f2(X),f3(X)))) &
(\<forall>X Y Z. little_set(Y) & little_set(Z) & equal(X::'a,ordered_pair(Y::'a,Z)) --> ordered_pair_predicate(X)) &
(\<forall>Z X. member(Z::'a,first(X)) --> little_set(f4(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,first(X)) --> little_set(f5(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,first(X)) --> equal(X::'a,ordered_pair(f4(Z::'a,X),f5(Z::'a,X)))) &
(\<forall>Z X. member(Z::'a,first(X)) --> member(Z::'a,f4(Z::'a,X))) &
(\<forall>X V Z U. little_set(U) & little_set(V) & equal(X::'a,ordered_pair(U::'a,V)) & member(Z::'a,U) --> member(Z::'a,first(X))) &
(\<forall>Z X. member(Z::'a,second(X)) --> little_set(f6(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,second(X)) --> little_set(f7(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,second(X)) --> equal(X::'a,ordered_pair(f6(Z::'a,X),f7(Z::'a,X)))) &
(\<forall>Z X. member(Z::'a,second(X)) --> member(Z::'a,f7(Z::'a,X))) &
(\<forall>X U Z V. little_set(U) & little_set(V) & equal(X::'a,ordered_pair(U::'a,V)) & member(Z::'a,V) --> member(Z::'a,second(X))) &
(\<forall>Z. member(Z::'a,estin) --> ordered_pair_predicate(Z)) &
(\<forall>Z. member(Z::'a,estin) --> member(first(Z),second(Z))) &
(\<forall>Z. little_set(Z) & ordered_pair_predicate(Z) & member(first(Z),second(Z)) --> member(Z::'a,estin)) &
(\<forall>Y Z X. member(Z::'a,intersection(X::'a,Y)) --> member(Z::'a,X)) &
(\<forall>X Z Y. member(Z::'a,intersection(X::'a,Y)) --> member(Z::'a,Y)) &
(\<forall>X Z Y. member(Z::'a,X) & member(Z::'a,Y) --> member(Z::'a,intersection(X::'a,Y))) &
(\<forall>Z X. ~(member(Z::'a,complement(X)) & member(Z::'a,X))) &
(\<forall>Z X. little_set(Z) --> member(Z::'a,complement(X)) | member(Z::'a,X)) &
(\<forall>X Y. equal(union(X::'a,Y),complement(intersection(complement(X),complement(Y))))) &
(\<forall>Z X. member(Z::'a,domain_of(X)) --> ordered_pair_predicate(f8(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,domain_of(X)) --> member(f8(Z::'a,X),X)) &
(\<forall>Z X. member(Z::'a,domain_of(X)) --> equal(Z::'a,first(f8(Z::'a,X)))) &
(\<forall>X Z Xp. little_set(Z) & ordered_pair_predicate(Xp) & member(Xp::'a,X) & equal(Z::'a,first(Xp)) --> member(Z::'a,domain_of(X))) &
(\<forall>X Y Z. member(Z::'a,cross_product(X::'a,Y)) --> ordered_pair_predicate(Z)) &
(\<forall>Y Z X. member(Z::'a,cross_product(X::'a,Y)) --> member(first(Z),X)) &
(\<forall>X Z Y. member(Z::'a,cross_product(X::'a,Y)) --> member(second(Z),Y)) &
(\<forall>X Z Y. little_set(Z) & ordered_pair_predicate(Z) & member(first(Z),X) & member(second(Z),Y) --> member(Z::'a,cross_product(X::'a,Y))) &
(\<forall>X Z. member(Z::'a,inv1 X) --> ordered_pair_predicate(Z)) &
(\<forall>Z X. member(Z::'a,inv1 X) --> member(ordered_pair(second(Z),first(Z)),X)) &
(\<forall>Z X. little_set(Z) & ordered_pair_predicate(Z) & member(ordered_pair(second(Z),first(Z)),X) --> member(Z::'a,inv1 X)) &
(\<forall>Z X. member(Z::'a,rot_right(X)) --> little_set(f9(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,rot_right(X)) --> little_set(f10(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,rot_right(X)) --> little_set(f11(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,rot_right(X)) --> equal(Z::'a,ordered_pair(f9(Z::'a,X),ordered_pair(f10(Z::'a,X),f11(Z::'a,X))))) &
(\<forall>Z X. member(Z::'a,rot_right(X)) --> member(ordered_pair(f10(Z::'a,X),ordered_pair(f11(Z::'a,X),f9(Z::'a,X))),X)) &
(\<forall>Z V W U X. little_set(Z) & little_set(U) & little_set(V) & little_set(W) & equal(Z::'a,ordered_pair(U::'a,ordered_pair(V::'a,W))) & member(ordered_pair(V::'a,ordered_pair(W::'a,U)),X) --> member(Z::'a,rot_right(X))) &
(\<forall>Z X. member(Z::'a,flip_range_of(X)) --> little_set(f12(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,flip_range_of(X)) --> little_set(f13(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,flip_range_of(X)) --> little_set(f14(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,flip_range_of(X)) --> equal(Z::'a,ordered_pair(f12(Z::'a,X),ordered_pair(f13(Z::'a,X),f14(Z::'a,X))))) &
(\<forall>Z X. member(Z::'a,flip_range_of(X)) --> member(ordered_pair(f12(Z::'a,X),ordered_pair(f14(Z::'a,X),f13(Z::'a,X))),X)) &
(\<forall>Z U W V X. little_set(Z) & little_set(U) & little_set(V) & little_set(W) & equal(Z::'a,ordered_pair(U::'a,ordered_pair(V::'a,W))) & member(ordered_pair(U::'a,ordered_pair(W::'a,V)),X) --> member(Z::'a,flip_range_of(X))) &
(\<forall>X. equal(successor(X),union(X::'a,singleton_set(X)))) &
(\<forall>Z. ~member(Z::'a,empty_set)) &
(\<forall>Z. little_set(Z) --> member(Z::'a,universal_set)) &
(little_set(infinity)) &
(member(empty_set::'a,infinity)) &
(\<forall>X. member(X::'a,infinity) --> member(successor(X),infinity)) &
(\<forall>Z X. member(Z::'a,sigma(X)) --> member(f16(Z::'a,X),X)) &
(\<forall>Z X. member(Z::'a,sigma(X)) --> member(Z::'a,f16(Z::'a,X))) &
(\<forall>X Z Y. member(Y::'a,X) & member(Z::'a,Y) --> member(Z::'a,sigma(X))) &
(\<forall>U. little_set(U) --> little_set(sigma(U))) &
(\<forall>X U Y. ssubset(X::'a,Y) & member(U::'a,X) --> member(U::'a,Y)) &
(\<forall>Y X. ssubset(X::'a,Y) | member(f17(X::'a,Y),X)) &
(\<forall>X Y. member(f17(X::'a,Y),Y) --> ssubset(X::'a,Y)) &
(\<forall>X Y. proper_subset(X::'a,Y) --> ssubset(X::'a,Y)) &
(\<forall>X Y. ~(proper_subset(X::'a,Y) & equal(X::'a,Y))) &
(\<forall>X Y. ssubset(X::'a,Y) --> proper_subset(X::'a,Y) | equal(X::'a,Y)) &
(\<forall>Z X. member(Z::'a,powerset(X)) --> ssubset(Z::'a,X)) &
(\<forall>Z X. little_set(Z) & ssubset(Z::'a,X) --> member(Z::'a,powerset(X))) &
(\<forall>U. little_set(U) --> little_set(powerset(U))) &
(\<forall>Z X. relation(Z) & member(X::'a,Z) --> ordered_pair_predicate(X)) &
(\<forall>Z. relation(Z) | member(f18(Z),Z)) &
(\<forall>Z. ordered_pair_predicate(f18(Z)) --> relation(Z)) &
(\<forall>U X V W. single_valued_set(X) & little_set(U) & little_set(V) & little_set(W) & member(ordered_pair(U::'a,V),X) & member(ordered_pair(U::'a,W),X) --> equal(V::'a,W)) &
(\<forall>X. single_valued_set(X) | little_set(f19(X))) &
(\<forall>X. single_valued_set(X) | little_set(f20(X))) &
(\<forall>X. single_valued_set(X) | little_set(f21(X))) &
(\<forall>X. single_valued_set(X) | member(ordered_pair(f19(X),f20(X)),X)) &
(\<forall>X. single_valued_set(X) | member(ordered_pair(f19(X),f21(X)),X)) &
(\<forall>X. equal(f20(X),f21(X)) --> single_valued_set(X)) &
(\<forall>Xf. function(Xf) --> relation(Xf)) &
(\<forall>Xf. function(Xf) --> single_valued_set(Xf)) &
(\<forall>Xf. relation(Xf) & single_valued_set(Xf) --> function(Xf)) &
(\<forall>Z X Xf. member(Z::'a,image'(X::'a,Xf)) --> ordered_pair_predicate(f22(Z::'a,X,Xf))) &
(\<forall>Z X Xf. member(Z::'a,image'(X::'a,Xf)) --> member(f22(Z::'a,X,Xf),Xf)) &
(\<forall>Z Xf X. member(Z::'a,image'(X::'a,Xf)) --> member(first(f22(Z::'a,X,Xf)),X)) &
(\<forall>X Xf Z. member(Z::'a,image'(X::'a,Xf)) --> equal(second(f22(Z::'a,X,Xf)),Z)) &
(\<forall>Xf X Y Z. little_set(Z) & ordered_pair_predicate(Y) & member(Y::'a,Xf) & member(first(Y),X) & equal(second(Y),Z) --> member(Z::'a,image'(X::'a,Xf))) &
(\<forall>X Xf. little_set(X) & function(Xf) --> little_set(image'(X::'a,Xf))) &
(\<forall>X U Y. ~(disjoint(X::'a,Y) & member(U::'a,X) & member(U::'a,Y))) &
(\<forall>Y X. disjoint(X::'a,Y) | member(f23(X::'a,Y),X)) &
(\<forall>X Y. disjoint(X::'a,Y) | member(f23(X::'a,Y),Y)) &
(\<forall>X. equal(X::'a,empty_set) | member(f24(X),X)) &
(\<forall>X. equal(X::'a,empty_set) | disjoint(f24(X),X)) &
(function(f25)) &
(\<forall>X. little_set(X) --> equal(X::'a,empty_set) | member(f26(X),X)) &
(\<forall>X. little_set(X) --> equal(X::'a,empty_set) | member(ordered_pair(X::'a,f26(X)),f25)) &
(\<forall>Z X. member(Z::'a,range_of(X)) --> ordered_pair_predicate(f27(Z::'a,X))) &
(\<forall>Z X. member(Z::'a,range_of(X)) --> member(f27(Z::'a,X),X)) &
(\<forall>Z X. member(Z::'a,range_of(X)) --> equal(Z::'a,second(f27(Z::'a,X)))) &
(\<forall>X Z Xp. little_set(Z) & ordered_pair_predicate(Xp) & member(Xp::'a,X) & equal(Z::'a,second(Xp)) --> member(Z::'a,range_of(X))) &
(\<forall>Z. member(Z::'a,identity_relation) --> ordered_pair_predicate(Z)) &
(\<forall>Z. member(Z::'a,identity_relation) --> equal(first(Z),second(Z))) &
(\<forall>Z. little_set(Z) & ordered_pair_predicate(Z) & equal(first(Z),second(Z)) --> member(Z::'a,identity_relation)) &
(\<forall>X Y. equal(restrct(X::'a,Y),intersection(X::'a,cross_product(Y::'a,universal_set)))) &
(\<forall>Xf. one_to_one_function(Xf) --> function(Xf)) &
(\<forall>Xf. one_to_one_function(Xf) --> function(inv1 Xf)) &
(\<forall>Xf. function(Xf) & function(inv1 Xf) --> one_to_one_function(Xf)) &
(\<forall>Z Xf Y. member(Z::'a,apply(Xf::'a,Y)) --> ordered_pair_predicate(f28(Z::'a,Xf,Y))) &
(\<forall>Z Y Xf. member(Z::'a,apply(Xf::'a,Y)) --> member(f28(Z::'a,Xf,Y),Xf)) &
(\<forall>Z Xf Y. member(Z::'a,apply(Xf::'a,Y)) --> equal(first(f28(Z::'a,Xf,Y)),Y)) &
(\<forall>Z Xf Y. member(Z::'a,apply(Xf::'a,Y)) --> member(Z::'a,second(f28(Z::'a,Xf,Y)))) &
(\<forall>Xf Y Z W. ordered_pair_predicate(W) & member(W::'a,Xf) & equal(first(W),Y) & member(Z::'a,second(W)) --> member(Z::'a,apply(Xf::'a,Y))) &
(\<forall>Xf X Y. equal(apply_to_two_arguments(Xf::'a,X,Y),apply(Xf::'a,ordered_pair(X::'a,Y)))) &
(\<forall>X Y Xf. maps(Xf::'a,X,Y) --> function(Xf)) &
(\<forall>Y Xf X. maps(Xf::'a,X,Y) --> equal(domain_of(Xf),X)) &
(\<forall>X Xf Y. maps(Xf::'a,X,Y) --> ssubset(range_of(Xf),Y)) &
(\<forall>X Xf Y. function(Xf) & equal(domain_of(Xf),X) & ssubset(range_of(Xf),Y) --> maps(Xf::'a,X,Y)) &
(\<forall>Xf Xs. closed(Xs::'a,Xf) --> little_set(Xs)) &
(\<forall>Xs Xf. closed(Xs::'a,Xf) --> little_set(Xf)) &
(\<forall>Xf Xs. closed(Xs::'a,Xf) --> maps(Xf::'a,cross_product(Xs::'a,Xs),Xs)) &
(\<forall>Xf Xs. little_set(Xs) & little_set(Xf) & maps(Xf::'a,cross_product(Xs::'a,Xs),Xs) --> closed(Xs::'a,Xf)) &
(\<forall>Z Xf Xg. member(Z::'a,composition(Xf::'a,Xg)) --> little_set(f29(Z::'a,Xf,Xg))) &
(\<forall>Z Xf Xg. member(Z::'a,composition(Xf::'a,Xg)) --> little_set(f30(Z::'a,Xf,Xg))) &
(\<forall>Z Xf Xg. member(Z::'a,composition(Xf::'a,Xg)) --> little_set(f31(Z::'a,Xf,Xg))) &
(\<forall>Z Xf Xg. member(Z::'a,composition(Xf::'a,Xg)) --> equal(Z::'a,ordered_pair(f29(Z::'a,Xf,Xg),f30(Z::'a,Xf,Xg)))) &
(\<forall>Z Xg Xf. member(Z::'a,composition(Xf::'a,Xg)) --> member(ordered_pair(f29(Z::'a,Xf,Xg),f31(Z::'a,Xf,Xg)),Xf)) &
(\<forall>Z Xf Xg. member(Z::'a,composition(Xf::'a,Xg)) --> member(ordered_pair(f31(Z::'a,Xf,Xg),f30(Z::'a,Xf,Xg)),Xg)) &
(\<forall>Z X Xf W Y Xg. little_set(Z) & little_set(X) & little_set(Y) & little_set(W) & equal(Z::'a,ordered_pair(X::'a,Y)) & member(ordered_pair(X::'a,W),Xf) & member(ordered_pair(W::'a,Y),Xg) --> member(Z::'a,composition(Xf::'a,Xg))) &
(\<forall>Xh Xs2 Xf2 Xs1 Xf1. homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2) --> closed(Xs1::'a,Xf1)) &
(\<forall>Xh Xs1 Xf1 Xs2 Xf2. homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2) --> closed(Xs2::'a,Xf2)) &
(\<forall>Xf1 Xf2 Xh Xs1 Xs2. homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2) --> maps(Xh::'a,Xs1,Xs2)) &
(\<forall>Xs2 Xs1 Xf1 Xf2 X Xh Y. homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2) & member(X::'a,Xs1) & member(Y::'a,Xs1) --> equal(apply(Xh::'a,apply_to_two_arguments(Xf1::'a,X,Y)),apply_to_two_arguments(Xf2::'a,apply(Xh::'a,X),apply(Xh::'a,Y)))) &
(\<forall>Xh Xf1 Xs2 Xf2 Xs1. closed(Xs1::'a,Xf1) & closed(Xs2::'a,Xf2) & maps(Xh::'a,Xs1,Xs2) --> homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2) | member(f32(Xh::'a,Xs1,Xf1,Xs2,Xf2),Xs1)) &
(\<forall>Xh Xf1 Xs2 Xf2 Xs1. closed(Xs1::'a,Xf1) & closed(Xs2::'a,Xf2) & maps(Xh::'a,Xs1,Xs2) --> homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2) | member(f33(Xh::'a,Xs1,Xf1,Xs2,Xf2),Xs1)) &
(\<forall>Xh Xs1 Xf1 Xs2 Xf2. closed(Xs1::'a,Xf1) & closed(Xs2::'a,Xf2) & maps(Xh::'a,Xs1,Xs2) & equal(apply(Xh::'a,apply_to_two_arguments(Xf1::'a,f32(Xh::'a,Xs1,Xf1,Xs2,Xf2),f33(Xh::'a,Xs1,Xf1,Xs2,Xf2))),apply_to_two_arguments(Xf2::'a,apply(Xh::'a,f32(Xh::'a,Xs1,Xf1,Xs2,Xf2)),apply(Xh::'a,f33(Xh::'a,Xs1,Xf1,Xs2,Xf2)))) --> homomorphism(Xh::'a,Xs1,Xf1,Xs2,Xf2)) &
(\<forall>A B C. equal(A::'a,B) --> equal(f1(A::'a,C),f1(B::'a,C))) &
(\<forall>D F' E. equal(D::'a,E) --> equal(f1(F'::'a,D),f1(F'::'a,E))) &
(\<forall>A2 B2. equal(A2::'a,B2) --> equal(f2(A2),f2(B2))) &
(\<forall>G4 H4. equal(G4::'a,H4) --> equal(f3(G4),f3(H4))) &
(\<forall>O7 P7 Q7. equal(O7::'a,P7) --> equal(f4(O7::'a,Q7),f4(P7::'a,Q7))) &
(\<forall>R7 T7 S7. equal(R7::'a,S7) --> equal(f4(T7::'a,R7),f4(T7::'a,S7))) &
(\<forall>U7 V7 W7. equal(U7::'a,V7) --> equal(f5(U7::'a,W7),f5(V7::'a,W7))) &
(\<forall>X7 Z7 Y7. equal(X7::'a,Y7) --> equal(f5(Z7::'a,X7),f5(Z7::'a,Y7))) &
(\<forall>A8 B8 C8. equal(A8::'a,B8) --> equal(f6(A8::'a,C8),f6(B8::'a,C8))) &
(\<forall>D8 F8 E8. equal(D8::'a,E8) --> equal(f6(F8::'a,D8),f6(F8::'a,E8))) &
(\<forall>G8 H8 I8. equal(G8::'a,H8) --> equal(f7(G8::'a,I8),f7(H8::'a,I8))) &
(\<forall>J8 L8 K8. equal(J8::'a,K8) --> equal(f7(L8::'a,J8),f7(L8::'a,K8))) &
(\<forall>M8 N8 O8. equal(M8::'a,N8) --> equal(f8(M8::'a,O8),f8(N8::'a,O8))) &
(\<forall>P8 R8 Q8. equal(P8::'a,Q8) --> equal(f8(R8::'a,P8),f8(R8::'a,Q8))) &
(\<forall>S8 T8 U8. equal(S8::'a,T8) --> equal(f9(S8::'a,U8),f9(T8::'a,U8))) &
(\<forall>V8 X8 W8. equal(V8::'a,W8) --> equal(f9(X8::'a,V8),f9(X8::'a,W8))) &
(\<forall>G H I'. equal(G::'a,H) --> equal(f10(G::'a,I'),f10(H::'a,I'))) &
(\<forall>J L K'. equal(J::'a,K') --> equal(f10(L::'a,J),f10(L::'a,K'))) &
(\<forall>M N O'. equal(M::'a,N) --> equal(f11(M::'a,O'),f11(N::'a,O'))) &
(\<forall>P R Q. equal(P::'a,Q) --> equal(f11(R::'a,P),f11(R::'a,Q))) &
(\<forall>S' T' U. equal(S'::'a,T') --> equal(f12(S'::'a,U),f12(T'::'a,U))) &
(\<forall>V X W. equal(V::'a,W) --> equal(f12(X::'a,V),f12(X::'a,W))) &
(\<forall>Y Z A1. equal(Y::'a,Z) --> equal(f13(Y::'a,A1),f13(Z::'a,A1))) &
(\<forall>B1 D1 C1. equal(B1::'a,C1) --> equal(f13(D1::'a,B1),f13(D1::'a,C1))) &
(\<forall>E1 F1 G1. equal(E1::'a,F1) --> equal(f14(E1::'a,G1),f14(F1::'a,G1))) &
(\<forall>H1 J1 I1. equal(H1::'a,I1) --> equal(f14(J1::'a,H1),f14(J1::'a,I1))) &
(\<forall>K1 L1 M1. equal(K1::'a,L1) --> equal(f16(K1::'a,M1),f16(L1::'a,M1))) &
(\<forall>N1 P1 O1. equal(N1::'a,O1) --> equal(f16(P1::'a,N1),f16(P1::'a,O1))) &
(\<forall>Q1 R1 S1. equal(Q1::'a,R1) --> equal(f17(Q1::'a,S1),f17(R1::'a,S1))) &
(\<forall>T1 V1 U1. equal(T1::'a,U1) --> equal(f17(V1::'a,T1),f17(V1::'a,U1))) &
(\<forall>W1 X1. equal(W1::'a,X1) --> equal(f18(W1),f18(X1))) &
(\<forall>Y1 Z1. equal(Y1::'a,Z1) --> equal(f19(Y1),f19(Z1))) &
(\<forall>C2 D2. equal(C2::'a,D2) --> equal(f20(C2),f20(D2))) &
(\<forall>E2 F2. equal(E2::'a,F2) --> equal(f21(E2),f21(F2))) &
(\<forall>G2 H2 I2 J2. equal(G2::'a,H2) --> equal(f22(G2::'a,I2,J2),f22(H2::'a,I2,J2))) &
(\<forall>K2 M2 L2 N2. equal(K2::'a,L2) --> equal(f22(M2::'a,K2,N2),f22(M2::'a,L2,N2))) &
(\<forall>O2 Q2 R2 P2. equal(O2::'a,P2) --> equal(f22(Q2::'a,R2,O2),f22(Q2::'a,R2,P2))) &
(\<forall>S2 T2 U2. equal(S2::'a,T2) --> equal(f23(S2::'a,U2),f23(T2::'a,U2))) &
(\<forall>V2 X2 W2. equal(V2::'a,W2) --> equal(f23(X2::'a,V2),f23(X2::'a,W2))) &
(\<forall>Y2 Z2. equal(Y2::'a,Z2) --> equal(f24(Y2),f24(Z2))) &
(\<forall>A3 B3. equal(A3::'a,B3) --> equal(f26(A3),f26(B3))) &
(\<forall>C3 D3 E3. equal(C3::'a,D3) --> equal(f27(C3::'a,E3),f27(D3::'a,E3))) &
(\<forall>F3 H3 G3. equal(F3::'a,G3) --> equal(f27(H3::'a,F3),f27(H3::'a,G3))) &
(\<forall>I3 J3 K3 L3. equal(I3::'a,J3) --> equal(f28(I3::'a,K3,L3),f28(J3::'a,K3,L3))) &
(\<forall>M3 O3 N3 P3. equal(M3::'a,N3) --> equal(f28(O3::'a,M3,P3),f28(O3::'a,N3,P3))) &
(\<forall>Q3 S3 T3 R3. equal(Q3::'a,R3) --> equal(f28(S3::'a,T3,Q3),f28(S3::'a,T3,R3))) &
(\<forall>U3 V3 W3 X3. equal(U3::'a,V3) --> equal(f29(U3::'a,W3,X3),f29(V3::'a,W3,X3))) &
(\<forall>Y3 A4 Z3 B4. equal(Y3::'a,Z3) --> equal(f29(A4::'a,Y3,B4),f29(A4::'a,Z3,B4))) &
(\<forall>C4 E4 F4 D4. equal(C4::'a,D4) --> equal(f29(E4::'a,F4,C4),f29(E4::'a,F4,D4))) &
(\<forall>I4 J4 K4 L4. equal(I4::'a,J4) --> equal(f30(I4::'a,K4,L4),f30(J4::'a,K4,L4))) &
(\<forall>M4 O4 N4 P4. equal(M4::'a,N4) --> equal(f30(O4::'a,M4,P4),f30(O4::'a,N4,P4))) &
(\<forall>Q4 S4 T4 R4. equal(Q4::'a,R4) --> equal(f30(S4::'a,T4,Q4),f30(S4::'a,T4,R4))) &
(\<forall>U4 V4 W4 X4. equal(U4::'a,V4) --> equal(f31(U4::'a,W4,X4),f31(V4::'a,W4,X4))) &
(\<forall>Y4 A5 Z4 B5. equal(Y4::'a,Z4) --> equal(f31(A5::'a,Y4,B5),f31(A5::'a,Z4,B5))) &
(\<forall>C5 E5 F5 D5. equal(C5::'a,D5) --> equal(f31(E5::'a,F5,C5),f31(E5::'a,F5,D5))) &
(\<forall>G5 H5 I5 J5 K5 L5. equal(G5::'a,H5) --> equal(f32(G5::'a,I5,J5,K5,L5),f32(H5::'a,I5,J5,K5,L5))) &
(\<forall>M5 O5 N5 P5 Q5 R5. equal(M5::'a,N5) --> equal(f32(O5::'a,M5,P5,Q5,R5),f32(O5::'a,N5,P5,Q5,R5))) &
(\<forall>S5 U5 V5 T5 W5 X5. equal(S5::'a,T5) --> equal(f32(U5::'a,V5,S5,W5,X5),f32(U5::'a,V5,T5,W5,X5))) &
(\<forall>Y5 A6 B6 C6 Z5 D6. equal(Y5::'a,Z5) --> equal(f32(A6::'a,B6,C6,Y5,D6),f32(A6::'a,B6,C6,Z5,D6))) &
(\<forall>E6 G6 H6 I6 J6 F6. equal(E6::'a,F6) --> equal(f32(G6::'a,H6,I6,J6,E6),f32(G6::'a,H6,I6,J6,F6))) &
(\<forall>K6 L6 M6 N6 O6 P6. equal(K6::'a,L6) --> equal(f33(K6::'a,M6,N6,O6,P6),f33(L6::'a,M6,N6,O6,P6))) &
(\<forall>Q6 S6 R6 T6 U6 V6. equal(Q6::'a,R6) --> equal(f33(S6::'a,Q6,T6,U6,V6),f33(S6::'a,R6,T6,U6,V6))) &
(\<forall>W6 Y6 Z6 X6 A7 B7. equal(W6::'a,X6) --> equal(f33(Y6::'a,Z6,W6,A7,B7),f33(Y6::'a,Z6,X6,A7,B7))) &
(\<forall>C7 E7 F7 G7 D7 H7. equal(C7::'a,D7) --> equal(f33(E7::'a,F7,G7,C7,H7),f33(E7::'a,F7,G7,D7,H7))) &
(\<forall>I7 K7 L7 M7 N7 J7. equal(I7::'a,J7) --> equal(f33(K7::'a,L7,M7,N7,I7),f33(K7::'a,L7,M7,N7,J7))) &
(\<forall>A B C. equal(A::'a,B) --> equal(apply(A::'a,C),apply(B::'a,C))) &
(\<forall>D F' E. equal(D::'a,E) --> equal(apply(F'::'a,D),apply(F'::'a,E))) &
(\<forall>G H I' J. equal(G::'a,H) --> equal(apply_to_two_arguments(G::'a,I',J),apply_to_two_arguments(H::'a,I',J))) &
(\<forall>K' M L N. equal(K'::'a,L) --> equal(apply_to_two_arguments(M::'a,K',N),apply_to_two_arguments(M::'a,L,N))) &
(\<forall>O' Q R P. equal(O'::'a,P) --> equal(apply_to_two_arguments(Q::'a,R,O'),apply_to_two_arguments(Q::'a,R,P))) &
(\<forall>S' T'. equal(S'::'a,T') --> equal(complement(S'),complement(T'))) &
(\<forall>U V W. equal(U::'a,V) --> equal(composition(U::'a,W),composition(V::'a,W))) &
(\<forall>X Z Y. equal(X::'a,Y) --> equal(composition(Z::'a,X),composition(Z::'a,Y))) &
(\<forall>A1 B1. equal(A1::'a,B1) --> equal(inv1 A1,inv1 B1)) &
(\<forall>C1 D1 E1. equal(C1::'a,D1) --> equal(cross_product(C1::'a,E1),cross_product(D1::'a,E1))) &
(\<forall>F1 H1 G1. equal(F1::'a,G1) --> equal(cross_product(H1::'a,F1),cross_product(H1::'a,G1))) &
(\<forall>I1 J1. equal(I1::'a,J1) --> equal(domain_of(I1),domain_of(J1))) &
(\<forall>I10 J10. equal(I10::'a,J10) --> equal(first(I10),first(J10))) &
(\<forall>Q10 R10. equal(Q10::'a,R10) --> equal(flip_range_of(Q10),flip_range_of(R10))) &
(\<forall>S10 T10 U10. equal(S10::'a,T10) --> equal(image'(S10::'a,U10),image'(T10::'a,U10))) &
(\<forall>V10 X10 W10. equal(V10::'a,W10) --> equal(image'(X10::'a,V10),image'(X10::'a,W10))) &
(\<forall>Y10 Z10 A11. equal(Y10::'a,Z10) --> equal(intersection(Y10::'a,A11),intersection(Z10::'a,A11))) &
(\<forall>B11 D11 C11. equal(B11::'a,C11) --> equal(intersection(D11::'a,B11),intersection(D11::'a,C11))) &
(\<forall>E11 F11 G11. equal(E11::'a,F11) --> equal(non_ordered_pair(E11::'a,G11),non_ordered_pair(F11::'a,G11))) &
(\<forall>H11 J11 I11. equal(H11::'a,I11) --> equal(non_ordered_pair(J11::'a,H11),non_ordered_pair(J11::'a,I11))) &
(\<forall>K11 L11 M11. equal(K11::'a,L11) --> equal(ordered_pair(K11::'a,M11),ordered_pair(L11::'a,M11))) &
(\<forall>N11 P11 O11. equal(N11::'a,O11) --> equal(ordered_pair(P11::'a,N11),ordered_pair(P11::'a,O11))) &
(\<forall>Q11 R11. equal(Q11::'a,R11) --> equal(powerset(Q11),powerset(R11))) &
(\<forall>S11 T11. equal(S11::'a,T11) --> equal(range_of(S11),range_of(T11))) &
(\<forall>U11 V11 W11. equal(U11::'a,V11) --> equal(restrct(U11::'a,W11),restrct(V11::'a,W11))) &
(\<forall>X11 Z11 Y11. equal(X11::'a,Y11) --> equal(restrct(Z11::'a,X11),restrct(Z11::'a,Y11))) &
(\<forall>A12 B12. equal(A12::'a,B12) --> equal(rot_right(A12),rot_right(B12))) &
(\<forall>C12 D12. equal(C12::'a,D12) --> equal(second(C12),second(D12))) &
(\<forall>K12 L12. equal(K12::'a,L12) --> equal(sigma(K12),sigma(L12))) &
(\<forall>M12 N12. equal(M12::'a,N12) --> equal(singleton_set(M12),singleton_set(N12))) &
(\<forall>O12 P12. equal(O12::'a,P12) --> equal(successor(O12),successor(P12))) &
(\<forall>Q12 R12 S12. equal(Q12::'a,R12) --> equal(union(Q12::'a,S12),union(R12::'a,S12))) &
(\<forall>T12 V12 U12. equal(T12::'a,U12) --> equal(union(V12::'a,T12),union(V12::'a,U12))) &
(\<forall>W12 X12 Y12. equal(W12::'a,X12) & closed(W12::'a,Y12) --> closed(X12::'a,Y12)) &
(\<forall>Z12 B13 A13. equal(Z12::'a,A13) & closed(B13::'a,Z12) --> closed(B13::'a,A13)) &
(\<forall>C13 D13 E13. equal(C13::'a,D13) & disjoint(C13::'a,E13) --> disjoint(D13::'a,E13)) &
(\<forall>F13 H13 G13. equal(F13::'a,G13) & disjoint(H13::'a,F13) --> disjoint(H13::'a,G13)) &
(\<forall>I13 J13. equal(I13::'a,J13) & function(I13) --> function(J13)) &
(\<forall>K13 L13 M13 N13 O13 P13. equal(K13::'a,L13) & homomorphism(K13::'a,M13,N13,O13,P13) --> homomorphism(L13::'a,M13,N13,O13,P13)) &
(\<forall>Q13 S13 R13 T13 U13 V13. equal(Q13::'a,R13) & homomorphism(S13::'a,Q13,T13,U13,V13) --> homomorphism(S13::'a,R13,T13,U13,V13)) &
(\<forall>W13 Y13 Z13 X13 A14 B14. equal(W13::'a,X13) & homomorphism(Y13::'a,Z13,W13,A14,B14) --> homomorphism(Y13::'a,Z13,X13,A14,B14)) &
(\<forall>C14 E14 F14 G14 D14 H14. equal(C14::'a,D14) & homomorphism(E14::'a,F14,G14,C14,H14) --> homomorphism(E14::'a,F14,G14,D14,H14)) &
(\<forall>I14 K14 L14 M14 N14 J14. equal(I14::'a,J14) & homomorphism(K14::'a,L14,M14,N14,I14) --> homomorphism(K14::'a,L14,M14,N14,J14)) &
(\<forall>O14 P14. equal(O14::'a,P14) & little_set(O14) --> little_set(P14)) &
(\<forall>Q14 R14 S14 T14. equal(Q14::'a,R14) & maps(Q14::'a,S14,T14) --> maps(R14::'a,S14,T14)) &
(\<forall>U14 W14 V14 X14. equal(U14::'a,V14) & maps(W14::'a,U14,X14) --> maps(W14::'a,V14,X14)) &
(\<forall>Y14 A15 B15 Z14. equal(Y14::'a,Z14) & maps(A15::'a,B15,Y14) --> maps(A15::'a,B15,Z14)) &
(\<forall>C15 D15 E15. equal(C15::'a,D15) & member(C15::'a,E15) --> member(D15::'a,E15)) &
(\<forall>F15 H15 G15. equal(F15::'a,G15) & member(H15::'a,F15) --> member(H15::'a,G15)) &
(\<forall>I15 J15. equal(I15::'a,J15) & one_to_one_function(I15) --> one_to_one_function(J15)) &
(\<forall>K15 L15. equal(K15::'a,L15) & ordered_pair_predicate(K15) --> ordered_pair_predicate(L15)) &
(\<forall>M15 N15 O15. equal(M15::'a,N15) & proper_subset(M15::'a,O15) --> proper_subset(N15::'a,O15)) &
(\<forall>P15 R15 Q15. equal(P15::'a,Q15) & proper_subset(R15::'a,P15) --> proper_subset(R15::'a,Q15)) &
(\<forall>S15 T15. equal(S15::'a,T15) & relation(S15) --> relation(T15)) &
(\<forall>U15 V15. equal(U15::'a,V15) & single_valued_set(U15) --> single_valued_set(V15)) &
(\<forall>W15 X15 Y15. equal(W15::'a,X15) & ssubset(W15::'a,Y15) --> ssubset(X15::'a,Y15)) &
(\<forall>Z15 B16 A16. equal(Z15::'a,A16) & ssubset(B16::'a,Z15) --> ssubset(B16::'a,A16)) &
(~little_set(ordered_pair(a::'a,b))) --> False"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. EQU001_0_ax equal \<and> (\<forall>Y X. member (X, Y) \<longrightarrow> little_set X) \<and> (\<forall>X Y. little_set (f1 (X, Y)) \<or> equal (X, Y)) \<and> (\<forall>X Y. member (f1 (X, Y), X) \<or> member (f1 (X, Y), Y) \<or> equal (X, Y)) \<and> (\<forall>X Y. member (f1 (X, Y), X) \<and> member (f1 (X, Y), Y) \<longrightarrow> equal (X, Y)) \<and> (\<forall>X U Y. member (U, non_ordered_pair (X, Y)) \<longrightarrow> equal (U, X) \<or> equal (U, Y)) \<and> (\<forall>Y U X. little_set U \<and> equal (U, X) \<longrightarrow> member (U, non_ordered_pair (X, Y))) \<and> (\<forall>X U Y. little_set U \<and> equal (U, Y) \<longrightarrow> member (U, non_ordered_pair (X, Y))) \<and> (\<forall>X Y. little_set (non_ordered_pair (X, Y))) \<and> (\<forall>X. equal (singleton_set X, non_ordered_pair (X, X))) \<and> (\<forall>X Y. equal (ordered_pair (X, Y), non_ordered_pair (singleton_set X, non_ordered_pair (X, Y)))) \<and> (\<forall>X. ordered_pair_predicate X \<longrightarrow> little_set (f2 X)) \<and> (\<forall>X. ordered_pair_predicate X \<longrightarrow> little_set (f3 X)) \<and> (\<forall>X. ordered_pair_predicate X \<longrightarrow> equal (X, ordered_pair (f2 X, f3 X))) \<and> (\<forall>X Y Z. little_set Y \<and> little_set Z \<and> equal (X, ordered_pair (Y, Z)) \<longrightarrow> ordered_pair_predicate X) \<and> (\<forall>Z X. member (Z, first X) \<longrightarrow> little_set (f4 (Z, X))) \<and> (\<forall>Z X. member (Z, first X) \<longrightarrow> little_set (f5 (Z, X))) \<and> (\<forall>Z X. member (Z, first X) \<longrightarrow> equal (X, ordered_pair (f4 (Z, X), f5 (Z, X)))) \<and> (\<forall>Z X. member (Z, first X) \<longrightarrow> member (Z, f4 (Z, X))) \<and> (\<forall>X V Z U. little_set U \<and> little_set V \<and> equal (X, ordered_pair (U, V)) \<and> member (Z, U) \<longrightarrow> member (Z, first X)) \<and> (\<forall>Z X. member (Z, second X) \<longrightarrow> little_set (f6 (Z, X))) \<and> (\<forall>Z X. member (Z, second X) \<longrightarrow> little_set (f7 (Z, X))) \<and> (\<forall>Z X. member (Z, second X) \<longrightarrow> equal (X, ordered_pair (f6 (Z, X), f7 (Z, X)))) \<and> (\<forall>Z X. member (Z, second X) \<longrightarrow> member (Z, f7 (Z, X))) \<and> (\<forall>X U Z V. little_set U \<and> little_set V \<and> equal (X, ordered_pair (U, V)) \<and> member (Z, V) \<longrightarrow> member (Z, second X)) \<and> (\<forall>Z. member (Z, estin) \<longrightarrow> ordered_pair_predicate Z) \<and> (\<forall>Z. member (Z, estin) \<longrightarrow> member (first Z, second Z)) \<and> (\<forall>Z. little_set Z \<and> ordered_pair_predicate Z \<and> member (first Z, second Z) \<longrightarrow> member (Z, estin)) \<and> (\<forall>Y Z X. member (Z, intersection (X, Y)) \<longrightarrow> member (Z, X)) \<and> (\<forall>X Z Y. member (Z, intersection (X, Y)) \<longrightarrow> member (Z, Y)) \<and> (\<forall>X Z Y. member (Z, X) \<and> member (Z, Y) \<longrightarrow> member (Z, intersection (X, Y))) \<and> (\<forall>Z X. \<not> (member (Z, complement X) \<and> member (Z, X))) \<and> (\<forall>Z X. little_set Z \<longrightarrow> member (Z, complement X) \<or> member (Z, X)) \<and> (\<forall>X Y. equal (union (X, Y), complement (intersection (complement X, complement Y)))) \<and> (\<forall>Z X. member (Z, domain_of X) \<longrightarrow> ordered_pair_predicate (f8 (Z, X))) \<and> (\<forall>Z X. member (Z, domain_of X) \<longrightarrow> member (f8 (Z, X), X)) \<and> (\<forall>Z X. member (Z, domain_of X) \<longrightarrow> equal (Z, first (f8 (Z, X)))) \<and> (\<forall>X Z Xp. 
little_set Z \<and> ordered_pair_predicate Xp \<and> member (Xp, X) \<and> equal (Z, first Xp) \<longrightarrow> member (Z, domain_of X)) \<and> (\<forall>X Y Z. member (Z, cross_product (X, Y)) \<longrightarrow> ordered_pair_predicate Z) \<and> (\<forall>Y Z X. member (Z, cross_product (X, Y)) \<longrightarrow> member (first Z, X)) \<and> (\<forall>X Z Y. member (Z, cross_product (X, Y)) \<longrightarrow> member (second Z, Y)) \<and> (\<forall>X Z Y. little_set Z \<and> ordered_pair_predicate Z \<and> member (first Z, X) \<and> member (second Z, Y) \<longrightarrow> member (Z, cross_product (X, Y))) \<and> (\<forall>X Z. member (Z, inv1 X) \<longrightarrow> ordered_pair_predicate Z) \<and> (\<forall>Z X. member (Z, inv1 X) \<longrightarrow> member (ordered_pair (second Z, first Z), X)) \<and> (\<forall>Z X. little_set Z \<and> ordered_pair_predicate Z \<and> member (ordered_pair (second Z, first Z), X) \<longrightarrow> member (Z, inv1 X)) \<and> (\<forall>Z X. member (Z, rot_right X) \<longrightarrow> little_set (f9 (Z, X))) \<and> (\<forall>Z X. member (Z, rot_right X) \<longrightarrow> little_set (f10 (Z, X))) \<and> (\<forall>Z X. member (Z, rot_right X) \<longrightarrow> little_set (f11 (Z, X))) \<and> (\<forall>Z X. member (Z, rot_right X) \<longrightarrow> equal (Z, ordered_pair (f9 (Z, X), ordered_pair (f10 (Z, X), f11 (Z, X))))) \<and> (\<forall>Z X. member (Z, rot_right X) \<longrightarrow> member (ordered_pair (f10 (Z, X), ordered_pair (f11 (Z, X), f9 (Z, X))), X)) \<and> (\<forall>Z V W U X. little_set Z \<and> little_set U \<and> little_set V \<and> little_set W \<and> equal (Z, ordered_pair (U, ordered_pair (V, W))) \<and> member (ordered_pair (V, ordered_pair (W, U)), X) \<longrightarrow> member (Z, rot_right X)) \<and> (\<forall>Z X. member (Z, flip_range_of X) \<longrightarrow> little_set (f12 (Z, X))) \<and> (\<forall>Z X. member (Z, flip_range_of X) \<longrightarrow> little_set (f13 (Z, X))) \<and> (\<forall>Z X. member (Z, flip_range_of X) \<longrightarrow> little_set (f14 (Z, X))) \<and> (\<forall>Z X. member (Z, flip_range_of X) \<longrightarrow> equal (Z, ordered_pair (f12 (Z, X), ordered_pair (f13 (Z, X), f14 (Z, X))))) \<and> (\<forall>Z X. member (Z, flip_range_of X) \<longrightarrow> member (ordered_pair (f12 (Z, X), ordered_pair (f14 (Z, X), f13 (Z, X))), X)) \<and> (\<forall>Z U W V X. little_set Z \<and> little_set U \<and> little_set V \<and> little_set W \<and> equal (Z, ordered_pair (U, ordered_pair (V, W))) \<and> member (ordered_pair (U, ordered_pair (W, V)), X) \<longrightarrow> member (Z, flip_range_of X)) \<and> (\<forall>X. equal (successor X, union (X, singleton_set X))) \<and> (\<forall>Z. \<not> member (Z, empty_set)) \<and> (\<forall>Z. little_set Z \<longrightarrow> member (Z, universal_set)) \<and> little_set infinity \<and> member (empty_set, infinity) \<and> (\<forall>X. member (X, infinity) \<longrightarrow> member (successor X, infinity)) \<and> (\<forall>Z X. member (Z, sigma X) \<longrightarrow> member (f16 (Z, X), X)) \<and> (\<forall>Z X. member (Z, sigma X) \<longrightarrow> member (Z, f16 (Z, X))) \<and> (\<forall>X Z Y. member (Y, X) \<and> member (Z, Y) \<longrightarrow> member (Z, sigma X)) \<and> (\<forall>U. little_set U \<longrightarrow> little_set (sigma U)) \<and> (\<forall>X U Y. ssubset (X, Y) \<and> member (U, X) \<longrightarrow> member (U, Y)) \<and> (\<forall>Y X. ssubset (X, Y) \<or> member (f17 (X, Y), X)) \<and> (\<forall>X Y. member (f17 (X, Y), Y) \<longrightarrow> ssubset (X, Y)) \<and> (\<forall>X Y. 
proper_subset (X, Y) \<longrightarrow> ssubset (X, Y)) \<and> (\<forall>X Y. \<not> (proper_subset (X, Y) \<and> equal (X, Y))) \<and> (\<forall>X Y. ssubset (X, Y) \<longrightarrow> proper_subset (X, Y) \<or> equal (X, Y)) \<and> (\<forall>Z X. member (Z, powerset X) \<longrightarrow> ssubset (Z, X)) \<and> (\<forall>Z X. little_set Z \<and> ssubset (Z, X) \<longrightarrow> member (Z, powerset X)) \<and> (\<forall>U. little_set U \<longrightarrow> little_set (powerset U)) \<and> (\<forall>Z X. relation Z \<and> member (X, Z) \<longrightarrow> ordered_pair_predicate X) \<and> (\<forall>Z. relation Z \<or> member (f18 Z, Z)) \<and> (\<forall>Z. ordered_pair_predicate (f18 Z) \<longrightarrow> relation Z) \<and> (\<forall>U X V W. single_valued_set X \<and> little_set U \<and> little_set V \<and> little_set W \<and> member (ordered_pair (U, V), X) \<and> member (ordered_pair (U, W), X) \<longrightarrow> equal (V, W)) \<and> (\<forall>X. single_valued_set X \<or> little_set (f19 X)) \<and> (\<forall>X. single_valued_set X \<or> little_set (f20 X)) \<and> (\<forall>X. single_valued_set X \<or> little_set (f21 X)) \<and> (\<forall>X. single_valued_set X \<or> member (ordered_pair (f19 X, f20 X), X)) \<and> (\<forall>X. single_valued_set X \<or> member (ordered_pair (f19 X, f21 X), X)) \<and> (\<forall>X. equal (f20 X, f21 X) \<longrightarrow> single_valued_set X) \<and> (\<forall>Xf. function Xf \<longrightarrow> relation Xf) \<and> (\<forall>Xf. function Xf \<longrightarrow> single_valued_set Xf) \<and> (\<forall>Xf. relation Xf \<and> single_valued_set Xf \<longrightarrow> function Xf) \<and> (\<forall>Z X Xf. member (Z, image' (X, Xf)) \<longrightarrow> ordered_pair_predicate (f22 (Z, X, Xf))) \<and> (\<forall>Z X Xf. member (Z, image' (X, Xf)) \<longrightarrow> member (f22 (Z, X, Xf), Xf)) \<and> (\<forall>Z Xf X. member (Z, image' (X, Xf)) \<longrightarrow> member (first (f22 (Z, X, Xf)), X)) \<and> (\<forall>X Xf Z. member (Z, image' (X, Xf)) \<longrightarrow> equal (second (f22 (Z, X, Xf)), Z)) \<and> (\<forall>Xf X Y Z. little_set Z \<and> ordered_pair_predicate Y \<and> member (Y, Xf) \<and> member (first Y, X) \<and> equal (second Y, Z) \<longrightarrow> member (Z, image' (X, Xf))) \<and> (\<forall>X Xf. little_set X \<and> function Xf \<longrightarrow> little_set (image' (X, Xf))) \<and> (\<forall>X U Y. \<not> (disjoint (X, Y) \<and> member (U, X) \<and> member (U, Y))) \<and> (\<forall>Y X. disjoint (X, Y) \<or> member (f23 (X, Y), X)) \<and> (\<forall>X Y. disjoint (X, Y) \<or> member (f23 (X, Y), Y)) \<and> (\<forall>X. equal (X, empty_set) \<or> member (f24 X, X)) \<and> (\<forall>X. equal (X, empty_set) \<or> disjoint (f24 X, X)) \<and> function f25 \<and> (\<forall>X. little_set X \<longrightarrow> equal (X, empty_set) \<or> member (f26 X, X)) \<and> (\<forall>X. little_set X \<longrightarrow> equal (X, empty_set) \<or> member (ordered_pair (X, f26 X), f25)) \<and> (\<forall>Z X. member (Z, range_of X) \<longrightarrow> ordered_pair_predicate (f27 (Z, X))) \<and> (\<forall>Z X. member (Z, range_of X) \<longrightarrow> member (f27 (Z, X), X)) \<and> (\<forall>Z X. member (Z, range_of X) \<longrightarrow> equal (Z, second (f27 (Z, X)))) \<and> (\<forall>X Z Xp. little_set Z \<and> ordered_pair_predicate Xp \<and> member (Xp, X) \<and> equal (Z, second Xp) \<longrightarrow> member (Z, range_of X)) \<and> (\<forall>Z. member (Z, identity_relation) \<longrightarrow> ordered_pair_predicate Z) \<and> (\<forall>Z. 
member (Z, identity_relation) \<longrightarrow> equal (first Z, second Z)) \<and> (\<forall>Z. little_set Z \<and> ordered_pair_predicate Z \<and> equal (first Z, second Z) \<longrightarrow> member (Z, identity_relation)) \<and> (\<forall>X Y. equal (restrct (X, Y), intersection (X, cross_product (Y, universal_set)))) \<and> (\<forall>Xf. one_to_one_function Xf \<longrightarrow> function Xf) \<and> (\<forall>Xf. one_to_one_function Xf \<longrightarrow> function (inv1 Xf)) \<and> (\<forall>Xf. function Xf \<and> function (inv1 Xf) \<longrightarrow> one_to_one_function Xf) \<and> (\<forall>Z Xf Y. member (Z, apply (Xf, Y)) \<longrightarrow> ordered_pair_predicate (f28 (Z, Xf, Y))) \<and> (\<forall>Z Y Xf. member (Z, apply (Xf, Y)) \<longrightarrow> member (f28 (Z, Xf, Y), Xf)) \<and> (\<forall>Z Xf Y. member (Z, apply (Xf, Y)) \<longrightarrow> equal (first (f28 (Z, Xf, Y)), Y)) \<and> (\<forall>Z Xf Y. member (Z, apply (Xf, Y)) \<longrightarrow> member (Z, second (f28 (Z, Xf, Y)))) \<and> (\<forall>Xf Y Z W. ordered_pair_predicate W \<and> member (W, Xf) \<and> equal (first W, Y) \<and> member (Z, second W) \<longrightarrow> member (Z, apply (Xf, Y))) \<and> (\<forall>Xf X Y. equal (apply_to_two_arguments (Xf, X, Y), apply (Xf, ordered_pair (X, Y)))) \<and> (\<forall>X Y Xf. maps (Xf, X, Y) \<longrightarrow> function Xf) \<and> (\<forall>Y Xf X. maps (Xf, X, Y) \<longrightarrow> equal (domain_of Xf, X)) \<and> (\<forall>X Xf Y. maps (Xf, X, Y) \<longrightarrow> ssubset (range_of Xf, Y)) \<and> (\<forall>X Xf Y. function Xf \<and> equal (domain_of Xf, X) \<and> ssubset (range_of Xf, Y) \<longrightarrow> maps (Xf, X, Y)) \<and> (\<forall>Xf Xs. closed (Xs, Xf) \<longrightarrow> little_set Xs) \<and> (\<forall>Xs Xf. closed (Xs, Xf) \<longrightarrow> little_set Xf) \<and> (\<forall>Xf Xs. closed (Xs, Xf) \<longrightarrow> maps (Xf, cross_product (Xs, Xs), Xs)) \<and> (\<forall>Xf Xs. little_set Xs \<and> little_set Xf \<and> maps (Xf, cross_product (Xs, Xs), Xs) \<longrightarrow> closed (Xs, Xf)) \<and> (\<forall>Z Xf Xg. member (Z, composition (Xf, Xg)) \<longrightarrow> little_set (f29 (Z, Xf, Xg))) \<and> (\<forall>Z Xf Xg. member (Z, composition (Xf, Xg)) \<longrightarrow> little_set (f30 (Z, Xf, Xg))) \<and> (\<forall>Z Xf Xg. member (Z, composition (Xf, Xg)) \<longrightarrow> little_set (f31 (Z, Xf, Xg))) \<and> (\<forall>Z Xf Xg. member (Z, composition (Xf, Xg)) \<longrightarrow> equal (Z, ordered_pair (f29 (Z, Xf, Xg), f30 (Z, Xf, Xg)))) \<and> (\<forall>Z Xg Xf. member (Z, composition (Xf, Xg)) \<longrightarrow> member (ordered_pair (f29 (Z, Xf, Xg), f31 (Z, Xf, Xg)), Xf)) \<and> (\<forall>Z Xf Xg. member (Z, composition (Xf, Xg)) \<longrightarrow> member (ordered_pair (f31 (Z, Xf, Xg), f30 (Z, Xf, Xg)), Xg)) \<and> (\<forall>Z X Xf W Y Xg. little_set Z \<and> little_set X \<and> little_set Y \<and> little_set W \<and> equal (Z, ordered_pair (X, Y)) \<and> member (ordered_pair (X, W), Xf) \<and> member (ordered_pair (W, Y), Xg) \<longrightarrow> member (Z, composition (Xf, Xg))) \<and> (\<forall>Xh Xs2 Xf2 Xs1 Xf1. homomorphism (Xh, Xs1, Xf1, Xs2, Xf2) \<longrightarrow> closed (Xs1, Xf1)) \<and> (\<forall>Xh Xs1 Xf1 Xs2 Xf2. homomorphism (Xh, Xs1, Xf1, Xs2, Xf2) \<longrightarrow> closed (Xs2, Xf2)) \<and> (\<forall>Xf1 Xf2 Xh Xs1 Xs2. homomorphism (Xh, Xs1, Xf1, Xs2, Xf2) \<longrightarrow> maps (Xh, Xs1, Xs2)) \<and> (\<forall>Xs2 Xs1 Xf1 Xf2 X Xh Y. 
homomorphism (Xh, Xs1, Xf1, Xs2, Xf2) \<and> member (X, Xs1) \<and> member (Y, Xs1) \<longrightarrow> equal (apply (Xh, apply_to_two_arguments (Xf1, X, Y)), apply_to_two_arguments (Xf2, apply (Xh, X), apply (Xh, Y)))) \<and> (\<forall>Xh Xf1 Xs2 Xf2 Xs1. closed (Xs1, Xf1) \<and> closed (Xs2, Xf2) \<and> maps (Xh, Xs1, Xs2) \<longrightarrow> homomorphism (Xh, Xs1, Xf1, Xs2, Xf2) \<or> member (f32 (Xh, Xs1, Xf1, Xs2, Xf2), Xs1)) \<and> (\<forall>Xh Xf1 Xs2 Xf2 Xs1. closed (Xs1, Xf1) \<and> closed (Xs2, Xf2) \<and> maps (Xh, Xs1, Xs2) \<longrightarrow> homomorphism (Xh, Xs1, Xf1, Xs2, Xf2) \<or> member (f33 (Xh, Xs1, Xf1, Xs2, Xf2), Xs1)) \<and> (\<forall>Xh Xs1 Xf1 Xs2 Xf2. closed (Xs1, Xf1) \<and> closed (Xs2, Xf2) \<and> maps (Xh, Xs1, Xs2) \<and> equal (apply (Xh, apply_to_two_arguments (Xf1, f32 (Xh, Xs1, Xf1, Xs2, Xf2), f33 (Xh, Xs1, Xf1, Xs2, Xf2))), apply_to_two_arguments (Xf2, apply (Xh, f32 (Xh, Xs1, Xf1, Xs2, Xf2)), apply (Xh, f33 (Xh, Xs1, Xf1, Xs2, Xf2)))) \<longrightarrow> homomorphism (Xh, Xs1, Xf1, Xs2, Xf2)) \<and> (\<forall>A B C. equal (A, B) \<longrightarrow> equal (f1 (A, C), f1 (B, C))) \<and> (\<forall>D F' E. equal (D, E) \<longrightarrow> equal (f1 (F', D), f1 (F', E))) \<and> (\<forall>A2 B2. equal (A2, B2) \<longrightarrow> equal (f2 A2, f2 B2)) \<and> (\<forall>G4 H4. equal (G4, H4) \<longrightarrow> equal (f3 G4, f3 H4)) \<and> (\<forall>O7 P7 Q7. equal (O7, P7) \<longrightarrow> equal (f4 (O7, Q7), f4 (P7, Q7))) \<and> (\<forall>R7 T7 S7. equal (R7, S7) \<longrightarrow> equal (f4 (T7, R7), f4 (T7, S7))) \<and> (\<forall>U7 V7 W7. equal (U7, V7) \<longrightarrow> equal (f5 (U7, W7), f5 (V7, W7))) \<and> (\<forall>X7 Z7 Y7. equal (X7, Y7) \<longrightarrow> equal (f5 (Z7, X7), f5 (Z7, Y7))) \<and> (\<forall>A8 B8 C8. equal (A8, B8) \<longrightarrow> equal (f6 (A8, C8), f6 (B8, C8))) \<and> (\<forall>D8 F8 E8. equal (D8, E8) \<longrightarrow> equal (f6 (F8, D8), f6 (F8, E8))) \<and> (\<forall>G8 H8 I8. equal (G8, H8) \<longrightarrow> equal (f7 (G8, I8), f7 (H8, I8))) \<and> (\<forall>J8 L8 K8. equal (J8, K8) \<longrightarrow> equal (f7 (L8, J8), f7 (L8, K8))) \<and> (\<forall>M8 N8 O8. equal (M8, N8) \<longrightarrow> equal (f8 (M8, O8), f8 (N8, O8))) \<and> (\<forall>P8 R8 Q8. equal (P8, Q8) \<longrightarrow> equal (f8 (R8, P8), f8 (R8, Q8))) \<and> (\<forall>S8 T8 U8. equal (S8, T8) \<longrightarrow> equal (f9 (S8, U8), f9 (T8, U8))) \<and> (\<forall>V8 X8 W8. equal (V8, W8) \<longrightarrow> equal (f9 (X8, V8), f9 (X8, W8))) \<and> (\<forall>G H I'. equal (G, H) \<longrightarrow> equal (f10 (G, I'), f10 (H, I'))) \<and> (\<forall>J L K'. equal (J, K') \<longrightarrow> equal (f10 (L, J), f10 (L, K'))) \<and> (\<forall>M N O'. equal (M, N) \<longrightarrow> equal (f11 (M, O'), f11 (N, O'))) \<and> (\<forall>P R Q. equal (P, Q) \<longrightarrow> equal (f11 (R, P), f11 (R, Q))) \<and> (\<forall>S' T' U. equal (S', T') \<longrightarrow> equal (f12 (S', U), f12 (T', U))) \<and> (\<forall>V X W. equal (V, W) \<longrightarrow> equal (f12 (X, V), f12 (X, W))) \<and> (\<forall>Y Z A1. equal (Y, Z) \<longrightarrow> equal (f13 (Y, A1), f13 (Z, A1))) \<and> (\<forall>B1 D1 C1. equal (B1, C1) \<longrightarrow> equal (f13 (D1, B1), f13 (D1, C1))) \<and> (\<forall>E1 F1 G1. equal (E1, F1) \<longrightarrow> equal (f14 (E1, G1), f14 (F1, G1))) \<and> (\<forall>H1 J1 I1. equal (H1, I1) \<longrightarrow> equal (f14 (J1, H1), f14 (J1, I1))) \<and> (\<forall>K1 L1 M1. equal (K1, L1) \<longrightarrow> equal (f16 (K1, M1), f16 (L1, M1))) \<and> (\<forall>N1 P1 O1. 
equal (N1, O1) \<longrightarrow> equal (f16 (P1, N1), f16 (P1, O1))) \<and> (\<forall>Q1 R1 S1. equal (Q1, R1) \<longrightarrow> equal (f17 (Q1, S1), f17 (R1, S1))) \<and> (\<forall>T1 V1 U1. equal (T1, U1) \<longrightarrow> equal (f17 (V1, T1), f17 (V1, U1))) \<and> (\<forall>W1 X1. equal (W1, X1) \<longrightarrow> equal (f18 W1, f18 X1)) \<and> (\<forall>Y1 Z1. equal (Y1, Z1) \<longrightarrow> equal (f19 Y1, f19 Z1)) \<and> (\<forall>C2 D2. equal (C2, D2) \<longrightarrow> equal (f20 C2, f20 D2)) \<and> (\<forall>E2 F2. equal (E2, F2) \<longrightarrow> equal (f21 E2, f21 F2)) \<and> (\<forall>G2 H2 I2 J2. equal (G2, H2) \<longrightarrow> equal (f22 (G2, I2, J2), f22 (H2, I2, J2))) \<and> (\<forall>K2 M2 L2 N2. equal (K2, L2) \<longrightarrow> equal (f22 (M2, K2, N2), f22 (M2, L2, N2))) \<and> (\<forall>O2 Q2 R2 P2. equal (O2, P2) \<longrightarrow> equal (f22 (Q2, R2, O2), f22 (Q2, R2, P2))) \<and> (\<forall>S2 T2 U2. equal (S2, T2) \<longrightarrow> equal (f23 (S2, U2), f23 (T2, U2))) \<and> (\<forall>V2 X2 W2. equal (V2, W2) \<longrightarrow> equal (f23 (X2, V2), f23 (X2, W2))) \<and> (\<forall>Y2 Z2. equal (Y2, Z2) \<longrightarrow> equal (f24 Y2, f24 Z2)) \<and> (\<forall>A3 B3. equal (A3, B3) \<longrightarrow> equal (f26 A3, f26 B3)) \<and> (\<forall>C3 D3 E3. equal (C3, D3) \<longrightarrow> equal (f27 (C3, E3), f27 (D3, E3))) \<and> (\<forall>F3 H3 G3. equal (F3, G3) \<longrightarrow> equal (f27 (H3, F3), f27 (H3, G3))) \<and> (\<forall>I3 J3 K3 L3. equal (I3, J3) \<longrightarrow> equal (f28 (I3, K3, L3), f28 (J3, K3, L3))) \<and> (\<forall>M3 O3 N3 P3. equal (M3, N3) \<longrightarrow> equal (f28 (O3, M3, P3), f28 (O3, N3, P3))) \<and> (\<forall>Q3 S3 T3 R3. equal (Q3, R3) \<longrightarrow> equal (f28 (S3, T3, Q3), f28 (S3, T3, R3))) \<and> (\<forall>U3 V3 W3 X3. equal (U3, V3) \<longrightarrow> equal (f29 (U3, W3, X3), f29 (V3, W3, X3))) \<and> (\<forall>Y3 A4 Z3 B4. equal (Y3, Z3) \<longrightarrow> equal (f29 (A4, Y3, B4), f29 (A4, Z3, B4))) \<and> (\<forall>C4 E4 F4 D4. equal (C4, D4) \<longrightarrow> equal (f29 (E4, F4, C4), f29 (E4, F4, D4))) \<and> (\<forall>I4 J4 K4 L4. equal (I4, J4) \<longrightarrow> equal (f30 (I4, K4, L4), f30 (J4, K4, L4))) \<and> (\<forall>M4 O4 N4 P4. equal (M4, N4) \<longrightarrow> equal (f30 (O4, M4, P4), f30 (O4, N4, P4))) \<and> (\<forall>Q4 S4 T4 R4. equal (Q4, R4) \<longrightarrow> equal (f30 (S4, T4, Q4), f30 (S4, T4, R4))) \<and> (\<forall>U4 V4 W4 X4. equal (U4, V4) \<longrightarrow> equal (f31 (U4, W4, X4), f31 (V4, W4, X4))) \<and> (\<forall>Y4 A5 Z4 B5. equal (Y4, Z4) \<longrightarrow> equal (f31 (A5, Y4, B5), f31 (A5, Z4, B5))) \<and> (\<forall>C5 E5 F5 D5. equal (C5, D5) \<longrightarrow> equal (f31 (E5, F5, C5), f31 (E5, F5, D5))) \<and> (\<forall>G5 H5 I5 J5 K5 L5. equal (G5, H5) \<longrightarrow> equal (f32 (G5, I5, J5, K5, L5), f32 (H5, I5, J5, K5, L5))) \<and> (\<forall>M5 O5 N5 P5 Q5 R5. equal (M5, N5) \<longrightarrow> equal (f32 (O5, M5, P5, Q5, R5), f32 (O5, N5, P5, Q5, R5))) \<and> (\<forall>S5 U5 V5 T5 W5 X5. equal (S5, T5) \<longrightarrow> equal (f32 (U5, V5, S5, W5, X5), f32 (U5, V5, T5, W5, X5))) \<and> (\<forall>Y5 A6 B6 C6 Z5 D6. equal (Y5, Z5) \<longrightarrow> equal (f32 (A6, B6, C6, Y5, D6), f32 (A6, B6, C6, Z5, D6))) \<and> (\<forall>E6 G6 H6 I6 J6 F6. equal (E6, F6) \<longrightarrow> equal (f32 (G6, H6, I6, J6, E6), f32 (G6, H6, I6, J6, F6))) \<and> (\<forall>K6 L6 M6 N6 O6 P6. equal (K6, L6) \<longrightarrow> equal (f33 (K6, M6, N6, O6, P6), f33 (L6, M6, N6, O6, P6))) \<and> (\<forall>Q6 S6 R6 T6 U6 V6. 
equal (Q6, R6) \<longrightarrow> equal (f33 (S6, Q6, T6, U6, V6), f33 (S6, R6, T6, U6, V6))) \<and> (\<forall>W6 Y6 Z6 X6 A7 B7. equal (W6, X6) \<longrightarrow> equal (f33 (Y6, Z6, W6, A7, B7), f33 (Y6, Z6, X6, A7, B7))) \<and> (\<forall>C7 E7 F7 G7 D7 H7. equal (C7, D7) \<longrightarrow> equal (f33 (E7, F7, G7, C7, H7), f33 (E7, F7, G7, D7, H7))) \<and> (\<forall>I7 K7 L7 M7 N7 J7. equal (I7, J7) \<longrightarrow> equal (f33 (K7, L7, M7, N7, I7), f33 (K7, L7, M7, N7, J7))) \<and> (\<forall>A B C. equal (A, B) \<longrightarrow> equal (apply (A, C), apply (B, C))) \<and> (\<forall>D F' E. equal (D, E) \<longrightarrow> equal (apply (F', D), apply (F', E))) \<and> (\<forall>G H I' J. equal (G, H) \<longrightarrow> equal (apply_to_two_arguments (G, I', J), apply_to_two_arguments (H, I', J))) \<and> (\<forall>K' M L N. equal (K', L) \<longrightarrow> equal (apply_to_two_arguments (M, K', N), apply_to_two_arguments (M, L, N))) \<and> (\<forall>O' Q R P. equal (O', P) \<longrightarrow> equal (apply_to_two_arguments (Q, R, O'), apply_to_two_arguments (Q, R, P))) \<and> (\<forall>S' T'. equal (S', T') \<longrightarrow> equal (complement S', complement T')) \<and> (\<forall>U V W. equal (U, V) \<longrightarrow> equal (composition (U, W), composition (V, W))) \<and> (\<forall>X Z Y. equal (X, Y) \<longrightarrow> equal (composition (Z, X), composition (Z, Y))) \<and> (\<forall>A1 B1. equal (A1, B1) \<longrightarrow> equal (inv1 A1, inv1 B1)) \<and> (\<forall>C1 D1 E1. equal (C1, D1) \<longrightarrow> equal (cross_product (C1, E1), cross_product (D1, E1))) \<and> (\<forall>F1 H1 G1. equal (F1, G1) \<longrightarrow> equal (cross_product (H1, F1), cross_product (H1, G1))) \<and> (\<forall>I1 J1. equal (I1, J1) \<longrightarrow> equal (domain_of I1, domain_of J1)) \<and> (\<forall>I10 J10. equal (I10, J10) \<longrightarrow> equal (first I10, first J10)) \<and> (\<forall>Q10 R10. equal (Q10, R10) \<longrightarrow> equal (flip_range_of Q10, flip_range_of R10)) \<and> (\<forall>S10 T10 U10. equal (S10, T10) \<longrightarrow> equal (image' (S10, U10), image' (T10, U10))) \<and> (\<forall>V10 X10 W10. equal (V10, W10) \<longrightarrow> equal (image' (X10, V10), image' (X10, W10))) \<and> (\<forall>Y10 Z10 A11. equal (Y10, Z10) \<longrightarrow> equal (intersection (Y10, A11), intersection (Z10, A11))) \<and> (\<forall>B11 D11 C11. equal (B11, C11) \<longrightarrow> equal (intersection (D11, B11), intersection (D11, C11))) \<and> (\<forall>E11 F11 G11. equal (E11, F11) \<longrightarrow> equal (non_ordered_pair (E11, G11), non_ordered_pair (F11, G11))) \<and> (\<forall>H11 J11 I11. equal (H11, I11) \<longrightarrow> equal (non_ordered_pair (J11, H11), non_ordered_pair (J11, I11))) \<and> (\<forall>K11 L11 M11. equal (K11, L11) \<longrightarrow> equal (ordered_pair (K11, M11), ordered_pair (L11, M11))) \<and> (\<forall>N11 P11 O11. equal (N11, O11) \<longrightarrow> equal (ordered_pair (P11, N11), ordered_pair (P11, O11))) \<and> (\<forall>Q11 R11. equal (Q11, R11) \<longrightarrow> equal (powerset Q11, powerset R11)) \<and> (\<forall>S11 T11. equal (S11, T11) \<longrightarrow> equal (range_of S11, range_of T11)) \<and> (\<forall>U11 V11 W11. equal (U11, V11) \<longrightarrow> equal (restrct (U11, W11), restrct (V11, W11))) \<and> (\<forall>X11 Z11 Y11. equal (X11, Y11) \<longrightarrow> equal (restrct (Z11, X11), restrct (Z11, Y11))) \<and> (\<forall>A12 B12. equal (A12, B12) \<longrightarrow> equal (rot_right A12, rot_right B12)) \<and> (\<forall>C12 D12. 
equal (C12, D12) \<longrightarrow> equal (second C12, second D12)) \<and> (\<forall>K12 L12. equal (K12, L12) \<longrightarrow> equal (sigma K12, sigma L12)) \<and> (\<forall>M12 N12. equal (M12, N12) \<longrightarrow> equal (singleton_set M12, singleton_set N12)) \<and> (\<forall>O12 P12. equal (O12, P12) \<longrightarrow> equal (successor O12, successor P12)) \<and> (\<forall>Q12 R12 S12. equal (Q12, R12) \<longrightarrow> equal (union (Q12, S12), union (R12, S12))) \<and> (\<forall>T12 V12 U12. equal (T12, U12) \<longrightarrow> equal (union (V12, T12), union (V12, U12))) \<and> (\<forall>W12 X12 Y12. equal (W12, X12) \<and> closed (W12, Y12) \<longrightarrow> closed (X12, Y12)) \<and> (\<forall>Z12 B13 A13. equal (Z12, A13) \<and> closed (B13, Z12) \<longrightarrow> closed (B13, A13)) \<and> (\<forall>C13 D13 E13. equal (C13, D13) \<and> disjoint (C13, E13) \<longrightarrow> disjoint (D13, E13)) \<and> (\<forall>F13 H13 G13. equal (F13, G13) \<and> disjoint (H13, F13) \<longrightarrow> disjoint (H13, G13)) \<and> (\<forall>I13 J13. equal (I13, J13) \<and> function I13 \<longrightarrow> function J13) \<and> (\<forall>K13 L13 M13 N13 O13 P13. equal (K13, L13) \<and> homomorphism (K13, M13, N13, O13, P13) \<longrightarrow> homomorphism (L13, M13, N13, O13, P13)) \<and> (\<forall>Q13 S13 R13 T13 U13 V13. equal (Q13, R13) \<and> homomorphism (S13, Q13, T13, U13, V13) \<longrightarrow> homomorphism (S13, R13, T13, U13, V13)) \<and> (\<forall>W13 Y13 Z13 X13 A14 B14. equal (W13, X13) \<and> homomorphism (Y13, Z13, W13, A14, B14) \<longrightarrow> homomorphism (Y13, Z13, X13, A14, B14)) \<and> (\<forall>C14 E14 F14 G14 D14 H14. equal (C14, D14) \<and> homomorphism (E14, F14, G14, C14, H14) \<longrightarrow> homomorphism (E14, F14, G14, D14, H14)) \<and> (\<forall>I14 K14 L14 M14 N14 J14. equal (I14, J14) \<and> homomorphism (K14, L14, M14, N14, I14) \<longrightarrow> homomorphism (K14, L14, M14, N14, J14)) \<and> (\<forall>O14 P14. equal (O14, P14) \<and> little_set O14 \<longrightarrow> little_set P14) \<and> (\<forall>Q14 R14 S14 T14. equal (Q14, R14) \<and> maps (Q14, S14, T14) \<longrightarrow> maps (R14, S14, T14)) \<and> (\<forall>U14 W14 V14 X14. equal (U14, V14) \<and> maps (W14, U14, X14) \<longrightarrow> maps (W14, V14, X14)) \<and> (\<forall>Y14 A15 B15 Z14. equal (Y14, Z14) \<and> maps (A15, B15, Y14) \<longrightarrow> maps (A15, B15, Z14)) \<and> (\<forall>C15 D15 E15. equal (C15, D15) \<and> member (C15, E15) \<longrightarrow> member (D15, E15)) \<and> (\<forall>F15 H15 G15. equal (F15, G15) \<and> member (H15, F15) \<longrightarrow> member (H15, G15)) \<and> (\<forall>I15 J15. equal (I15, J15) \<and> one_to_one_function I15 \<longrightarrow> one_to_one_function J15) \<and> (\<forall>K15 L15. equal (K15, L15) \<and> ordered_pair_predicate K15 \<longrightarrow> ordered_pair_predicate L15) \<and> (\<forall>M15 N15 O15. equal (M15, N15) \<and> proper_subset (M15, O15) \<longrightarrow> proper_subset (N15, O15)) \<and> (\<forall>P15 R15 Q15. equal (P15, Q15) \<and> proper_subset (R15, P15) \<longrightarrow> proper_subset (R15, Q15)) \<and> (\<forall>S15 T15. equal (S15, T15) \<and> relation S15 \<longrightarrow> relation T15) \<and> (\<forall>U15 V15. equal (U15, V15) \<and> single_valued_set U15 \<longrightarrow> single_valued_set V15) \<and> (\<forall>W15 X15 Y15. equal (W15, X15) \<and> ssubset (W15, Y15) \<longrightarrow> ssubset (X15, Y15)) \<and> (\<forall>Z15 B16 A16. 
equal (Z15, A16) \<and> ssubset (B16, Z15) \<longrightarrow> ssubset (B16, A16)) \<and> \<not> little_set (ordered_pair (a, b)) \<longrightarrow> False
[PROOF STEP]
oops
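(* Editorial note: the single subgoal above restates the full axiom set in
   conjunction with the negated conjecture "little_set (ordered_pair (a, b))";
   the script ends in "oops", i.e. this proof attempt is abandoned rather than
   completed. *)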
[STATEMENT]
lemma has_prod_mult:
assumes f: "f has_prod a" and g: "g has_prod b"
shows "(\<lambda>n. f n * g n) has_prod (a * b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
using f [unfolded has_prod_def]
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f 0 a \<or> (\<exists>i q. a = (0::'a) \<and> f i = (0::'a) \<and> raw_has_prod f (Suc i) q)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
proof (elim disjE exE conjE)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. raw_has_prod f 0 a \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>a = (0::'a); f i = (0::'a); raw_has_prod f (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
assume f0: "raw_has_prod f 0 a"
[PROOF STATE]
proof (state)
this:
raw_has_prod f 0 a
goal (2 subgoals):
1. raw_has_prod f 0 a \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>a = (0::'a); f i = (0::'a); raw_has_prod f (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
using g [unfolded has_prod_def]
[PROOF STATE]
proof (prove)
using this:
raw_has_prod g 0 b \<or> (\<exists>i q. b = (0::'a) \<and> g i = (0::'a) \<and> raw_has_prod g (Suc i) q)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
proof (elim disjE exE conjE)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. raw_has_prod g 0 b \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
assume g0: "raw_has_prod g 0 b"
[PROOF STATE]
proof (state)
this:
raw_has_prod g 0 b
goal (2 subgoals):
1. raw_has_prod g 0 b \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
with f0
[PROOF STATE]
proof (chain)
picking this:
raw_has_prod f 0 a
raw_has_prod g 0 b
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f 0 a
raw_has_prod g 0 b
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
by (force simp add: has_prod_def prod.distrib tendsto_mult raw_has_prod_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>n. f n * g n) has_prod (a * b)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
fix j q
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
assume "b = 0" and "g j = 0" and q: "raw_has_prod g (Suc j) q"
[PROOF STATE]
proof (state)
this:
b = (0::'a)
g j = (0::'a)
raw_has_prod g (Suc j) q
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
obtain p where p: "raw_has_prod f (Suc j) p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>p. raw_has_prod f (Suc j) p \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using f0 raw_has_prod_ignore_initial_segment
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f 0 a
\<lbrakk>raw_has_prod ?f ?M ?p; ?M \<le> ?N; \<And>q. raw_has_prod ?f ?N q \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>p. raw_has_prod f (Suc j) p \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
raw_has_prod f (Suc j) p
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
raw_has_prod f (Suc j) p
[PROOF STEP]
have "Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))"
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f (Suc j) p
goal (1 subgoal):
1. Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))
[PROOF STEP]
using q raw_has_prod_mult
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f (Suc j) p
raw_has_prod g (Suc j) q
\<lbrakk>raw_has_prod ?f ?M ?a; raw_has_prod ?g ?M ?b\<rbrakk> \<Longrightarrow> raw_has_prod (\<lambda>n. ?f n * ?g n) ?M (?a * ?b)
goal (1 subgoal):
1. Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
using \<open>b = 0\<close> \<open>g j = 0\<close> has_prod_0_iff
[PROOF STATE]
proof (prove)
using this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc j))
b = (0::'a)
g j = (0::'a)
?f has_prod (0::?'a) = (\<exists>i. ?f i = (0::?'a) \<and> (\<exists>p. raw_has_prod ?f (Suc i) p))
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
(\<lambda>n. f n * g n) has_prod (a * b)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>n. f n * g n) has_prod (a * b)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>a = (0::'a); f i = (0::'a); raw_has_prod f (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>a = (0::'a); f i = (0::'a); raw_has_prod f (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
fix i p
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>a = (0::'a); f i = (0::'a); raw_has_prod f (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
assume "a = 0" and "f i = 0" and p: "raw_has_prod f (Suc i) p"
[PROOF STATE]
proof (state)
this:
a = (0::'a)
f i = (0::'a)
raw_has_prod f (Suc i) p
goal (1 subgoal):
1. \<And>i q. \<lbrakk>a = (0::'a); f i = (0::'a); raw_has_prod f (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
using g [unfolded has_prod_def]
[PROOF STATE]
proof (prove)
using this:
raw_has_prod g 0 b \<or> (\<exists>i q. b = (0::'a) \<and> g i = (0::'a) \<and> raw_has_prod g (Suc i) q)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
proof (elim disjE exE conjE)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. raw_has_prod g 0 b \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
assume g0: "raw_has_prod g 0 b"
[PROOF STATE]
proof (state)
this:
raw_has_prod g 0 b
goal (2 subgoals):
1. raw_has_prod g 0 b \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
obtain q where q: "raw_has_prod g (Suc i) q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>q. raw_has_prod g (Suc i) q \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using g0 raw_has_prod_ignore_initial_segment
[PROOF STATE]
proof (prove)
using this:
raw_has_prod g 0 b
\<lbrakk>raw_has_prod ?f ?M ?p; ?M \<le> ?N; \<And>q. raw_has_prod ?f ?N q \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>q. raw_has_prod g (Suc i) q \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
raw_has_prod g (Suc i) q
goal (2 subgoals):
1. raw_has_prod g 0 b \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
raw_has_prod g (Suc i) q
[PROOF STEP]
have "Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))"
[PROOF STATE]
proof (prove)
using this:
raw_has_prod g (Suc i) q
goal (1 subgoal):
1. Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))
[PROOF STEP]
using raw_has_prod_mult p
[PROOF STATE]
proof (prove)
using this:
raw_has_prod g (Suc i) q
\<lbrakk>raw_has_prod ?f ?M ?a; raw_has_prod ?g ?M ?b\<rbrakk> \<Longrightarrow> raw_has_prod (\<lambda>n. ?f n * ?g n) ?M (?a * ?b)
raw_has_prod f (Suc i) p
goal (1 subgoal):
1. Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))
goal (2 subgoals):
1. raw_has_prod g 0 b \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
2. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
using \<open>a = 0\<close> \<open>f i = 0\<close> has_prod_0_iff
[PROOF STATE]
proof (prove)
using this:
Ex (raw_has_prod (\<lambda>n. f n * g n) (Suc i))
a = (0::'a)
f i = (0::'a)
?f has_prod (0::?'a) = (\<exists>i. ?f i = (0::?'a) \<and> (\<exists>p. raw_has_prod ?f (Suc i) p))
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
(\<lambda>n. f n * g n) has_prod (a * b)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
fix j q
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
assume "b = 0" and "g j = 0" and q: "raw_has_prod g (Suc j) q"
[PROOF STATE]
proof (state)
this:
b = (0::'a)
g j = (0::'a)
raw_has_prod g (Suc j) q
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
obtain p' where p': "raw_has_prod f (Suc (max i j)) p'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>p'. raw_has_prod f (Suc (max i j)) p' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis raw_has_prod_ignore_initial_segment max_Suc_Suc max_def p)
[PROOF STATE]
proof (state)
this:
raw_has_prod f (Suc (max i j)) p'
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
raw_has_prod f (Suc (max i j)) p'
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
obtain q' where q': "raw_has_prod g (Suc (max i j)) q'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>q'. raw_has_prod g (Suc (max i j)) q' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis raw_has_prod_ignore_initial_segment max.cobounded2 max_Suc_Suc q)
[PROOF STATE]
proof (state)
this:
raw_has_prod g (Suc (max i j)) q'
goal (1 subgoal):
1. \<And>i q. \<lbrakk>b = (0::'a); g i = (0::'a); raw_has_prod g (Suc i) q\<rbrakk> \<Longrightarrow> (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
raw_has_prod f (Suc (max i j)) p'
raw_has_prod g (Suc (max i j)) q'
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f (Suc (max i j)) p'
raw_has_prod g (Suc (max i j)) q'
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
using \<open>b = 0\<close>
[PROOF STATE]
proof (prove)
using this:
raw_has_prod f (Suc (max i j)) p'
raw_has_prod g (Suc (max i j)) q'
b = (0::'a)
goal (1 subgoal):
1. (\<lambda>n. f n * g n) has_prod (a * b)
[PROOF STEP]
by (simp add: has_prod_def) (metis \<open>f i = 0\<close> \<open>g j = 0\<close> raw_has_prod_mult max_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>n. f n * g n) has_prod (a * b)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>n. f n * g n) has_prod (a * b)
goal:
No subgoals!
[PROOF STEP]
qed
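(* Editorial note on the proof shape: unfolding has_prod_def splits each of f
   and g into two cases. When both satisfy raw_has_prod from index 0, the
   limits multiply directly (prod.distrib with tendsto_mult). When a product is
   0 because some factor f i or g j vanishes, both sequences are shifted past
   Suc (max i j) via raw_has_prod_ignore_initial_segment, multiplied there with
   raw_has_prod_mult, and has_prod_0_iff closes the goal since a * b = 0. *)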
"""Kuramoto-Sivashinsky (KS) system: the simplest (?) PDE admitting chaos.
Defined by:
u_t = -u*u_x - u_xx - u_xxxx
- See compare_schemes.py for a comparison of time-step integration schemes.
- See demo.py for further description.
"""
import functools
import numpy as np
from dapper.dpr_config import DotDict
from dapper.mods.integration import integrate_TLM, with_rk4
# To & from time/Fourier domain -- use reals-only fft
def fft(u): return np. fft. rfft(u, axis=-1) # F
def ifft(v): return np.fft.irfft(v, axis=-1) # F^{-1}
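# (Editorial note) For real input of length Nx, np.fft.rfft returns Nx//2 + 1
# complex coefficients (non-negative frequencies only). Hence the wavenumber
# array `kk` built in Model() below has Nx/2 + 1 entries; zeroing its last
# (Nyquist) entry is the standard convention when forming odd-order spectral
# derivatives of real signals.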
# Do fft to/from Fourier-domain for wrapped function.
def byFourier(func):
    @functools.wraps(func)
    def newfunc(u, *args, **kwargs):
        return ifft(func(fft(u), *args, **kwargs))
    return newfunc
def Model(dt=0.25, DL=32, Nx=128):
    """Define `step`, `x0`, etc. Alternative schemes (`step_XXX`) are also implemented."""
    h = dt  # alias -- prevents overwrite in step()

    # Fourier stuff
    # wave nums for rfft
    kk = np.append(np.arange(0, Nx/2), 0)*2/DL
    # wave nums for fft
    # kk = ccat([np.arange(0,Nx/2),[0], np.arange(-Nx/2+1,0)])*2/DL
    # Alternative method:
    # kk = np.fft.fftfreq(Nx, DL/Nx/2)

    # Operators
    D = 1j*kk          # Differentiation to compute: F[ u_x ]
    L = kk**2 - kk**4  # Linear operator for KS eqn: F[ -u_xx - u_xxxx ]

    # NonLinear term (-u*u_x) in Fourier domain via time domain
    def NL(v):
        return -0.5 * D * fft(ifft(v).real ** 2)
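    # (Editorial note) The factor 0.5 and the extra D come from the identity
    # u*u_x = (1/2) d/dx (u^2), so F[-u*u_x] = -(1/2) * (i*k) * F[u^2].
    # The square is taken in the time domain (ifft -> square -> fft), since a
    # pointwise product would otherwise become a convolution in Fourier space.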
    # Evolution equation
    @byFourier
    def dxdt(v):
        return NL(v) + L*v

    # Jacobian of dxdt(u)
    def d2x_dtdx(u):
        dL  = ifft(L * fft(np.eye(Nx))).T
        dNL = -ifft(D * fft(np.diag(u))).T
        return dL + dNL

    # dstep_dx = FD_Jac(step)
    def dstep_dx(x, t, dt):
        return integrate_TLM(d2x_dtdx(x), dt, method='analytic')
    # Runge-Kutta -- Requires dt<1e-2:
    # ------------------------------------------------
    step_RK4 = with_rk4(dxdt, autonom=True)            # Bad, not recommended.
    step_RK1 = with_rk4(dxdt, autonom=True, stages=1)  # Truly terrible.

    # "Semi-implicit RK3": explicit RK3 for the nonlinear term,
    # implicit trapezoidal (Crank-Nicolson-style) adjustment for the linear term.
    # ------------------------------------------------
    # Based on github.com/jswhit/pyks (Jeff Whitaker),
    # who got it from doi.org/10.1175/MWR3214.1.
    @byFourier
    def step_SI_RK3(v, t, dt):
        v0 = v.copy()
        for n in range(3):
            dt3 = h/(3-n)
            v = v0 + dt3*NL(v)
            v = (v + 0.5*L*dt3*v0)/(1 - 0.5*L*dt3)
        return v
    # ETD-RK4 -- Integration factor (IF) technique, mixed with RK4.
    # ------------------------------------------------
    # Based on kursiv.m of Kassam and Trefethen, 2002,
    # doi.org/10.1137/S1064827502410633.

    # Precompute ETDRK4 scalar quantities
    E  = np.exp(h*L)    # Integrating factor, eval at dt
    E2 = np.exp(h*L/2)  # Integrating factor, eval at dt/2

    # Roots of unity are used to discretize a circular contour...
    nRoots = 16
    roots = np.exp(1j * np.pi * (0.5 + np.arange(nRoots))/nRoots)
    # ... the associated integral then reduces to the mean,
    # g(CL).mean(axis=-1) ~= g(L), whose computation is more stable.
    CL = h * L[:, None] + roots  # Contour for (each element of) L
    # E * exact_integral of integrating factor:
    Q = h * ((np.exp(CL/2) - 1) / CL).mean(axis=-1).real
    # RK4 coefficients (modified by Cox-Matthews):
    f1 = h * ((-4 - CL + np.exp(CL)*(4 - 3*CL + CL**2)) / CL**3).mean(axis=-1).real
    f2 = h * (( 2 + CL + np.exp(CL)*(-2 + CL))           / CL**3).mean(axis=-1).real
    f3 = h * ((-4 - 3*CL - CL**2 + np.exp(CL)*(4 - CL))  / CL**3).mean(axis=-1).real
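    # (Editorial note) Why the contour mean: near L = 0 the coefficients above
    # involve expressions like (exp(z)-1)/z evaluated at z = h*L, which suffer
    # catastrophic cancellation in floating point. Averaging the same analytic
    # function over 16 equispaced points on the circle |z - h*L| = 1 recovers
    # its value stably (a discretized Cauchy integral), e.g.
    #   z = 1e-9; (np.exp(z) - 1)/z                       # noisy near eps
    #   np.mean((np.exp(z + roots) - 1)/(z + roots)).real # accurate, ~1.0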
    @byFourier
    def step_ETD_RK4(v, t, dt):
        assert dt == h, \
            "Model is instantiated with a pre-set dt, " + \
            "which differs from the requested value"
        N1  = NL(v)
        v1  = E2*v + Q*N1
        N2a = NL(v1)
        v2a = E2*v + Q*N2a
        N2b = NL(v2a)
        v2b = E2*v1 + Q*(2*N2b - N1)
        N3  = NL(v2b)
        v   = E*v + N1*f1 + 2*(N2a + N2b)*f2 + N3*f3
        return v

    # Select the "official" step method
    step = step_ETD_RK4
    # Generate IC as end-point of the example from Kassam and Trefethen.
    # x0_Kassam isn't convenient, because we prefer {x0 ∈ attractor} to {x0 ∈ basin}.
    grid = DL*np.pi*np.linspace(0, 1, Nx+1)[1:]
    x0_Kassam = np.cos(grid/16) * (1 + np.sin(grid/16))
    x0 = x0_Kassam.copy()
    for _ in range(int(150/h)):
        x0 = step(x0, np.nan, h)

    # Return dict
    dd = DotDict(dt=dt,
                 DL=DL,
                 Nx=Nx,
                 x0=x0,
                 x0_Kassam=x0_Kassam,
                 grid=grid,
                 step=step,
                 step_ETD_RK4=step_ETD_RK4,
                 step_SI_RK3=step_SI_RK3,
                 step_RK4=step_RK4,
                 step_RK1=step_RK1,
                 dxdt=dxdt,
                 d2x_dtdx=d2x_dtdx,
                 dstep_dx=dstep_dx,
                 )
    return dd


Tplot = 10
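# (Editorial addition) A minimal usage sketch, assuming DAPPER and its
# dependencies are importable. It relies only on the DotDict returned above:
# `step(x, t, dt)` advances the state, and `x0` already lies on the attractor.
if __name__ == "__main__":
    ks = Model()                 # defaults: dt=0.25, DL=32, Nx=128
    x = ks.x0.copy()
    for k in range(10):
        # step_ETD_RK4 asserts dt == the preset dt, so pass ks.dt verbatim.
        x = ks.step(x, k * ks.dt, ks.dt)
    print(x.shape)               # -> (128,)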
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
This script adds a context menu to a :class:`silx.gui.plot3d.ScalarFieldView`.
This is done by adding a custom context menu to the :class:`Plot3DWidget`:
- set the context menu policy to Qt.CustomContextMenu.
- connect to the customContextMenuRequested signal.
For more information on context menus, see Qt documentation.
"""
from __future__ import absolute_import, division, unicode_literals
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "03/10/2017"
import logging
import numpy
from silx.gui import qt
from silx.gui.plot3d.ScalarFieldView import ScalarFieldView
from silx.gui.plot3d import actions
logging.basicConfig()
_logger = logging.getLogger(__name__)
class ScalarFieldViewWithContextMenu(ScalarFieldView):
"""Subclass ScalarFieldView to add a custom context menu to its 3D area."""
def __init__(self, parent=None):
super(ScalarFieldViewWithContextMenu, self).__init__(parent)
self.setWindowTitle("Right-click to open the context menu")
# Set Plot3DWidget custom context menu
self.getPlot3DWidget().setContextMenuPolicy(qt.Qt.CustomContextMenu)
self.getPlot3DWidget().customContextMenuRequested.connect(
self._contextMenu)
def _contextMenu(self, pos):
"""Handle plot area customContextMenuRequested signal.
:param QPoint pos: Mouse position relative to plot area
"""
# Create the context menu
menu = qt.QMenu(self)
menu.addAction(actions.mode.PanAction(
parent=menu, plot3d=self.getPlot3DWidget()))
menu.addAction(actions.mode.RotateArcballAction(
parent=menu, plot3d=self.getPlot3DWidget()))
menu.addSeparator()
menu.addAction(actions.io.CopyAction(
parent=menu, plot3d=self.getPlot3DWidget()))
# Displaying the context menu at the mouse position requires
# a global position.
# The position received as argument is relative to Plot3DWidget
# and needs to be converted.
globalPosition = self.getPlot3DWidget().mapToGlobal(pos)
menu.exec_(globalPosition)
# Start Qt QApplication
app = qt.QApplication([])
# Create the viewer main window
window = ScalarFieldViewWithContextMenu()
# Create dummy data
coords = numpy.linspace(-10, 10, 64)
z = coords.reshape(-1, 1, 1)
y = coords.reshape(1, -1, 1)
x = coords.reshape(1, 1, -1)
data = numpy.sin(x * y * z) / (x * y * z)
# Set ScalarFieldView data
window.setData(data)
# Add an iso-surface
window.addIsosurface(0.2, '#FF0000FF')
window.show()
app.exec_()
|
{"hexsha": "d33bb8f801c823658b4bd2adf6c778637013b65b", "size": 3868, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/plot3dContextMenu.py", "max_stars_repo_name": "vincefn/silx", "max_stars_repo_head_hexsha": "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "max_stars_repo_licenses": ["CC0-1.0", "MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-09T15:50:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-09T15:50:23.000Z", "max_issues_repo_path": "examples/plot3dContextMenu.py", "max_issues_repo_name": "vincefn/silx", "max_issues_repo_head_hexsha": "4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444", "max_issues_repo_licenses": ["CC0-1.0", "MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2016-10-19T09:27:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-24T13:26:56.000Z", "max_forks_repo_path": "examples/plot3dContextMenu.py", "max_forks_repo_name": "payno/silx", "max_forks_repo_head_hexsha": "13301e61627f98fa837008250ac74a0627a7a560", "max_forks_repo_licenses": ["CC0-1.0", "MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-04-02T18:00:14.000Z", "max_forks_repo_forks_event_max_datetime": "2017-04-02T18:00:14.000Z", "avg_line_length": 34.2300884956, "max_line_length": 79, "alphanum_fraction": 0.7013960703, "include": true, "reason": "import numpy", "num_tokens": 866}
|
#################
# IMPORTS
#################
import torch
import torch.nn as nn
from torchvision import models
import numpy as np
import torch.utils.data as dataset
import sys
from torch.autograd import Variable
import torchvision.transforms as transforms
sys.path.append('')
#################
# CLASSES
#################
nc = 3
ndf = 18
class Vgg19(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # freeze the VGG feature extractor so its parameters receive no gradients
            for param in self.parameters():
                param.requires_grad = False
def forward(self, X):
# X = Variable(X, volatile=True)
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class VGGLoss(nn.Module):
def __init__(self, gpu_ids):
super(VGGLoss, self).__init__()
if gpu_ids >= 0:
self.vgg = Vgg19().cuda(device = gpu_ids)
else:
self.vgg = Vgg19()
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
self.mean_sq0 = nn.MSELoss()
self.mean_sq1 = nn.MSELoss()
def forward(self, x, y, pix = 0.1):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
pixel_loss = pix * self.mean_sq1(x, y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
# loss = loss + pixel_loss
return loss
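# A minimal usage sketch (commented; the batch shape and the cpu/gpu choice are
# illustrative assumptions, not part of this module):
#     criterion = VGGLoss(gpu_ids=-1)   # a negative id keeps the model on CPU
#     x, y = torch.rand(2, 3, 224, 224), torch.rand(2, 3, 224, 224)
#     loss = criterion(x, y)            # weighted multi-scale feature L1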
# class TotalVariationLoss(object):
# def __init__(self, device):
# if device >= 0:
# self.cuda0 = torch.device(f'cuda:{device}')
# else:
# self.cuda0 = 'cpu'
# def tv_loss(self, x):
# h1 = torch.sum((x[:,:,:,1:]-x[:,:,:,:-1])**2)
# h2 = torch.sum((x[:,:,1:,:]-x[:,:,:-1,:])**2)
# y = h1 + h2
# if self.cuda0 != 'cpu':
# y = y.to(self.cuda0)
# return y
#class LossFunction(object):
# def __init__(self, device):
# if device >= 0:
# cuda0 = torch.device(f'cuda:{device}')
# vgg = models.vgg16(pretrained=True).to(cuda0)
# self.totalVariationLoss = TotalVariationLoss(device)
# else:
# vgg = models.vgg16(pretrained=True)
# self.totalVariationLoss = TotalVariationLoss(device)
#
# self.mean_sq0 = nn.MSELoss()
# self.mean_sq1 = nn.MSELoss()
#
# self.vgg16Layers = nn.Sequential(*list(vgg.children())[0])[:11]
#
# for param in self.vgg16Layers.parameters():
# param.requires_grad = False
#
# def __call__(self, t, y, pix = 0.5, tv=1e-8):
#
# t_ = self.vgg16Layers(t)
# y_ = self.vgg16Layers(y)
# feature = self.mean_sq0(t_, y_)
# pixel_loss = pix * self.mean_sq1(t, y)
#
# total_variation_loss = tv * self.totalVariationLoss.tv_loss(y)
## loss = feature + pixel_loss + total_variation_loss
# loss = feature + pixel_loss
#
# return loss
#################
# FUNCTIONS
#################
def load_gausdata(size):
    '''
    Load all necessary items for creating inputs for the model.
    Inputs for the model will be signals * multivariate gaussian RFs.
    The targets will be just the images seen by the monkey. Returns gaus.
    '''
gaus_raw = np.load(f'multivariate_gaussians{size}_cropped.npy') # (192 x 240 x 240) is the shape
gaus = np.concatenate([gaus_raw[:144], gaus_raw[145:]]) #takes away the ones with respective Nan electrode (elec # = 144) in LFP data
gaus = torch.from_numpy(gaus).float()
return gaus
def load_ydata(images_set, size):
    '''Loads targets.
    ------------------
    These are images from the old testing dataset before splitting. Now that the data is split, use only this one and not the
    testing cropped images.
    Returns a numpy array.'''
if images_set == 'CIFAR':
targets = np.load('cropped_CIFAR_tosplit.npy')
else:
        if size == '240':
            targets = np.load('cropped_images_tosplit.npy')
        elif size == '96':
            targets = np.load('cropped_images96_tosplit.npy')
return targets
def load_LFPdata(set_t):
'''
Loads the LFP data.
-------------------
set_t: 'training' for loading training data, 'testing' for testing data.
-------------------
Returns float32.
'''
nn_data = np.load(f'../../sorted_data/LFP/{set_t}/LFP_{set_t}_splitted.npy')
return nn_data.astype('float32')
def make_iterator_unique(dot_number, set_t, batch_size, shuffle):
'''
Makes an iterator for this experiment. Iterator will output dot_numbers (to use as synthetic signal) and UNIQUE image indices.
'''
    img_indexes = np.load(f'{set_t}/index_{set_t}_LFP_split.npy').astype(int)
img_indexes = np.unique(img_indexes)
data_indices = torch.from_numpy(img_indexes)
dot_number = torch.from_numpy(dot_number)
data = dataset.TensorDataset(dot_number, data_indices)
return dataset.DataLoader(data, batch_size, shuffle = shuffle)
def make_iterator(nn_data, set_t, batch_size, shuffle):
'''
Makes an iterator for this experiment. It allows iteration through indices and the signals
Returns an iterator.'''
    img_indexes = np.load(f'../../sorted_data/LFP/{set_t}/index_{set_t}_LFP_split.npy').astype(int)
data_indices = torch.from_numpy(img_indexes)
data = dataset.TensorDataset(torch.from_numpy(nn_data.T), data_indices)
return dataset.DataLoader(data, batch_size, shuffle = shuffle)
def select_type_inputs(type_input, gaus_expand_to_batch, mean_signals_expand_to_gaus):
'''
Selects which type of inputs will be used for the model.
    If 'all_channels', the model input keeps all 191 channels;
    if 'V1_V4', the channels are summed into 2 channels (V1 and V4).
'''
if type_input == 'all_channels':
return (gaus_expand_to_batch * mean_signals_expand_to_gaus)
if type_input == 'V1_V4':
weighted_gaus = gaus_expand_to_batch * mean_signals_expand_to_gaus
v1 = weighted_gaus[:,:97].sum(1)
v4 = weighted_gaus[:,97:].sum(1)
return torch.stack((v1,v4), dim=1)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
# self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, kernel_size = 4, stride = 2, padding = 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, kernel_size =4, stride= 2, padding = 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8,kernel_size = 4, stride = 3, padding = 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(in_channels = ndf * 8, out_channels = 1, kernel_size =4, stride = 3, padding=0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
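# A minimal usage sketch (commented). The 96x96 input size is an assumption;
# with the strides above it collapses each image to a single probability:
#     netD = Discriminator()
#     netD.apply(weights_init)
#     out = netD(torch.rand(4, nc, 96, 96))   # -> shape (4, 1, 1, 1)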
|
{"hexsha": "d299db302c4671d55387a98da1ba140b326d555a", "size": 8937, "ext": "py", "lang": "Python", "max_stars_repo_path": "withoutNoise/module_split.py", "max_stars_repo_name": "lelynn/RF_GANsynth", "max_stars_repo_head_hexsha": "d1e58f2756c66e06ec7ec8544f0a7456f58e2055", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "withoutNoise/module_split.py", "max_issues_repo_name": "lelynn/RF_GANsynth", "max_issues_repo_head_hexsha": "d1e58f2756c66e06ec7ec8544f0a7456f58e2055", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "withoutNoise/module_split.py", "max_forks_repo_name": "lelynn/RF_GANsynth", "max_forks_repo_head_hexsha": "d1e58f2756c66e06ec7ec8544f0a7456f58e2055", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8566176471, "max_line_length": 137, "alphanum_fraction": 0.5818507329, "include": true, "reason": "import numpy", "num_tokens": 2396}
|
#define BOOST_TEST_MODULE pcraster dal space step mapper
#include <boost/test/unit_test.hpp>
#include "dal_MathUtils.h"
#include "dal_SpaceStepMapper.h"
BOOST_AUTO_TEST_CASE(test)
{
using namespace dal;
{
SpaceStepMapper mapper;
BOOST_CHECK(!mapper.isValid());
}
{
SpaceStepMapper mapper(0, 5.0, 0.5);
BOOST_CHECK(comparable(mapper.destination(1.0), 5.5));
BOOST_CHECK(comparable(mapper.destination(0.0), 5.0));
BOOST_CHECK(comparable(mapper.destination(-1.0), 4.5));
BOOST_CHECK(comparable(mapper.destination(0.5), 5.25));
BOOST_CHECK(comparable(mapper.destination(-0.5), 4.75));
BOOST_CHECK(comparable(mapper.source(5.5), 1.0));
BOOST_CHECK(comparable(mapper.source(5.0), 0.0));
BOOST_CHECK(comparable(mapper.source(4.5), -1.0));
BOOST_CHECK(comparable(mapper.source(5.25), 0.5));
BOOST_CHECK(comparable(mapper.source(4.75), -0.5));
}
{
SpaceStepMapper mapper(0, 74.85, -0.30);
BOOST_CHECK(comparable(mapper.destination(0), 74.85));
BOOST_CHECK(comparable(mapper.destination(1), 74.55));
BOOST_CHECK(comparable(mapper.destination(-0.5), 75.0));
BOOST_CHECK(comparable(mapper.source(0.15), 249.0));
BOOST_CHECK(comparable(mapper.source(74.85), 0.0));
BOOST_CHECK(comparable(mapper.source(74.55), 1.0));
BOOST_CHECK(comparable(mapper.source(75.0), -0.5));
}
{
SpaceStepMapper mapper(1, 74.85, -0.30);
BOOST_CHECK(comparable(mapper.destination(1), 74.85));
BOOST_CHECK(comparable(mapper.destination(2), 74.55));
BOOST_CHECK(comparable(mapper.destination(0.5), 75.0));
BOOST_CHECK(comparable(mapper.source(74.85), 1.0));
BOOST_CHECK(comparable(mapper.source(74.55), 2.0));
BOOST_CHECK(comparable(mapper.source(75.0), 0.5));
}
}
|
{"hexsha": "5929a8e435e65ac32af499844d3449e21ba0b8ef", "size": 1769, "ext": "cc", "lang": "C++", "max_stars_repo_path": "pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_dal/dal_SpaceStepMapperTest.cc", "max_stars_repo_name": "quanpands/wflow", "max_stars_repo_head_hexsha": "b454a55e4a63556eaac3fbabd97f8a0b80901e5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_dal/dal_SpaceStepMapperTest.cc", "max_issues_repo_name": "quanpands/wflow", "max_issues_repo_head_hexsha": "b454a55e4a63556eaac3fbabd97f8a0b80901e5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_dal/dal_SpaceStepMapperTest.cc", "max_forks_repo_name": "quanpands/wflow", "max_forks_repo_head_hexsha": "b454a55e4a63556eaac3fbabd97f8a0b80901e5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5, "max_line_length": 60, "alphanum_fraction": 0.6919163369, "num_tokens": 538}
|
try:
import unzip_requirements
except ImportError:
pass
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import io
import boto3
import requests
from distil_bilstm_pos_tagger.distilbert_utils.tokenization_distilbert import DistilBertTokenizer
import nltk
import numpy as np
class BiLSTMTagger(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
super(BiLSTMTagger, self).__init__()
self.hidden_dim = hidden_dim
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
# The LSTM takes word embeddings as inputs, and outputs hidden states
# with dimensionality hidden_dim.
self.lstm = nn.LSTM(embedding_dim, hidden_dim,bidirectional=True)
# The linear layer that maps from hidden state space to tag space
self.hidden2tag = nn.Linear(hidden_dim*2, tagset_size)
def forward(self, sentence):
embeds = self.word_embeddings(sentence)
lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
tag_scores = F.log_softmax(tag_space, dim=1)
return tag_scores,tag_space
class DistilBiLstmPosTagger:
def __init__(self,model_path=None):
self.tokenizer = DistilBertTokenizer("bert-base-uncased-vocab.txt")
self.EMBEDDING_DIM = 256
self.HIDDEN_DIM = 512
self.lab_list = ['NNS', 'CD', 'TO', 'VBD', 'WP$', 'LS', 'RP', 'SYM', 'VBN', 'NNPS', 'RBR', 'JJS', 'VBP', 'MD', 'JJ', 'CC', 'VBG', 'IN', 'WP', 'PRP', 'PUNC', 'POS', 'FW', 'JJR', 'EX', 'WRB', 'DT', 'UH', 'VB', 'VBZ', 'RB', 'RBS', 'NN', 'WDT', 'NNP', 'PRP$', 'PDT']
self.label_map = {label:i for i, label in enumerate(self.lab_list)}
self.ix_to_tag = {i:label for i, label in enumerate(self.lab_list)}
self.model = BiLSTMTagger(self.EMBEDDING_DIM, self.HIDDEN_DIM, self.tokenizer.vocab_size, len(self.label_map))
        if model_path is not None:
state_dict = torch.load(model_path,map_location='cpu')
else:
state_dict = self.load_model()
self.model.load_state_dict(state_dict)
self.model.eval()
def load_model(self):
bucket = os.getenv("BUCKET_NAME")
model_path = os.getenv("POS_MODEL")
s3 = boto3.resource("s3")
modelObj = s3.Object(bucket_name=bucket, key=model_path)
state_dict = torch.load(
io.BytesIO(modelObj.get()["Body"].read()), map_location="cpu"
)
return state_dict
    def label_selector(self,predicted_label_list,tokenized_sent):
        '''Map wordpiece-level predicted tags back to one tag per word token.'''
predicted_list =[]
start = 0
end = 1
for token in tokenized_sent:
window_len = len(self.tokenizer.tokenize(token))
if window_len ==1:
predicted_list.append(predicted_label_list[start])
start=end
end+=1
elif window_len >1:
end = window_len +start
if (predicted_label_list[start] == 'PUNC'):
lab_check = False
for lab in predicted_label_list[start:end+1]:
if (lab != 'PUNC'):
predicted_list.append(lab)
lab_check = True
break
if (lab_check==False):
predicted_list.append(predicted_label_list[start])
else:
pass
else:
predicted_list.append(predicted_label_list[start])
start=end
end+=1
return predicted_list
def get_sent_pos_tags(self, text):
batch_size = 128
predicted_label_list = []
tokenized_sent = nltk.word_tokenize(text)
text = " ".join(tokenized_sent)
input_ids = self.tokenizer.encode(text,add_special_tokens=False)
for i in range(0,len(input_ids),batch_size):
batch_input_ids = input_ids[i:i+batch_size]
batch_input_ids = torch.tensor(batch_input_ids)
with torch.no_grad():
tag_scores,_ = self.model(batch_input_ids)
for sc in tag_scores:
predicted_label_list.append(self.ix_to_tag[int(np.argmax(sc.cpu().detach().numpy()))])
predicted_list = self.label_selector(predicted_label_list,tokenized_sent)
sent_tag = []
for i in zip(tokenized_sent,predicted_list):
sent_tag.append(i)
return sent_tag
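# A minimal usage sketch (commented; the model path is hypothetical and assumes
# a trained state dict plus nltk's tokenizer data are available locally):
#     tagger = DistilBiLstmPosTagger(model_path="pos_model.pt")
#     print(tagger.get_sent_pos_tags("The quick brown fox jumps over the lazy dog."))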
|
{"hexsha": "24666fbced5065a1ecf988843dbc8fc2453adafb", "size": 4558, "ext": "py", "lang": "Python", "max_stars_repo_path": "pkg/distil_bilstm_pos_tagger/distil_bilstm_pos_tagger.py", "max_stars_repo_name": "etherlabsio/ai-engine", "max_stars_repo_head_hexsha": "e73a4419a34db42a410e2a7e7629eb946b86f2c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pkg/distil_bilstm_pos_tagger/distil_bilstm_pos_tagger.py", "max_issues_repo_name": "etherlabsio/ai-engine", "max_issues_repo_head_hexsha": "e73a4419a34db42a410e2a7e7629eb946b86f2c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pkg/distil_bilstm_pos_tagger/distil_bilstm_pos_tagger.py", "max_forks_repo_name": "etherlabsio/ai-engine", "max_forks_repo_head_hexsha": "e73a4419a34db42a410e2a7e7629eb946b86f2c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-19T11:07:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-19T11:07:42.000Z", "avg_line_length": 39.9824561404, "max_line_length": 270, "alphanum_fraction": 0.6064063186, "include": true, "reason": "import numpy", "num_tokens": 1073}
|
'''
This script uses the SCOPUS data on ERNIE to generate data expected in the Abt tables
This script is revised to use an updated version of the documents table, cci_s_documents_jeroen_updated,
that uses document citation counts from a frozen dataset
'''
import psycopg2
import argparse
import pandas as pd
import numpy as np
import chord
from math import ceil
# Collect user input and possibly override defaults based on that input
parser = argparse.ArgumentParser(description='''
This script interfaces with the PostgreSQL database and then creates summary tables for the Abt project
''', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-ph','--postgres_host',help='the server hosting the PostgreSQL server',default='localhost',type=str)
parser.add_argument('-pd','--postgres_dbname',help='the database to query in the PostgreSQL server',type=str,required=True)
parser.add_argument('-pp','--postgres_port',help='the port hosting the PostgreSQL service on the server', default='5432',type=int)
parser.add_argument('-U','--postgres_user',help='the PostgreSQL user to log in as',required=True)
parser.add_argument('-W','--postgres_password',help='the password of the PostgreSQL user',required=True)
args = parser.parse_args()
postgres_dsn={'host':args.postgres_host,'dbname':args.postgres_dbname,'port':args.postgres_port,'user':args.postgres_user,'password':args.postgres_password}
postgres_conn=psycopg2.connect(" ".join("{}={}".format(k,postgres_dsn[k]) for k in postgres_dsn))
# Collect a dataframe on the number of papers per center where the papers are known sponsor linked papers
def total_pubs_per_center(postgres_conn):
df = pd.read_sql_query('''
SELECT award_number,
(SELECT publication_year FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as publication_year,
count(1) as n_pubs
FROM cci_s_award_paper_matches a
WHERE scopus_id in (SELECT scopus_id FROM cci_s_documents_jeroen_updated)
AND award_number in (SELECT award_number FROM cci_phase_awards where phase is not null)
GROUP BY award_number, publication_year
ORDER BY award_number, publication_year;
''', con=postgres_conn)
return (df)
# Collect a dataframe on the number of citations per center where the papers are known sponsor linked papers
def total_citations_per_center(postgres_conn):
df = pd.read_sql_query('''
SELECT award_number,publication_year,sum(n_citations) as sum_citations
FROM
(
SELECT award_number,
(SELECT publication_year FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as publication_year,
(SELECT scopus_cited_by_count FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as n_citations
FROM cci_s_award_paper_matches a
WHERE scopus_id in (SELECT scopus_id FROM cci_s_documents_jeroen_updated)
AND award_number in (SELECT award_number FROM cci_phase_awards where phase is not null)
) foo
GROUP BY award_number, publication_year
ORDER BY award_number, publication_year;
''', con=postgres_conn)
return (df)
# Collect a dataframe for centile information for sponsor linked papers
def sponsored_centiles(postgres_conn):
df = pd.read_sql_query('''
WITH award_author_documents AS
(
SELECT a.award_number,
a.author_id,
a.first_year,
b.scopus_id,
(SELECT c.publication_year FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as publication_year,
(SELECT c.scopus_cited_by_count FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as n_citations
FROM cci_s_author_search_results a
INNER JOIN cci_s_author_document_mappings b -- May need to be chopped down to give a more fine tuned look at viable author ids
ON a.author_id=b.author_id
INNER JOIN sl_sr_all_personel_and_comp d
ON a.author_id=d.author_id
WHERE a.award_number='COMP'
OR a.award_number IN (SELECT award_number FROM cci_phase_awards WHERE phase IN ('II'))
AND scopus_id NOT IN (SELECT scopus_id FROM cci_s_award_paper_matches)
),
sponsored_documents AS
(
SELECT award_number,
(SELECT DISTINCT b.first_year FROM cci_phase_awards b WHERE a.award_number=b.award_number) as first_year,
scopus_id,
(SELECT b.publication_year FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as publication_year,
(SELECT b.scopus_cited_by_count FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as n_citations
FROM cci_s_award_paper_matches a
)
SELECT
award_number,
sponsored,
publication_year,
min(centile) as min,
percentile_cont(0.25) WITHIN GROUP (ORDER BY centile) as pct_25,
percentile_cont(0.50) WITHIN GROUP (ORDER BY centile) as pct_50,
percentile_cont(0.75) WITHIN GROUP (ORDER BY centile) as pct_75,
max(centile) as max,avg(centile) as mean
FROM(
SELECT scopus_id,sponsored,award_number,publication_year,n_citations, cume_dist() over (PARTITION BY publication_year ORDER BY n_citations)*100 as centile
FROM (
SELECT DISTINCT award_number, first_year,scopus_id,publication_year,n_citations,'UNKNOWN' as sponsored
FROM award_author_documents
WHERE publication_year BETWEEN first_year AND first_year+5
UNION ALL
SELECT DISTINCT award_number, first_year,scopus_id,publication_year,n_citations,'SPONSORED' as sponsored
FROM sponsored_documents
WHERE first_year IS NOT NULL
AND publication_year IS NOT NULL
) foo
) bar
GROUP BY award_number,sponsored,publication_year
HAVING sponsored='SPONSORED'
ORDER BY avg(centile);
''', con=postgres_conn)
return (df)
# Collect a dataframe for centile information for sponsor linked papers
def sponsored_centile_list(postgres_conn):
df = pd.read_sql_query('''
WITH award_author_documents AS
(
SELECT a.award_number,
a.author_id,
a.first_year,
b.scopus_id,
(SELECT c.publication_year FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as publication_year,
(SELECT c.scopus_cited_by_count FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as n_citations
FROM cci_s_author_search_results a
INNER JOIN cci_s_author_document_mappings b -- May need to be chopped down to give a more fine tuned look at viable author ids
ON a.author_id=b.author_id
INNER JOIN sl_sr_all_personel_and_comp d
ON a.author_id=d.author_id
WHERE a.award_number='COMP'
OR a.award_number IN (SELECT award_number FROM cci_phase_awards WHERE phase IN ('II'))
AND scopus_id NOT IN (SELECT scopus_id FROM cci_s_award_paper_matches)
),
sponsored_documents AS
(
SELECT award_number,
(SELECT DISTINCT b.first_year FROM cci_phase_awards b WHERE a.award_number=b.award_number) as first_year,
scopus_id,
(SELECT b.publication_year FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as publication_year,
(SELECT b.scopus_cited_by_count FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as n_citations
FROM cci_s_award_paper_matches a
)
SELECT
scopus_id,
centile,
publication_year,
award_number
FROM(
SELECT scopus_id,sponsored,award_number,publication_year,n_citations, cume_dist() over (PARTITION BY publication_year ORDER BY n_citations)*100 as centile
FROM (
SELECT DISTINCT award_number, first_year,scopus_id,publication_year,n_citations,'UNKNOWN' as sponsored
FROM award_author_documents
WHERE publication_year BETWEEN first_year AND first_year+5
UNION ALL
SELECT DISTINCT award_number, first_year,scopus_id,publication_year,n_citations,'SPONSORED' as sponsored
FROM sponsored_documents
WHERE first_year IS NOT NULL
AND publication_year IS NOT NULL
) foo
) bar WHERE sponsored='SPONSORED'
ORDER BY centile DESC;
''', con=postgres_conn)
return (df)
# Collect a dataframe for centile information for sponsor linked papers
def sponsored_centile_list_w_extra_awards(postgres_conn):
df = pd.read_sql_query('''
WITH award_author_documents AS
(
SELECT a.award_number,
a.author_id,
a.first_year,
b.scopus_id,
(SELECT c.publication_year FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as publication_year,
(SELECT c.scopus_cited_by_count FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as n_citations
FROM cci_s_author_search_results a
INNER JOIN cci_s_author_document_mappings b -- May need to be chopped down to give a more fine tuned look at viable author ids
ON a.author_id=b.author_id
INNER JOIN sl_sr_all_personel_and_comp d
ON a.author_id=d.author_id
WHERE a.award_number='COMP'
OR a.award_number IN (SELECT award_number FROM cci_phase_awards WHERE phase IN ('II'))
AND scopus_id NOT IN (SELECT scopus_id FROM cci_s_award_paper_matches)
),
sponsored_documents AS
(
SELECT award_number,
(SELECT DISTINCT b.first_year FROM cci_phase_awards b WHERE a.award_number=b.award_number) as first_year,
scopus_id,
(SELECT b.publication_year FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as publication_year,
(SELECT b.scopus_cited_by_count FROM cci_s_documents_jeroen_updated b WHERE a.scopus_id=b.scopus_id) as n_citations
FROM cci_s_award_paper_matches a
)
SELECT
scopus_id,
centile,
publication_year,
award_number
FROM(
SELECT scopus_id,sponsored,award_number,publication_year,n_citations, cume_dist() over (PARTITION BY publication_year ORDER BY n_citations)*100 as centile
FROM (
SELECT DISTINCT award_number, first_year,scopus_id,publication_year,n_citations,'UNKNOWN' as sponsored
FROM award_author_documents
WHERE publication_year BETWEEN first_year AND first_year+5
UNION ALL
SELECT DISTINCT award_number, first_year,scopus_id,publication_year,n_citations,'SPONSORED' as sponsored
FROM sponsored_documents
WHERE publication_year IS NOT NULL
) foo
) bar WHERE sponsored='SPONSORED'
ORDER BY centile DESC;
''', con=postgres_conn)
return (df)
# perc_pubs_per_center_with_n_or_more_participants
def perc_pubs_per_center_with_n_or_more_participants(postgres_conn, n):
df = pd.read_sql_query('''
WITH award_author_documents AS
(
SELECT a.award_number,
a.author_id,
a.first_year,
b.scopus_id,
(SELECT c.publication_year FROM cci_s_documents_jeroen_updated c WHERE b.scopus_id=c.scopus_id) as publication_year
FROM cci_s_author_search_results a
INNER JOIN cci_s_author_document_mappings b
ON a.author_id=b.author_id
INNER JOIN sl_sr_all_personel_and_comp d
ON a.author_id=d.author_id
WHERE a.award_number!='COMP'
)
    SELECT sum(CASE WHEN n_participants >= %(n)s THEN 1 ELSE 0 END)::decimal/sum(1) as percentage_pubs_with_n_or_more_participants
FROM(
SELECT DISTINCT scopus_id,count(author_id) as n_participants
FROM award_author_documents
WHERE scopus_id in (SELECT scopus_id FROM cci_s_award_paper_matches
WHERE scopus_id in (SELECT scopus_id FROM cci_s_documents_jeroen_updated)
AND award_number in (SELECT award_number FROM cci_phase_awards where phase is not null))
GROUP BY scopus_id
ORDER BY scopus_id
) foo;
    ''', con=postgres_conn, params={'n': n})
return (df)
n_pubs_df = total_pubs_per_center(postgres_conn)
n_pubs_df_pivot=n_pubs_df.pivot(index='award_number',columns='publication_year',values='n_pubs')
print(n_pubs_df_pivot)
n_pubs_df_pivot.to_csv('sponsored_papers_n_pubs.csv')
n_citations_df = total_citations_per_center(postgres_conn)
n_citations_df_pivot=n_citations_df.pivot(index='award_number',columns='publication_year',values='sum_citations')
print(n_citations_df_pivot)
n_citations_df_pivot.to_csv('sponsored_papers_n_citations.csv')
avg_centiles_df = sponsored_centiles(postgres_conn)
avg_centiles_df_pivot=avg_centiles_df.pivot(index='award_number',columns='publication_year',values='mean')
print(avg_centiles_df_pivot)
avg_centiles_df_pivot.to_csv('sponsored_papers_avg_centiles.csv')
centile_list_df = sponsored_centile_list(postgres_conn)
print(centile_list_df)
centile_list_df.to_csv('sponsored_centile_list.csv',index=False)
centile_list_df_w_extra_awards = sponsored_centile_list_w_extra_awards(postgres_conn)
print(centile_list_df_w_extra_awards)
centile_list_df_w_extra_awards.to_csv('sponsored_centile_list_w_extra_awards.csv',index=False)
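# Example call for the parameterized participant-count summary (commented out;
# the threshold n=2 is purely illustrative):
# perc_df = perc_pubs_per_center_with_n_or_more_participants(postgres_conn, n=2)
# print(perc_df)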
|
{"hexsha": "74f8e51d5af5f02091231b5b93b197e09b0f1e41", "size": 13529, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scopus/Abt_Analysis/Analysis_scripts/sponsored_pubs_summary_statistics.py", "max_stars_repo_name": "chackoge/ERNIE_Plus", "max_stars_repo_head_hexsha": "7e480c47a69fc2f736ac7fb55ece35dbff919938", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-09-26T23:45:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-18T22:58:38.000Z", "max_issues_repo_path": "Scopus/Abt_Analysis/Analysis_scripts/sponsored_pubs_summary_statistics.py", "max_issues_repo_name": "NETESOLUTIONS/ERNIE", "max_issues_repo_head_hexsha": "454518f28b39a6f37ad8dde4f3be15d4dccc6f61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scopus/Abt_Analysis/Analysis_scripts/sponsored_pubs_summary_statistics.py", "max_forks_repo_name": "NETESOLUTIONS/ERNIE", "max_forks_repo_head_hexsha": "454518f28b39a6f37ad8dde4f3be15d4dccc6f61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-11-22T13:42:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-16T17:58:03.000Z", "avg_line_length": 47.4701754386, "max_line_length": 160, "alphanum_fraction": 0.7217089216, "include": true, "reason": "import numpy", "num_tokens": 3137}
|
import argparse
import os,sys
import numpy as np
import datetime
from train import get_symbol, data_loader
import mxnet as mx
from mxnet import ndarray as nd
import ipdb
def softmax(x):
    # subtract the row-wise max before exponentiating for numerical stability
    exp = np.exp(x - x.max(axis=1, keepdims=True))
    partition = exp.sum(axis=1, keepdims=True)
    return exp / partition
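# worked example: softmax(np.array([[0.0, 1.0]])) -> approx [[0.269, 0.731]],
# and every row of the output sums to 1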
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--model', default='model/20d/model,2', help='path to load model.')
parser.add_argument('--data-dir', default='',
help='training set directory')
args = parser.parse_args()
ctx = []
cvd = os.environ.get('CUDA_VISIBLE_DEVICES', '').strip()
if len(cvd) > 0:
for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx) == 0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
vec = args.model.split(',')
print('loading', vec)
sym, arg_params, aux_params = mx.model.load_checkpoint(vec[0], int(vec[1]))
all_layers = sym.get_internals()
sym = all_layers['fc1'+'_output']
model = mx.mod.Module(context=ctx, symbol=sym)
model.bind(data_shapes=[('data', (1, 20))])
model.set_params(arg_params, aux_params)
path_data = os.path.join(args.data_dir, "train")
test_loader = data_loader(path_data, batch_size=64, shuffle=False)
acc_num = 0
number = 0
for dat in test_loader:
label = dat.label[0].asnumpy()
data = mx.io.DataBatch(data=dat.data)
model.forward(data, is_train=False)
ret = model.get_outputs()[0].asnumpy()
ret = softmax(ret)
pred = np.argmax(ret, 1)
    for i in range(ret.shape[0]):
        if ret[i, 1] > 0.7:
            pred[i] = 1
            if pred[i] == label[i]:
                acc_num += 1
            # only confident positive predictions enter the denominator
            number += 1
        else:
            pred[i] = 0
#acc_num += sum(pred==label)
#number += label.shape[0]
print('%d / %d = %f'%(acc_num, number, 1.0*acc_num/number))
|
{"hexsha": "79fa9a197efdfd42f16953a8f9534c4b79708442", "size": 1855, "ext": "py", "lang": "Python", "max_stars_repo_path": "stranger/test.py", "max_stars_repo_name": "hudmgy/insightface", "max_stars_repo_head_hexsha": "2ebc87e1bde3d4ef04bc0d4498038a0bc01b0ddc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-05T00:54:27.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-05T00:54:27.000Z", "max_issues_repo_path": "stranger/test.py", "max_issues_repo_name": "hudmgy/insightface", "max_issues_repo_head_hexsha": "2ebc87e1bde3d4ef04bc0d4498038a0bc01b0ddc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stranger/test.py", "max_forks_repo_name": "hudmgy/insightface", "max_forks_repo_head_hexsha": "2ebc87e1bde3d4ef04bc0d4498038a0bc01b0ddc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2794117647, "max_line_length": 87, "alphanum_fraction": 0.6371967655, "include": true, "reason": "import numpy", "num_tokens": 507}
|
import Compat: @static, is_windows, is_apple
@static if is_windows()
error("The shoco C library doesn't support Windows")
end
using BinDeps
@BinDeps.setup
shoco = library_dependency("shoco")
if isdir(srcdir(shoco))
rm(srcdir(shoco), recursive=true)
mkdir(srcdir(shoco))
end
if isdir(BinDeps.downloadsdir(shoco))
rm(BinDeps.downloadsdir(shoco), recursive=true)
mkdir(BinDeps.downloadsdir(shoco))
end
sha = "4dee0fc850cdec2bdb911093fe0a6a56e3623b71"
provides(Sources, URI("https://github.com/Ed-von-Schleck/shoco/archive/$(sha).zip"), shoco,
unpacked_dir="shoco-$sha")
provides(BuildProcess, (@build_steps begin
GetSources(shoco)
CreateDirectory(joinpath(BinDeps.builddir(shoco), "shoco"))
@build_steps begin
ChangeDirectory(joinpath(BinDeps.builddir(shoco), "shoco"))
FileRule(joinpath(libdir(shoco), "shoco." * BinDeps.shlib_ext), @build_steps begin
CreateDirectory(libdir(shoco))
CCompile(joinpath(srcdir(shoco), "shoco-$sha", "shoco.c"),
joinpath(libdir(shoco), "shoco." * BinDeps.shlib_ext),
["-fPIC", "-std=c99", is_apple() ? "-dynamiclib" : "-shared"], [])
end)
end
end), shoco)
@BinDeps.install Dict(:shoco => :shoco)
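# A minimal usage sketch (commented; not part of the build). It assumes shoco's
# documented C entry points, shoco_compress/shoco_decompress, and the install
# path produced above:
#     using Libdl
#     handle = Libdl.dlopen(joinpath(@__DIR__, "usr", "lib", "shoco." * Libdl.dlext))
#     compress = Libdl.dlsym(handle, :shoco_compress)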
|
{"hexsha": "0d6c583647b3cbd3cd2f00a44edff8d49ab3234a", "size": 1269, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/build.jl", "max_stars_repo_name": "JuliaPackageMirrors/Shoco.jl", "max_stars_repo_head_hexsha": "37931aac0afe4e669283d2282ffde2f0f3de757f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deps/build.jl", "max_issues_repo_name": "JuliaPackageMirrors/Shoco.jl", "max_issues_repo_head_hexsha": "37931aac0afe4e669283d2282ffde2f0f3de757f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deps/build.jl", "max_forks_repo_name": "JuliaPackageMirrors/Shoco.jl", "max_forks_repo_head_hexsha": "37931aac0afe4e669283d2282ffde2f0f3de757f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.511627907, "max_line_length": 91, "alphanum_fraction": 0.6737588652, "num_tokens": 378}
|
SUBROUTINE cast_shadow_main( &
dem_data, solar_data, sazi_data, &
dresx, dresy, spheroid, alat1, alon1, &
Aoff_x1, Aoff_x2, Aoff_y1, Aoff_y2, &
nlA_ori, nsA_ori, &
is_utm, &
nrow, ncol, nl, ns, dem_nr, dem_nc, &
a, solar, sazi, dem, alat, alon, mask, &
ierr, mask_all)
implicit none
! NOTE THAT THIS CODE CANNOT BE COMPILED WITH -O3, AS IT PRODUCES
! VERY DIFFERENT RESULTS WHEN IT IS. The only other optimisation
! level I have tried is -O0.
!
! Program to calculate cast shadow for a standard Landsat scene
! the program was originally written by DLB Jupp in Oct. 2010
! for a small sub_matrix and was modified by Fuqin Li in Oct.
! 2010 so that the program can be used for large landsat scene.
!
! Basically, a sub-matrix A is embedded in a larger DEM image
! and the borders must be large enough to find the shaded pixels.
! If we assume the solar azimuth and zenith angles change very
! little within the sub-matrix A, then the Landsat scene can be
! divided into several sub-matrices.
! For the Australian region, with 0.00025 degree resolution, the
! sub-matrix A is set to 500x500
!
! we also need to set extra DEM lines/columns to run the Landsat
! scene. This will change with elevation difference within the
! scene and solar zenith angle. For the Australian region and a Landsat
! scene with 0.00025 degree resolution, the maximum extra lines
! are set to 250 pixels/lines in each direction. This figure
! should be sufficient everywhere and at any time in Australia.
! Thus the DEM image will be larger than the Landsat image by
! 500 lines x 500 columns
!
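! For example (illustrative numbers only): with nlA_ori = nsA_ori = 500 and
! 250 extra pixels on every side (Aoff_* = 250), each 500x500 sub-matrix A
! is searched for shadows within a 1000x1000 window of the DEM.
!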
! Current program operates in all 4 Azimuth cases (the four quadrants)
!
! Arguments
! ==========
! dem_data is the dem data.
! solar_data is the solar zenith angle data.
! azi_data is the solar azimuth angle data.
! nrow and ncol are the number of rows and columns in the region.
! nl and ns are the number of rows and columns in the dem data
! dem_nr and dem_nc are the number of rows and columns in 'one chunk' of a submatrix (includes the boundary padding).
! dresx is the x cell size.
! dresy is the y cell size.
! spheroid is the spheroidal parameters.
! 1. Spheroid major axis
! 2. Inverse flattening
! 3. Eccentricity squared
! 4. Earth rotational angular velocity rad/sec
!
! ierr provides a spot for the a return error code.
! alat and alon are the latitude and longitude of the origin of the region.
! nlA_ori, nsA_ori are the sub-matrix lines and columns.
! is_utm indicates whether the inputs are in UTM (.true. == 'yes').
! mask_all holds the result mask.
! Aoff_x1 is the pixel number before the Landsat image and Aoff_x2 is the pixel number after the Landsat image
! Aoff_y1 is the line number before the Landsat image starts and Aoff_y2 is the line number after the Landsat image ends
!
! Errors
! ======
! in 20s to 50s: errors indexing various arrays.
! in 60s: set_border
! in 70s: get_proj_shadows
! ierr = 61: 'azimuth case not possible - phi_sun must be in 0 to 360 deg'
! ierr = 62: 'k_max gt k_setting'
! ierr = 63: 'add outside add_max ranges'
! ierr = 71: 'Parameters defining A are invalid'
! ierr = 72: 'Matrix A not embedded in image'
! ierr = 73: 'matrix A does not have sufficient y buffer'
! ierr = 74: 'matrix A does not have sufficient x buffer'
integer*4 k_setting
parameter (k_setting=1500)
! arguments
real*4 dem_data(nl, ns) !
real*4 solar_data(nrow, ncol) !
real*4 sazi_data(nrow, ncol) !
real*8 spheroid(4)
real*8 dresx, dresy, alat1, alon1
integer*4 Aoff_x1, Aoff_x2, Aoff_y1, Aoff_y2
integer*4 nlA_ori, nsA_ori
logical is_utm
integer*4 nrow, ncol, dem_nr, dem_nc, nl, ns
real*4 a(dem_nr, dem_nc) !
real*4 solar(nlA_ori, ncol) !
real*4 sazi(nlA_ori, ncol) !
real*4 dem(dem_nr, ns) !
real*8 alat(nlA_ori) !
real*8 alon(ncol) !
integer*2 mask(nlA_ori, nsA_ori) !
integer*2 mask_all(nrow, ncol) !
integer*4 ierr
! internal variables
integer*4 nsA, nlA, nchf, i, j, ii, jj
integer*4 k, l, kkx, kky, nmax_sub, Mmax_sub
integer istat
real*4 n_inc(k_setting) !
real*4 m_inc(k_setting) !
real*4 h_offset(k_setting) !
real*8 hx, hy
real*4 zmax, zmin
real*4 phi_sun
real*4 sun_zen
real*4 htol
logical exists
real pi, r2d, d2r, eps
common/base/pi,r2d,d2r,eps
!f2py intent(in) dem_data, solar_data, sazi_data
!f2py intent(in) dresx, dresy, spheroid, alat1, alon1
!f2py intent(in) Aoff_x1, Aoff_x2, Aoff_y1, Aoff_y2
!f2py intent(in) nlA_ori, nsA_ori
!f2py intent(in) is_utm
!f2py integer intent(hide),depend(solar_data) :: nrow=shape(solar_data,0), ncol=shape(solar_data,1)
!f2py integer intent(hide),depend(dem_data) :: nl=shape(dem_data,0), ns=shape(dem_data,1)
!f2py integer intent(hide),depend(Aoff_y1,nlA_ori,Aoff_y2,Aoff_x1,nsA_ori,Aoff_x2) :: dem_nr=Aoff_y1+nlA_ori+Aoff_y2, dem_nc=Aoff_x1+nsA_ori+Aoff_x2
!f2py intent(hide) :: a, solar, sazi, dem, alat, alon, mask
!f2py intent(out) ierr
!f2py intent(out) mask_all
! set basic constants
pi=4.0*atan(1.0)
r2d=180.0/pi
d2r=pi/180.0
eps=1.0e-7
! set the tolerance for occlusion in metres
! (usually >0 and less than 10.0m)
htol=1.0
if(is_utm) then
hx = dresx
hy = dresy
else
! calculate longitude for each pixel of the line
do j=1,ncol
alon(j)=alon1+(j-1)*dresx
enddo
endif
!--------------------------------------------------------------
! kky for line and kkx for column
! kky and kkx are the sub-matrix counts
kky=int(nrow/nlA_ori)
kkx=int(ncol/nsA_ori)
!write(*,*)"about to start main loop"
do k=1, kky
! calculate sub-matrix DEM dimensions
nlA=nlA_ori
mmax_sub=nlA+Aoff_y1+Aoff_y2
! not sure we really need to copy the array here - perhaps we could just index more cleverly below.
!write(*,*)"about to copy dem data"
!----------------------------bounds check----------------------------
!dem(dem_nr, ns)
!dem_data(nl, ns)
if(mmax_sub .gt. dem_nr) then
ierr = 20
return
endif
if((k-1)*nlA_ori+mmax_sub .gt. nl) then
ierr = 21
return
endif
!--------------------------end bounds check--------------------------
do j=1,ns
do i=1,mmax_sub
dem(i, j) = dem_data((k-1)*nlA_ori+i, j)
end do
end do
zmax=maxval(dem(1:mmax_sub,1:ns))
zmin=minval(dem(1:mmax_sub,1:ns))
!write(*,*)"about to copy solar data"
!----------------------------bounds check----------------------------
!solar(nlA_ori, ncol)
!solar_data(nrow, ncol)
if(nlA .gt. nlA_ori) then
ierr = 22
return
endif
if((k-1)*nlA_ori+nlA .gt. nrow) then
ierr = 23
return
endif
!--------------------------end bounds check--------------------------
do j=1,ncol
do i=1,nlA
solar(i, j) = solar_data((k-1)*nlA_ori+i, j)
enddo
enddo
do j=1,ncol
do i=1,nlA
sazi(i, j) = sazi_data((k-1)*nlA_ori+i, j)
enddo
enddo
ii=nlA/2
if(.not.is_utm) then
! calculate latitude for each line
do i=1,nlA
alat(i)=alat1-((k-1)*nlA_ori+i-1)*dresy
enddo
call geo2metres_pixel_size(alat(ii), dresx, dresy, &
spheroid, hx, hy, istat)
endif
! divide into several sub-matrices by column
!write(*,*)"about to start cols: kkx = ",kkx
do l=1,kkx
nsA=nsA_ori
nmax_sub=nsA+Aoff_x1+Aoff_x2
jj=(l-1)*nsA_ori+nsA/2
phi_sun=sazi(ii,jj)
! NOTE zenith + 3 degrees
sun_zen=solar(ii,jj)+3
!----------------------------bounds check----------------------------
!dem(dem_nr, ns)
!a(dem_nr, dem_nc)
if(mmax_sub .gt. dem_nr) then
ierr = 24
return
endif
if((l-1)*nsA_ori+nmax_sub .gt. ns) then
ierr = 25
return
endif
if(nmax_sub .gt. dem_nc) then
ierr = 26
return
endif
!--------------------------end bounds check--------------------------
do j=1,nmax_sub
do i=1,mmax_sub
a(i,j)=dem(i,(l-1)*nsA_ori+j)
enddo
enddo
!write(*,*)"about to call get_proj_shadows"
call get_proj_shadows(hx, hy, nmax_sub, mmax_sub, &
htol, phi_sun, sun_zen, zmax, zmin, a, mask, h_offset, &
n_inc, m_inc, Aoff_x1, Aoff_y1, nsA, nlA, k_setting, &
dem_nr, dem_nc, nlA_ori, nsA_ori, ierr)
!----------------------------bounds check----------------------------
!mask(nlA_ori, nsA_ori)
!mask_all(nrow, ncol)
if((k-1)*nlA_ori+nlA .gt. nrow) then
ierr = 27
return
endif
if((l-1)*nsA_ori+nsA .gt. ncol) then
ierr = 28
return
endif
if(nlA .gt. nlA_ori) then
ierr = 29
return
endif
if(nsA .gt. nsA_ori) then
ierr = 30
return
endif
!--------------------------end bounds check--------------------------
do j=1,nsA
do i=1,nlA
mask_all((k-1)*nlA_ori+i,(l-1)*nsA_ori+j)=mask(i,j)
enddo
enddo
enddo
! last column block (if required)
!write(*,*)"about to do last cols"
if (ncol .gt. kkx*nsA_ori) then
nsA=ncol-kkx*nsA_ori
nmax_sub=nsA+Aoff_x1+Aoff_x2
jj=kkx*nsA_ori+nsA/2
phi_sun=sazi(ii,jj)
! NOTE zenith + 3 degrees
sun_zen=solar(ii,jj)+3
!----------------------------bounds check----------------------------
!dem(dem_nr, ns)
!a(dem_nr, dem_nc)
if(mmax_sub .gt. dem_nr) then
ierr = 31
return
endif
if(nmax_sub .gt. dem_nc) then
ierr = 32
return
endif
if(kkx*nsA_ori+nmax_sub .gt. ns) then
ierr = 33
return
endif
!--------------------------end bounds check--------------------------
do i=1,mmax_sub
do j=1,nmax_sub
a(i,j)=dem(i,kkx*nsA_ori+j)
enddo
enddo
call get_proj_shadows(hx, hy, nmax_sub, mmax_sub, &
htol, phi_sun, sun_zen, zmax, zmin, a, mask, h_offset, &
n_inc, m_inc, Aoff_x1, Aoff_y1, nsA, nlA, k_setting, &
dem_nr, dem_nc, nlA_ori, nsA_ori, ierr)
!----------------------------bounds check----------------------------
!mask(nlA_ori, nsA_ori)
!mask_all(nrow, ncol)
if((k-1)*nlA_ori+nlA .gt. nrow) then
ierr = 34
return
endif
if(nlA .gt. nlA_ori) then
ierr = 35
return
endif
if(kkx*nsA_ori+nsA .gt. ncol) then
ierr = 36
return
endif
if(nsA .gt. nsA_ori) then
ierr = 37
return
endif
!--------------------------end bounds check--------------------------
do i=1,nlA
do j=1,nsA
mask_all((k-1)*nlA_ori+i,kkx*nsA_ori+j)=mask(i,j)
enddo
enddo
endif
enddo
!write(*,*)"about to do last rows"
! do the last rows (if required)
if (nrow .gt. kky*nlA_ori) then
nlA=nrow-kky*nlA_ori
mmax_sub=nlA+Aoff_y1+Aoff_y2
! not sure we really need to copy the array here - perhaps we could just index more cleverly below.
!----------------------------bounds check----------------------------
!dem(dem_nr, ns)
!dem_data(nl, ns)
if(mmax_sub .gt. dem_nr) then
ierr = 38
return
endif
if(kky*nlA_ori+mmax_sub .gt. nl) then
ierr = 39
return
endif
!--------------------------end bounds check--------------------------
do i=1,mmax_sub
do j=1,ns
dem(i, j) = dem_data(kky*nlA_ori+i, j)
end do
end do
zmax=maxval(dem(1:mmax_sub,1:ns))
zmin=minval(dem(1:mmax_sub,1:ns))
!----------------------------bounds check----------------------------
!solar(nlA_ori, ncol)
!solar_data(nrow, ncol)
if(nlA .gt. nlA_ori) then
ierr = 40
return
endif
if(kky*nlA_ori+nlA .gt. nrow) then
ierr = 41
return
endif
!--------------------------end bounds check--------------------------
do i=1,nlA
do j=1,ncol
solar(i,j) = solar_data(kky*nlA_ori+i, j)
enddo
do j=1,ncol
sazi(i,j) = sazi_data(kky*nlA_ori+i, j)
enddo
enddo
ii=nlA/2
if(.not.is_utm) then
! calculate latitude and longitude for the sub-matrix
do i=1,nlA
alat(i)=alat1-(kky*nlA_ori+i-1)*dresy
enddo
call geo2metres_pixel_size(alat(ii), dresx, dresy, &
spheroid, hx, hy, istat)
endif
! divide into several sub-matrices by column
do l=1,kkx
nsA=nsA_ori
nmax_sub=nsA+Aoff_x1+Aoff_x2
jj=(l-1)*nsA_ori+nsA/2
phi_sun=sazi(ii,jj)
! NOTE zenith + 3 degrees
sun_zen=solar(ii,jj)+3
!----------------------------bounds check----------------------------
!dem(dem_nr, ns)
!a(dem_nr, dem_nc)
if(mmax_sub .gt. dem_nr) then
ierr = 42
return
endif
if(nmax_sub .gt. dem_nc) then
ierr = 43
return
endif
if((l-1)*nsA_ori+nmax_sub .gt. ns) then
ierr = 44
return
endif
!--------------------------end bounds check--------------------------
do i=1,mmax_sub
do j=1,nmax_sub
a(i,j)=dem(i,(l-1)*nsA_ori+j)
enddo
enddo
call get_proj_shadows(hx, hy, nmax_sub, mmax_sub, &
htol, phi_sun, sun_zen, zmax, zmin, a, mask, h_offset, &
n_inc, m_inc, Aoff_x1, Aoff_y1, nsA, nlA, k_setting, &
dem_nr, dem_nc, nlA_ori, nsA_ori, ierr)
!----------------------------bounds check----------------------------
!mask(nlA_ori, nsA_ori)
!mask_all(nrow, ncol)
if(kky*nlA_ori+nlA .gt. nrow) then
ierr = 45
return
endif
if(nlA .gt. nlA_ori) then
ierr = 46
return
endif
if((l-1)*nsA_ori+nsA .gt. ncol) then
ierr = 47
return
endif
if(nsA .gt. nsA_ori) then
ierr = 48
return
endif
!--------------------------end bounds check--------------------------
do i=1,nlA
do j=1,nsA
mask_all(kky*nlA_ori+i,(l-1)*nsA_ori+j)=mask(i,j)
enddo
enddo
enddo
if (ncol .gt. kkx*nsA_ori) then
nsA=ncol-kkx*nsA_ori
nmax_sub=nsA+Aoff_x1+Aoff_x2
jj=kkx*nsA_ori+nsA/2
phi_sun=sazi(ii,jj)
! NOTE zenith + 3 degrees
sun_zen=solar(ii,jj)+3
!----------------------------bounds check----------------------------
!dem(dem_nr, ns)
!a(dem_nr, dem_nc)
if(mmax_sub .gt. dem_nr) then
ierr = 49
return
endif
if(nmax_sub .gt. dem_nc) then
ierr = 50
return
endif
if(kkx*nsA_ori+nmax_sub .gt. ns) then
ierr = 51
return
endif
!--------------------------end bounds check--------------------------
do i=1,mmax_sub
do j=1,nmax_sub
a(i,j)=dem(i,kkx*nsA_ori+j)
enddo
enddo
call get_proj_shadows(hx, hy, nmax_sub, mmax_sub, &
htol, phi_sun, sun_zen, zmax, zmin, a, mask, h_offset, &
n_inc, m_inc, Aoff_x1, Aoff_y1, nsA, nlA, k_setting, &
dem_nr, dem_nc, nlA_ori, nsA_ori, ierr)
!----------------------------bounds check----------------------------
!mask(nlA_ori, nsA_ori)
!mask_all(nrow, ncol)
if(kky*nlA_ori+nlA .gt. nrow) then
ierr = 52
return
endif
if(nlA .gt. nlA_ori) then
ierr = 53
return
endif
if(kkx*nsA_ori+nsA .gt. ncol) then
ierr = 54
return
endif
if(nsA .gt. nsA_ori) then
ierr = 55
return
endif
!--------------------------end bounds check--------------------------
do i=1,nlA
do j=1,nsA
mask_all(kky*nlA_ori+i,kkx*nsA_ori+j)=mask(i,j)
enddo
enddo
endif
endif
!write(*,*)"nrow = ",nrow
!write(*,*)"ncol = ",ncol
!write(*,*)"nlA_ori = ",nlA_ori
!write(*,*)"nsA_ori = ",nsA_ori
!write(*,*)"nl = ",nl
!write(*,*)"ns = ",ns
!write(*,*)"dem_nr = ",dem_nr
!write(*,*)"dem_nc = ",dem_nc
!write(*,*)"Aoff_x1 = ",Aoff_x1
!write(*,*)"Aoff_x2 = ",Aoff_x2
!write(*,*)"Aoff_y1 = ",Aoff_y1
!write(*,*)"Aoff_y2 = ",Aoff_y2
!write(*,*)"kky = ",kky
!write(*,*)"kkx = ",kkx
!write(*,*)"end of shade_main_landsat_pixel"
END SUBROUTINE cast_shadow_main
|
{"hexsha": "cec33bf722af55b7b1ebd7655f23acdd04d07472", "size": 18270, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "wagl/f90_sources/cast_shadow_main.f90", "max_stars_repo_name": "Oceancolour-RG/wagl", "max_stars_repo_head_hexsha": "f002a1c0a373d21758d44d2a808bdfd755d90226", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2018-05-30T23:42:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-25T14:21:46.000Z", "max_issues_repo_path": "wagl/f90_sources/cast_shadow_main.f90", "max_issues_repo_name": "Oceancolour-RG/wagl", "max_issues_repo_head_hexsha": "f002a1c0a373d21758d44d2a808bdfd755d90226", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2018-02-20T05:31:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-23T23:38:15.000Z", "max_forks_repo_path": "wagl/f90_sources/cast_shadow_main.f90", "max_forks_repo_name": "Oceancolour-RG/wagl", "max_forks_repo_head_hexsha": "f002a1c0a373d21758d44d2a808bdfd755d90226", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-02-20T05:08:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T23:16:41.000Z", "avg_line_length": 32.625, "max_line_length": 148, "alphanum_fraction": 0.4969348659, "num_tokens": 5386}
|
"""
Module report
================
A module with helper functions for computing pre-defined plots for the analysis
of fragment combinations.
"""
import warnings
import logging
import argparse
import sys
from shutil import rmtree
from datetime import datetime
import re
from pathlib import Path
from collections import OrderedDict
# data handling
import numpy as np
import json
import pandas as pd
from pandas import DataFrame
import networkx as nx
# data visualization
from matplotlib import pyplot as plt
import seaborn as sns
# from pylab import savefig
from adjustText import adjust_text
# chemoinformatics
import rdkit
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Mol
from rdkit.Chem import rdChemReactions
# docs
from typing import List
from typing import Tuple
from rdkit import RDLogger
# custom libraries
import npfc
from npfc import utils
from npfc import load
from npfc import save
from npfc import fragment_combination
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLOBALS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
logger = logging.getLogger(__name__)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CLASSES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class ReporterProcess:
def __init__(self,
chunk_load: str,
chunk_std_passed: str,
chunk_std_filtered: str,
chunk_std_error: str,
chunk_dedupl: str,
WD_out: str,
max_examples: int = 1,
):
pass
class ReporterFragmentSearch:
def __init__(self):
pass
class ReporterFragmentCombination:
def __init__(self):
pass
class ReporterFragmentCombinationGraph:
def __init__(self):
pass
class ReporterPNP:
def __init__(self):
pass
def _parse_std_chunks(chunks: List[str]) -> DataFrame:
"""Parse all output files of a category (passed, filtered or error) for the std step and return a corresponding a results summary.
:param chunks: output files for a category of std results
:return: summary DF with counts
"""
# parse all files
dfs = []
for c in chunks:
df = pd.read_csv(c, sep="|", compression="gzip").groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
if len(df.index) > 0:
dfs.append(df)
# if no case was found, return an empty dataframe
if len(dfs) == 0:
df = pd.DataFrame([], columns=["Count", "Category"])
return df
# concatenate all dataframes and compute the sum of all counts
df = pd.concat(dfs)
df["Category"] = df.index # I don't know how to group by index!
df = df.groupby("Category").sum()
df["Category"] = df.index.map(lambda x: x.replace('filter_', ''))
# df['Perc_status'] = df['Count'].map(lambda x: f"{x/tot_mols:.2%}")
return df
def preprocess(input_load: str,
output_file: str,
input_std_passed: str = None,
input_std_filtered: str = None,
input_std_error: str = None,
input_dedupl: str = None,
input_depict: str = None,
num_examples: int = 0,
):
"""The information is not looked everywhere using the same logic:
- input_load: the log file from the chunk being loaded
"""
# load
df = pd.read_csv(input_load, sep="@", header=None) # char not found in the log file
records = df[df[0].str.contains("FAILURE")].iloc[0][0].split()
num_total = int(records[6])
num_errors = int(records[9])
num_passed = int(df[df[0].str.contains("SAVED")].iloc[0][0].split()[6])
if num_total != num_errors + num_passed:
raise ValueError(f"Error during parsing of log file: '{input_load}': {num_passed} + {num_errors} != {num_total}")
df_load = DataFrame({'Category': ['loaded', 'cannot_load'], 'Count': [num_passed, num_errors]})
# standardize
df_std_passed = load.file(input_std_passed, decode=False)[['task', 'status']].groupby("task").count()[['status']].reset_index().rename({'task': 'Category', 'status': 'Count'}, axis=1)
df_std_filtered = load.file(input_std_filtered, decode=False)[['task', 'status']].groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
df_std_error = load.file(input_std_error, decode=False)[['task', 'status']].groupby("task").count()[['status']].rename({'status': 'Count'}, axis=1)
# dedupl
df = pd.read_csv(input_dedupl, sep="@", header=None) # char not found in the log file so we can extract all lines as one column
num_passed, num_total = [int(x) for x in df[df[0].str.contains("REMAINING MOLECULES")].iloc[0][0].split("MOLECULES:")[1].split("/")]
num_filtered = num_total - num_passed
df_dedupl = pd.DataFrame({'Category': ['unique', 'duplicate'], 'Count': [num_passed, num_filtered]})
# depict
pass # nothing to do here at the moment since I never saw any problem at this step
# merge data
def get_df_dedupl(WD: str) -> DataFrame:
"""Get a DF summarizing the results of the deduplication step.
:param WD: the directory of the std step
:return: a DF summarizing results of the deduplication step
"""
logger.info("PREP -- COMPUTING DEDUPL RESULTS")
# iterate over the log files to count status
pattern = ".*([0-9]{3})?_dedupl.log"
chunks = _get_chunks(f"{WD}/log", pattern)
chunks = [c for c in chunks if c.split('.')[-1] == 'log'] # ###### quick and dirty
# print(f"{chunks=}")
logger.info(f"PREP -- FOUND {len(chunks):,d} CHUNKS")
# initiate counts
num_tot = 0
num_passed = 0
num_filtered = 0
for c in chunks:
df = pd.read_csv(c, sep="@", header=None) # char not found in the log file so we can extract all lines as one column
passed, total = [int(x) for x in df[df[0].str.contains("REMAINING MOLECULES")].iloc[0][0].split("MOLECULES:")[1].split("/")]
num_passed += passed
num_filtered += total - passed
num_tot += total
# create a dataframe with counts
df = pd.DataFrame({'Category': ['unique', 'duplicate'], 'Count': [num_passed, num_filtered]})
logger.info(f"PREP -- RESULTS FOR DEDUPL:\n\n{df}\n")
return df
def get_dfs_prep_frags(WD: str) -> Tuple[DataFrame]:
"""
Same as dfs_prep but adapted to the fragments protocol.
The latter differs from other protocols in several ways:
    - no deglycosylation step
    - 2 competing Murcko scaffold extraction schemes:
- A: load > murcko > std
- B: load > std > murcko > stdms
- concatenation of A and B to prioritize B over A
(with std without filters)
"""
p = Path(WD)
WD_LOAD = [str(x) for x in list(p.glob("*_load"))][0]
WD_STD = [str(x) for x in list(p.glob("*_std*"))]
print(WD_STD)
WD_DEDUPL = [str(x) for x in list(p.glob("*_dedupl"))][0]
sys.exit(0)
def get_dfs_prep(WD: str) -> Tuple[DataFrame]:
"""Get a list of DFs summarizing the whole preprocess superstep: load, deglyco, std and dedupl.
- DF_deglyco is the detailed summary of deglycosylation appended with the number of mols that did not get processed because they could not be loaded (NA).
- DF_prep_filtered is the detailed summary of std and dedupl
- DF_prep_error is the detailed summary of std and load
- DF_prep_all is the general summary with the final number of passed, filtered and error molecules.
:param WD: the main directory of the dataset data (i.e. 'natural/dnp/data')
:return: a list of DFs of interest: [DF_deglyco, DF_prep_filtered, DF_prep_error, DF_prep_all]
"""
logger.info("PREP -- COMPUTE RESULTS FOR PREPROCESS")
logger.info("PREP -- PROPRESS CONTAINS LOAD, DEGLYCO, STD AND DEDUPL STEPS")
# define subfolders
p = Path(WD)
WD_LOAD = [str(x) for x in list(p.glob("*_load"))][0]
# there is no deglyco step anymore for fragments
WD_DEGLYCO = [str(x) for x in list(Path(WD).glob("*_deglyco"))]
if len(WD_DEGLYCO) < 1:
logging.debug(f"NO DEGLYCO STEP COULD BE FOUND AT '{WD}'")
WD_DEGLYCO = ''
else:
WD_DEGLYCO = WD_DEGLYCO[0]
WD_STD = [str(x) for x in list(p.glob("*_std"))][0]
WD_DEDUPL = [str(x) for x in list(p.glob("*_dedupl"))][0]
# get dfs
df_load = get_df_load(WD_LOAD)
df_deglyco = get_df_deglyco(WD_DEGLYCO)
df_std_passed = get_df_std_passed(WD_STD)
df_std_filtered = get_df_std_filtered(WD_STD)
df_std_error = get_df_std_error(WD_STD)
df_dedupl = get_df_dedupl(WD_DEDUPL)
# get total of molecules in input
num_mols_tot = df_load['Count'].sum()
if WD_DEGLYCO != '':
# count not loaded molecules as well in deglyco
num_mols_deglyco_na = df_load[df_load['Category'] == 'cannot_load']['Count']
df_deglyco = pd.concat([df_deglyco, pd.DataFrame({'Category': ['NA'], 'Count': [num_mols_deglyco_na]})])
df_deglyco.reset_index(inplace=True, drop=True)
df_deglyco['Count'] = df_deglyco['Count'].astype(int)
df_deglyco['Perc_Status'] = df_deglyco['Count'].map(lambda x: f"{x/num_mols_tot:.2%}")
logger.info(f"PREP -- RESULTS FOR DEGLYCOSYLATION:\n\n{df_deglyco}\n")
else:
df_deglyco = None
# gather all filtered molecules
df_dedupl_dupl = df_dedupl[df_dedupl['Category'] == 'duplicate']
num_dedupl_dupl = df_dedupl_dupl['Count'].sum()
df_std_filtered = pd.concat([df_std_filtered, df_dedupl_dupl], sort=True)
    # include filter categories that never occurred so they show up with a zero count in df_std_filtered
filters = ['empty', 'hac', 'molweight', 'nrings', 'medchem', 'timeout', 'duplicate']
df_std_filtered.set_index('Category', inplace=True)
df_std_filtered = df_std_filtered.reindex(filters)
df_std_filtered.reset_index(inplace=True)
df_std_filtered.fillna(0, inplace=True)
df_std_filtered['Count'] = df_std_filtered['Count'].astype(int)
df_std_filtered['Perc_Status'] = df_std_filtered['Count'].map(lambda x: f"{x/num_mols_tot:.2%}")
logger.info(f"PREP -- RESULTS FOR STD_FILTERED:\n\n{df_std_filtered}\n")
# gather all molecules that raised an error
df_std_error = pd.concat([df_std_error, df_load[df_load['Category'] == 'cannot_load']], sort=True)
    # include error categories that never occurred so they show up with a zero count in df_std_error
errors = ['cannot_load', 'initiate_mol', 'disconnect_metal', 'sanitize', 'remove_isotopes', 'normalize', 'uncharge', 'canonicalize', 'remove_stereo']
df_std_error.set_index('Category', inplace=True)
df_std_error = df_std_error.reindex(errors)
df_std_error.reset_index(inplace=True)
df_std_error.fillna(0, inplace=True)
df_std_error['Count'] = df_std_error['Count'].astype(int)
df_std_error['Perc_Status'] = df_std_error['Count'].map(lambda x: f"{x/num_mols_tot:.2%}")
logger.info(f"PREP -- RESULTS FOR STD_ERRORS:\n\n{df_std_error}\n")
# general count for passed/filtered/errors
num_tot_filtered = df_std_filtered['Count'].sum()
    num_tot_passed = df_std_passed['Count'].sum() - num_dedupl_dupl  # dedupl happens after std, so std still counts passed mols that are later filtered as duplicates
num_tot_errors = df_std_error['Count'].sum()
df_std_all = pd.DataFrame({'Category': ['passed', 'filtered', 'errors'], 'Count': [num_tot_passed, num_tot_filtered, num_tot_errors]})
df_std_all['Perc_Status'] = df_std_all['Count'].map(lambda x: f"{x/num_mols_tot:.2%}")
logger.info(f"PREP -- RESULTS FOR STD_ALL:\n\n{df_std_all}\n")
return {'df_prep_overview': df_std_all, 'df_prep_filtered': df_std_filtered, 'df_prep_error': df_std_error, 'df_prep_deglyco': df_deglyco}
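if __name__ == '__main__':
    # Minimal illustrative sketch, not part of the npfc pipeline: it shows the
    # reindex/fillna pattern used in get_dfs_prep to report a zero count for
    # every category that never occurred. The input counts below are made up.
    df_demo = pd.DataFrame({'Category': ['hac', 'medchem'], 'Count': [3, 1]})
    filters_demo = ['empty', 'hac', 'molweight', 'nrings', 'medchem', 'timeout', 'duplicate']
    df_demo = df_demo.set_index('Category').reindex(filters_demo).reset_index()
    df_demo['Count'] = df_demo['Count'].fillna(0).astype(int)
    print(df_demo)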
|
{"hexsha": "62b3838c10149d00d3030d186d99b63fdf3343ca", "size": 11627, "ext": "py", "lang": "Python", "max_stars_repo_path": "npfc/report.py", "max_stars_repo_name": "mpimp-comas/npfc", "max_stars_repo_head_hexsha": "316156a8826759d767ef16833cd4f0670868693e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-07-22T07:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T11:23:53.000Z", "max_issues_repo_path": "npfc/report.py", "max_issues_repo_name": "mpimp-comas/npfc", "max_issues_repo_head_hexsha": "316156a8826759d767ef16833cd4f0670868693e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-08-10T11:48:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-13T07:53:42.000Z", "max_forks_repo_path": "npfc/report.py", "max_forks_repo_name": "mpimp-comas/npfc", "max_forks_repo_head_hexsha": "316156a8826759d767ef16833cd4f0670868693e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-22T07:05:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-13T11:03:43.000Z", "avg_line_length": 38.5, "max_line_length": 187, "alphanum_fraction": 0.6535649781, "include": true, "reason": "import numpy,import networkx", "num_tokens": 3156}
|
[STATEMENT]
lemma ACI_norm_PLUS:
"list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys \<Longrightarrow> \<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys \<Longrightarrow> \<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>
[PROOF STEP]
proof (induct rule: list_all2_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<guillemotleft>PLUS []\<guillemotright> = \<guillemotleft>PLUS []\<guillemotright>
2. \<And>x xs y ys. \<lbrakk>\<guillemotleft>x\<guillemotright> = \<guillemotleft>y\<guillemotright>; list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys; \<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>\<rbrakk> \<Longrightarrow> \<guillemotleft>PLUS (x # xs)\<guillemotright> = \<guillemotleft>PLUS (y # ys)\<guillemotright>
[PROOF STEP]
case (Cons x xs y ys)
[PROOF STATE]
proof (state)
this:
\<guillemotleft>x\<guillemotright> = \<guillemotleft>y\<guillemotright>
list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys
\<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>
goal (2 subgoals):
1. \<guillemotleft>PLUS []\<guillemotright> = \<guillemotleft>PLUS []\<guillemotright>
2. \<And>x xs y ys. \<lbrakk>\<guillemotleft>x\<guillemotright> = \<guillemotleft>y\<guillemotright>; list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys; \<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>\<rbrakk> \<Longrightarrow> \<guillemotleft>PLUS (x # xs)\<guillemotright> = \<guillemotleft>PLUS (y # ys)\<guillemotright>
[PROOF STEP]
hence "length xs = length ys"
[PROOF STATE]
proof (prove)
using this:
\<guillemotleft>x\<guillemotright> = \<guillemotleft>y\<guillemotright>
list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys
\<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>
goal (1 subgoal):
1. length xs = length ys
[PROOF STEP]
by (elim list_all2_lengthD)
[PROOF STATE]
proof (state)
this:
length xs = length ys
goal (2 subgoals):
1. \<guillemotleft>PLUS []\<guillemotright> = \<guillemotleft>PLUS []\<guillemotright>
2. \<And>x xs y ys. \<lbrakk>\<guillemotleft>x\<guillemotright> = \<guillemotleft>y\<guillemotright>; list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys; \<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>\<rbrakk> \<Longrightarrow> \<guillemotleft>PLUS (x # xs)\<guillemotright> = \<guillemotleft>PLUS (y # ys)\<guillemotright>
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
length xs = length ys
goal (1 subgoal):
1. \<guillemotleft>PLUS (x # xs)\<guillemotright> = \<guillemotleft>PLUS (y # ys)\<guillemotright>
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
length xs = length ys
\<guillemotleft>x\<guillemotright> = \<guillemotleft>y\<guillemotright>
list_all2 (\<lambda>r s. \<guillemotleft>r\<guillemotright> = \<guillemotleft>s\<guillemotright>) xs ys
\<guillemotleft>PLUS xs\<guillemotright> = \<guillemotleft>PLUS ys\<guillemotright>
goal (1 subgoal):
1. \<guillemotleft>PLUS (x # xs)\<guillemotright> = \<guillemotleft>PLUS (y # ys)\<guillemotright>
[PROOF STEP]
by (induct xs ys rule: list_induct2) auto
[PROOF STATE]
proof (state)
this:
\<guillemotleft>PLUS (x # xs)\<guillemotright> = \<guillemotleft>PLUS (y # ys)\<guillemotright>
goal (1 subgoal):
1. \<guillemotleft>PLUS []\<guillemotright> = \<guillemotleft>PLUS []\<guillemotright>
[PROOF STEP]
qed simp
|
{"llama_tokens": 1597, "file": "MSO_Regex_Equivalence_Pi_Derivatives", "length": 8}
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of CARS
# (see https://github.com/CNES/cars).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Test module for cars/steps/matching/sparse_matching.py
"""
# Standard imports
from __future__ import absolute_import
# Third party imports
import numpy as np
import pytest
# CARS imports
from cars.steps.epi_rectif import resampling
from cars.steps.matching import sparse_matching
# CARS Tests imports
from ...helpers import absolute_data_path
@pytest.mark.unit_tests
def test_dataset_matching():
"""
Test dataset_matching method
"""
region = [200, 250, 320, 400]
img1 = absolute_data_path("input/phr_reunion/left_image.tif")
img2 = absolute_data_path("input/phr_reunion/right_image.tif")
mask1 = absolute_data_path("input/phr_reunion/left_mask.tif")
mask2 = absolute_data_path("input/phr_reunion/right_mask.tif")
nodata1 = 0
nodata2 = 0
grid1 = absolute_data_path(
"input/preprocessing_input/left_epipolar_grid_reunion.tif"
)
grid2 = absolute_data_path(
"input/preprocessing_input/right_epipolar_grid_reunion.tif"
)
epipolar_size_x = 596
epipolar_size_y = 596
left = resampling.resample_image(
img1,
grid1,
[epipolar_size_x, epipolar_size_y],
region=region,
nodata=nodata1,
mask=mask1,
)
right = resampling.resample_image(
img2,
grid2,
[epipolar_size_x, epipolar_size_y],
region=region,
nodata=nodata2,
mask=mask2,
)
matches = sparse_matching.dataset_matching(left, right)
# Uncomment to update baseline
# np.save(absolute_data_path("ref_output/matches.npy"), matches)
matches_ref = np.load(absolute_data_path("ref_output/matches.npy"))
np.testing.assert_allclose(matches, matches_ref)
# Case with no matches
region = [0, 0, 2, 2]
left = resampling.resample_image(
img1,
grid1,
[epipolar_size_x, epipolar_size_y],
region=region,
nodata=nodata1,
mask=mask1,
)
right = resampling.resample_image(
img1,
grid1,
[epipolar_size_x, epipolar_size_y],
region=region,
nodata=nodata1,
mask=mask1,
)
matches = sparse_matching.dataset_matching(left, right)
assert matches.shape == (0, 4)
@pytest.mark.unit_tests
def test_remove_epipolar_outliers():
"""
Test remove epipolar outliers function
"""
matches_file = absolute_data_path(
"input/preprocessing_input/matches_reunion.npy"
)
matches = np.load(matches_file)
matches_filtered = sparse_matching.remove_epipolar_outliers(matches)
nb_filtered_points = matches.shape[0] - matches_filtered.shape[0]
assert nb_filtered_points == 2
@pytest.mark.unit_tests
def test_compute_disparity_range():
"""
Test compute disparity range function
"""
matches_file = absolute_data_path(
"input/preprocessing_input/matches_reunion.npy"
)
matches = np.load(matches_file)
matches_filtered = sparse_matching.remove_epipolar_outliers(matches)
dispmin, dispmax = sparse_matching.compute_disparity_range(matches_filtered)
assert dispmin == -3.1239416122436525
assert dispmax == 3.820396270751972
|
{"hexsha": "5322fc0ca4ad05ce1f6a140480be0473f49eaddb", "size": 3889, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/steps/matching/test_sparse_matching.py", "max_stars_repo_name": "fchapoton/cars", "max_stars_repo_head_hexsha": "c145e12c8b984d5c496c29cff474628044f6216e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 134, "max_stars_repo_stars_event_min_datetime": "2020-07-30T08:51:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:56:01.000Z", "max_issues_repo_path": "tests/steps/matching/test_sparse_matching.py", "max_issues_repo_name": "fchapoton/cars", "max_issues_repo_head_hexsha": "c145e12c8b984d5c496c29cff474628044f6216e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-09-15T19:27:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T08:11:36.000Z", "max_forks_repo_path": "tests/steps/matching/test_sparse_matching.py", "max_forks_repo_name": "fchapoton/cars", "max_forks_repo_head_hexsha": "c145e12c8b984d5c496c29cff474628044f6216e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-08-06T07:29:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:40:39.000Z", "avg_line_length": 26.8206896552, "max_line_length": 80, "alphanum_fraction": 0.7019799434, "include": true, "reason": "import numpy", "num_tokens": 965}
|
import numpy as np
import random
from cfDNA_Fragment import Fragment
class FragmentCopies:
"""
A class used to represent and run the assay process
...
Attributes
----------
fragment_copies : list
A storage of Fragment objects that is used for each step of the process
Methods
-------
cfDNA_convert(input = 15, genomic_equivalent = 330)
Converts the cfDNA input amount (ng) into its genomic equivalent
    ligation_process()
        Links unique molecular barcodes to individual cfDNA fragments by running each fragment's ligation method;
        the ligation efficiency is drawn from a normal distribution centred on 0.5
    pcr(cycles = 8, pcr_efficiency = None)
        PCR pre-amplification during library preparation before target enrichment;
        the per-cycle efficiency is drawn from a normal distribution centred on 1.8 when not given
    target_enrichment(panel_size = 200, capture_efficiency = 0.5, cycles = 12, pcr_efficiency = None)
        Captures fragments within selected target regions and runs PCR post-capture amplification
calculate():
Count the number of unique cfDNA fragments after entire process and calculate GE recovery rate and duplication rate
"""
fragment_amount_check = [] # Adds total fragment count to list after each simulation for graphing later
unique_count_check = [] # Adds unique fragment count to list after each simulation for graphing later
ge_recovery_rate_check = [] # Adds GE recovery rate to list after each simulation for graphing later
def __init__(self):
self.fragment_copies = []
def cfDNA_convert(self, input=15, genomic_equivalent=330):
"""
Parameters
----------
input : int
cfDNA input amount (ng)
Default = 15 ng
genomic_equivalent : int
Around 330 double stranded whole genome coverage per ng of cfDNA input
Default = 330
"""
for i in range(input * genomic_equivalent):
fragment = Fragment()
self.fragment_copies.append(fragment)
    def ligation_process(self):
        """
        Attach unique molecular barcodes to individual cfDNA fragments.
        The ligation efficiency (fraction of fragments that receive a
        barcode) is drawn from a normal distribution centred on 0.5
        (sd 0.1); fragments without a barcode are discarded.
        """
        lig_efficiency = np.random.normal(0.5, 0.1)
        # remove the fragments that did not get a barcode attached
        num_failed = int((1 - lig_efficiency) * len(self.fragment_copies))
        for _ in range(num_failed):
            self.fragment_copies.pop(random.randrange(len(self.fragment_copies)))
        for fragment in self.fragment_copies:
            fragment.ligation()
    def pcr(self, cycles=8, pcr_efficiency=None):
        """
        Parameters
        ----------
        cycles : int
            Number of PCR cycles during pre-amplification
            Default = 8 cycles
        pcr_efficiency : float
            Amplification factor per cycle; drawn from a normal distribution
            centred on 1.8 (sd 0.1) when not given
        """
        if pcr_efficiency is None:
            # sample per call; a default argument would be evaluated only once,
            # at function definition time
            pcr_efficiency = np.random.normal(1.8, 0.1)
        list_size = len(self.fragment_copies)
        # after amplification each fragment has ~pcr_efficiency ** cycles copies in total
        copies_to_add = int(pcr_efficiency ** cycles) - 1
        for i in range(list_size):
            for _ in range(copies_to_add):
                self.fragment_copies.append(self.fragment_copies[i])
    def target_enrichment(self, panel_size=200, capture_efficiency=0.5, cycles=12, pcr_efficiency=None):
        """
        Parameters
        ----------
        panel_size : int
            Selected target region (kb) that fragments are captured from
            Default = 200 kb
        capture_efficiency : float
            Percentage of fragments that are captured within target region
            Default = 0.5
        cycles : int
            Number of pcr cycles during post-capture amplification
            Default = 12 cycles
        pcr_efficiency : float
            Amplification factor per cycle; drawn from a normal distribution
            centred on 1.8 (sd 0.1) when not given
        """
        # discard the fragments that fall outside the captured fraction
        num_lost = int((1 - capture_efficiency) * len(self.fragment_copies))
        for _ in range(num_lost):
            self.fragment_copies.pop(random.randrange(len(self.fragment_copies)))
        self.pcr(cycles, pcr_efficiency)
def calculate(self):
fragment_amount = len(self.fragment_copies)
unique_count = len(set(x.barcode for x in self.fragment_copies))
        # recovery is measured against the default cfDNA_convert input (15 ng * 330 GE per ng)
        ge_recovery_rate = unique_count / (15 * 330)
FragmentCopies.fragment_amount_check.append(fragment_amount)
FragmentCopies.unique_count_check.append(unique_count)
FragmentCopies.ge_recovery_rate_check.append(ge_recovery_rate)
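if __name__ == '__main__':
    # Minimal sketch of one simulation run, not part of the class itself. It
    # assumes cfDNA_Fragment.Fragment (imported above) provides the ligation()
    # method and barcode attribute used in calculate(). Small parameters keep
    # the run fast; since calculate() measures recovery against the default
    # 15 ng input, the printed rate is only illustrative here.
    copies = FragmentCopies()
    copies.cfDNA_convert(input=1, genomic_equivalent=50)
    copies.ligation_process()
    copies.pcr(cycles=2)
    copies.target_enrichment(cycles=2)
    copies.calculate()
    print('fragments:', FragmentCopies.fragment_amount_check[-1],
          'unique:', FragmentCopies.unique_count_check[-1])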
|
{"hexsha": "f6790b670d26686b684c9b9d24272e50cca94980", "size": 4393, "ext": "py", "lang": "Python", "max_stars_repo_path": "fragment_copies.py", "max_stars_repo_name": "shen-y-eric/Genomic-Recovery-Simulation", "max_stars_repo_head_hexsha": "31e32e659c4e706bd9245b15f0181eee919d32e8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fragment_copies.py", "max_issues_repo_name": "shen-y-eric/Genomic-Recovery-Simulation", "max_issues_repo_head_hexsha": "31e32e659c4e706bd9245b15f0181eee919d32e8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fragment_copies.py", "max_forks_repo_name": "shen-y-eric/Genomic-Recovery-Simulation", "max_forks_repo_head_hexsha": "31e32e659c4e706bd9245b15f0181eee919d32e8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7835820896, "max_line_length": 126, "alphanum_fraction": 0.6462554063, "include": true, "reason": "import numpy", "num_tokens": 974}
|
import collections
import warnings
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import patches as plt_patches
from DLBio.helpers import safe_division
class IRectangle(object):
"""Abstract base class for a rectagle. Intended to specify the positions
of each dimension to reduce bugs.
"""
x_pos = 0
y_pos = 1
w_pos = 2
h_pos = 3
def __init__(self, **kwargs):
"""
Parameters
----------
confidence : float in [0, 1]
the value defines how certain this rectangle contains an object.
"""
self.id = kwargs.pop('id', None)
self.confidence = kwargs.pop('confidence', 1.0)
if len(kwargs.keys()) > 0:
warnings.warn(
                'Found unused keys: {}'.format(kwargs.keys())
)
def __repr__(self):
return '{}: {}'.format(self.__class__.__name__, self.__dict__)
def add_id(self, id):
self.id = id
class TopLeftRectangle(IRectangle):
"""Rectangle containing pixel coordinates. x and y determine the top-left
point of the rectangle.
"""
def __init__(self, **kwargs):
self.x = kwargs.pop('x')
self.y = kwargs.pop('y')
self.h = kwargs.pop('h')
self.w = kwargs.pop('w')
super(TopLeftRectangle, self).__init__(**kwargs)
def estimate_jaccard_index(self, rectangle):
"""Compute the intersection of the object and another TopLeftRectangle
and divide it by the union.
Parameters
----------
rectangle : TopLeftRectangle
Returns
-------
jaccard_index : float
"""
# estimate position of intersection rectangle
left = max(self.x, rectangle.x)
top = max(self.y, rectangle.y)
right = min(self.x + self.w, rectangle.x + rectangle.w)
bottom = min(self.y + self.h, rectangle.y + rectangle.h)
a = max(right - left, 0)
b = max(bottom - top, 0)
# estimate areas
area_intersection = a * b
area_union = self.w * self.h + rectangle.w * rectangle.h - area_intersection
jaccard_index = safe_division(area_intersection, area_union)
return jaccard_index
def get_array(self):
output = np.zeros(4)
output[self.x_pos] = self.x
output[self.y_pos] = self.y
output[self.w_pos] = self.w
output[self.h_pos] = self.h
return output
def point_is_within_rectangle(self, x, y):
return x >= self.x and x <= self.x + self.w \
and y >= self.y and y <= self.y + self.h
def get_viewable(self, color=[255, 0, 0], type_id='unknown'):
"""Return a ViewableTopLeftRectangle with the current parameters.
Parameters
----------
color : list, optional
in which color shall the rectangle be displayed
(the default is [255, 0, 0], which is blue)
type_id : str, optional
string specifying the type of rectangle (e.g. positive, negative, etc.).
The default is 'unknown'.
Returns
-------
ViewableTopLeftRectangle
"""
        params = dict(self.__dict__)  # copy so the instance __dict__ is not mutated
        params.update({'color': color, 'type_id': type_id})
        return ViewableTopLeftRectangle(**params)
def to_c_rectangle(self, h_norm, w_norm):
"""Turn the rectangle into a CenterNormalizedRectangle.
Pass the image size as well to know how the rectangle needs to
be scaled.
Parameters
----------
h_norm : float
scaling factor
w_norm : float
scaling factor
Returns
-------
CenterNormalizedRectangle
"""
        if h_norm is None:
            h_norm = float(self.h)
        elif not isinstance(h_norm, float):
            raise ValueError('h_norm is not a float number')
        if w_norm is None:
            w_norm = float(self.w)
        elif not isinstance(w_norm, float):
            raise ValueError('w_norm is not a float number')
        # center in pixel coordinates, normalized by the image dimensions;
        # widths are scaled by w_norm and heights by h_norm, matching the
        # inverse transform in CenterNormalizedRectangle.to_rectangle
        cx = (self.x + .5 * self.w) / w_norm
        cy = (self.y + .5 * self.h) / h_norm
        return CenterNormalizedRectangle(
            cx=cx,
            cy=cy,
            w=self.w / w_norm,
            h=self.h / h_norm,
        )
class ViewableTopLeftRectangle(TopLeftRectangle):
def __init__(self, **kwargs):
self.color = kwargs.pop('color', [255, 0, 0])
self.type_id = kwargs.pop('type_id', 'unknown')
super(ViewableTopLeftRectangle, self).__init__(**kwargs)
def add_cv_rectangle(self, image, color=None):
"""Draw a rectangle on the image via openCV
Parameters
----------
image : np.array
"""
if color is None:
color = self.color
cv2.rectangle(
image,
(self.x, self.y),
(self.x + self.w, self.y + self.h),
color
)
def get_pyplot_patch(self):
"""Return a pyplot platch that can by added to a plot axis.
Returns
-------
plt_patches
"""
xy = [self.x, self.y]
h_box = self.h
w_box = self.w
return plt_patches.Rectangle(
xy, w_box, h_box,
linewidth=1,
edgecolor=np.array(self.color).astype('float') / 255.0,
facecolor='none')
class CenterNormalizedRectangle(IRectangle):
"""Rectangle containing normalized positions (the typical output of a
single shot detector network). Furthermore, cx and cy define the center
of the rectangle.
"""
def __init__(self, **kwargs):
self.cx = kwargs.pop('cx')
self.cy = kwargs.pop('cy')
self.h = kwargs.pop('h')
self.w = kwargs.pop('w')
super(CenterNormalizedRectangle, self).__init__(**kwargs)
def get_normalized_top_left(self):
# function is needed only for ssd-label computations
x = self.cx - .5 * self.w
y = self.cy - .5 * self.h
return TopLeftRectangle(
x=x,
y=y,
w=self.w,
h=self.h
)
def to_rectangle(self, h_norm, w_norm):
"""Returns a topleft rectangle
Parameters
----------
h_norm, w_norm : float
The dimensions are multiplied by the current position values
to compute the pixel positions. Be careful to choose the right
value here!
Returns
-------
TopLeftRectangle
"""
# get top left normalized coordinate
x = self.cx - .5 * self.w
y = self.cy - .5 * self.h
# compute float normalized values to pixel coordinates
x_new = int(np.round(x * w_norm))
y_new = int(np.round(y * h_norm))
h_new = int(np.round(self.h * h_norm))
w_new = int(np.round(self.w * w_norm))
return TopLeftRectangle(
x=x_new,
y=y_new,
w=w_new,
h=h_new,
)
def estimate_jaccard_index(self, c_rectangle, h_norm, w_norm):
# special case for ssd label computations
if h_norm == 1.0 and w_norm == 1.0:
rect1 = self.get_normalized_top_left()
rect2 = c_rectangle.get_normalized_top_left()
else:
rect1 = self.to_rectangle(h_norm, w_norm)
rect2 = c_rectangle.to_rectangle(h_norm, w_norm)
return rect1.estimate_jaccard_index(rect2)
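if __name__ == '__main__':
    # Minimal sketch, not part of the module: IoU of two overlapping boxes.
    # Box a is 10x10 at (0, 0) and box b is 10x10 at (5, 0); their
    # intersection is 5 * 10 = 50 and union 100 + 100 - 50 = 150, so the
    # Jaccard index is 1/3.
    a = TopLeftRectangle(x=0, y=0, w=10, h=10)
    b = TopLeftRectangle(x=5, y=0, w=10, h=10)
    print(a.estimate_jaccard_index(b))  # -> 0.333...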
|
{"hexsha": "3a3f7eeb0c8125aad0296704e4e38198b196b8b9", "size": 7507, "ext": "py", "lang": "Python", "max_stars_repo_path": "ssd/rectangles.py", "max_stars_repo_name": "pgruening/dlbio", "max_stars_repo_head_hexsha": "0c4e468bcd5d7e298fbecba13003bcae36889486", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-08T11:14:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-08T11:14:48.000Z", "max_issues_repo_path": "ssd/rectangles.py", "max_issues_repo_name": "pgruening/dlbio", "max_issues_repo_head_hexsha": "0c4e468bcd5d7e298fbecba13003bcae36889486", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-03-24T18:01:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:17:24.000Z", "max_forks_repo_path": "ssd/rectangles.py", "max_forks_repo_name": "pgruening/dlbio", "max_forks_repo_head_hexsha": "0c4e468bcd5d7e298fbecba13003bcae36889486", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-29T10:31:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T10:31:28.000Z", "avg_line_length": 28.9845559846, "max_line_length": 84, "alphanum_fraction": 0.5600106567, "include": true, "reason": "import numpy", "num_tokens": 1726}
|
(* Title: Tarski's geometry
Author: Tim Makarios <tjm1983 at gmail.com>, 2012
Maintainer: Tim Makarios <tjm1983 at gmail.com>
*)
header "Tarski's geometry"
theory Tarski
imports Complex_Main Miscellany Metric
begin
subsection "The axioms"
text {* The axioms, and all theorems beginning with \emph{th}
followed by a number, are based on corresponding axioms and
theorems in \cite{schwabhauser}. *}
locale tarski_first3 =
fixes C :: "'p \<Rightarrow> 'p \<Rightarrow> 'p \<Rightarrow> 'p \<Rightarrow> bool" ("_ _ \<congruent> _ _" [99,99,99,99] 50)
assumes A1: "\<forall>a b. a b \<congruent> b a"
and A2: "\<forall>a b p q r s. a b \<congruent> p q \<and> a b \<congruent> r s \<longrightarrow> p q \<congruent> r s"
and A3: "\<forall>a b c. a b \<congruent> c c \<longrightarrow> a = b"
locale tarski_first5 = tarski_first3 +
fixes B :: "'p \<Rightarrow> 'p \<Rightarrow> 'p \<Rightarrow> bool"
assumes A4: "\<forall>q a b c. \<exists>x. B q a x \<and> a x \<congruent> b c"
and A5: "\<forall>a b c d a' b' c' d'. a \<noteq> b \<and> B a b c \<and> B a' b' c'
\<and> a b \<congruent> a' b' \<and> b c \<congruent> b' c' \<and> a d \<congruent> a' d' \<and> b d \<congruent> b' d'
\<longrightarrow> c d \<congruent> c' d'"
locale tarski_absolute_space = tarski_first5 +
assumes A6: "\<forall>a b. B a b a \<longrightarrow> a = b"
and A7: "\<forall>a b c p q. B a p c \<and> B b q c \<longrightarrow> (\<exists>x. B p x b \<and> B q x a)"
and A11: "\<forall>X Y. (\<exists>a. \<forall>x y. x \<in> X \<and> y \<in> Y \<longrightarrow> B a x y)
\<longrightarrow> (\<exists>b. \<forall>x y. x \<in> X \<and> y \<in> Y \<longrightarrow> B x b y)"
locale tarski_absolute = tarski_absolute_space +
assumes A8: "\<exists>a b c. \<not> B a b c \<and> \<not> B b c a \<and> \<not> B c a b"
and A9: "\<forall>p q a b c. p \<noteq> q \<and> a p \<congruent> a q \<and> b p \<congruent> b q \<and> c p \<congruent> c q
\<longrightarrow> B a b c \<or> B b c a \<or> B c a b"
locale tarski_space = tarski_absolute_space +
assumes A10: "\<forall>a b c d t. B a d t \<and> B b d c \<and> a \<noteq> d
\<longrightarrow> (\<exists>x y. B a b x \<and> B a c y \<and> B x t y)"
locale tarski = tarski_absolute + tarski_space
subsection "Semimetric spaces satisfy the first three axioms"
context semimetric
begin
definition smC :: "'p \<Rightarrow> 'p \<Rightarrow> 'p \<Rightarrow> 'p \<Rightarrow> bool" ("_ _ \<congruent>\<^sub>s\<^sub>m _ _" [99,99,99,99] 50)
where [simp]: "a b \<congruent>\<^sub>s\<^sub>m c d \<equiv> dist a b = dist c d"
end
sublocale semimetric < tarski_first3 smC
proof
from symm show "\<forall>a b. a b \<congruent>\<^sub>s\<^sub>m b a" by simp
show "\<forall>a b p q r s. a b \<congruent>\<^sub>s\<^sub>m p q \<and> a b \<congruent>\<^sub>s\<^sub>m r s \<longrightarrow> p q \<congruent>\<^sub>s\<^sub>m r s" by simp
show "\<forall>a b c. a b \<congruent>\<^sub>s\<^sub>m c c \<longrightarrow> a = b" by simp
qed
subsection "Some consequences of the first three axioms"
context tarski_first3
begin
notation %invisible C ("_ _ \<equiv> _ _" [99,99,99,99] 50)
lemma A1': "a b \<congruent> b a"
by (simp add: A1)
lemma A2': "\<lbrakk>a b \<congruent> p q; a b \<congruent> r s\<rbrakk> \<Longrightarrow> p q \<congruent> r s"
proof -
assume "a b \<congruent> p q" and "a b \<congruent> r s"
with A2 show ?thesis by blast
qed
lemma A3': "a b \<congruent> c c \<Longrightarrow> a = b"
by (simp add: A3)
theorem th2_1: "a b \<congruent> a b"
proof -
from A2' [of b a a b a b] and A1' [of b a] show ?thesis by simp
qed
theorem th2_2: "a b \<congruent> c d \<Longrightarrow> c d \<congruent> a b"
proof -
assume "a b \<congruent> c d"
with A2' [of a b c d a b] and th2_1 [of a b] show ?thesis by simp
qed
theorem th2_3: "\<lbrakk>a b \<congruent> c d; c d \<congruent> e f\<rbrakk> \<Longrightarrow> a b \<congruent> e f"
proof -
assume "a b \<congruent> c d"
with th2_2 [of a b c d] have "c d \<congruent> a b" by simp
assume "c d \<congruent> e f"
with A2' [of c d a b e f] and `c d \<congruent> a b` show ?thesis by simp
qed
theorem th2_4: "a b \<congruent> c d \<Longrightarrow> b a \<congruent> c d"
proof -
assume "a b \<congruent> c d"
with th2_3 [of b a a b c d] and A1' [of b a] show ?thesis by simp
qed
definition is_segment :: "'p set \<Rightarrow> bool" where
"is_segment X \<equiv> \<exists>x y. X = {x, y}"
definition segments :: "'p set set" where
"segments = {X. is_segment X}"
definition SC :: "'p set \<Rightarrow> 'p set \<Rightarrow> bool" where
"SC X Y \<equiv> \<exists>w x y z. X = {w, x} \<and> Y = {y, z} \<and> w x \<congruent> y z"
definition SC_rel :: "('p set \<times> 'p set) set" where
"SC_rel = {(X, Y) | X Y. SC X Y}"
lemma left_segment_congruence:
assumes "{a, b} = {p, q}" and "p q \<congruent> c d"
shows "a b \<congruent> c d"
proof cases
assume "a = p"
with unordered_pair_element_equality [of a b p q] and `{a, b} = {p, q}`
have "b = q" by simp
with `p q \<congruent> c d` and `a = p` show ?thesis by simp
next
assume "a \<noteq> p"
with `{a, b} = {p, q}` have "a = q" by auto
with unordered_pair_element_equality [of a b q p] and `{a, b} = {p, q}`
have "b = p" by auto
with `p q \<congruent> c d` and `a = q` have "b a \<congruent> c d" by simp
with th2_4 [of b a c d] show ?thesis by simp
qed
lemma right_segment_congruence:
assumes "{c, d} = {p, q}" and "a b \<congruent> p q"
shows "a b \<congruent> c d"
proof -
from th2_2 [of a b p q] and `a b \<congruent> p q` have "p q \<congruent> a b" by simp
with left_segment_congruence [of c d p q a b] and `{c, d} = {p, q}`
have "c d \<congruent> a b" by simp
with th2_2 [of c d a b] show ?thesis by simp
qed
lemma C_SC_equiv: "a b \<congruent> c d = SC {a, b} {c, d}"
proof
assume "a b \<congruent> c d"
with SC_def [of "{a, b}" "{c, d}"] show "SC {a, b} {c, d}" by auto
next
assume "SC {a, b} {c, d}"
with SC_def [of "{a, b}" "{c, d}"]
obtain w x y z where "{a, b} = {w, x}" and "{c, d} = {y, z}" and "w x \<congruent> y z"
by blast
from left_segment_congruence [of a b w x y z] and
`{a, b} = {w, x}` and
`w x \<congruent> y z`
have "a b \<congruent> y z" by simp
with right_segment_congruence [of c d y z a b] and `{c, d} = {y, z}`
show "a b \<congruent> c d" by simp
qed
lemmas SC_refl = th2_1 [simplified]
lemma SC_rel_refl: "refl_on segments SC_rel"
proof -
note refl_on_def [of segments SC_rel]
moreover
{ fix Z
assume "Z \<in> SC_rel"
with SC_rel_def obtain X Y where "Z = (X, Y)" and "SC X Y" by auto
from `SC X Y` and SC_def [of X Y]
have "\<exists>w x. X = {w, x}" and "\<exists>y z. Y = {y, z}" by auto
with is_segment_def [of X] and is_segment_def [of Y]
have "is_segment X" and "is_segment Y" by auto
with segments_def have "X \<in> segments" and "Y \<in> segments" by auto
with `Z = (X, Y)` have "Z \<in> segments \<times> segments" by simp }
hence "SC_rel \<subseteq> segments \<times> segments" by auto
moreover
{ fix X
assume "X \<in> segments"
with segments_def have "is_segment X" by auto
with is_segment_def [of X] obtain x y where "X = {x, y}" by auto
with SC_def [of X X] and SC_refl have "SC X X" by (simp add: C_SC_equiv)
with SC_rel_def have "(X, X) \<in> SC_rel" by simp }
hence "\<forall>X. X \<in> segments \<longrightarrow> (X, X) \<in> SC_rel" by simp
ultimately show ?thesis by simp
qed
lemma SC_sym:
assumes "SC X Y"
shows "SC Y X"
proof -
from SC_def [of X Y] and `SC X Y`
obtain w x y z where "X = {w, x}" and "Y = {y, z}" and "w x \<congruent> y z"
by auto
from th2_2 [of w x y z] and `w x \<congruent> y z` have "y z \<congruent> w x" by simp
with SC_def [of Y X] and `X = {w, x}` and `Y = {y, z}`
show "SC Y X" by (simp add: C_SC_equiv)
qed
lemma SC_sym': "SC X Y = SC Y X"
proof
assume "SC X Y"
with SC_sym [of X Y] show "SC Y X" by simp
next
assume "SC Y X"
with SC_sym [of Y X] show "SC X Y" by simp
qed
lemma SC_rel_sym: "sym SC_rel"
proof -
{ fix X Y
assume "(X, Y) \<in> SC_rel"
with SC_rel_def have "SC X Y" by simp
with SC_sym' have "SC Y X" by simp
with SC_rel_def have "(Y, X) \<in> SC_rel" by simp }
with sym_def [of SC_rel] show ?thesis by blast
qed
lemma SC_trans:
assumes "SC X Y" and "SC Y Z"
shows "SC X Z"
proof -
from SC_def [of X Y] and `SC X Y`
obtain w x y z where "X = {w, x}" and "Y = {y, z}" and "w x \<congruent> y z"
by auto
from SC_def [of Y Z] and `SC Y Z`
obtain p q r s where "Y = {p, q}" and "Z = {r, s}" and "p q \<congruent> r s" by auto
from `Y = {y, z}` and `Y = {p, q}` and `p q \<congruent> r s`
have "y z \<congruent> r s" by (simp add: C_SC_equiv)
with th2_3 [of w x y z r s] and `w x \<congruent> y z` have "w x \<congruent> r s" by simp
with SC_def [of X Z] and `X = {w, x}` and `Z = {r, s}`
show "SC X Z" by (simp add: C_SC_equiv)
qed
lemma SC_rel_trans: "trans SC_rel"
proof -
{ fix X Y Z
assume "(X, Y) \<in> SC_rel" and "(Y, Z) \<in> SC_rel"
with SC_rel_def have "SC X Y" and "SC Y Z" by auto
with SC_trans [of X Y Z] have "SC X Z" by simp
with SC_rel_def have "(X, Z) \<in> SC_rel" by simp }
with trans_def [of SC_rel] show ?thesis by blast
qed
lemma A3_reversed:
assumes "a a \<congruent> b c"
shows "b = c"
proof -
from `a a \<congruent> b c` have "b c \<congruent> a a" by (rule th2_2)
thus "b = c" by (rule A3')
qed
lemma equiv_segments_SC_rel: "equiv segments SC_rel"
by (simp add: equiv_def SC_rel_refl SC_rel_sym SC_rel_trans)
end
subsection "Some consequences of the first five axioms"
context tarski_first5
begin
lemma A4': "\<exists>x. B q a x \<and> a x \<congruent> b c"
by (simp add: A4 [simplified])
theorem th2_8: "a a \<congruent> b b"
proof -
from A4' [of _ a b b] obtain x where "a x \<congruent> b b" by auto
with A3' [of a x b] have "x = a" by simp
with `a x \<congruent> b b` show ?thesis by simp
qed
definition OFS :: "['p,'p,'p,'p,'p,'p,'p,'p] \<Rightarrow> bool" where
"OFS a b c d a' b' c' d' \<equiv>
B a b c \<and> B a' b' c' \<and> a b \<congruent> a' b' \<and> b c \<congruent> b' c' \<and> a d \<congruent> a' d' \<and> b d \<congruent> b' d'"
lemma A5': "\<lbrakk>OFS a b c d a' b' c' d'; a \<noteq> b\<rbrakk> \<Longrightarrow> c d \<congruent> c' d'"
proof -
assume "OFS a b c d a' b' c' d'" and "a \<noteq> b"
with A5 and OFS_def show ?thesis by blast
qed
theorem th2_11:
assumes hypotheses:
"B a b c"
"B a' b' c'"
"a b \<congruent> a' b'"
"b c \<congruent> b' c'"
shows "a c \<congruent> a' c'"
proof cases
assume "a = b"
with `a b \<congruent> a' b'` have "a' = b'" by (simp add: A3_reversed)
with `b c \<congruent> b' c'` and `a = b` show ?thesis by simp
next
assume "a \<noteq> b"
moreover
note A5' [of a b c a a' b' c' a'] and
unordered_pair_equality [of a c] and
unordered_pair_equality [of a' c']
moreover
from OFS_def [of a b c a a' b' c' a'] and
hypotheses and
th2_8 [of a a'] and
unordered_pair_equality [of a b] and
unordered_pair_equality [of a' b']
have "OFS a b c a a' b' c' a'" by (simp add: C_SC_equiv)
ultimately show ?thesis by (simp add: C_SC_equiv)
qed
lemma A4_unique:
assumes "q \<noteq> a" and "B q a x" and "a x \<congruent> b c"
and "B q a x'" and "a x' \<congruent> b c"
shows "x = x'"
proof -
from SC_sym' and SC_trans and C_SC_equiv and `a x' \<congruent> b c` and `a x \<congruent> b c`
have "a x \<congruent> a x'" by blast
with th2_11 [of q a x q a x'] and `B q a x` and `B q a x'` and SC_refl
have "q x \<congruent> q x'" by simp
with OFS_def [of q a x x q a x x'] and
`B q a x` and
SC_refl and
`a x \<congruent> a x'`
have "OFS q a x x q a x x'" by simp
with A5' [of q a x x q a x x'] and `q \<noteq> a` have "x x \<congruent> x x'" by simp
thus "x = x'" by (rule A3_reversed)
qed
theorem th2_12:
assumes "q \<noteq> a"
shows "\<exists>!x. B q a x \<and> a x \<congruent> b c"
using `q \<noteq> a` and A4' and A4_unique
by blast
end
subsection "Simple theorems about betweenness"
theorem (in tarski_first5) th3_1: "B a b b"
proof -
from A4 [rule_format, of a b b b] obtain x where "B a b x" and "b x \<congruent> b b" by auto
from A3 [rule_format, of b x b] and `b x \<congruent> b b` have "b = x" by simp
with `B a b x` show "B a b b" by simp
qed
context tarski_absolute_space
begin
lemma A6':
assumes "B a b a"
shows "a = b"
proof -
from A6 and `B a b a` show "a = b" by simp
qed
lemma A7':
assumes "B a p c" and "B b q c"
shows "\<exists>x. B p x b \<and> B q x a"
proof -
from A7 and `B a p c` and `B b q c` show ?thesis by blast
qed
lemma A11':
assumes "\<forall> x y. x \<in> X \<and> y \<in> Y \<longrightarrow> B a x y"
shows "\<exists> b. \<forall> x y. x \<in> X \<and> y \<in> Y \<longrightarrow> B x b y"
proof -
from assms have "\<exists> a. \<forall> x y. x \<in> X \<and> y \<in> Y \<longrightarrow> B a x y" by (rule exI)
thus "\<exists> b. \<forall> x y. x \<in> X \<and> y \<in> Y \<longrightarrow> B x b y" by (rule A11 [rule_format])
qed
theorem th3_2:
assumes "B a b c"
shows "B c b a"
proof -
from th3_1 have "B b c c" by simp
with A7' and `B a b c` obtain x where "B b x b" and "B c x a" by blast
from A6' and `B b x b` have "x = b" by auto
with `B c x a` show "B c b a" by simp
qed
theorem th3_4:
assumes "B a b c" and "B b a c"
shows "a = b"
proof -
from `B a b c` and `B b a c` and A7' [of a b c b a]
obtain x where "B b x b" and "B a x a" by auto
hence "b = x" and "a = x" by (simp_all add: A6')
thus "a = b" by simp
qed
theorem th3_5_1:
assumes "B a b d" and "B b c d"
shows "B a b c"
proof -
from `B a b d` and `B b c d` and A7' [of a b d b c]
obtain x where "B b x b" and "B c x a" by auto
from `B b x b` have "b = x" by (rule A6')
with `B c x a` have "B c b a" by simp
thus "B a b c" by (rule th3_2)
qed
theorem th3_6_1:
assumes "B a b c" and "B a c d"
shows "B b c d"
proof -
from `B a c d` and `B a b c` and th3_2 have "B d c a" and "B c b a" by fast+
hence "B d c b" by (rule th3_5_1)
thus "B b c d" by (rule th3_2)
qed
theorem th3_7_1:
assumes "b \<noteq> c" and "B a b c" and "B b c d"
shows "B a c d"
proof -
from A4' obtain x where "B a c x" and "c x \<congruent> c d" by fast
from `B a b c` and `B a c x` have "B b c x" by (rule th3_6_1)
have "c d \<congruent> c d" by (rule th2_1)
with `b \<noteq> c` and `B b c x` and `c x \<congruent> c d` and `B b c d`
have "x = d" by (rule A4_unique)
with `B a c x` show "B a c d" by simp
qed
subsection "Simple theorems about congruence and betweenness"
definition (in tarski_first5) Col :: "'p \<Rightarrow> 'p \<Rightarrow> 'p \<Rightarrow> bool" where
"Col a b c \<equiv> B a b c \<or> B b c a \<or> B c a b"
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Tarskis_Geometry/Tarski.thy"}
|
{-# OPTIONS --cubical --safe --no-sized-types --no-guardedness #-}
module Agda.Builtin.Cubical.Glue where
open import Agda.Primitive
open import Agda.Builtin.Sigma
open import Agda.Primitive.Cubical renaming (primINeg to ~_; primIMax to _∨_; primIMin to _∧_;
primHComp to hcomp; primTransp to transp; primComp to comp;
itIsOne to 1=1)
open import Agda.Builtin.Cubical.Path
open import Agda.Builtin.Cubical.Sub renaming (Sub to _[_↦_]; primSubOut to ouc)
module Helpers where
-- Homogeneous filling
hfill : ∀ {ℓ} {A : Set ℓ} {φ : I}
(u : ∀ i → Partial φ A)
(u0 : A [ φ ↦ u i0 ]) (i : I) → A
hfill {φ = φ} u u0 i =
hcomp (λ j → \ { (φ = i1) → u (i ∧ j) 1=1
; (i = i0) → ouc u0 })
(ouc u0)
-- Heterogeneous filling defined using comp
fill : ∀ {ℓ : I → Level} (A : ∀ i → Set (ℓ i)) {φ : I}
(u : ∀ i → Partial φ (A i))
(u0 : A i0 [ φ ↦ u i0 ]) →
∀ i → A i
fill A {φ = φ} u u0 i =
comp (λ j → A (i ∧ j)) _
(λ j → \ { (φ = i1) → u (i ∧ j) 1=1
; (i = i0) → ouc u0 })
(ouc {φ = φ} u0)
module _ {ℓ} {A : Set ℓ} where
refl : {x : A} → x ≡ x
refl {x = x} = λ _ → x
sym : {x y : A} → x ≡ y → y ≡ x
sym p = λ i → p (~ i)
cong : ∀ {ℓ'} {B : A → Set ℓ'} {x y : A}
(f : (a : A) → B a) (p : x ≡ y)
→ PathP (λ i → B (p i)) (f x) (f y)
cong f p = λ i → f (p i)
isContr : ∀ {ℓ} → Set ℓ → Set ℓ
isContr A = Σ A \ x → (∀ y → x ≡ y)
fiber : ∀ {ℓ ℓ'} {A : Set ℓ} {B : Set ℓ'} (f : A → B) (y : B) → Set (ℓ ⊔ ℓ')
fiber {A = A} f y = Σ A \ x → f x ≡ y
open Helpers
-- We make this a record so that isEquiv can be proved using
-- copatterns. This is good because copatterns don't get unfolded
-- unless a projection is applied so it should be more efficient.
record isEquiv {ℓ ℓ'} {A : Set ℓ} {B : Set ℓ'} (f : A → B) : Set (ℓ ⊔ ℓ') where
field
equiv-proof : (y : B) → isContr (fiber f y)
open isEquiv public
infix 4 _≃_
_≃_ : ∀ {ℓ ℓ'} (A : Set ℓ) (B : Set ℓ') → Set (ℓ ⊔ ℓ')
A ≃ B = Σ (A → B) \ f → (isEquiv f)
equivFun : ∀ {ℓ ℓ'} {A : Set ℓ} {B : Set ℓ'} → A ≃ B → A → B
equivFun e = fst e
-- Improved version of equivProof compared to Lemma 5 in CCHM. We put
-- the (φ = i0) face in contr' making it be definitionally c in this
-- case. This makes the computational behavior better, in particular
-- for transp in Glue.
equivProof : ∀ {la lt} (T : Set la) (A : Set lt) → (w : T ≃ A) → (a : A)
→ ∀ ψ → (Partial ψ (fiber (w .fst) a)) → fiber (w .fst) a
equivProof A B w a ψ fb = contr' {A = fiber (w .fst) a} (w .snd .equiv-proof a) ψ fb
where
contr' : ∀ {ℓ} {A : Set ℓ} → isContr A → (φ : I) → (u : Partial φ A) → A
contr' {A = A} (c , p) φ u = hcomp (λ i → λ { (φ = i1) → p (u 1=1) i
; (φ = i0) → c }) c
{-# BUILTIN EQUIV _≃_ #-}
{-# BUILTIN EQUIVFUN equivFun #-}
{-# BUILTIN EQUIVPROOF equivProof #-}
primitive
primGlue : ∀ {ℓ ℓ'} (A : Set ℓ) {φ : I}
→ (T : Partial φ (Set ℓ')) → (e : PartialP φ (λ o → T o ≃ A))
→ Set ℓ'
prim^glue : ∀ {ℓ ℓ'} {A : Set ℓ} {φ : I}
→ {T : Partial φ (Set ℓ')} → {e : PartialP φ (λ o → T o ≃ A)}
→ PartialP φ T → A → primGlue A T e
prim^unglue : ∀ {ℓ ℓ'} {A : Set ℓ} {φ : I}
→ {T : Partial φ (Set ℓ')} → {e : PartialP φ (λ o → T o ≃ A)}
→ primGlue A T e → A
-- Needed for transp in Glue.
primFaceForall : (I → I) → I
module _ {ℓ : I → Level} (P : (i : I) → Set (ℓ i)) where
private
E : (i : I) → Set (ℓ i)
E = λ i → P i
~E : (i : I) → Set (ℓ (~ i))
~E = λ i → P (~ i)
A = P i0
B = P i1
f : A → B
f x = transp E i0 x
g : B → A
g y = transp ~E i0 y
u : ∀ i → A → E i
u i x = transp (λ j → E (i ∧ j)) (~ i) x
v : ∀ i → B → E i
v i y = transp (λ j → ~E ( ~ i ∧ j)) i y
fiberPath : (y : B) → (xβ0 xβ1 : fiber f y) → xβ0 ≡ xβ1
fiberPath y (x0 , β0) (x1 , β1) k = ω , λ j → δ (~ j) where
module _ (j : I) where
private
sys : A → ∀ i → PartialP (~ j ∨ j) (λ _ → E (~ i))
sys x i (j = i0) = v (~ i) y
sys x i (j = i1) = u (~ i) x
ω0 = comp ~E _ (sys x0) ((β0 (~ j)))
ω1 = comp ~E _ (sys x1) ((β1 (~ j)))
θ0 = fill ~E (sys x0) (inc (β0 (~ j)))
θ1 = fill ~E (sys x1) (inc (β1 (~ j)))
sys = λ {j (k = i0) → ω0 j ; j (k = i1) → ω1 j}
ω = hcomp sys (g y)
θ = hfill sys (inc (g y))
δ = λ (j : I) → comp E _
(λ i → λ { (j = i0) → v i y ; (k = i0) → θ0 j (~ i)
; (j = i1) → u i ω ; (k = i1) → θ1 j (~ i) })
(θ j)
γ : (y : B) → y ≡ f (g y)
γ y j = comp E _ (λ i → λ { (j = i0) → v i y
; (j = i1) → u i (g y) }) (g y)
pathToisEquiv : isEquiv f
pathToisEquiv .equiv-proof y .fst .fst = g y
pathToisEquiv .equiv-proof y .fst .snd = sym (γ y)
pathToisEquiv .equiv-proof y .snd = fiberPath y _
pathToEquiv : A ≃ B
pathToEquiv .fst = f
pathToEquiv .snd = pathToisEquiv
{-# BUILTIN PATHTOEQUIV pathToEquiv #-}
|
{"hexsha": "c7ed5e75ae186911881b15dc4df408ad918d67f7", "size": 5286, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/data/lib/prim/Agda/Builtin/Cubical/Glue.agda", "max_stars_repo_name": "phadej/agda", "max_stars_repo_head_hexsha": "2fa8ede09451d43647f918dbfb24ff7b27c52edc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/lib/prim/Agda/Builtin/Cubical/Glue.agda", "max_issues_repo_name": "phadej/agda", "max_issues_repo_head_hexsha": "2fa8ede09451d43647f918dbfb24ff7b27c52edc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/lib/prim/Agda/Builtin/Cubical/Glue.agda", "max_forks_repo_name": "phadej/agda", "max_forks_repo_head_hexsha": "2fa8ede09451d43647f918dbfb24ff7b27c52edc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2452830189, "max_line_length": 104, "alphanum_fraction": 0.4496783958, "num_tokens": 2149}
|
/******************************************************************
*
* uEcho for C
*
* Copyright (C) Satoshi Konno 2015
*
* This is licensed under BSD-style license, see file COPYING.
*
******************************************************************/
#include <boost/test/unit_test.hpp>
#include <uecho/_node.h>
#include <uecho/profile.h>
BOOST_AUTO_TEST_CASE(NodeDefault)
{
uEchoNode* node = uecho_node_new();
BOOST_CHECK(node);
BOOST_CHECK_EQUAL(uecho_node_getobjectcount(node), 1);
BOOST_CHECK(uecho_node_hasobjectbycode(node, uEchoNodeProfileObject));
uecho_node_delete(node);
}
BOOST_AUTO_TEST_CASE(NodeAddress)
{
const char* test_addr = "192.168.0.1";
uEchoNode* node = uecho_node_new();
BOOST_CHECK(node);
BOOST_CHECK_EQUAL(uecho_node_isaddress(node, test_addr), false);
uecho_node_setaddress(node, test_addr);
BOOST_CHECK_EQUAL(uecho_node_isaddress(node, test_addr), true);
uecho_node_delete(node);
}
BOOST_AUTO_TEST_CASE(NodeTID)
{
uEchoNode* node = uecho_node_new();
uEchoTID first_tid = uecho_node_getnexttid(node);
BOOST_CHECK(uEchoTidMin <= first_tid);
BOOST_CHECK(first_tid <= uEchoTidMax);
uEchoTID prev_tid = first_tid;
for (int n = 0; n < 100; n++) {
uEchoTID tid = uecho_node_getnexttid(node);
BOOST_CHECK(uEchoTidMin <= tid);
BOOST_CHECK(prev_tid < tid);
BOOST_CHECK(tid <= uEchoTidMax);
}
uecho_node_delete(node);
}
BOOST_AUTO_TEST_CASE(NodeSetObjects)
{
uEchoNode* node = uecho_node_new();
BOOST_CHECK(node);
const int u_echo_test_object_code_max = uEchoObjectCodeMax % 100;
uecho_node_clear(node);
BOOST_CHECK_EQUAL(uecho_node_getobjectcount(node), 0);
for (size_t n = uEchoObjectCodeMin; n <= u_echo_test_object_code_max; n++) {
uecho_node_setobject(node, (uEchoObjectCode)n);
}
BOOST_CHECK_EQUAL(uecho_node_getobjectcount(node), (u_echo_test_object_code_max - uEchoObjectCodeMin + 1));
for (size_t n = uEchoObjectCodeMin; n <= u_echo_test_object_code_max; n++) {
uEchoObject* obj = uecho_node_getobjectbycode(node, (uEchoObjectCode)n);
BOOST_CHECK(obj);
BOOST_CHECK_EQUAL(uecho_object_getcode(obj), n);
BOOST_CHECK_EQUAL(uecho_object_getparentnode(obj), node);
}
BOOST_CHECK_EQUAL(uecho_node_getobjectcount(node), (u_echo_test_object_code_max - uEchoObjectCodeMin + 1));
uecho_node_delete(node);
}
BOOST_AUTO_TEST_CASE(NodeProfileClass)
{
uEchoNode* node = uecho_node_new();
BOOST_CHECK(node);
BOOST_CHECK(uecho_node_setobject(node, 0x001101));
BOOST_CHECK(uecho_node_setobject(node, 0x001102));
BOOST_CHECK(uecho_node_setobject(node, 0x001201));
BOOST_CHECK(uecho_node_hasobjectbycode(node, 0x001101));
BOOST_CHECK(uecho_node_hasobjectbycode(node, 0x001102));
BOOST_CHECK(uecho_node_hasobjectbycode(node, 0x001201));
uEchoObject* obj = uecho_node_getnodeprofileclassobject(node);
BOOST_CHECK(obj);
BOOST_CHECK_EQUAL(uecho_nodeprofile_getclasscount(obj), 3);
BOOST_CHECK_EQUAL(uecho_nodeprofile_getinstancecount(obj), 3);
// Class List
BOOST_CHECK_EQUAL(uecho_object_getpropertydatasize(obj, uEchoNodeProfileClassSelfNodeClassListS), ((2 * 2) + 1));
byte cls_list[] = { 0x02, 0x00, 0x11, 0x00, 0x12 };
byte* node_cls_list = uecho_nodeprofile_getclasslist(obj);
BOOST_CHECK(node_cls_list);
for (int n = 0; n < sizeof(cls_list); n++) {
BOOST_CHECK_EQUAL(cls_list[n], node_cls_list[n]);
}
// Instance List
BOOST_CHECK_EQUAL(uecho_object_getpropertydatasize(obj, uEchoNodeProfileClassSelfNodeInstanceListS), ((3 * 3) + 1));
byte ins_list[] = { 0x03, 0x00, 0x11, 0x01, 0x00, 0x11, 0x02, 0x00, 0x12, 0x01 };
byte* node_ins_list = uecho_nodeprofile_getinstancelist(obj);
BOOST_CHECK(node_ins_list);
for (int n = 0; n < sizeof(ins_list); n++) {
BOOST_CHECK_EQUAL(ins_list[n], node_ins_list[n]);
}
// Notification Instance List
node_ins_list = uecho_nodeprofile_getnotificationinstancelist(obj);
BOOST_CHECK(node_ins_list);
for (int n = 0; n < sizeof(ins_list); n++) {
BOOST_CHECK_EQUAL(ins_list[n], node_ins_list[n]);
}
uecho_node_delete(node);
}
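// A minimal additional check (a sketch, not part of the original suite): it
// assumes, based on the NodeDefault and NodeSetObjects tests above, that a
// new node starts with one profile object and that uecho_node_clear()
// removes every object from the node.
BOOST_AUTO_TEST_CASE(NodeClear)
{
  uEchoNode* node = uecho_node_new();
  BOOST_CHECK(node);
  BOOST_CHECK(uecho_node_setobject(node, 0x001101));
  BOOST_CHECK_EQUAL(uecho_node_getobjectcount(node), 2);
  uecho_node_clear(node);
  BOOST_CHECK_EQUAL(uecho_node_getobjectcount(node), 0);
  uecho_node_delete(node);
}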
|
{"hexsha": "c32973f42e3c1b10309f8de9ec4e47a0b09c5b33", "size": 4086, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/NodeTest.cpp", "max_stars_repo_name": "cybergarage/uecho4c", "max_stars_repo_head_hexsha": "d92633d556602c8ffa917525a241350ac10a9ff2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2016-08-23T01:23:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-13T00:41:32.000Z", "max_issues_repo_path": "test/NodeTest.cpp", "max_issues_repo_name": "cybergarage/uecho4c", "max_issues_repo_head_hexsha": "d92633d556602c8ffa917525a241350ac10a9ff2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-07-16T22:28:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T14:57:53.000Z", "max_forks_repo_path": "test/NodeTest.cpp", "max_forks_repo_name": "cybergarage/uecho4c", "max_forks_repo_head_hexsha": "d92633d556602c8ffa917525a241350ac10a9ff2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2016-08-23T01:23:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-22T08:25:59.000Z", "avg_line_length": 29.3956834532, "max_line_length": 118, "alphanum_fraction": 0.7249143417, "num_tokens": 1172}
|
import unittest
import numpy as np
from polyhedral_analysis.polyhedra_recipe import (PolyhedraRecipe,
matching_sites,
polyhedra_from_distance_cutoff)
from pymatgen.core import Structure, Lattice
from polyhedral_analysis.atom import Atom
from unittest.mock import Mock, patch
class TestPolyhedraRecipeInit(unittest.TestCase):
def test_polyhedra_recipe_init_raises_ValueError_for_invalid_method(self):
invalid_method = 'foo'
with self.assertRaises(ValueError):
PolyhedraRecipe(method=invalid_method, central_atoms='foo', vertex_atoms='bar')
def test_polyhdra_recipe_init(self):
recipe_args = {'method': 'distance cutoff',
'central_atoms': 'S',
'vertex_atoms': 'Li',
'coordination_cutoff': 5.0}
with patch('polyhedral_analysis.polyhedra_recipe.generator_from_atom_argument') as mock_generator_from_atom_argument:
mock_generators = [Mock(), Mock()]
mock_generator_from_atom_argument.side_effect = mock_generators
recipe = PolyhedraRecipe(**recipe_args)
self.assertEqual(recipe.method, recipe_args['method'])
self.assertEqual(recipe._central_atom_list_generator, mock_generators[0])
self.assertEqual(recipe._vertex_atom_list_generator, mock_generators[1])
self.assertEqual(recipe._central_atom_list, None)
self.assertEqual(recipe._vertex_atom_list, None)
self.assertEqual(recipe.coordination_cutoff, recipe_args['coordination_cutoff'])
self.assertEqual(recipe.vertex_graph_cutoff, None)
self.assertEqual(recipe.n_neighbours, None)
self.assertEqual(recipe.label, None)
self.assertEqual(recipe.recalculate, True)
class TestPolyhedraRecipeFunctions(unittest.TestCase):
def test_matching_sites(self):
# construct a pymatgen Structure instance using the site fractional coordinates
# face-centered cubic lattice
coords = np.array([[0.0, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5]])
atom_list = ['Li'] * len(coords)
lattice = Lattice.from_parameters(a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90)
structure = Structure(lattice, atom_list, coords)
ref_coords = np.array([[0.1, 0.1, 0.1],
[0.0, 0.4, 0.5]])
ref_atom_list = ['Na'] * len(ref_coords)
ref_structure = Structure(lattice, ref_atom_list, ref_coords)
matched_sites = matching_sites(structure, ref_structure)
self.assertEqual(len(matched_sites), 2)
self.assertEqual(matched_sites[0], (structure[0], 0))
self.assertEqual(matched_sites[1], (structure[2], 2))
def test_matching_sites_with_species(self):
# construct a pymatgen Structure instance using the site fractional coordinates
# face-centered cubic lattice
coords = np.array([[0.0, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5]])
atom_list = ['Li', 'Mg', 'Mg', 'Mg']
lattice = Lattice.from_parameters(a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90)
structure = Structure(lattice, atom_list, coords)
ref_coords = np.array([[0.1, 0.1, 0.1],
[0.0, 0.4, 0.5]])
ref_atom_list = ['Na'] * len(ref_coords)
ref_structure = Structure(lattice, ref_atom_list, ref_coords)
matched_sites = matching_sites(structure, ref_structure, species=['Li'])
self.assertEqual(len(matched_sites), 1)
self.assertEqual(matched_sites[0], (structure[0], 0))
def test_polyhedra_from_distance_cutoff_with_no_central_atoms_returns_empty_list(self):
# If an empty list of central atoms is passed in, an empty list of polyhedra
# should be returned.
central_atoms = []
vertex_atoms = [Mock(spec=Atom), Mock(spec=Atom)]
cutoff = 1.0
polyhedra = polyhedra_from_distance_cutoff(central_atoms=central_atoms,
vertex_atoms=vertex_atoms,
cutoff=cutoff)
self.assertEqual(polyhedra, [])
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "43e2bd44fc923057f3d0217d2264a19a34cb58f9", "size": 4519, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_polyhedra_recipe.py", "max_stars_repo_name": "bjmorgan/polyhedral-analysis", "max_stars_repo_head_hexsha": "1f594032f68596b7622bf15b95f7e2ac1183a169", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-22T15:57:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T15:57:12.000Z", "max_issues_repo_path": "tests/test_polyhedra_recipe.py", "max_issues_repo_name": "bjmorgan/polyhedral-analysis", "max_issues_repo_head_hexsha": "1f594032f68596b7622bf15b95f7e2ac1183a169", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-22T14:51:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T15:47:36.000Z", "max_forks_repo_path": "tests/test_polyhedra_recipe.py", "max_forks_repo_name": "bjmorgan/polyhedral-analysis", "max_forks_repo_head_hexsha": "1f594032f68596b7622bf15b95f7e2ac1183a169", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-25T02:54:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-05T12:17:58.000Z", "avg_line_length": 50.2111111111, "max_line_length": 125, "alphanum_fraction": 0.6105333038, "include": true, "reason": "import numpy", "num_tokens": 1039}
|
[STATEMENT]
lemma i_shrink_i_take_mult: "0 < k \<Longrightarrow> f \<Down> (n * k) \<div>\<^sub>f k = f \<div>\<^sub>i k \<Down> n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < k \<Longrightarrow> f \<Down> (n * k) \<div> k = f \<div> k \<Down> n
[PROOF STEP]
by (simp add: f_shrink_def i_shrink_def i_aggregate_i_take_mult)
|
{"llama_tokens": 148, "file": "AutoFocus-Stream_AF_Stream", "length": 1}
|
#include "st_ydl.hpp"
#include "ydl_data.hpp"
#include <fstream>
#include <iostream>
#include <thread>
#include <queue>
#include <boost/thread.hpp>
using namespace std;
STydl::STydl(string _file)
: file_name(_file)
{
try
{
read_file();
}
catch(const runtime_error &run_err)
{
cerr << run_err.what();
exit(EXIT_FAILURE);
}
catch(const bad_alloc &ba)
{
cerr << ba.what();
exit(EXIT_FAILURE);
}
catch(const exception &exc)
{
cerr << exc.what();
exit(EXIT_FAILURE);
}
catch(...)
{
cerr << "ERROR (Unknown): while reading file and preparing data structure.";
exit(EXIT_FAILURE);
}
}
STydl::~STydl()
{
for (auto [key, value] : progress)
{
delete key;
}
}
void STydl::read_file()
{
ifstream in_file(file_name);
if (!in_file.is_open())
{
throw runtime_error("file handling error: Could not open file: " + file_name);
}
YDLdata *new_YDLdata;
string command;
string path;
// Create an object for every link and store it in the "progress" map below.
// "command" and "path" may stay the same across multiple links, which is why they are kept in variables.
string line;
while (getline(in_file, line))
{
// Ignore empty lines; any line whose first character is not '>', '#' or 'h' falls through the switch default and is skipped.
if (line != "")
{
switch (line.at(0))
{
case '>':
line.erase(0, 1);
command = line;
break;
case '#':
line.erase(0, 1);
path = line;
break;
case 'h':
if (!command.empty())
new_YDLdata = new YDLdata(path, line, command);
else
new_YDLdata = new YDLdata(path, line);
// data.push_back(new_YDLdata);
// push data in map first
progress[new_YDLdata] = 2;
break;
default:
break;
}
}
}
}
#if _WIN32
void STydl::printProgress(bool firstRun)
{
system("cls");
int i = 0;
for (auto [key, value] : progress)
{
enum status {Finished, Downloading, Queued};
switch (value)
{
case Finished:
cout << "\033[1;32m" << "Finished -> " << "\033[0m";
break;
case Downloading:
cout << "\033[1;33m" << "Downloading -> " << "\033[0m";
break;
case Queued:
cout << "\033[1;34m" << "Queued -> " << "\033[0m";
break;
default:
break;
}
cout << "[" << i++ << "] - " << key->get_Link() << endl;
}
}
#else
void STydl::printProgress(bool firstRun)
{
if (!firstRun)
for (int i = 0; i < progress.size(); ++i)
{
cout << "\33[2K\r" << flush;
cout << "\33[A" << flush;
}
int i = 0;
for (auto [key, value] : progress)
{
enum status {Finished, Downloading, Queued};
switch (value)
{
case Finished:
cout << "\033[1;32m" << "Finished -> " << "\033[0m";
break;
case Downloading:
cout << "\033[1;33m" << "Downloading -> " << "\033[0m";
break;
case Queued:
cout << "\033[1;34m" << "Queued -> " << "\033[0m";
break;
default:
break;
}
cout << "[" << i++ << "] - " << key->get_Link() << endl;
}
}
#endif
void STydl::download_multithreading(int _threads_to_use)
{
try
{
int number_of_threads = _threads_to_use;
// all pointer to YDLdata which are going to be downloaded
queue<YDLdata *> YDLdata_queue;
// queue of all available threads
queue<boost::thread *> thread_queue;
// thread YDLdata mapping
map<boost::thread *, YDLdata *> thread_data_map;
// push YDLdata to queue
for (auto [key, value] : progress)
YDLdata_queue.push(key);
// create threads and add them to the queue
for (int i = 0; i < number_of_threads; ++i)
{
boost::thread *temp_thread = nullptr;
thread_queue.push(temp_thread);
}
// used for "printProgress"
bool first_run = true;
while (!YDLdata_queue.empty() || (thread_queue.size() != static_cast<size_t>(number_of_threads)))
{
printProgress(first_run);
first_run = false;
if (!thread_queue.empty() && !YDLdata_queue.empty())
{
boost::thread *thread_to_use = thread_queue.front();
thread_queue.pop();
YDLdata *ydl_temp = YDLdata_queue.front();
YDLdata_queue.pop();
thread_to_use = new boost::thread(boost::bind(&YDLdata::download, ydl_temp));
progress[ydl_temp] = 1;
thread_data_map[thread_to_use] = ydl_temp;
}
else
{
// Poll the running threads and reclaim every one that has finished.
for (auto it = thread_data_map.begin(); it != thread_data_map.end();)
{
if (it->first != nullptr && it->first->try_join_for(boost::chrono::seconds(5)))
{
progress[it->second] = 0; // mark this download as finished
delete it->first; // release the joined thread object
thread_queue.push(nullptr); // return a free slot to the pool
it = thread_data_map.erase(it);
}
else
{
++it;
}
}
}
printProgress(first_run);
}
printProgress(false);
// clean thread queue
while (!thread_queue.empty())
{
boost::thread *temp = thread_queue.front();
thread_queue.pop();
delete temp;
}
}
catch(const std::exception& e)
{
std::cerr << e.what() << '\n';
}
}
int STydl::get_all_downloads()
{
return progress.size();
}
|
{"hexsha": "33ba22d5fdb42ce1da5175c49b67f960db5123f7", "size": 6299, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/st_ydl.cpp", "max_stars_repo_name": "SoujiThenria/st_ydl", "max_stars_repo_head_hexsha": "4bd47dcf9b909ec6172e6e1cd41fdb34ba90210f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-07-18T15:31:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-18T15:31:07.000Z", "max_issues_repo_path": "src/st_ydl.cpp", "max_issues_repo_name": "SoujiThenria/st_ydl", "max_issues_repo_head_hexsha": "4bd47dcf9b909ec6172e6e1cd41fdb34ba90210f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/st_ydl.cpp", "max_forks_repo_name": "SoujiThenria/st_ydl", "max_forks_repo_head_hexsha": "4bd47dcf9b909ec6172e6e1cd41fdb34ba90210f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3204633205, "max_line_length": 105, "alphanum_fraction": 0.4676932846, "num_tokens": 1447}
|
module mod_monolis_precond_mf
use mod_monolis_prm
use mod_monolis_com
use mod_monolis_mat
use mod_monolis_fact_mf
use mod_monolis_matrix_fillin
implicit none
contains
subroutine monolis_precond_mf_setup(monoPRM, monoCOM, monoMAT)
implicit none
type(monolis_prm) :: monoPRM
type(monolis_com) :: monoCOM
type(monolis_mat) :: monoMAT
call monolis_precond_mf_init(monoPRM, monoCOM, monoMAT)
call monolis_init_MF_inner(monoPRM, monoCOM, monoMAT%monoTREE)
call monolis_fact_MF_inner(monoPRM, monoCOM, monoMAT%monoTREE)
end subroutine monolis_precond_mf_setup
subroutine monolis_precond_mf_apply(monoPRM, monoCOM, monoMAT, X, Y)
implicit none
type(monolis_prm) :: monoPRM
type(monolis_com) :: monoCOM
type(monolis_mat) :: monoMAT
real(kdouble) :: X(:), Y(:)
integer(kint) :: i, N, NDOF
N = monoMAT%N
NDOF = monoMAT%NDOF
do i = 1, N*NDOF
monoMAT%monoTREE%B(i) = X(i)
enddo
call monolis_solv_MF_inner(monoPRM, monoCOM, monoMAT%monoTREE)
do i = 1, N*NDOF
Y(i) = monoMAT%monoTREE%X(i)
enddo
end subroutine monolis_precond_mf_apply
subroutine monolis_precond_mf_clear(monoPRM, monoCOM, monoMAT)
implicit none
type(monolis_prm) :: monoPRM
type(monolis_com) :: monoCOM
type(monolis_mat) :: monoMAT
deallocate(monoMAT%monoTREE%B)
deallocate(monoMAT%monoTREE%X)
call monolis_clear_MF_inner(monoPRM, monoCOM, monoMAT%monoTREE)
end subroutine monolis_precond_mf_clear
subroutine monolis_precond_mf_init(monoPRM, monoCOM, monoMAT)
implicit none
type(monolis_prm) :: monoPRM
type(monolis_com) :: monoCOM
type(monolis_mat) :: monoMAT
integer(kint) :: N, NDOF
logical :: is_fillin = .true.
logical :: is_asym = .false.
N = monoMAT%N
NDOF = monoMAT%NDOF
monoMAT%monoTREE%N = monoMAT%N
monoMAT%monoTREE%NP = monoMAT%NP
monoMAT%monoTREE%NDOF = monoMAT%NDOF
allocate(monoMAT%monoTREE%B(N*NDOF))
allocate(monoMAT%monoTREE%X(N*NDOF))
monoMAT%monoTREE%B = 0.0d0
monoMAT%monoTREE%X = 0.0d0
call monolis_matrix_get_fillin(monoPRM, monoCOM, monoMAT, monoMAT%monoTREE, is_fillin, is_asym)
call monolis_matrix_copy_with_fillin(monoPRM, monoCOM, monoMAT, monoMAT%monoTREE, is_asym)
end subroutine monolis_precond_mf_init
end module mod_monolis_precond_mf
|
{"hexsha": "55cd3bbe2279fbbc172e94ee40a136122c4d9972", "size": 2350, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/solver/precond/MF.f90", "max_stars_repo_name": "nqomorita/monolis", "max_stars_repo_head_hexsha": "55d746a480fd7b9639216be19e0a253e6137dfe9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-03-11T20:24:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T02:31:06.000Z", "max_issues_repo_path": "src/solver/precond/MF.f90", "max_issues_repo_name": "nqomorita/monolis", "max_issues_repo_head_hexsha": "55d746a480fd7b9639216be19e0a253e6137dfe9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/solver/precond/MF.f90", "max_forks_repo_name": "nqomorita/monolis", "max_forks_repo_head_hexsha": "55d746a480fd7b9639216be19e0a253e6137dfe9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-01T09:34:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-01T09:34:26.000Z", "avg_line_length": 31.7567567568, "max_line_length": 99, "alphanum_fraction": 0.730212766, "num_tokens": 791}
|
#
# File:
# NUG_unstructured_ICON_triangles_PyNGL.py
#
# Synopsis:
# Illustrates using polygon fill for triangular mesh data.
#
# Categories:
# contour plots
#
# Author:
# Karin Meier-Fleischer
#
# Date of initial publication:
# February 2016
#
# Description:
# This example shows how to create contours of the triangular ICON
# grid by using polygon fill.
#
# Effects illustrated:
# o Drawing color-filled polygons on a map
# o Drawing a custom labelbar on a map
#
# Output:
# A single visualization is produced.
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
"""
NCL User Guide Python Example: NUG_unstructured_ICON_triangles_PyNGL.py
Grid type: unstructured
Model: ICON
Info: - colored triangles
- add labelbar (colorbar)
18.02.16 meier-fleischer(at)dkrz.de
"""
from __future__ import print_function
import numpy as np
import math, time, sys, os
import Nio, Ngl
t1 = time.time() #-- retrieve start time
#-- define variables
diri = './'
fname = 'ta_ps_850.nc' #-- data path and file name
gname = 'r2b4_amip.nc' #-- grid info file
ffile = os.path.join(diri, fname)
gfile = os.path.join(diri, gname)
VarName = 'ta' #-- variable name
#---Test if files exist
if(not os.path.exists(ffile) or not os.path.exists(gfile)):
print("You do not have the necessary files to run this example, '{}' and '{}'.".format(ffile, gfile))
print("You can get the files from the NCL website at:")
print("http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/")
sys.exit()
#-- open file and read variables
f = Nio.open_file(ffile, 'r') #-- add data file
g = Nio.open_file(gfile, 'r') #-- add grid file (not contained in data file!!!)
#-- read a timestep of 'ta'
variable = f.variables['ta'] #-- first time step, lev, ncells
data = variable[0,0,:] #-- ta [time,lev,ncells]; miss _FillValue
var = data - 273.15 #-- convert to degrees Celsius; miss _FillValue
#-- define _FillValue and missing_value if not existing
missing = -1e20
if not hasattr(var,'_FillValue'):
var._FillValue = missing #-- set _FillValue
if not hasattr(var,'missing_value'):
var.missing_value = missing #-- set missing_value
varM = np.ma.array(var, mask=np.equal(var,missing)) #-- mask array with missing values
nummissing = np.count_nonzero(varM.mask) #-- number of missing values
#-- set data intervals, levels, labels, color indices
varMin, varMax, varInt = -32, 28, 4 #-- set data minimum, maximum, interval
levels = list(range(varMin,varMax,varInt)) #-- set levels array
nlevs = len(levels) #-- number of levels
labels = ['{:.2f}'.format(x) for x in levels] #-- convert list of floats to list of strings
#-- print info to stdout
print('')
print('min/max: {:0.2f} / {:0.2f}'.format(np.min(varM), np.max(varM)))
print('')
print('varMin: {:3d}'.format(varMin))
print('varMax: {:3d}'.format(varMax))
print('varInt: {:3d}'.format(varInt))
print('')
print('missing_value: {}'.format(missing))
print('missing values: {}'.format(nummissing))
#-------------------------------------------------------------------
#-- define the x-, y-values and the polygon points
#-------------------------------------------------------------------
rad2deg = 45./np.arctan(1.) #-- radians to degrees
x, y = g.variables['clon'][:], g.variables['clat'][:]
vlon, vlat = g.variables['clon_vertices'][:], g.variables['clat_vertices'][:]
x, y = x*rad2deg, y*rad2deg #-- cell center, lon, lat
vlat, vlon = vlat*rad2deg, vlon * rad2deg #-- cell latitude/longitude vertices
ncells, nv = vlon.shape #-- ncells: number of cells; nv: number of edges
#-- print information to stdout
print('')
print('cell points: {}'.format(nv))
print('cells: {}'.format(ncells))
print('')
#-- rearrange the longitude values to the range -180..180 degrees
def rearrange(vlon):
less_than = vlon < -180.
greater_than = vlon > 180.
vlon[less_than] = vlon[less_than] + 360.
vlon[greater_than] = vlon[greater_than] - 360.
return vlon
vlon = rearrange(vlon) #-- set longitude values to the range -180..180 degrees
print('min/max vlon: {} {}'.format(np.min(vlon), np.max(vlon)))
print('min/max vlat: {} {}'.format(np.min(vlat), np.max(vlat)))
print('')
#-- open a workstation for second plot: triangles plot
wkres = Ngl.Resources()
wkres.wkWidth, wkres.wkHeight = 2500, 2500
wks_type = 'png'
wks_name = os.path.basename(__file__).split(".")[0]
wks = Ngl.open_wks(wks_type,wks_name,wkres)
#-- define colormap
colormap = Ngl.read_colormap_file('WhiteBlueGreenYellowRed')[22::12,:] #-- RGB ! [256,4] -> [20,4]
#-- select every 12th color
colormap[19,:] = [1.,1.,1.,0.] #-- white for missing values
print('')
print('levels: {}'.format(levels))
print('labels: {}'.format(labels))
print('')
print('nlevs: {:3d}'.format(nlevs))
print('')
#-- set map resources
mpres = Ngl.Resources()
mpres.nglDraw = False #-- turn off plot draw and frame advance. We will
mpres.nglFrame = False #-- do it later after adding subtitles.
mpres.mpGridAndLimbOn = False
mpres.mpGeophysicalLineThicknessF = 2.
mpres.pmTitleDisplayMode = 'Always'
mpres.tiMainString = 'PyNGL: unstructured grid ICON'
#-- create only a map
map = Ngl.map(wks,mpres)
Ngl.draw(map)
#-- assign and initialize array which will hold the color indices of the cells
gscolors = -1*(np.ones((ncells,),dtype=int)) #-- color index per cell; init to transparent: -1
#-- set color index of all cells in between levels
for m in range(nlevs):
vind = [] #-- empty list for color indices
upper = levels[m+1] if m+1 < nlevs else varMax #-- the top bin is bounded by varMax
for i in range(ncells): #-- include the last cell as well
if (varM[i] >= levels[m] and varM[i] < upper):
gscolors[i] = m+1 # 1 to nlevs
vind.append(i)
print('finished level {:3d} -- {:5d} polygons considered - gscolors {:3d}'.format(m, len(vind), m+1))
del vind
gscolors[varM < varMin] = 0 #-- set color index for cells less than level[0]
gscolors[varM >= varMax] = nlevs+1 #-- set color index for cells greater than levels[nlevs-1]
gscolors[np.nonzero(varM.mask)] = -1 #-- set color index for missing locations
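#-- Note: together with the three adjustment lines above, the binning loop can
#-- equivalently be written as one vectorized call (a sketch, not part of the
#-- original example):
#-- gscolors = np.digitize(varM.filled(missing), levels) #-- 0 below levels[0], m+1 inside bin m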
#-- set polygon resources
pgres = Ngl.Resources()
pgres.gsEdgesOn = True #-- draw the edges
pgres.gsFillIndex = 0 #-- solid fill
pgres.gsLineColor = 'black' #-- edge line color
pgres.gsLineThicknessF = 0.7 #-- line thickness
pgres.gsColors = colormap[gscolors,:] #-- use color array
pgres.gsSegments = list(range(0,len(vlon[:,0])*3,3)) #-- define segments array for fast draw
lon1d, lat1d = np.ravel(vlon), np.ravel(vlat) #-- convert to 1D-arrays
#-- add polygons to map
polyg = Ngl.add_polygon(wks,map,lon1d,lat1d,pgres)
#-- add a labelbar
lbres = Ngl.Resources()
lbres.vpWidthF = 0.85
lbres.vpHeightF = 0.15
lbres.lbOrientation = 'Horizontal'
lbres.lbFillPattern = 'SolidFill'
lbres.lbMonoFillPattern = 21 #-- must be 21 for color solid fill
lbres.lbMonoFillColor = False #-- use multiple colors
lbres.lbFillColors = colormap
lbres.lbLabelFontHeightF= 0.014
lbres.lbLabelAlignment = 'InteriorEdges'
lbres.lbLabelStrings = labels
lb = Ngl.labelbar_ndc(wks,nlevs+1,labels,0.1,0.24,lbres)
#-- maximize and draw the plot and advance the frame
Ngl.draw(map)
Ngl.frame(wks)
#-- get wallclock time
t2 = time.time()
print('')
print('Wallclock time: {:0.3f} seconds'.format(t2-t1))
print('')
Ngl.end()
|
{"hexsha": "b46e354001fae975fd183c25730da3703cec77ca", "size": 8463, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/NUG_unstructured_ICON_triangles_PyNGL.py", "max_stars_repo_name": "yang69can/pyngl", "max_stars_repo_head_hexsha": "78a7040ce9de4b7a442b0c3b5faecccab2f01426", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 125, "max_stars_repo_stars_event_min_datetime": "2016-11-24T09:04:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T14:06:56.000Z", "max_issues_repo_path": "examples/NUG_unstructured_ICON_triangles_PyNGL.py", "max_issues_repo_name": "yang69can/pyngl", "max_issues_repo_head_hexsha": "78a7040ce9de4b7a442b0c3b5faecccab2f01426", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2017-11-08T23:23:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T03:17:39.000Z", "max_forks_repo_path": "examples/NUG_unstructured_ICON_triangles_PyNGL.py", "max_forks_repo_name": "yang69can/pyngl", "max_forks_repo_head_hexsha": "78a7040ce9de4b7a442b0c3b5faecccab2f01426", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2017-08-27T10:50:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T14:56:05.000Z", "avg_line_length": 38.4681818182, "max_line_length": 110, "alphanum_fraction": 0.5809996455, "include": true, "reason": "import numpy", "num_tokens": 2332}
|
[STATEMENT]
lemma dep3_nonspray:
assumes "dep3_event Q R S x"
shows "\<exists>P\<in>\<P>. P \<notin> SPRAY x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>P\<in>\<P>. P \<notin> SPRAY x
[PROOF STEP]
by (metis assms dep3_event_def)
|
{"llama_tokens": 109, "file": "Schutz_Spacetime_Minkowski", "length": 1}
|
from ..gui.main_window import Ui_EditorMainWindow
from PySide.QtGui import QApplication, QMainWindow, QPixmap
from PySide import QtGui, QtCore
from PySide.QtCore import QObject
import sys
import numpy as np
from .. import util
from .brush_dialog import BrushDialog
from .about_dialog import AboutDialog
from .new_image_dialog import NewImageDialog
from .helper_threads import IFTThread
class EditorMainWindow(QMainWindow):
def __init__(self, parent=None):
super(EditorMainWindow, self).__init__(parent)
self.ui = Ui_EditorMainWindow()
self.ui.setupUi(self)
self.ui.action_open.triggered.connect(self.open_file)
self.ui.action_save_spatial.triggered.connect(self.save_spatial)
self.ui.action_new_image.triggered.connect(self.new_image)
self.ui.action_linked_zoom.triggered.connect(self.link_zoom)
self.ui.action_save_both.triggered.connect(self.save_both)
self.ui.action_brush.triggered.connect(self.show_brush)
self.ui.action_website.triggered.connect(self.show_website)
self.ui.action_about.triggered.connect(self.show_about)
self.ui.action_none.triggered.connect(self.remove_brush)
self.ui.image_zoom_in_btn.clicked.connect(self.image_zoom_in)
self.ui.image_zoom_out_btn.clicked.connect(self.image_zoom_out)
self.ui.freq_zoom_in_btn.clicked.connect(self.freq_zoom_in)
self.ui.freq_zoom_out_btn.clicked.connect(self.freq_zoom_out)
self.ui.image_label.installEventFilter(self)
self.ui.freq_label.installEventFilter(self)
self.ui.image_label.setMouseTracking(True)
self.ui.freq_label.setMouseTracking(True)
self.spatial_image = None
# This will store the shifted frequency image
self.frequency_array_magnitude = None
self.frequency_array_angle = None
self.freq_pixmap = None
self.scaled_freq_pixmap = None
self.image_pixmap = None
self.scaled_image_pixmap = None
self.spatial_scale = 1.0
self.frequency_scale = 1.0
self.current_brush = None
self.is_zoom_linked = False
def open_file(self):
""" Signal handler for the Open Menu """
filters = "Image Files (*.png *.jpg *.bmp)"
file_name = QtGui.QFileDialog.getOpenFileName(self, "Open File",
filter=filters)[0]
if file_name:
image = QtGui.QImage(file_name)
filters = "Image Files (*.png *.jpg *.bmp)"
if image.isNull():
QtGui.QMessageBox.information(self, "Image Viewer",
"Cannot load %s." % file_name)
return
array = util.qimage_to_numpy(image)
self.load_image_from_array(array)
def load_image_from_array(self, array):
""" Loads an array as spatial domain image.
This function recomputes the FFT and updates both UIs. """
image = util.rgb_to_yuv(array)
garray = image[..., 0]
farray = np.fft.fft2(garray)
farray = np.fft.fftshift(farray)
self.set_yuv_image(image)
self.set_freq_image_angle(np.angle(farray))
self.set_freq_image_magnitude(np.absolute(farray))
def set_freq_image_magnitude(self, fimg):
""" Sets a numpy array as a frequncy domain image magnitude.
This function expects an appropriately shifted numpy array as input.
Except taking log, no manipulation to the values is done before
rendering. The function updates recomputes all internal intermediate
values and re renders the frequency UI.
"""
self.frequency_array_magnitude = fimg
qimage = util.fft_to_qimage(self.frequency_array_magnitude)
pixmap = QPixmap.fromImage(qimage)
self.set_freq_pixmap(pixmap)
self.invalidate_freq_scale()
self.render_freq()
def set_freq_pixmap(self, pixmap):
"""Sets the pixmap to be shown for frequency image.
This function only caches the pixmap, not computation or UI updation
is done.
"""
self.freq_pixmap = pixmap
def invalidate_freq_scale(self):
"""Implies scale has changed and recomputes internal fields
This function is to be called when either `self.freq_pixmap` changes
or `self.frequency_scale` changes. This function merely caches the
scaled pixmap, no UI updation is done.
"""
w, h = self.freq_pixmap.width(), self.freq_pixmap.height()
sw, sh = int(w*self.frequency_scale), int(h*self.frequency_scale)
self.scaled_freq_pixmap = self.freq_pixmap.scaled(sw, sh)
def render_freq(self, pixmap=None):
"""Render `pixmap` as the frequency image. If not given display last
known sclaed spatial image pixmap.
This function does not perform any computations internally. The
function is to be called to update the UI to reflect the state of the
internal fields, when called without the 2nd argument. When a brush
is set, a pixmap with the brush drawn on it can be supplied as the 2nd
argument.
"""
if not pixmap:
pixmap = self.scaled_freq_pixmap
self.ui.freq_label.setPixmap(pixmap)
def set_freq_image_angle(self, fimg):
" Sets a numpy array as a frequncy domain image angle. "
self.frequency_array_angle = fimg
def set_yuv_image(self, img):
""" Sets the spatial image as YUV array.
The function expects a `uint8` array and will set the spatial domain
image in the UI along with updating all internal fields.
"""
self.spatial_image = img
img = util.yuv_to_rgb(self.spatial_image)
qimage = util.numpy_to_qimage(img)
pixmap = QPixmap.fromImage(qimage)
self.set_image_pixmap(pixmap)
self.invalidate_image_scale()
self.render_image()
def set_image_pixmap(self, pixmap):
"""Sets the pixmap to be shown for spatial image.
This function only caches the pixmap, not computation or UI updation
is done.
"""
self.image_pixmap = pixmap
def invalidate_image_scale(self):
"""Implies scale has changed and recomputes internal fields.
This function is to be called when either `self.image_pixmap` changes
or `self.spatial_scale` changes. This function merely caches the
scaled pixmap, no UI updation is done.
"""
w, h = self.image_pixmap.width(), self.image_pixmap.height()
sw, sh = int(w*self.spatial_scale), int(h*self.spatial_scale)
self.scaled_image_pixmap = self.image_pixmap.scaled(sw, sh)
def render_image(self, pixmap=None):
"""Render the pixmap as spatial image. If not given, display last known
sclaed spatial image pixmap.
"""
if not pixmap:
pixmap = self.scaled_image_pixmap
self.ui.image_label.setPixmap(pixmap)
def image_zoom_in(self):
" Zoom in the spatial domain image "
if self.spatial_image is None:
return
self.spatial_scale += 0.1
self.invalidate_image_scale()
self.render_image()
if self.is_zoom_linked:
self.frequency_scale = self.spatial_scale
self.invalidate_freq_scale()
self.render_freq()
def image_zoom_out(self):
" Zoom out the spatial domain image "
if self.spatial_image is None:
return
self.spatial_scale -= 0.1
self.invalidate_image_scale()
self.render_image()
if self.is_zoom_linked:
self.frequency_scale = self.spatial_scale
self.invalidate_freq_scale()
self.render_freq()
def freq_zoom_out(self):
"Zoom out the frequency domain image."
if self.frequency_array_magnitude is None:
return
self.frequency_scale -= 0.1
self.invalidate_freq_scale()
self.render_freq()
if self.is_zoom_linked:
self.spatial_scale = self.frequency_scale
self.invalidate_image_scale()
self.render_image()
def freq_zoom_in(self):
"Zoom out the frequency domain image."
if self.frequency_array_magnitude is None:
return
self.frequency_scale += 0.1
self.invalidate_freq_scale()
self.render_freq()
if self.is_zoom_linked:
self.spatial_scale = self.frequency_scale
self.invalidate_image_scale()
self.render_image()
def handle_image_move(self, event):
"Handle mouse move on the spatial image."
if self.spatial_image is None:
return
self.handle_image_stats(event)
def handle_image_stats(self, event):
"""Given an event, take care of displaying stats for spatial image.
The assumption made here is that the QLabel is exactly the size of the
image.
"""
pos = event.pos()
x, y = pos.x(), pos.y()
x, y = int(x/self.spatial_scale), int(y/self.spatial_scale)
r, c = y, x
r = np.clip(r, 0, self.spatial_image.shape[0] - 1)
c = np.clip(c, 0, self.spatial_image.shape[1] - 1)
value = self.spatial_image[r, c].astype(int)
msg = "X:%d Y:%d Value:" % (x, y)
msg += str(value)
self.ui.image_info_label.setText(msg)
def handle_freq_move(self, event):
"""Handle mouse move on the frequency domain image.
"""
if self.frequency_array_magnitude is None:
return
self.handle_freq_stats(event)
if self.current_brush:
pixmap = self.scaled_freq_pixmap.copy()
self.current_brush.draw_marker(event.x(), event.y(), pixmap,
self.frequency_scale)
if event.buttons() & QtCore.Qt.MouseButton.LeftButton:
self.handle_freq_modify(event)
# We use the pre-computed scaled pixmap and mark the brush on it
# before displaying
self.render_freq(pixmap)
def handle_freq_stats(self, event):
"""Given an event, show frequency image stats.
The assumption made here is that the QLabel is exactly the size of the
image.
"""
pos = event.pos()
x, y = pos.x(), pos.y()
x, y = int(x/self.frequency_scale), int(y/self.frequency_scale)
r, c = y, x
r = np.clip(r, 0, self.frequency_array_magnitude.shape[0] - 1)
c = np.clip(c, 0, self.frequency_array_magnitude.shape[1] - 1)
value = self.frequency_array_magnitude[r, c]
msg = "X:%d Y:%d Value:%d" % (x, y, value)
self.ui.freq_info_label.setText(msg)
def eventFilter(self, obj, event):
"Call to handle relevant events."
if obj == self.ui.image_label:
if event.type() == QtCore.QEvent.MouseMove:
self.handle_image_move(event)
return True
elif obj == self.ui.freq_label:
if not self.ui.freq_label.isEnabled():
return False
if event.type() == QtCore.QEvent.MouseMove:
self.handle_freq_move(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonPress:
if event.button() == QtCore.Qt.MouseButton.LeftButton:
self.handle_freq_modify(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
if event.button() == QtCore.Qt.MouseButton.LeftButton:
if self.current_brush:
self.recompute_spatial_image()
return True
return QObject.eventFilter(self, obj, event)
def handle_freq_modify(self, event):
"Handle an event which will modify the frequency image."
if self.current_brush is not None:
x, y = event.x(), event.y()
x /= self.frequency_scale
y /= self.frequency_scale
h, w = self.frequency_array_magnitude.shape
magnitude = self.frequency_array_magnitude
angle = self.frequency_array_angle
self.current_brush.apply(x, y, magnitude, angle)
self.set_freq_image_magnitude(self.frequency_array_magnitude)
self.render_freq()
def show_brush(self):
"Show the brush dialog box."
d = BrushDialog(self, self.current_brush)
d.exec_()
if d.get_brush():
self.current_brush = d.get_brush()
def remove_brush(self):
"Deselcts a brush."
self.current_brush = None
self.render_freq()
def recompute_spatial_image(self):
"""Recompute the spatial image from the frequency image and render it.
This function just launches a thread to do the task.
"""
magnitude = self.frequency_array_magnitude
angle = self.frequency_array_angle
self.ift_thread = IFTThread(magnitude, angle)
self.ift_thread.ift_done.connect(self.ift_done_recv)
# To prevent multiple threads from modifying the images,
# we disable the frequency label while one thread is working
self.ui.freq_label.setEnabled(False)
self.ift_thread.start()
def ift_done_recv(self, array):
"The reciever for the ift_done signal"
self.spatial_image[:, :, 0] = array
self.set_yuv_image(self.spatial_image)
self.ui.freq_label.setEnabled(True)
def save_spatial(self):
"Save the spatial domain image."
if self.spatial_image is None:
QtGui.QMessageBox.information(self, "Error", "No Image to Save")
return
filters = "Image Files (*.png)"
filename = QtGui.QFileDialog.getSaveFileName(self, "Save Image",
filter=filters)[0]
if not filename:
return
if not filename.lower().endswith('.png'):
filename += '.png'
arr = util.yuv_to_rgb(self.spatial_image)
image = util.numpy_to_qimage(arr)
success = image.save(filename)
if not success:
msg = "Could not save image at the location."
QtGui.QMessageBox.information(self, "Error", msg)
def save_both(self):
"Save image and its transofrm."
if self.spatial_image is None or \
self.frequency_array_magnitude is None:
QtGui.QMessageBox.information(self, "Error", "No Image to Save")
return
filters = "Image Files (*.png)"
filename = QtGui.QFileDialog.getSaveFileName(self, "Save Image",
filter=filters)[0]
if not filename:
return
if not filename.lower().endswith('.png'):
filename += '.png'
arr = util.yuv_to_rgb(self.spatial_image)
r, c, ch = arr.shape
out = np.zeros((r, c*2, ch), dtype=arr.dtype)
out[:, :c, :] = arr
freq_img = util.fft_to_qimage(self.frequency_array_magnitude)
freq_arr = util.qimage_to_numpy(freq_img)
out[:, c:, :] = freq_arr
image = util.numpy_to_qimage(out)
success = image.save(filename)
if not success:
msg = "Could not save image at the location."
QtGui.QMessageBox.information(self, "Error", msg)
def show_about(self):
"Display the about dialog."
d = AboutDialog(self)
d.exec_()
def show_website(self):
"Open the website in a browser."
QtGui.QDesktopServices.openUrl("http://fredo-editor.github.io")
def new_image(self):
"Shows a dialog to create a new blank image."
d = NewImageDialog()
d.exec_()
if d.get_size():
w, h = d.get_size()
array = np.zeros((h, w, 3), dtype=np.uint8)
self.load_image_from_array(array)
def link_zoom(self):
"Ensures that both images are at the same scale."
if self.ui.action_linked_zoom.isChecked():
self.is_zoom_linked = True
self.spatial_scale = 1.0
self.invalidate_image_scale()
self.render_image()
self.frequency_scale = 1.0
self.invalidate_freq_scale()
self.render_freq()
else:
self.is_zoom_linked = False
def run():
app = QApplication(sys.argv)
editor = EditorMainWindow()
editor.show()
sys.exit(app.exec_())
if __name__ == '__main__':
run()
|
{"hexsha": "5c2927be7fd796dc0da9eae2ae4657bc05193d7e", "size": 16609, "ext": "py", "lang": "Python", "max_stars_repo_path": "fredo/editor/main.py", "max_stars_repo_name": "yasiupl/FreDo", "max_stars_repo_head_hexsha": "73bdc380dd82df171fe63998f0affa092e30759a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-08-21T08:43:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T16:16:59.000Z", "max_issues_repo_path": "fredo/editor/main.py", "max_issues_repo_name": "yasiupl/FreDo", "max_issues_repo_head_hexsha": "73bdc380dd82df171fe63998f0affa092e30759a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-25T10:16:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-11T19:14:01.000Z", "max_forks_repo_path": "fredo/editor/main.py", "max_forks_repo_name": "yasiupl/FreDo", "max_forks_repo_head_hexsha": "73bdc380dd82df171fe63998f0affa092e30759a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-29T06:15:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T16:42:28.000Z", "avg_line_length": 33.0198807157, "max_line_length": 79, "alphanum_fraction": 0.6207477873, "include": true, "reason": "import numpy", "num_tokens": 3499}
|
"""
This module deals with controlling what is displayed on the screen.
The two major features of this module are switching to the alternative screen and clearing what is currently on the screen.
The [alternative screen](https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer) is a feature of most terminals that allows an application to switch to a screen that has no scrollback, allowing it to draw whatever it wants to the terminal without interfering with the scrollback of the normal screen. This is most useful for full-screen applications, such as vim or emacs. Ansillary allows you to enter the alternative screen with the [`alternative`](@ref) function:
```julia-repl
julia> Screen.alternative() do
println("No scrollback!")
read(stdin, UInt8)
end
```
Ansillary will set raw mode (also known as [non-canonical mode](https://www.gnu.org/software/libc/manual/html_node/Canonical-or-Not.html)) when [`alternative`](@ref) is called, as this is almost always what is wanted; to avoid this, use the non-exported [`alternative!`](@ref) and [`standard!`](@ref) functions.
It is also possible to set only raw mode, using the [`raw`](@ref) function. The main benefits of raw mode are that the input stream is not line buffered, allowing one byte to be read at a time (which in turn allows the application to respond to key presses), and that the input is not printed directly to the output, allowing the application to handle printable input in its own way (so that it can implement vim-style keybindings, for example).
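For example, here is a minimal sketch of reading a single key press in raw mode (it assumes `stdin` is attached to a terminal):
```julia-repl
julia> Screen.raw() do
           read(stdin, UInt8)
       end
```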
To clear the screen Ansillary provides the [`clear!`](@ref) function, as well as the [`Area`](@ref) types for specifying which parts of the screen need clearing.
This short script
```julia
print("Some line...")
move!(Left(3))
clear!(FromCursorForward())
print("!")
```
will result in `Some line!` being printed to the terminal.
This module also provides the [`size`](@ref) function as a slightly nicer wrapper around `displaysize`.
"""
module Screen
import ..Cursor
import ..TERMINAL
using REPL.Terminals: CSI, raw!
export All,
CurrentLine,
FromCursorBack,
FromCursorDown,
FromCursorForward,
FromCursorUp,
alternative,
clear!,
raw,
size
"""
Permanently activate the alternative screen.
Use [`standard!`](@ref) to switch back to the standard screen.
This function **does not** set the terminal to raw mode, so that must be done manually where necessary.
!!! note
You should prefer using [`alternative`](@ref) where possible as it's very easy to accidentally leave the terminal on the alternative screen using this method.
"""
alternative!(terminal = TERMINAL[]) = print(terminal.out_stream, CSI, "?1049h")
"""
Permanently activate the standard screen.
"""
standard!(terminal = TERMINAL[]) = print(terminal.out_stream, CSI, "?1049l")
"""
Temporarily activate the alternative screen for the duration of the function.
This function also sets the terminal to raw mode as it is rare that you'll need the alternative screen but not raw mode. If the alternative screen is needed without setting raw mode, use [`alternative!`](@ref) and [`standard!`](@ref) directly.
"""
function alternative(f, terminal = TERMINAL[])
raw(terminal) do
alternative!(terminal)
try
f()
finally
standard!(terminal)
end
end
end
_clear!(terminal, code) = print(terminal.out_stream, CSI, code)
"""
An area of the screen.
See the documentation on its subtypes for more details:
* [`All`](@ref)
* [`CurrentLine`](@ref)
* [`FromCursorBack`](@ref)
* [`FromCursorForward`](@ref)
* [`FromCursorDown`](@ref)
* [`FromCursorUp`](@ref)
"""
abstract type Area end
"""
Clear an area of the screen.
See subtypes of [`Area`](@ref) for more details.
"""
clear!(area::Area) = clear!(TERMINAL[], area)
"""
Clear the entire screen.
"""
struct All <: Area end
clear!(terminal, ::All) = _clear!(terminal, "2J")
"""
Clear the line that the cursor is currently on.
"""
struct CurrentLine <: Area end
clear!(terminal, ::CurrentLine) = _clear!(terminal, "2K")
"""
Clear from the start of the current line up to, and including, the cursor.
"""
struct FromCursorBack <: Area end
clear!(terminal, ::FromCursorBack) = _clear!(terminal, "1K")
"""
Clear from the cursor up to the end of the line.
"""
struct FromCursorForward <: Area end
clear!(terminal, ::FromCursorForward) = _clear!(terminal, "K")
"""
Clear from the cursor up to the end of the line and all lines below the cursor.
"""
struct FromCursorDown <: Area end
clear!(terminal, ::FromCursorDown) = _clear!(terminal, "J")
"""
Clear from the start of the current line up to the cursor and any lines above the cursor.
"""
struct FromCursorUp <: Area end
clear!(terminal, ::FromCursorUp) = _clear!(terminal, "1J")
"""
Temporarily set the terminal to raw mode for the duration of the function.
If switching to raw mode permanently is required use `REPL.Terminals.raw!`.
"""
function raw(f, terminal = TERMINAL[])
raw!(terminal, true)
try
f()
finally
raw!(terminal, false)
end
end
struct Size
rows::UInt32
columns::UInt32
end
"""
Get the current size of the terminal.
See also: `displaysize`.
"""
function size(terminal = TERMINAL[])
(rows, columns) = displaysize(terminal.out_stream)
Size(rows, columns)
end
end # module
|
{"hexsha": "e6213ed2dea457542b8e4007d6864d8334c95698", "size": 5353, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Screen.jl", "max_stars_repo_name": "Sean1708/Ansillary.jl", "max_stars_repo_head_hexsha": "999eba97593350ab23abb74ebff3ab8f44e356e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-29T20:12:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T20:12:01.000Z", "max_issues_repo_path": "src/Screen.jl", "max_issues_repo_name": "Sean1708/Ansillary.jl", "max_issues_repo_head_hexsha": "999eba97593350ab23abb74ebff3ab8f44e356e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Screen.jl", "max_forks_repo_name": "Sean1708/Ansillary.jl", "max_forks_repo_head_hexsha": "999eba97593350ab23abb74ebff3ab8f44e356e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5885714286, "max_line_length": 498, "alphanum_fraction": 0.7164206987, "num_tokens": 1287}
|
"""
Example demonstrating the usage for the linear-fit scenario.
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'../src')
from mcmc import MCMC
import matplotlib
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
#----------------------------------------------------------
class Anim(MCMC):
"""
Extends the original MCMC class to sample the parameters of a linear model.
Parameters
-----------
MCMC (Class): Parent MCMC class.
m (Float): Fiducial value of the slope of the linear data.
c (Float): Fiducial value of the intercept of the linear data.
RedStd (Float): Half-width of the uniform noise added to the linear data.
"""
def __init__(self, m=5.0, c=25.0, RedStd=15.0, alpha=1.5, delay=0.001):
"""
Instantiates the class by synthetically generating data.
"""
MCMC.__init__(self, TargetAcceptedPoints=5000, \
NumberOfParams=2, Mins=[0.0,20.0], Maxs=[10.0,30.0], SDs=[1.0,2.0], alpha=alpha,\
write2file=True, outputfilename='chain.mcmc', randomseed=250192)
self.m = m
self.c = c
self.X=np.linspace(-10, 10, 25)
self.delta = np.random.uniform(low=-1*RedStd, high=RedStd, size=len(self.X))
self.Y = (m*self.X + c) + self.delta
self.delay = delay
#----------------------------------------------------------
def FittingFunction(self, Params):
"""
Parametric form of the model.
Parameters
----------
Params (1d array): Numpy array containing values of the parameters.
Returns
-------
model values (y = mx + c)
"""
return Params[0]*self.X + Params[1]
#----------------------------------------------------------
def chisquare(self, Params):
"""
Computes Chi-square.
Parameters
----------
Params (1d array): Numpy array containing values of the parameters.
Returns
-------
chi square, i.e. np.sum(((Y - model) / delta)**2).
"""
kisquare = ((self.Y-self.FittingFunction(Params))/self.delta)**2
return np.sum(kisquare)
#----------------------------------------------------------
def MainChain(self):
"""
Runs the chain.
Returns
-------
Acceptance rate.
"""
f, axarr = plt.subplots(1, 2, figsize=(16,7))
axarr[1].set_xlim(4, 6)
axarr[1].set_ylim(22, 28)
axarr[1].set_xlabel('$\mathtt{m}$', fontsize=22)
axarr[1].set_ylabel('$\mathtt{c}$', fontsize=22)
# Initialising the chain
OldStep = self.FirstStep()
Oldchi2 = self.chisquare(OldStep)
Bestchi2 = Oldchi2
# Preparing output file
# if self.write2file:
# outfile = open(self.outputfilename,'w')
# writestring = '%1.6f \t'*self.NumberOfParams
# Initialising multiplicity and accepted number of points.
multiplicity = 0
acceptedpoints = 0
xlist=[]
ylist=[]
i=0
# Chain starts here...
# for i in range(self.NumberOfSteps):
while True:
i += 1
if acceptedpoints == self.TargetAcceptedPoints:
break
multiplicity += 1
# Generating next step and its chi-square
NewStep = self.NextStep(OldStep)
Newchi2 = self.chisquare(NewStep)
# Checking whether the new step is to be accepted.
GoodPoint = self.MetropolisHastings(Oldchi2,Newchi2)
# Updating step scale using a threshold chi-square.
if Newchi2<2*len(self.Y):
self.CovMat = self.alpha*np.diag(self.SD**2)
if GoodPoint:
# Updating best chi-square so far in the chain.
if Newchi2<Bestchi2:
BestStep = NewStep
Bestchi2 = Newchi2
print "Best chi-square and step so far: ", Bestchi2, NewStep
acceptedpoints += 1
multiplicity = 0
# Updating number of accepted points.
axarr[0].clear()
axarr[0].set_xlim(-10, 10)
axarr[0].set_ylim(-50, 100)
axarr[0].errorbar(self.X, self.Y, self.delta, color='k', ms=8, ls='', marker='s')
axarr[0].plot(self.X, self.FittingFunction(OldStep), 'k', ls='-', lw=2)
axarr[0].set_title('$\mathtt{Step:\ %i,\ AccPoints: %i}$'%(i+1, acceptedpoints), fontsize=22)
xlist.append(OldStep[0])
ylist.append(OldStep[1])
axarr[1].plot(xlist[-3:-1], ylist[-3:-1], 'k', lw=0.2)
axarr[1].set_title('$\mathtt{m=%1.3f,\ c=%1.3f,\ \chi^2=%1.3f}$'%(OldStep[0], OldStep[1], Oldchi2), fontsize=22)
axarr[0].set_xlabel('$\mathtt{X}$', fontsize=22)
axarr[0].set_ylabel('$\mathtt{Y}$', fontsize=22)
plt.pause(self.delay)
plt.draw()
# Updating the old step.
OldStep = NewStep
Oldchi2 = Newchi2
else:
continue
# Writing Best chi-square of the full chain and the acceptance ratio.
print "Best chi square: %1.5f"%Bestchi2
print "Acceptance Ratio: %1.5f"%(float(acceptedpoints)/i)
axarr[0].clear()
axarr[0].set_xlim(-10, 10)
axarr[0].set_ylim(-50, 100)
axarr[0].errorbar(self.X, self.Y, self.delta, color='k', ms=8, ls='', marker='s')
axarr[0].plot(self.X, self.FittingFunction(BestStep), 'k', ls='-', lw=2)
axarr[0].set_title('$\mathtt{Step:\ %i,\ AccPoints: %i}$'%(i+1, acceptedpoints), fontsize=22)
axarr[1].plot(BestStep[0], BestStep[1], 'or', ms=12, label='$\mathtt{BestFit}$')
axarr[1].plot(self.m, self.c, 'og', ms=12, label='$\mathtt{Fiducial}$')
axarr[1].legend(loc=1, fontsize=18, numpoints=1)
axarr[1].set_title('$\mathtt{m=%1.3f,\ c=%1.3f,\ \chi^2=%1.3f}$'%(BestStep[0], BestStep[1], Bestchi2), fontsize=22)
axarr[0].set_xlabel('$\mathtt{X}$', fontsize=22)
axarr[0].set_ylabel('$\mathtt{Y}$', fontsize=22)
plt.show()
return float(acceptedpoints)/i
#----------------------------------------------------------
#==============================================================================
if __name__=="__main__":
if len(sys.argv) == 1:
alpha = 1.0
delay = 0.001
elif len(sys.argv) == 3:
alpha = float(sys.argv[1])
delay = float(sys.argv[2])
else:
print "Syntax: python anim.py <alpha> <delay>"
sys.exit()
co = Anim(alpha=alpha, delay=delay)
co.MainChain()
|
{"hexsha": "de66933c75708c9269eb026b37a3bb21f8dcaf0c", "size": 5757, "ext": "py", "lang": "Python", "max_stars_repo_path": "animation/anim.py", "max_stars_repo_name": "creativeishu/pychain", "max_stars_repo_head_hexsha": "c9b6a90510af6c8456bc0611117f4bc8e7661940", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "animation/anim.py", "max_issues_repo_name": "creativeishu/pychain", "max_issues_repo_head_hexsha": "c9b6a90510af6c8456bc0611117f4bc8e7661940", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "animation/anim.py", "max_forks_repo_name": "creativeishu/pychain", "max_forks_repo_head_hexsha": "c9b6a90510af6c8456bc0611117f4bc8e7661940", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8115942029, "max_line_length": 117, "alphanum_fraction": 0.6077818308, "include": true, "reason": "import numpy", "num_tokens": 1856}
|
#!/usr/bin/env python
"""
Example: inductive conformal regression using DecisionTreeRegressor
"""
# Authors: Henrik Linusson
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.datasets import load_boston
from nonconformist.base import RegressorAdapter
from nonconformist.icp import IcpRegressor
from nonconformist.nc import RegressorNc, AbsErrorErrFunc, RegressorNormalizer
# -----------------------------------------------------------------------------
# Setup training, calibration and test indices
# -----------------------------------------------------------------------------
data = load_boston()
idx = np.random.permutation(data.target.size)
train = idx[:int(idx.size / 3)]
calibrate = idx[int(idx.size / 3):int(2 * idx.size / 3)]
test = idx[int(2 * idx.size / 3):]
# -----------------------------------------------------------------------------
# Without normalization
# -----------------------------------------------------------------------------
# Train and calibrate
# -----------------------------------------------------------------------------
underlying_model = RegressorAdapter(DecisionTreeRegressor(min_samples_leaf=5))
nc = RegressorNc(underlying_model, AbsErrorErrFunc())
icp = IcpRegressor(nc)
icp.fit(data.data[train, :], data.target[train])
icp.calibrate(data.data[calibrate, :], data.target[calibrate])
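# (Background: an inductive conformal regressor computes nonconformity scores --
# here absolute errors |y - y_hat| -- on the calibration set and uses their
# (1 - significance) quantile as the half-width of every prediction interval,
# so without normalization the 'size' column below is constant.)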
# -----------------------------------------------------------------------------
# Predict
# -----------------------------------------------------------------------------
prediction = icp.predict(data.data[test, :], significance=0.1)
header = ['min','max','truth','size']
size = prediction[:, 1] - prediction[:, 0]
table = np.vstack([prediction.T, data.target[test], size.T]).T
df = pd.DataFrame(table, columns=header)
print(df)
# -----------------------------------------------------------------------------
# With normalization
# -----------------------------------------------------------------------------
# Train and calibrate
# -----------------------------------------------------------------------------
underlying_model = RegressorAdapter(DecisionTreeRegressor(min_samples_leaf=5))
normalizing_model = RegressorAdapter(KNeighborsRegressor(n_neighbors=1))
normalizer = RegressorNormalizer(underlying_model, normalizing_model, AbsErrorErrFunc())
nc = RegressorNc(underlying_model, AbsErrorErrFunc(), normalizer)
icp = IcpRegressor(nc)
icp.fit(data.data[train, :], data.target[train])
icp.calibrate(data.data[calibrate, :], data.target[calibrate])
# -----------------------------------------------------------------------------
# Predict
# -----------------------------------------------------------------------------
prediction = icp.predict(data.data[test, :], significance=0.1)
header = ['min','max','truth','size']
size = prediction[:, 1] - prediction[:, 0]
table = np.vstack([prediction.T, data.target[test], size.T]).T
df = pd.DataFrame(table, columns=header)
print(df)
|
{"hexsha": "96018e7d8f87b16d44b78d402b9f336f049f5839", "size": 3015, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/icp_regression_tree.py", "max_stars_repo_name": "smazzanti/nonconformist", "max_stars_repo_head_hexsha": "5ed072c82ae6d923eb6063a6a5c8fa664fbf729c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 301, "max_stars_repo_stars_event_min_datetime": "2015-03-18T01:47:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T14:50:55.000Z", "max_issues_repo_path": "examples/icp_regression_tree.py", "max_issues_repo_name": "smazzanti/nonconformist", "max_issues_repo_head_hexsha": "5ed072c82ae6d923eb6063a6a5c8fa664fbf729c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2016-07-25T21:36:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T00:39:07.000Z", "max_forks_repo_path": "examples/icp_regression_tree.py", "max_forks_repo_name": "smazzanti/nonconformist", "max_forks_repo_head_hexsha": "5ed072c82ae6d923eb6063a6a5c8fa664fbf729c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 91, "max_forks_repo_forks_event_min_datetime": "2015-03-19T08:25:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T08:31:38.000Z", "avg_line_length": 41.875, "max_line_length": 88, "alphanum_fraction": 0.5227197347, "include": true, "reason": "import numpy", "num_tokens": 556}
|
using LinearAlgebra
# degenerate cases (x₁ + x₂ + ⋯ + xₘ)¹
@test [multiexponents(1,1)...] == [[1]]
@test [multiexponents(2,1)...] == [[1,0],[0,1]]
@test [multiexponents(3,1)...] == [[1,0,0],[0,1,0],[0,0,1]]
@test hcat([multiexponents(10,1)...]...) == Matrix{Float64}(I, 10, 10)
# degenerate cases (x₁ + x₂ + ⋯ + xₘ)⁰
@test [multiexponents(1,0)...] == [[0]]
@test [multiexponents(2,0)...] == [[0,0]]
@test [multiexponents(3,0)...] == [[0,0,0]]
@test [multiexponents(10,0)...] == [zeros(Int, 10)]
# degenerate cases (x₁)ⁿ
@test [multiexponents(1,1)...] == [[1]]
@test [multiexponents(1,2)...] == [[2]]
@test [multiexponents(1,3)...] == [[3]]
@test [multiexponents(1,10)...] == [[10]]
# general cases
@test [multiexponents(3,2)...] == [[2,0,0],[1,1,0],[1,0,1],[0,2,0],[0,1,1],[0,0,2]]
@test [multiexponents(2,3)...] == [[3,0],[2,1],[1,2],[0,3]]
@test [multiexponents(2,2)...] == [[2,0],[1,1],[0,2]]
@test [multiexponents(3,3)...] == [[3,0,0],[2,1,0],[2,0,1],[1,2,0],[1,1,1],[1,0,2],[0,3,0],[0,2,1],[0,1,2],[0,0,3]]
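# the number of multiexponents of m variables with total degree n is
# binomial(n + m - 1, n); e.g. binomial(3 + 3 - 1, 3) == 10, matching the last case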
|
{"hexsha": "141f72bf1127ca08a70b667546aeca6e8b8e3902", "size": 1016, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/multinomials.jl", "max_stars_repo_name": "Sedictious/Combinatorics.jl", "max_stars_repo_head_hexsha": "fa6c6a893ad5a419ffa56b3aacf64c1053cbc507", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 165, "max_stars_repo_stars_event_min_datetime": "2016-09-13T17:00:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T02:29:52.000Z", "max_issues_repo_path": "test/multinomials.jl", "max_issues_repo_name": "Sedictious/Combinatorics.jl", "max_issues_repo_head_hexsha": "fa6c6a893ad5a419ffa56b3aacf64c1053cbc507", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 86, "max_issues_repo_issues_event_min_datetime": "2016-08-25T18:09:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T12:58:42.000Z", "max_forks_repo_path": "test/multinomials.jl", "max_forks_repo_name": "Sedictious/Combinatorics.jl", "max_forks_repo_head_hexsha": "fa6c6a893ad5a419ffa56b3aacf64c1053cbc507", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2016-10-29T00:18:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-11T10:21:39.000Z", "avg_line_length": 39.0769230769, "max_line_length": 115, "alphanum_fraction": 0.5255905512, "num_tokens": 464}
|
/*
MIT License
Copyright (c) 2020 Group of Electronic Technology and Communications. University of A Coruna.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#define BOOST_NO_CXX11_SCOPED_ENUMS
#include <boost/asio.hpp>
#include <boost/asio/serial_port.hpp>
#include <boost/system/error_code.hpp>
#include <boost/system/system_error.hpp>
#include <boost/bind.hpp>
#include <boost/assert.hpp>
#include <boost/property_tree/xml_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/foreach.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/date_time/gregorian/gregorian.hpp>
#include "boost/thread/mutex.hpp"
#include "boost/thread/thread.hpp"
#include <ros/ros.h>
#include <sensor_msgs/Joy.h>
#include "ros/console.h"
#include <geometry_msgs/PoseWithCovarianceStamped.h>
#define STEP_SET 2
#define STEP_WALK 3
#define STEP_STOP 4
#define degreesToRadians(angleDegrees) (angleDegrees * M_PI / 180.0)
#define radiansToDegrees(angleRadians) (angleRadians * 180.0 / M_PI)
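// Publishes a scripted sequence of poses (a "route") on /gtec/kfpos,
// advancing through set/walk/stop steps on one-shot timers once a joystick
// button press starts the route.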
class GazeboGuideRTLS
{
public:
GazeboGuideRTLS();
private:
void joyCallback(const sensor_msgs::Joy::ConstPtr& joy);
void publish(double x, double y, double z);
void currentStepEnded(void);
void startNextStep(void);
ros::NodeHandle mHandle, mNodeHandle;
int mStartButton;
bool mCanPublish, mCanStart;
std::string routeFile;
ros::Publisher mPosPublisher;
ros::Subscriber mJoySubscriber;
boost::mutex publish_mutex_;
ros::Timer mTimer, mStepTimer;
double mLinearFactor, mAngularFactor;
typedef struct
{
int type;
double time;
double x;
double y;
double z;
} route_step_t;
std::vector<route_step_t> mSteps;
int mCurrentStep;
double mInitTime, mEndTime;
double mCurrentX, mCurrentY, mCurrentZ, mCurrentAngle, mWalkSpeed, mRunSpeed;
};
GazeboGuideRTLS::GazeboGuideRTLS():
mHandle("~"),
mStartButton(5),
mLinearFactor(0.5),
mAngularFactor(2.0),
mWalkSpeed(0.85),
mRunSpeed(4.0)
{
mHandle.param("start_button", mStartButton, mStartButton);
mHandle.getParam("route_file", routeFile);
mCanStart = false;
mCurrentStep = 0;
//Load the XML route
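//Illustrative route document (element and attribute names inferred from the
//parsing below; the values are made up). Note that the "route_file" parameter
//holds the XML content itself, parsed from a stringstream, not a file path:
//  <route>
//    <set x="0.0" y="0.0" z="0.0"/>
//    <walk x="2.0" y="1.0" z="0.0"/>
//    <stop time="1.5"/>
//    <walk x="4.0" y="1.0" z="0.0"/>
//  </route>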
try {
//************************************
//Load the subscription configuration
//************************************
boost::property_tree::ptree routeTree;
std::stringstream ssRoute;
ssRoute << routeFile;
boost::property_tree::read_xml(ssRoute, routeTree);
BOOST_FOREACH(const boost::property_tree::ptree::value_type & v, routeTree.get_child("route")) {
if (v.first.compare("set") == 0) {
route_step_t routeStep;
routeStep.type = STEP_SET;
routeStep.x = v.second.get<double>("<xmlattr>.x", 1.0);
routeStep.y = v.second.get<double>("<xmlattr>.y", 1.0);
routeStep.z = v.second.get<double>("<xmlattr>.z", 1.0);
mSteps.push_back(routeStep);
} else if (v.first.compare("walk") == 0) {
route_step_t routeStep;
routeStep.type = STEP_WALK;
routeStep.x = v.second.get<double>("<xmlattr>.x", 1.0);
routeStep.y = v.second.get<double>("<xmlattr>.y", 1.0);
routeStep.z = v.second.get<double>("<xmlattr>.z", 1.0);
mSteps.push_back(routeStep);
} else if (v.first.compare("stop") == 0) {
route_step_t routeStep;
routeStep.type = STEP_STOP;
routeStep.time = v.second.get<double>("<xmlattr>.time", 1.0);
mSteps.push_back(routeStep);
}
}
} catch (boost::exception const &ex) {
ROS_INFO("Route read error");
}
ROS_INFO("ROUTE LOADED");
ROS_INFO("Num Steps: %d", (int) mSteps.size());
mPosPublisher = mNodeHandle.advertise<geometry_msgs::PoseWithCovarianceStamped>("/gtec/kfpos", 1000);
mJoySubscriber = mNodeHandle.subscribe<sensor_msgs::Joy>("joy", 10, &GazeboGuideRTLS::joyCallback, this);
}
void GazeboGuideRTLS::currentStepEnded(void) {
ROS_INFO("Num Steps: %d", (int) mSteps.size());
mCurrentStep += 1;
if (mCurrentStep < mSteps.size()) {
startNextStep();
}
}
void GazeboGuideRTLS::startNextStep(void) {
route_step_t step = mSteps[mCurrentStep];
if (step.type == STEP_SET) {
//Just set the initial position, so we do not have to subscribe to the position topic
ROS_INFO("STEP SET");
mCurrentX = step.x;
mCurrentY = step.y;
mCurrentZ = step.z;
mStepTimer = mNodeHandle.createTimer(ros::Duration(0), boost::bind(&GazeboGuideRTLS::currentStepEnded, this), true);
return;
} else if (step.type == STEP_WALK) {
double toDestX = step.x - mCurrentX;
double toDestY = step.y - mCurrentY;
double toDestZ = step.z - mCurrentZ;
double distanceToMove = sqrt(pow(toDestX, 2) + pow(toDestY, 2) + pow(toDestZ, 2));
double time = distanceToMove / mWalkSpeed;
mCurrentX = step.x;
mCurrentY = step.y;
mCurrentZ = step.z;
//Publish new position
publish(step.x, step.y, step.z);
//Timer to next step
mStepTimer = mNodeHandle.createTimer(ros::Duration(time), boost::bind(&GazeboGuideRTLS::currentStepEnded, this), true);
return;
} else if (step.type == STEP_STOP) {
//Timer to next step
mStepTimer = mNodeHandle.createTimer(ros::Duration(step.time), boost::bind(&GazeboGuideRTLS::currentStepEnded, this), true);
return;
}
}
void GazeboGuideRTLS::joyCallback(const sensor_msgs::Joy::ConstPtr& joy)
{
if (joy->buttons[mStartButton]) {
if (!mCanStart) {
ROS_INFO("Starting ROUTE -----------");
mCanStart = true;
startNextStep();
}
}
}
void GazeboGuideRTLS::publish(double x, double y, double z)
{
geometry_msgs::PoseWithCovarianceStamped pose;
pose.pose.pose.position.x = x;
pose.pose.pose.position.y = y;
pose.pose.pose.position.z = z;
pose.pose.pose.orientation.x = 0;
pose.pose.pose.orientation.y = 0;
pose.pose.pose.orientation.z = 0;
pose.pose.pose.orientation.w = 1; // identity orientation; w = 0 would be an invalid quaternion
pose.header.frame_id = "world";
pose.header.stamp = ros::Time::now();
mPosPublisher.publish(pose);
}
int main(int argc, char** argv)
{
ros::init(argc, argv, "gazebo_guide_rtls");
GazeboGuideRTLS gazebo_guide_rtls;
ros::spin();
}
|
{"hexsha": "d69e1c10e137997b05fbeb104903b5855b8dc8f5", "size": 7124, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/publishers/GazeboGuideRTLS.cpp", "max_stars_repo_name": "valentinbarral/gazebo2ros", "max_stars_repo_head_hexsha": "8b1db92056af1009cf60daf44503e695d6465532", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/publishers/GazeboGuideRTLS.cpp", "max_issues_repo_name": "valentinbarral/gazebo2ros", "max_issues_repo_head_hexsha": "8b1db92056af1009cf60daf44503e695d6465532", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/publishers/GazeboGuideRTLS.cpp", "max_forks_repo_name": "valentinbarral/gazebo2ros", "max_forks_repo_head_hexsha": "8b1db92056af1009cf60daf44503e695d6465532", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-09-22T06:44:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-22T06:44:36.000Z", "avg_line_length": 29.9327731092, "max_line_length": 128, "alphanum_fraction": 0.6960976979, "num_tokens": 1916}
|
// Copyright (C) 2018 Thejaka Amila Kanewala, Marcin Zalewski, Andrew Lumsdaine.
// Boost Software License - Version 1.0 - August 17th, 2003
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
// Authors: Thejaka Kanewala
// Andrew Lumsdaine
#include <boost/tuple/tuple.hpp>
#include <limits.h>
#include <boost/graph/agm/model/general_orderings.hpp>
#include <boost/graph/agm/model/eagm_buckets.hpp>
#include <boost/graph/agm/util/eagm_config.hpp>
#include <boost/thread/locks.hpp>
#include <am++/make_mpi_datatype.hpp>
#include <am++/am++.hpp>
#include <am++/mpi_transport.hpp>
#include <boost/graph/util/utils.hpp>
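// Minimal single-process stand-in for the AM++ runtime used by the EAGM
// bucket tests below: instead of real message passing it counts receives
// and all-gather syncs, tracks an activity count, and provides per-thread
// and per-NUMA-domain barriers.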
template<typename work_item>
class test_runtime {
private:
const int dummy;
int threads;
int numa_domains;
std::atomic<int> receives;
std::atomic<std::uint64_t> active_count;
std::atomic<int> activity_cnt;
amplusplus::transport& trans;
MPI_Datatype dt;
int num_syncs;
boost::shared_ptr<amplusplus::detail::barrier> t_bar;
std::vector< boost::shared_ptr<amplusplus::detail::barrier> > numa_thread_barriers;
int threads_per_numa;
public:
test_runtime(int _nthreads,
int _nnuma,
amplusplus::transport& _trans): dummy((amplusplus::register_mpi_datatype<work_item>(), 0)),
threads(_nthreads),
numa_domains(_nnuma),
receives(0),
active_count(0),
trans(_trans),
dt(amplusplus::get_mpi_datatype(amplusplus::detail::get_type_info<work_item>())),
num_syncs(0){
t_bar.reset(new amplusplus::detail::barrier(threads));
numa_thread_barriers.resize(numa_domains);
assert((threads % numa_domains) == 0); // for simplicity in tests
threads_per_numa = threads / numa_domains;
for(int i=0; i < numa_domains; ++i) {
numa_thread_barriers[i].reset(new amplusplus::detail::barrier(threads_per_numa));
}
}
void send(const work_item& wi, int tid) {
receives++;
std::cout << "R: (" << std::get<0>(wi) << ", "
<< std::get<1>(wi) << ")" << std::endl;
decrease_activity_count(tid, 1);
}
void initialize_per_thread(int tid){}
void register_receiver(std::function<void(const work_item&, int)> funcrev) {
// set message handler
}
int find_numa_node(int tid) {
return (tid / threads_per_numa);
}
int get_nnuma_nodes() {
return numa_domains;
}
inline int get_nthreads() {
return threads;
}
inline int get_nranks() {
return 1;
}
void do_all_gather(void* _ptosend,
int sendcnt,
void* _precv,
int recvcnt) {
++num_syncs;
MPI_Allgather(_ptosend, sendcnt, dt, _precv, recvcnt, dt, MPI_COMM_WORLD);
}
void increase_activity_count(int tid, uint64_t v) {
active_count.fetch_add(v);
}
void decrease_activity_count(int tid, uint64_t v) {
active_count.fetch_sub(v);
}
uint64_t get_activity_count() {
return active_count.load();
}
int get_nthreads_for_numa_domain(int tid) {
return threads_per_numa;
}
int get_thread_index_in_numa_domain(int tid) {
return (tid % threads_per_numa);
}
void wait_for_numa_domain_threads_to_reach_here(int tid) {
// numa domain
int d = tid / threads_per_numa;
numa_thread_barriers[d]->wait();
}
bool is_main_thread_in_numa_domain(int tid) {
return ((tid%threads_per_numa) == 0);
}
bool begin_epoch(int tid){
  trans.begin_epoch();
  return true;
}
bool is_epoch_completed(int tid) {
  assert(false); // not used by these tests
  return false;
}
void end_epoch() {
trans.end_epoch();
}
unsigned long end_epoch_with_value(const unsigned long& _read_value) {
return trans.end_epoch_with_value(_read_value);
}
void pull_work(int tid) {}
//void synchronize(){}
void wait_for_threads_to_reach_here(int tid){
t_bar->wait();
}
bool is_main_thread(int tid) {
return (tid == 0);
}
//void synchronize_numa_domain_threads(int tid){}
int get_num_receives() {
return receives.load();
}
int get_num_syncs() {
return num_syncs;
}
};
typedef std::tuple<uint64_t, uint64_t> work_item_t;
void init_data(std::vector<work_item_t>& wis) {
work_item_t w1(0,5);
work_item_t w2(0,1);
work_item_t w3(0,9);
work_item_t w4(0,25);
work_item_t w5(0,15);
work_item_t w6(0,2);
work_item_t w7(0,34);
work_item_t w8(0,16);
work_item_t w9(0,8);
work_item_t w10(0,9);
work_item_t w11(8, 45);
wis.push_back(w1);
wis.push_back(w2);
wis.push_back(w3);
wis.push_back(w4);
wis.push_back(w5);
wis.push_back(w6);
wis.push_back(w7);
wis.push_back(w8);
wis.push_back(w9);
wis.push_back(w10);
wis.push_back(w11);
}
template<typename buckets>
class thread_executor {
private:
int nthreads;
buckets& all_bkts;
public:
thread_executor(int _n,
buckets& _all) : nthreads(_n),
all_bkts(_all){}
void operator()(int numa,
int tid,
std::vector<work_item_t>& wis) {
for (typename std::vector<work_item_t>::size_type i = tid ;
i < wis.size(); i+= nthreads) {
work_item_t& wi = wis[i];
all_bkts.push(wi, tid);
}
}
};
template<typename T1,
typename T2,
typename T3,
typename T4>
boost::graph::agm::eagm_configs<T1, T2, T3, T4> create_eagm_configs(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4) {
typedef boost::graph::agm::eagm_configs<T1, T2, T3, T4> eagm_config_t;
eagm_config_t config(_t1,
_t2,
_t3,
_t4);
return config;
}
template<typename T1,
typename T2,
typename T3,
typename T4>
boost::graph::agm::eagm_configs<T1, T2, T3, T4, boost::graph::agm::pq_container>
create_eagm_config_w_node_pq(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4) {
typedef boost::graph::agm::eagm_configs<T1, T2, T3, T4,
boost::graph::agm::pq_container> eagm_config_t;
eagm_config_t config(_t1,
_t2,
_t3,
_t4);
return config;
}
template<typename T1,
typename T2,
typename T3,
typename T4>
boost::graph::agm::eagm_configs<T1, T2, T3, T4,boost::graph::agm::buffer_container,
boost::graph::agm::pq_container>
create_eagm_config_w_numa_pq(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4) {
typedef boost::graph::agm::eagm_configs<T1, T2, T3, T4,
boost::graph::agm::buffer_container,
boost::graph::agm::pq_container> eagm_config_t;
eagm_config_t config(_t1,
_t2,
_t3,
_t4);
return config;
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void test_eagm(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4,
amplusplus::transport& trans,
int _numsyncs=1,
bool process=false) {
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_configs<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
debug("Setting threads to 1" );
trans.set_nthreads(1);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(1, 1, trans);
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
int tid = 0;
amplusplus::detail::push_thread_id_obj tobj(tid);
rt.begin_epoch(0);
for (int i=0; i < wis.size(); i++) {
e.push(wis[i], tid);
}
e.print();
std::cout << "[===============================================================]" << std::endl;
if (process) {
e.process_global_buckets(tid);
std::cout << "Num receives : " << rt.get_num_receives() << ", wis size : " << wis.size()
<< ", num buckets (syncs) : " << rt.get_num_syncs()
<< std::endl;
assert(_numsyncs == rt.get_num_syncs());
assert((rt.get_num_receives() == wis.size()) && "Number of receives is not equal to sends");
}
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void test_eagm_with_node_pq(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4,
amplusplus::transport& trans,
int _numsyncs=1,
bool process=false) {
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4, boost::graph::agm::pq_container> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_config_w_node_pq<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
debug("Setting threads to 1" );
trans.set_nthreads(1);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(1, 1, trans);
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
int tid = 0;
amplusplus::detail::push_thread_id_obj tobj(tid);
rt.begin_epoch(0);
for (int i=0; i < wis.size(); i++) {
e.push(wis[i], tid);
}
e.print();
std::cout << "[===============================================================]" << std::endl;
if (process) {
e.process_global_buckets(tid);
std::cout << "Num receives : " << rt.get_num_receives() << ", wis size : " << wis.size()
<< ", num buckets (syncs) : " << rt.get_num_syncs()
<< std::endl;
assert(_numsyncs == rt.get_num_syncs());
assert((rt.get_num_receives() == wis.size()) && "Number of receives is not equal to sends");
}
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void test_eagm_with_numa_pq(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4,
amplusplus::transport& trans,
int _numsyncs=1,
bool process=false) {
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4,
boost::graph::agm::buffer_container,
boost::graph::agm::pq_container> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_config_w_numa_pq<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
debug("Setting threads to 1" );
trans.set_nthreads(1);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(1, 1, trans);
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
int tid = 0;
amplusplus::detail::push_thread_id_obj tobj(tid);
rt.begin_epoch(0);
for (int i=0; i < wis.size(); i++) {
e.push(wis[i], tid);
}
e.print();
std::cout << "[===============================================================]" << std::endl;
if (process) {
e.process_global_buckets(tid);
std::cout << "Num receives : " << rt.get_num_receives() << ", wis size : " << wis.size()
<< ", num buckets (syncs) : " << rt.get_num_syncs()
<< std::endl;
assert(_numsyncs == rt.get_num_syncs());
assert((rt.get_num_receives() == wis.size()) && "Number of receives is not equal to sends");
}
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void test_eagm_multiple_threads(T1 _t1,
                                T2 _t2,
                                T3 _t3,
                                T4 _t4,
                                int _threads,
                                amplusplus::transport& trans) {
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_configs<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(_threads, 1, trans); // test_runtime's constructor requires a transport
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
for (int i=0; i < wis.size(); i++) {
e.push(wis[i], (i%_threads));
}
e.print();
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void test_eagm_multiple_numa_domains(T1 _t1,
                                     T2 _t2,
                                     T3 _t3,
                                     T4 _t4,
                                     int _threads,
                                     int _numa,
                                     amplusplus::transport& trans) {
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_configs<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(_threads, _numa, trans); // test_runtime's constructor requires a transport
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
for (int i=0; i < wis.size(); i++) {
e.push(wis[i], (i%_threads));
}
e.print();
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void test_eagm_multiple_numa_domains_multiple_threads(T1 _t1,
                                                      T2 _t2,
                                                      T3 _t3,
                                                      T4 _t4,
                                                      int _threads,
                                                      int _numa,
                                                      amplusplus::transport& trans) {
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_configs<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(_threads, _numa, trans); // test_runtime's constructor requires a transport
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
for (int i=0; i < wis.size(); i++) {
e.push(wis[i], i%_threads);
}
//e.print();
}
template<typename buckets, typename runtime>
class ampp_thread_executor {
private:
int nthreads;
buckets& all_bkts;
runtime& rt;
bool process;
public:
ampp_thread_executor(int _n,
buckets& _all,
runtime& _rt,
bool _p) : nthreads(_n),
all_bkts(_all),
rt(_rt),
process(_p){}
void operator()(int tid,
std::vector<work_item_t>& wis) {
// amplusplus::detail::push_thread_id_obj tobj(tid);
AMPLUSPLUS_WITH_THREAD_ID(tid) {
for (typename std::vector<work_item_t>::size_type i = tid ;
i < wis.size(); i+= nthreads) {
work_item_t& wi = wis[i];
// debug("Pushing workitems in tid : ", tid);
all_bkts.push(wi, tid);
}
if (process) {
// debug("Processing in tid : ", tid);
all_bkts.process_global_buckets(tid);
}
}
}
};
template<typename ordered_config_t>
void mt_general_test_eagm(ordered_config_t& config,
int _threads,
int _numa,
amplusplus::transport& trans,
int _numsyncs,
bool process) {
CHAOTIC_ORDERING_T ch;
debug("Setting threads to : ", _threads);
trans.set_nthreads(_threads);
typedef test_runtime<work_item_t> runtime_t;
runtime_t rt(_threads, _numa, trans);
debug("Runtime initialized ...");
typedef boost::graph::agm::eagm_traits<typename ordered_config_t::global_ordering_t,
typename ordered_config_t::node_ordering_t,
typename ordered_config_t::numa_ordering_t,
typename ordered_config_t::thread_ordering_t,
ordered_config_t,
runtime_t> eagm_traits_t;
typedef boost::graph::agm::eagm_buckets<work_item_t,
typename eagm_traits_t::root_level_bucket_trait> all_buckets_t;
all_buckets_t e(config, rt);
std::vector<work_item_t> wis;
init_data(wis);
ampp_thread_executor<all_buckets_t, runtime_t> te(_threads, e, rt, process);
boost::scoped_array<boost::thread> threads(new boost::thread[_threads - 1]);
for (int i = 0; i < _threads - 1; ++i) {
boost::thread thr(boost::ref(te), i+1, wis);
threads[i].swap(thr);
}
te(0, wis);
for (int i = 0; i < (_threads - 1); ++i)
threads[i].join();
trans.set_nthreads(1);
e.print();
if (process) {
std::cout << "Num receives : " << rt.get_num_receives() << ", wis size : " << wis.size()
<< ", num buckets (syncs) : " << rt.get_num_syncs()
<< std::endl;
assert(_numsyncs == rt.get_num_syncs());
assert((rt.get_num_receives() == wis.size()) && "Number of receives is not equal to sends");
}
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void mt_test_eagm(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4,
int _threads,
int _numa,
amplusplus::transport& trans,
int _numsyncs,
bool process = false) {
std::string name = "[Global:";/* + std::string(_t1.name()) +
+ ", Node:" + std::string(_t2.name()) + ", Numa:" + std::string(_t3.name())
+ ", Thread:" + std::string(_t4.name()) + "]";*/
std::cout << "Running : "
<< name << ", Threads : " << _threads
<< ", Numa domains : " << _numa
<< ", Process : " << process
<< std::endl;
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4> ordered_config_t;
CHAOTIC_ORDERING_T ch;
ordered_config_t config = create_eagm_configs<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
mt_general_test_eagm(config, _threads, _numa, trans, _numsyncs, process);
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void mt_test_eagm_with_node_pq(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4,
int _threads,
int _numa,
amplusplus::transport& trans,
int _numsyncs,
bool process = false) {
std::string name = "[Global:";/* + std::string(_t1.name()) +
+ ", Node:" + std::string(_t2.name()) + ", Numa:" + std::string(_t3.name())
+ ", Thread:" + std::string(_t4.name()) + "]";*/
std::cout << "Running With Node PQ : "
<< name << ", Threads : " << _threads
<< ", Numa domains : " << _numa
<< ", Process : " << process
<< std::endl;
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4,
boost::graph::agm::pq_container> ordered_config_t;
ordered_config_t config = create_eagm_config_w_node_pq<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
mt_general_test_eagm(config, _threads, _numa, trans, _numsyncs, process);
}
template<typename T1,
typename T2,
typename T3,
typename T4>
void mt_test_eagm_with_numa_pq(T1 _t1,
T2 _t2,
T3 _t3,
T4 _t4,
int _threads,
int _numa,
amplusplus::transport& trans,
int _numsyncs,
bool process = false) {
std::string name = "[Global:";/* + std::string(_t1.name()) +
+ ", Node:" + std::string(_t2.name()) + ", Numa:" + std::string(_t3.name())
+ ", Thread:" + std::string(_t4.name()) + "]";*/
std::cout << "Running With Node PQ : "
<< name << ", Threads : " << _threads
<< ", Numa domains : " << _numa
<< ", Process : " << process
<< std::endl;
typedef boost::graph::agm::eagm_configs<T1,
T2,
T3,
T4,
boost::graph::agm::buffer_container,
boost::graph::agm::pq_container> ordered_config_t;
ordered_config_t config = create_eagm_config_w_numa_pq<T1,
T2,
T3,
T4>(_t1,
_t2,
_t3,
_t4);
mt_general_test_eagm(config, _threads, _numa, trans, _numsyncs, process);
}
int main(int argc, char* argv[]) {
debug("Creating environment ...");
amplusplus::environment env = amplusplus::mpi_environment(argc, argv);
debug("Creating transport ...");
amplusplus::transport trans = env.create_transport();
debug("Transport created ...");
CHAOTIC_ORDERING_T ch;
DIJKSTRA_ORDERING_T dj;
DIJKSTRA_ORDERING_STD_PQ_T dj_std_pq;
DELTA_ORDERING_T delta(10);
DELTA_ORDERING_T delta1(20);
DELTA_ORDERING_T delta2(5);
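// Each test helper takes four ordering arguments which map, in order, to the
// (global, node, numa, thread) ordering levels of an eagm_configs (see
// eagm_traits above). For example, test_eagm(delta, ch, ch, dj, trans) uses
// delta ordering globally, chaotic ordering at the node and numa levels, and
// dijkstra ordering within each thread.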
std::cout << "Starting tests ..." << std::endl;
test_eagm(ch, ch, ch, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, ch, ch, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, ch, ch, dj, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, delta2, ch, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, delta2, ch, dj, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, ch, delta2, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, delta2, dj, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(ch, delta, delta2, dj_std_pq, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta1, delta, delta2, dj_std_pq, trans);
std::cout << "======================================================" << std::endl;
test_eagm(ch, ch, ch, dj_std_pq, trans);
std::cout << "======================================================" << std::endl;
test_eagm(delta, ch, ch, dj_std_pq, trans);
std::cout << "======================================================" << std::endl;
test_eagm(ch, delta, ch, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(ch, dj, ch, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm_with_node_pq(ch, dj_std_pq, ch, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm(ch, delta, ch, dj, trans);
std::cout << "======================================================" << std::endl;
test_eagm(ch, ch, dj, ch, trans);
std::cout << "======================================================" << std::endl;
test_eagm_with_numa_pq(ch, ch, dj_std_pq, ch, trans);
std::cout << "======================================================" << std::endl;
int threads = 4;
//test_eagm_multiple_threads(ch, ch, ch, ch, threads);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_threads(ch, ch, ch, dj_std_pq, threads);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_threads(delta, ch, ch, dj_std_pq, threads);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_threads(ch, delta, ch, ch, threads);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_threads(ch, delta, ch, dj, threads);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_threads(ch, ch, dj, ch, threads);
//std::cout << "======================================================" << std::endl;
int numa_domains = 2;
//test_eagm_multiple_numa_domains(ch, ch, ch, ch, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_numa_domains(ch, ch, dj, ch, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_numa_domains(delta, ch, ch, dj_std_pq, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_numa_domains(ch, delta, ch, ch, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_numa_domains(ch, delta, dj, ch, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//test_eagm_multiple_numa_domains(ch, ch, delta, dj, threads, numa_domains);
numa_domains = 2;
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(ch, ch, delta, dj, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(ch, ch, ch, ch, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(ch, ch, ch, dj, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(delta, ch, ch, dj, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(ch, delta, ch, ch, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(ch, delta, ch, dj, threads, numa_domains);
//std::cout << "======================================================" << std::endl;
//mt_test_eagm(ch, ch, dj, ch, threads, numa_domains);
// With processing
//test_eagm(ch, ch, ch, ch, trans, 1, true);
//std::cout << "======================================================" << std::endl;
//test_eagm(delta, ch, ch, ch, trans,5, true);
//std::cout << "======================================================" << std::endl;
//test_eagm(delta, ch, ch, dj, trans, 5, true);
//std::cout << "======================================================" << std::endl;
//test_eagm(delta, delta2, ch, ch, trans, 5, true);
//std::cout << "======================================================" << std::endl;
//test_eagm(delta, delta2, ch, dj, trans, 5, true);
// std::cout << "======================================================" << std::endl;
// test_eagm(delta, ch, delta2, ch, trans, 5, true);
// std::cout << "======================================================" << std::endl;
//test_eagm(delta, delta2, dj, ch, trans, 5, true);
//std::cout << "======================================================" << std::endl;
//test_eagm(ch, delta, delta2, dj_std_pq, trans, 1, true);
//std::cout << "======================================================" << std::endl;
//test_eagm(delta1, delta, delta2, dj_std_pq, trans, 3, true);
// std::cout << "======================================================" << std::endl;
//test_eagm(ch, ch, ch, dj_std_pq, trans, 1, true);
// std::cout << "======================================================" << std::endl;
//test_eagm(delta, ch, ch, dj_std_pq, trans, 5, true);
// std::cout << "======================================================" << std::endl;
//test_eagm(ch, delta, ch, ch, trans, 1, true);
// std::cout << "======================================================" << std::endl;
// test_eagm(ch, dj, ch, ch, trans, 1, true);
// std::cout << "======================================================" << std::endl;
// test_eagm_with_node_pq(ch, dj_std_pq, ch, ch, trans, 1, true);
// std::cout << "======================================================" << std::endl;
// test_eagm(ch, delta, ch, dj, trans, 1, true);
// std::cout << "======================================================" << std::endl;
// test_eagm(ch, ch, dj, ch, trans, 1, true);
// std::cout << "======================================================" << std::endl;
// test_eagm_with_numa_pq(ch, ch, dj_std_pq, ch, trans, 1, true);
// std::cout << "======================================================" << std::endl;
threads = 8;
numa_domains = 1;
int iterations = 3;
for (int i=0; i < iterations; ++i) {
for (int j=0; j < 4; ++j) {
threads = std::pow(2, j);
numa_domains = 1;
mt_test_eagm(ch, ch, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, delta, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, ch, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta, ch, ch, dj, threads, numa_domains, trans, 5, true);
mt_test_eagm(ch, delta, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, delta, ch, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, dj, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta, ch, ch, ch, threads, numa_domains, trans,5, true);
mt_test_eagm(delta, delta2, ch, ch, threads, numa_domains, trans, 5, true);
mt_test_eagm(delta, delta2, ch, dj, threads, numa_domains, trans, 5, true);
mt_test_eagm(delta, ch, delta2, ch, threads, numa_domains, trans, 5, true);
mt_test_eagm(delta, delta2, dj, ch, threads, numa_domains, trans, 5, true);
mt_test_eagm(ch, delta, delta2, dj_std_pq, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta1, delta, delta2, dj_std_pq, threads, numa_domains, trans, 3, true);
mt_test_eagm(ch, ch, ch, dj_std_pq, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta, ch, ch, dj_std_pq, threads, numa_domains, trans, 5, true);
mt_test_eagm(ch, delta, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, dj, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, delta, ch, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, dj, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm_with_node_pq(ch, dj_std_pq, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm_with_numa_pq(ch, ch, dj_std_pq, ch, threads, numa_domains, trans, 1, true);
}
for (int j=1; j < 4; ++j) {
threads = std::pow(2, j);
numa_domains = 2;
mt_test_eagm(ch, ch, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, delta, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, ch, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta, ch, ch, dj, threads, numa_domains, trans, 5, true);
mt_test_eagm(ch, delta, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, delta, ch, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, dj, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta, ch, ch, ch, threads, numa_domains, trans,5, true);
mt_test_eagm(delta, delta2, ch, ch, threads, numa_domains, trans, 5, true);
mt_test_eagm(delta, delta2, ch, dj, threads, numa_domains, trans, 5, true);
mt_test_eagm(delta, ch, delta2, ch, threads, numa_domains, trans, 5, true);
mt_test_eagm(delta, delta2, dj, ch, threads, numa_domains, trans, 5, true);
mt_test_eagm(ch, delta, delta2, dj_std_pq, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta1, delta, delta2, dj_std_pq, threads, numa_domains, trans, 3, true);
mt_test_eagm(ch, ch, ch, dj_std_pq, threads, numa_domains, trans, 1, true);
mt_test_eagm(delta, ch, ch, dj_std_pq, threads, numa_domains, trans, 5, true);
mt_test_eagm(ch, delta, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, dj, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, delta, ch, dj, threads, numa_domains, trans, 1, true);
mt_test_eagm(ch, ch, dj, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm_with_node_pq(ch, dj_std_pq, ch, ch, threads, numa_domains, trans, 1, true);
mt_test_eagm_with_numa_pq(ch, ch, dj_std_pq, ch, threads, numa_domains, trans, 1, true);
}
}
}
|
{"hexsha": "5db8e2de42f6b7708a23ba2fd03b134af1b83605", "size": 42175, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/graph_parallel/drivers/tests/static_eagm_tests.cpp", "max_stars_repo_name": "thejkane/AGM", "max_stars_repo_head_hexsha": "4d5cfe9522461d207ceaef7d90c1cd10ce9b469c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-09-03T10:22:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-03T10:22:04.000Z", "max_issues_repo_path": "libs/graph_parallel/drivers/tests/static_eagm_tests.cpp", "max_issues_repo_name": "thejkane/AGM", "max_issues_repo_head_hexsha": "4d5cfe9522461d207ceaef7d90c1cd10ce9b469c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/graph_parallel/drivers/tests/static_eagm_tests.cpp", "max_forks_repo_name": "thejkane/AGM", "max_forks_repo_head_hexsha": "4d5cfe9522461d207ceaef7d90c1cd10ce9b469c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5864592864, "max_line_length": 128, "alphanum_fraction": 0.4703260225, "num_tokens": 9920}
|
(*-------------Description ------------------------------------------------------
This file implements maps on lists. Here we define a function that computes the
range of a function on a list l. We also define a one-one (injective) function
predicate and its boolean counterpart.
The following notions are defined in this file:
img f l : range set of f on list l
one_one_on l f : f is one-one on l
one_one_onb l f : boolean counterpart of (one_one_on l f)
Lemma one_one_onP (l:list A) (f: A->B)(Hl: NoDup l):
reflect (one_one_on l f)(one_one_onb l f).
Furthermore, we have results relating the cardinality of the domain and range
for various kinds of functions (many-one / one-one).
---------------------------------------------------------------------------------*)
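(* Illustrative example (assuming A and B are instantiated with an ordType
   such as nat): for the successor function S,
     img S (1 :: 2 :: 2 :: 3 :: nil) = (2 :: 3 :: 4 :: nil)
   i.e., img computes the image of the list as an ordered, duplicate-free
   set (the repeated 2 is collapsed by add). *)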
Require Export SetReflect.
Require Export OrdList.
Require Export OrdSet.
Set Implicit Arguments.
Section Set_maps.
Context { A B: ordType }.
Lemma EM_A: forall x y: A, x=y \/ x<>y.
Proof. eauto. Qed.
Lemma EM_B: forall x y:B, x=y \/ x<>y.
Proof. eauto. Qed.
Fixpoint img (f:A->B) (l:list A): list B:= match l with
| nil => nil
| a1::l1 => add (f a1) (img f l1)
end.
Lemma IsOrd_img (f:A->B) (l:list A): IsOrd (img f l).
Proof. { induction l. simpl. constructor. simpl. eauto. } Qed.
Lemma NoDup_img (f:A->B) (l:list A): NoDup (img f l).
Proof. cut (IsOrd (img f l)). eauto. apply IsOrd_img. Qed.
Lemma img_intro1(f: A->B)(l: list A)(a:A)(y: B): In y (img f l)-> In y (img f (a::l)).
Proof. simpl. eapply set_add_intro1. Qed.
Lemma img_intro2 (f: A->B)(l: list A)(x:A): In x l -> In (f x) (img f l).
Proof. { induction l. simpl. tauto.
cut (x=a \/ x <> a).
intro H;destruct H as [Hl | Hr].
{ intro H. rewrite Hl. simpl. eapply set_add_intro2. auto. }
{ intro H. cut (In x l). intro H1. eapply img_intro1;eauto.
eapply in_inv2;eauto. } eauto. } Qed.
Lemma img_elim (f:A->B) (l: list A)(a0:A)(fa:B): In (fa) (img f (a0::l))->
fa = f(a0) \/ In fa (img f l).
Proof. simpl. eapply set_add_elim. Qed.
Lemma img_elim2 (f:A->B) (l: list A)(a0:A)(fa:B): In (fa) (img f (a0::l))->
fa <> f(a0) -> In fa (img f l).
Proof. simpl. eapply set_add_elim2. Qed.
Lemma img_elim3 (f:A->B)(l:list A)(a:A): ~ In a l -> In (f a) (img f l) ->
(exists y, In y l /\ f a = f y).
Proof. { intros H H1. induction l. inversion H1.
assert (H2: ~ In a l). intro H2; apply H. simpl;tauto.
cut ( f a = f a0 \/ f a <> f a0 ). intro H3; destruct H3 as [H3a | H3b]. exists a0.
split; auto. assert (H4: In (f a) (img f l)). eapply img_elim2.
eapply H1. exact H3b. assert (H5: exists y : A, In y l /\ f a = f y). eauto.
destruct H5 as [y0 H5]. exists y0. split; simpl. tauto. tauto.
eapply EM_B. } Qed.
Lemma img_elim4 (f: A->B)(l: list A)(b:B): In b (img f l)-> (exists a, In a l /\ b = f a).
Proof. { induction l.
{ simpl. tauto. }
{ intro H. apply img_elim in H as H1. destruct H1.
{ exists a. split;auto. }
{ apply IHl in H0 as H1.
destruct H1 as [a' H1]; destruct H1 as [H1 H2].
exists a'. split;auto. } } } Qed.
Hint Resolve IsOrd_img NoDup_img : core.
Hint Resolve img_intro1 img_intro2 img_elim: core.
Hint Resolve img_elim2 img_elim3 img_elim4: core.
Lemma funP (f: A->B)(x y: A): f x <> f y -> x <> y.
Proof. intros H H1. apply H;rewrite H1; auto. Qed.
Definition one_one (f: A->B): Prop:= forall x y, x <> y -> f x <> f y.
Lemma one_oneP1 (f:A->B): one_one f -> forall x y, f x = f y -> x =y.
Proof. { unfold one_one;intros H x y H1. elim (EM_A x y). tauto.
intro H2; absurd (f x = f y); auto. } Qed.
Hint Immediate one_oneP1: core.
Definition one_one_on (l: list A) (f: A-> B):Prop:= forall x y, In x l-> In y l -> x<>y -> f x <> f y.
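(* Illustrative: the successor function S is one_one_on any list of nats,
   whereas the constant function (fun _ => 0) is not one_one_on (1 :: 2 :: nil),
   since 1 <> 2 but both map to 0. (Assumes A and B instantiated with nat.) *)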
Lemma one_one_on_elim (l:list A)(f: A-> B): one_one_on l f ->
(forall x y, In x l-> In y l-> f x = f y -> x = y).
Proof. { unfold one_one_on. intros H x y H1 H2. elim (EM_A x y). tauto.
intros H3 H4. absurd (f x = f y); auto. } Qed.
Lemma one_one_on_intro(l:list A)(f: A-> B): (forall x y, In x l-> In y l-> f x = f y -> x = y) ->
(one_one_on l f).
Proof. { intros H. unfold one_one_on.
intros x y H1 H2 H3 H4. apply H3. auto. } Qed.
Lemma one_one_on_nil (f:A->B): one_one_on nil f.
Proof. unfold one_one_on. intros x y H H0 H1 H2. inversion H. Qed.
Lemma one_one_on_intro1(l:list A) (f: A->B)(a:A):
(~ In (f a) (img f l)) -> (one_one_on l f) -> one_one_on (a::l) f.
Proof. { unfold one_one_on. intros H H1.
intros x y H2 H3. destruct H2; destruct H3.
rewrite <- H0; rewrite <- H2. tauto.
rewrite <- H0. intros H3 H4. assert (H5: In (f a) (img f l)). rewrite H4.
apply img_intro2;auto. absurd (In (f a) (img f l)); assumption.
rewrite <- H2. intros H3 H4. absurd (In (f a) (img f l)). assumption.
rewrite <- H4. apply img_intro2;auto. apply H1; auto. } Qed.
Lemma one_one_on_elim1 (l:list A) (f: A->B)(a: A): one_one_on (a::l) f -> one_one_on l f.
Proof. { unfold one_one_on. intro H. intros x y H1 H2. eapply H; auto. } Qed.
Lemma one_one_on_elim2 (l:list A) (f: A->B)(a: A)(Hl: NoDup (a::l)):
one_one_on (a::l) f -> ~ In (f a)(img f l).
Proof. { unfold one_one_on. intros H H1.
assert (H2: (exists y, In y l /\ f a = f y)).
{ eapply img_elim3. intro H2; inversion Hl;contradiction. auto. }
destruct H2 as [b H2]; destruct H2 as [H2 H3].
eapply H with (x:=a)(y:=b); auto. intro H4. rewrite <- H4 in H2.
inversion Hl;contradiction. } Qed.
Hint Immediate one_one_on_nil one_one_on_elim one_one_on_elim1 one_one_on_elim2 : core.
Hint Immediate one_one_on_intro one_one_on_intro1: core.
Fixpoint one_one_onb (l: list A) (f: A->B): bool:=
match l with
|nil => true
| a1::l1 => (negb ( memb (f a1) (img f l1))) && (one_one_onb l1 f)
end.
Lemma one_one_onP (l:list A) (f: A->B)(Hl: NoDup l):
reflect (one_one_on l f)(one_one_onb l f).
Proof. { apply reflect_intro. split.
{ induction l.
{ unfold one_one_onb. reflexivity. }
{ intro H. simpl one_one_onb. apply /andP. split. cut (~ In (f a)(img f l)).
intro H1. assert (H2: memb (f a) (img f l) = false ). apply /membP.
auto. rewrite H2. simpl. reflexivity. eapply one_one_on_elim2.
apply Hl. auto. apply IHl.
eauto. eauto. } }
{ induction l.
{ auto. }
{ simpl. move /andP. intro H; destruct H as [H H1].
apply one_one_on_intro1.
intro H2. unfold negb in H.
replace (memb (f a) (img f l)) with true in H. inversion H.
symmetry; apply /membP; eauto. apply IHl. eauto. apply H1. } } } Qed.
(*--------- Some more properties of imgs-----------------------------------*)
Lemma one_one_img_elim (l: list A)(f: A->B)(x: A):
one_one f -> In (f x) (img f l) -> In x l.
Proof. { intros H H1. assert (H2: exists a, In a l /\ f x = f a). auto.
destruct H2 as [a H2]. destruct H2 as [H2 H3].
cut (x = a). intros; subst x; auto. eauto. } Qed.
Lemma img_subset (l s: list A)(f: A->B): l [<=] s -> (img f l) [<=] (img f s).
Proof. { intros H fx H1. assert (H2: exists x, In x l /\ fx = f x). auto.
destruct H2 as [x H2]. destruct H2 as [H2 H3]. subst fx; auto. } Qed.
Lemma img_size_less (l: list A)(f: A->B): |img f l| <= |l|.
Proof. { induction l.
{ simpl;auto. }
{ simpl. assert (H: (| add (f a) (img f l) |) <= S (| img f l |)).
auto. omega. } } Qed.
Lemma img_size_same (l: list A)(f: A->B): NoDup l -> one_one_on l f-> |l|=| img f l|.
Proof. { induction l.
{ simpl. auto. }
{ intros H H1.
assert (Hl: NoDup l). eauto.
assert (H1a: one_one_on l f). eauto.
assert (H2: (| l |) = (| img f l |)). auto.
simpl. assert (H3: ~ In (f a) (img f l)). auto.
rewrite H2; symmetry;auto. } } Qed.
Hint Resolve img_subset img_size_less img_size_same: core.
Lemma img_strict_less (l: list A)(f: A->B):
NoDup l -> (|img f l| < |l|) -> ~ one_one_on l f.
Proof. intros H H1 H2. assert(H3: |l|=| img f l|). auto. omega. Qed.
Hint Immediate one_one_img_elim img_strict_less : core.
Lemma one_one_on_intro2 (l: list A)(f: A->B):
NoDup l -> (|img f l| = |l|)-> one_one_on l f.
Proof. { induction l.
{ simpl; auto. }
{ intros H H0.
assert (Ha: NoDup l). eauto.
assert (Hb: ~ In a l ). auto.
assert (H1: |img f l| = |l|).
{ match_up (| img f l |) (| l |).
{ auto. }
{ assert ((| img f (a :: l) |) <b (| a :: l |)).
{ move /ltP in H1. apply /ltP. simpl.
cut ((| add (f a) (img f l) |) <= S (|img f l|)). omega.
auto. } by_conflict. }
{ assert (H2: |img f l| <= |l|). auto.
move /lebP in H2. auto. } }
assert (H2: one_one_on l f). auto.
assert (H3: ~ In (f a) (img f l)).
{ intro H3.
assert (H4: img f (a :: l) = (img f l)).
{ simpl. eapply add_same. auto. auto. }
rewrite H4 in H0. rewrite H1 in H0. simpl in H0. omega. } auto. } } Qed.
Lemma one_one_on_intro3 (l s: list A)(f: A-> B): s [<=] l -> one_one_on l f -> one_one_on s f.
Proof. intros H0 H1; unfold one_one_on; auto. Qed.
Hint Immediate one_one_on_intro2 one_one_on_intro3 : core.
(* ------------ set maps and set add interaction ------------------------ *)
Lemma img_add (a: A)(l: list A)(f: A-> B): img f (add a l) = add (f a) (img f l).
Proof. { apply set_equal;auto.
induction l.
{ simpl. auto. }
{ simpl.
assert (H: img f (add a l) = add (f a) (img f l)).
apply set_equal; auto.
destruct IHl as [IHl IHl1]. match_up a a0.
{ subst a. simpl. auto. }
{ simpl. auto. }
{ simpl. rewrite H. auto. } } } Qed.
Lemma img_same (l: list A) (f g: A->B): (forall x, In x l -> f x = g x)-> (img f l = img g l).
Proof. { induction l.
{ simpl; auto. }
{ intro h1. simpl. replace (g a) with (f a). replace (img g l) with (img f l).
auto. apply IHl. intros x h2. apply h1; auto. apply h1; auto. } } Qed.
Hint Resolve img_add img_same: core.
Lemma img_inter1 (l s: list A)(f: A-> B): img f (l [i] s) [<=] (img f l) [i] (img f s).
Proof. Admitted.
Lemma img_inter2 (l s: list A)(f: A-> B): one_one_on l f -> one_one_on s f->
img f (l [i] s) = (img f l) [i] (img f s).
Proof. Admitted.
Lemma img_union (l s: list A)(f: A-> B): img f (l [u] s) = (img f l) [u] (img f s).
Proof. Admitted.
Lemma img_diff (l s: list A)(f: A-> B): one_one_on l f -> one_one_on s f->
img f (l [\] s) = (img f l) [\] (img f s).
Proof. Admitted.
Hint Resolve img_inter1 img_inter2 img_union img_diff: core.
End Set_maps.
Hint Resolve IsOrd_img NoDup_img : core.
Hint Resolve img_intro1 img_intro2 img_elim: core.
Hint Resolve img_elim2 img_elim3 img_elim4 : core.
Hint Immediate one_oneP1: core.
Hint Immediate one_one_on_nil one_one_on_elim one_one_on_elim1 one_one_on_elim2 : core.
Hint Immediate one_one_on_intro one_one_on_intro1: core.
Hint Resolve one_one_onP: core.
Hint Resolve img_subset img_size_less img_size_same: core.
Hint Immediate one_one_img_elim img_strict_less : core.
Hint Immediate one_one_on_intro2 one_one_on_intro3 : core.
Hint Resolve img_add img_same: core.
Hint Resolve img_inter1 img_inter2 img_union img_diff: core.
Section Map_composition.
Context {A B C: ordType}.
(*------------------------- A --f--> B --g--> C --------------------------------*)
Lemma range_of_range (l:list A)(f: A->B)(g: B->C):
img g (img f l) = img ( fun x => g (f x)) l.
Proof. { assert (H: Equal (img g (img f l)) (img ( fun x => g (f x)) l) ).
{ unfold Equal.
split.
{ unfold Subset. intros c Hc.
assert (Hb: exists b, In b (img f l) /\ c = g b). auto.
destruct Hb as [b Hb]. destruct Hb as [Hb Hb1].
assert (Ha: exists a, In a l /\ b = f a). auto.
destruct Ha as [a Ha]. destruct Ha as [Ha Ha1].
rewrite Hb1. set (gf := (fun x : A => g (f x))).
rewrite Ha1.
assert (H: (g (f a)) = (gf a)). unfold gf. auto.
rewrite H. eapply img_intro2. auto. }
{ unfold Subset. intros c Hc.
assert (Ha: exists a, In a l /\ c = g (f a)). auto.
destruct Ha as [a Ha]. destruct Ha as [Ha1 Ha2].
subst c. auto. } } auto. } Qed.
Hint Resolve range_of_range: core.
End Map_composition.
Hint Resolve range_of_range: core.
Section Maps_on_A.
Context {A: ordType}.
(*----------Identity map and its properties ---------------------------------*)
Definition id:= fun (x:A)=> x.
Lemma id_is_identity1 (l:list A) : l [=] img id l.
Proof. { induction l.
{ simpl. auto. }
{ simpl. split.
{ intros x h. destruct h as [h | h].
subst a. unfold id. auto.
cut (In x (img id l)). auto. apply IHl. auto. }
{ unfold id. fold id. intros x h.
cut (x=a \/ In x (img id l)).
intro h1. destruct h1 as [h1a | h1b].
subst x. all: auto. cut (In x l). auto. apply IHl. auto. } } } Qed.
Lemma id_is_identity (l:list A)(hl: IsOrd l): l = img id l.
Proof. { induction l.
{ simpl. auto. }
{ apply set_equal. auto. auto.
simpl. replace (img id l) with l.
split.
{ intros x h. destruct h as [h | h].
subst a. unfold id. auto. unfold id. auto. }
{ unfold id. intros x h. cut (x=a \/ In x l).
intro h1. destruct h1 as [h1a | h1b].
subst x. all: auto. } eauto. } } Qed.
Hint Immediate id_is_identity id_is_identity1: core.
End Maps_on_A.
Hint Immediate id_is_identity id_is_identity1: core.
|
{"author": "Abhishek-TIFR", "repo": "List-Set", "sha": "f22e828ca348c8317a5235491e7e1dac848a691f", "save_path": "github-repos/coq/Abhishek-TIFR-List-Set", "path": "github-repos/coq/Abhishek-TIFR-List-Set/List-Set-f22e828ca348c8317a5235491e7e1dac848a691f/SetMaps.v"}
|