# --- src/log_utils.jl (xgdgsc/LogRoller.jl, MIT) ---
"""
Custom JSON serializer for log entries.
Handles Module types for now, more can be added later.
"""
struct LogEntrySerialization <: CommonSerialization end
show_json(io::StructuralContext, ::LogEntrySerialization, m::Module) = show_json(io, LogEntrySerialization(), string(m))
show_json(io::StructuralContext, ::LogEntrySerialization, ptr::Ptr) = show_json(io, LogEntrySerialization(), string(ptr))
show_json(io::StructuralContext, ::LogEntrySerialization, sv::Core.SimpleVector) = show_json(io, LogEntrySerialization(), [sv...])
show_json(io::StructuralContext, ::LogEntrySerialization, typ::DataType) = show_json(io, LogEntrySerialization(), string(typ))
function show_json(io::StructuralContext, ::LogEntrySerialization, level::Logging.LogLevel)
levelstr = (level == Logging.Debug) ? "Debug" :
(level == Logging.Info) ? "Info" :
(level == Logging.Warn) ? "Warn" :
(level == Logging.Error) ? "Error" :
"LogLevel($(level.level))"
show_json(io, LogEntrySerialization(), levelstr)
end
function show_json(io::StructuralContext, ::LogEntrySerialization, exception::Tuple{Exception,Any})
iob = IOBuffer()
Base.show_exception_stack(iob, [exception])
show_json(io, LogEntrySerialization(), String(take!(iob)))
end
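# Example (illustrative): render any supported value through the custom
# serialization, mirroring how `message_string` drives it below.
#   iob = IOBuffer()
#   JSON.show_json(iob, LogEntrySerialization(), Base)  # a Module
#   String(take!(iob))                                  # => "\"Base\""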
as_text(str::String) = str
function as_text(obj)
iob = IOBuffer()
    lim = LimitIO(iob, 4*1024) # fixed for now at a size large enough for most use cases
try
show(lim, "text/plain", obj)
catch ex
if isa(ex, LimitIOException)
# ignore and take what was printed
print(iob, "...")
else
rethrow()
end
end
String(take!(iob))
end
"""
IndexedLogEntry represents a log entry as a dictionary and its
indexable attributes in a form that is useful to a logging sink.
The index part contains metadata that are to be indexed. Event metadata
consists of attributes like level, module, filepath, line, job id,
process id, user id, etc. It also includes application specific
keywords that the originating piece of code wishes to index.
Keywords that should be considered as metadata are indicated via the
`indexable` constructor parameter.
What metadata can be indexed depends on the type of sink and whether
it has support to index certain types of attributes. Attributes that
the sink can not index are made part of the message itself for storage.
The message part can contain the following keys unless they are empty:
- `metadata`: event metadata that could not be indexed
- `message`: the log message string
- `keywords`: any keywords provided
Constructor parameters:
- `log`: Named tuple containing args to the handle_message method, e.g.: (level, message, _module, group, id, file, line, kwargs)
- `indexable`: list of names from `log` and `log.kwargs` that should be included in the index
"""
struct IndexedLogEntry
index::Dict{Symbol,Any}
message::Dict{Symbol,Any}
end
function IndexedLogEntry(log, indexable::Vector{Symbol}=[:level, :module, :filepath, :line])
index = Dict{Symbol, Any}()
metadata = Dict{Symbol,Any}()
keywords = Dict(log.kwargs)
log_prop_names = propertynames(log)
for name in log_prop_names
(name === :kwargs) && continue # skip the kwargs, deal with that separately
(name === :message) && continue # skip message, we are dealing with that separately
((name in indexable) ? index : metadata)[name] = getproperty(log, name)
end
    for name in collect(keys(keywords)) # collect first: we delete! from keywords below
(name in log_prop_names) && continue # avoid clobbering reserved names
if name in indexable
index[name] = keywords[name]
delete!(keywords, name)
end
end
message = Dict{Symbol,Any}()
messagestr = as_text(log.message)
isempty(messagestr) || (message[:message] = messagestr)
isempty(metadata) || (message[:metadata] = metadata)
isempty(keywords) || (message[:keywords] = keywords)
IndexedLogEntry(index, message)
end
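# Usage sketch (hypothetical values; `log` mirrors the `handle_message` args):
#   log = (level=Logging.Info, message="service started", _module=Main, group=:app,
#          id=:evt1, file="app.jl", line=42, kwargs=Dict(:user => "jdoe"))
#   entry = IndexedLogEntry(log, [:level, :file, :user])
#   entry.index    # Dict(:level => Info, :file => "app.jl", :user => "jdoe")
#   entry.message  # the message string plus the metadata that was not indexed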
message_string(entry::IndexedLogEntry, size_limit::Int, newline::Bool=false) = message_string(entry.message, size_limit, newline)
function message_string(message::Dict{Symbol,Any}, size_limit::Int, newline::Bool=false)
iob = IOBuffer()
lim = LimitIO(iob, size_limit)
try
JSON.show_json(lim, LogEntrySerialization(), message)
newline && write(lim, '\n')
catch ex
if isa(ex, LimitIOException)
if haskey(message, :keywords)
# strip off keywords (if any) and retry
delete!(message, :keywords)
return message_string(message, size_limit, newline)
elseif haskey(message, :message)
# strip off the message and retry (we retain only the metadata in the worst case)
delete!(message, :message)
return message_string(message, size_limit, newline)
end
end
rethrow(ex)
end
String(take!(iob))
end
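# Truncation fallback sketch (illustrative sizes; note the Dict is mutated):
#   msg = Dict{Symbol,Any}(:message => "x"^2000, :keywords => Dict(:a => 1))
#   message_string(msg, 128)  # drops :keywords first, then :message if the
#                             # JSON still exceeds 128 bytes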
# --- pysumma/pysumma/plot_sample.py (DavidChoi76/pysumma_alpha1, MIT) ---
from pysumma.pysumma.Plotting import Plotting
from pysumma.pysumma.hovmoller import hovmoller
from pysumma.pysumma.layers import layers
from pysumma.pysumma.spatial import spatial
# (4) Display Plotting.py
## 1 Display plots from summa_plot, created by Andrew Bennett from UW
import pandas as pd
import xarray as xr
import geopandas as gp
import matplotlib.pyplot as plt
from jupyterthemes import jtplot
#jtplot.style('gruvboxd')
jtplot.figsize(x=10,y=10)
### Case 1 - Read the output NetCDF file and create a dataset.
ds = xr.open_dataset('./basinRunoff_2007-2008_distributedTopmodel_1.nc')
### Case 2 - using spatial.py to display output with a shapefile.
gdf = gp.GeoDataFrame.from_file('./ReynoldsMountainEast.shp')
gdf.plot()
plt.show()
### Case 3 - using hovmoller.py to display a 2D plot of a variable over time or hru.
hovmoller(ds['scalarSWE'].isel(hru=slice(0,100)), xdim='hru', ydim='dayofyear')
plt.show()
### Case 4 - using layers.py to display layer-related variables such as snow and soil.
ds = xr.open_dataset('./basinRunoff_2007-2008_distributedTopmodel_1.nc').isel(hru=0)
layers(ds.isel(time=slice(0,500)), 'mLayerVolFracWat')
plt.show()
## 2 Plotting line curves of variables (1D, 2D)
import seaborn as sns
import xarray as xr
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
P_file = Plotting('D:\\pysumma\\pysumma_alpha\\pysumma\\pysumma\\BasinRunoff_1dRichards.nc')
plot_info = P_file.open_netcdf()
# Attribute = dict(Text_Info.attrs)
# Dimensions = Text_Info.dims
# Data_variables = Text_Info.data_vars
plot_1D = P_file.plot_1d(plot_info, 8)
plt.show()
plot_1D_hru = P_file.plot_1d_hru(plot_info, 0, 17)
plt.show()
plot_1D_hru = P_file.plot_1d_hru(plot_info, 0, 33)
plt.show()
plot_1D_layer = P_file.plot_1d_layer(plot_info, 0, 21, 14)
plt.show()
plot_2D = P_file.plot_2d(plot_info, 0, 21, 25, 14)
plt.show()
# --- src/lr_07.py (SqrtMinusOne/Digital_Signal_Processing, Apache-2.0) ---
from matplotlib import pyplot as plt, rcParams
from numpy import matlib
from scipy import signal
import json
import numpy as np
import warnings
warnings.filterwarnings("ignore")  # While matplotlib < 3.3, stem() keeps
                                   # recommending use_line_collection
def xcov(x, lags):
    # biased autocovariance estimate; symmetric in the lag, so use |lag|
    # (indexing with a negative lag directly would wrap around the array)
    mean = np.mean(x)
    return [
        np.sum([
            (x[n] - mean) * (x[n + abs(lag)] - mean)
            for n in range(len(x) - abs(lag))
        ])
        for lag in lags
    ]
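# Quick sanity check (illustrative): at zero lag the autocovariance equals the
# sum of squared deviations, i.e. len(x) * np.var(x).
#   _x = np.array([1.0, 2.0, 4.0])
#   assert np.isclose(xcov(_x, [0])[0], len(_x) * np.var(_x))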
rcParams['lines.linewidth'] = 1
# rcParams['scatter.edgecolors'] = 'r'
rcParams['axes.axisbelow'] = True
rcParams['axes.grid'] = True
print('Lab 7. Discrete signals')
if input('Load data from data.json? [1/0]: ') == '1':  # NB: bool(input(...)) is True for any non-empty string
    with open('data.json', 'r') as file:
        data = json.load(file)
    for key, value in data.items():  # Set global variables from the dictionary
        globals()[key] = value       # Don't do this in serious projects
B = np.array([B_1, B_2, B_3])
w = np.array([w_1, w_2, w_3])
A = np.array([a_1, a_2, a_3])
else:
    Nb = int(input('Nb = '))        # TEAM NUMBER
    N = int(input('N = '))          # SEQUENCE LENGTH
    T = float(input('T = '))        # SAMPLING PERIOD
    a = float(input('a = '))        # BASE OF THE DISCRETE EXPONENT
    C = int(input('C = '))          # AMPLITUDE OF THE DISCRETE HARMONIC SIGNAL
    w0 = float(input('w0 = '))      # FREQUENCY OF THE DISCRETE HARMONIC SIGNAL
    m = int(input('m = '))          # DELAY LENGTH
    U = int(input('U = '))          # PULSE AMPLITUDE
    n0 = int(input('n0 = '))        # PULSE START SAMPLE
    n_imp = int(input('n_imp = '))  # PULSE DURATION
    B = np.array(input('B = ').split(), dtype=float)  # VECTOR OF AMPLITUDES (space-separated; np.array(input(...)) alone gives a 0-d string array)
    w = np.array(input('w = ').split(), dtype=float)  # VECTOR OF FREQUENCIES
    A = np.array(input('A = ').split(), dtype=float)  # VECTOR OF LINEAR-COMBINATION COEFFICIENTS
    Mean = int(input('Mean = '))    # REQUIRED MEAN OF THE NOISE
    Var = int(input('Var = '))      # REQUIRED VARIANCE OF THE NOISE
print('Item 1. Digital unit impulse')
input('Press <ENTER> to plot the digital unit impulse')
n = np.arange(N)  # Discrete normalized time
nT = T * n        # Discrete non-normalized time
u0 = np.concatenate(([1], np.zeros(N-1)))
plt.subplot(1, 2, 1)
plt.gcf().canvas.set_window_title('Digital Unit Impulse')  # Window title
plt.stem(nT, u0, basefmt='')
plt.title('Digital Unit Impulse u0(nT)')
plt.xlabel('nT')
plt.subplot(1, 2, 2)
plt.stem(n, u0, basefmt='')
plt.title('Digital Unit Impulse u0(n)')
plt.xlabel('n')
plt.tight_layout()  # Otherwise the titles overlap
plt.show()
print('\n----------------------------------------')
print('Item 2. Digital unit step')
input('Press <ENTER> to plot the digital unit step')
u1 = np.ones(N)  # FIXME Maybe also show time below 0?
plt.subplot(1, 2, 1)
plt.gcf().canvas.set_window_title('Digital Unit Step')
plt.stem(nT, u1, basefmt='')
plt.title('Digital Unit Step u1(nT)')
plt.xlabel('nT')
plt.subplot(1, 2, 2)
plt.stem(n, u1, basefmt='')
plt.title('Digital Unit Step u1(n)')
plt.xlabel('n')
plt.show()
print('\n----------------------------------------')
print('Item 3. Discrete exponent')
input('Press <ENTER> to plot the discrete exponent')
x1 = a ** n
plt.subplot(1, 2, 1)
plt.gcf().canvas.set_window_title('Discrete Exponent')
plt.stem(nT, x1, basefmt='')
plt.title('Discrete exponent x1(nT)')
plt.xlabel('nT')
plt.subplot(1, 2, 2)
plt.stem(n, x1, basefmt='')
plt.title('Discrete exponent x1(n)')
plt.xlabel('n')
plt.show()
print('\n----------------------------------------')
print('Item 4. Discrete complex harmonic signal')
print('Press <ENTER> to plot the real and imaginary parts')
input('of the harmonic signal')
x2 = C * np.exp(1j * w0 * n)
plt.subplot(2, 1, 1)
plt.gcf().canvas.set_window_title('Discrete Harmonic Signal')
plt.title('Discrete Harmonic Signal: REAL [x2(n)]')
plt.stem(n, np.real(x2))
plt.subplot(2, 1, 2)
plt.title('Discrete Harmonic Signal: IMAG [x2(n)]')
plt.stem(n, np.imag(x2))
plt.tight_layout()
plt.show()
print('\n----------------------------------------')
print('Item 5. Delayed sequences')
input('Press <ENTER> to plot the delayed sequences')
u0_m = np.concatenate([np.zeros(m), u0[0:(N-m)]])
u1_m = np.concatenate([np.zeros(m), u1[0:(N-m)]])
x1_m = np.concatenate([np.zeros(m), x1[0:(N-m)]])
plt.subplot(3, 1, 1)
plt.gcf().canvas.set_window_title('Delayed Discrete Signal')
plt.stem(n, u0_m)
plt.title('Delayed Digital Unit Impulse u0(n-m)')
plt.subplot(3, 1, 2)
plt.stem(n, u1_m)
plt.title('Delayed Digital Unit Step u1(n-m)')
plt.subplot(3, 1, 3)
plt.stem(n, x1_m)
plt.title('Delayed Discrete Exponent x1(n-m)')
plt.tight_layout()
plt.show()
print('\n----------------------------------------')
print('Item 6. Discrete rectangular pulse')
input('Press <ENTER> to plot the discrete rectangular pulse')
# Python has no rectpuls
x3_1 = np.zeros(N)
x3_1[n0:n0+n_imp] = U
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('Discrete Rectangular Impulse')
plt.stem(n, x3_1)
plt.title('Discrete Rectangular Impulse x3 1(n)')
plt.xlabel('n')
plt.show()
print('\n----------------------------------------')
print('Item 7. Discrete triangular pulse')
input('Press <ENTER> to plot the discrete triangular pulse')
x4 = signal.convolve(x3_1, x3_1)  # Discrete triangular pulse
L = len(x4)             # Convolution length
n = np.array(range(L))  # Discrete normalized time
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('Discrete Triangular Impulse')
plt.title('Discrete Triangular Impulse x4(n)')
plt.stem(n, x4)
plt.xlabel('n')
plt.show()
print('\n----------------------------------------')
print('Item 8. Linear combination of discrete harmonic signals')
input('Press <ENTER> to plot the harmonic signals and their linear combination')
n = np.array(range(5*N - 1))
xi = np.multiply(matlib.repmat(B, len(n), 1), np.sin(matlib.asmatrix(n).transpose() * w))
ai = matlib.repmat(A, len(n), 1)
x5 = np.sum(np.multiply(ai, xi), axis=1)
plt.subplot(4, 1, 1)
plt.gcf().canvas.set_window_title('Discrete Harmonic Signals and their Linear Combination')
plt.stem(n, xi[:, 0])
plt.title('First Discrete Harmonic Signal')
plt.subplot(4, 1, 2)
plt.stem(n, xi[:, 1])
plt.title('Second Discrete Harmonic Signal')
plt.subplot(4, 1, 3)
plt.stem(n, xi[:, 2])
plt.title('Third Discrete Harmonic Signal')
plt.subplot(4, 1, 4)
plt.title('Linear Combination x5(n)')
plt.stem(n, x5)
plt.xlabel('n')
plt.tight_layout()
plt.show()
input('Press <ENTER> to print the MEAN VALUE, ENERGY and AVERAGE POWER of signal x5')
mean_x5 = np.mean(x5)
E = np.sum(np.square(x5))
P = np.sum(np.square(x5)) / len(x5)
print(f"mean_x5 = {mean_x5}, E = {E}, P = {P}")
print('\n----------------------------------------')
print('Item 9. Discrete harmonic signal with an exponential envelope')
input('Press <ENTER> to plot the harmonic signal with an exponential envelope')
n = np.array(range(N))  # DISCRETE NORMALIZED TIME
x = C * np.sin(w0 * n)  # DISCRETE HARMONIC SIGNAL
x6 = np.multiply(x, np.abs(a)**n)
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('Harmonic Signal with Exponential Envelope')
plt.stem(n, x6)
plt.title('Harmonic Signal with Exponential Envelope x6(n)')
plt.xlabel('n')
plt.show()
print('\n----------------------------------------')
print('Item 10. Periodic sequence of discrete rectangular pulses')
input('Press <ENTER> to plot five periods of the sequence')
xp = np.concatenate([U * u1[0:n_imp], np.zeros(n_imp)])  # ONE PERIOD OF THE SEQUENCE
p = 5  # NUMBER OF PERIODS
x7 = matlib.repmat(xp, 1, p)[0]  # PERIODIC SEQUENCE
n = np.arange(len(x7))  # DISCRETE NORMALIZED TIME
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('Periodic Sequence of Rectangular Impulses')
plt.stem(n, x7)
plt.title('Periodic Sequence of Rectangular Impulses x7(n)')
plt.show()
print('\n----------------------------------------')
print('Item 11. Uniform white noise')
input('Press <ENTER> to print the estimated MEAN and VARIANCE of the noise')
r_uniform = np.random.rand(1, 1000)[0]  # UNIFORM WHITE NOISE
mean_uniform = np.mean(r_uniform)  # ESTIMATED MEAN OF THE NOISE
var_uniform = np.var(r_uniform)  # ESTIMATED VARIANCE OF THE NOISE
print(f'mean_uniform={mean_uniform}, var_uniform={var_uniform}')
input('Press <ENTER> to plot the AUTOCOVARIANCE FUNCTION')
m = np.arange(-len(r_uniform), len(r_uniform))  # VECTOR OF DISCRETE LAGS FOR THE AUTOCOVARIANCE FUNCTION
r_r_uniform = xcov(r_uniform, m)  # ESTIMATED AUTOCOVARIANCE FUNCTION OF THE UNIFORM WHITE NOISE
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('Autocovariance Function of Uniform White Noise')
plt.stem(m, r_r_uniform, use_line_collection=True)
plt.title('Autocovariance Function of Uniform White Noise')
plt.xlabel('m')
plt.show()
print('\n----------------------------------------')
print('Item 12. Gaussian white noise')
input('Press <ENTER> to print the estimated MEAN and VARIANCE of the noise')
r_norm = np.random.randn(1000)  # GAUSSIAN WHITE NOISE (the original comment mistakenly said "uniform")
mean_norm = np.mean(r_norm)  # ESTIMATED MEAN OF THE NOISE
var_norm = np.var(r_norm)  # ESTIMATED VARIANCE OF THE NOISE
print(f'mean_norm={mean_norm}, var_norm={var_norm}')
input('Press <ENTER> to plot the ACF')
R_r_norm = np.correlate(r_norm, r_norm, mode='full') / len(r_norm)
m = np.arange(-len(r_norm), len(r_norm)-1)
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('ACF of White Gaussian Noise')
plt.stem(m, R_r_norm, use_line_collection=True)
plt.title('ACF of White Gaussian Noise')
plt.xlabel('m')
plt.show()
print('\n----------------------------------------')
print('Item 13. Additive mixture of a discrete harmonic signal and Gaussian white noise')
input('Press <ENTER> to plot the additive signal-plus-noise mixture')
n = np.arange(N)  # DISCRETE NORMALIZED TIME
x8 = x + np.random.randn(N)
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('Mixture of Harmonic Signal')
plt.stem(n, x8)
plt.title('Mixture of Harmonic Signal and White Gaussian Noise x8(n)')
plt.xlabel('n')
plt.show()
print('\n----------------------------------------')
print('Item 14. ACF of the additive mixture of a discrete harmonic signal and Gaussian white noise')
input('Press <ENTER> to plot the ACF')
R = (1 / N) * np.correlate(x8, x8, mode='full')
m = np.arange(-(N), N-1)
plt.subplot(1, 1, 1)
plt.gcf().canvas.set_window_title('White Gaussian Noise ACF')
plt.stem(m, R)
plt.title('ACF R(m)')
plt.xlabel('m')
plt.show()
input('Press <ENTER> to print the VARIANCE of the signal-plus-noise mixture and the ACF value R(N)')
print(f"var_x8 = {np.var(x8)}")
print(f"R(N) = {R[N]}")
print('\n----------------------------------------')
print('Item 15. Gaussian white noise with prescribed statistics')
r_normMean = np.random.randn(1000) + Mean  # GAUSSIAN WHITE NOISE WITH THE PRESCRIBED MEAN
r_normVar = np.sqrt(Var) * np.random.randn(1000)  # GAUSSIAN WHITE NOISE WITH THE PRESCRIBED VARIANCE
r_normMeanVar = np.sqrt(Var) * np.random.randn(1000) + Mean
max_ = np.max([r_norm, r_normMean, r_normVar, r_normMeanVar])  # MAXIMUM NOISE VALUE ACROSS THE FOUR VARIANTS
input('Press <ENTER> to plot the Gaussian white noise')
plt.subplot(4, 1, 1)
plt.gcf().canvas.set_window_title('White Gaussian Noises with different statistics')
plt.plot(r_norm)
plt.title(f"Mean value = {np.mean(r_norm):.4f}, Variance = {np.var(r_norm):.4f}")
plt.ylim((-max_, max_))
plt.subplot(4, 1, 2)
plt.plot(r_normMean)
plt.title(f"Mean value = {np.mean(r_normMean):.4f}, Variance = {np.var(r_normMean):.4f}")
plt.ylim((-max_, max_))
plt.subplot(4, 1, 3)
plt.plot(r_normVar)
plt.title(f"Mean value = {np.mean(r_normVar):.4f}, Variance = {np.var(r_normVar):.4f}")
plt.ylim((-max_, max_))
plt.subplot(4, 1, 4)
plt.plot(r_normMeanVar)
plt.title(f"Mean value = {np.mean(r_normMeanVar):.4f}, Variance = {np.var(r_normMeanVar):.4f}")
plt.ylim((-max_, max_))
plt.tight_layout()
plt.show()
// --- src/train_env.cpp (zijinoier/mater, MIT) ---
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <yaml-cpp/yaml.h>
#include <a_star.hpp>
#include <algorithm>
#include <boost/functional/hash.hpp>
#include <boost/heap/fibonacci_heap.hpp>
#include <boost/program_options.hpp>
#include <iostream>
#include <numeric>
#include <vector>
using libMultiRobotPlanning::AStar;
using libMultiRobotPlanning::Neighbor;
using libMultiRobotPlanning::PlanResult;
#define OBSERVATION_R 9
struct State {
State(int x, int y) : x(x), y(y) {}
State() {}
State(const State&) = default;
State(State&&) = default;
State& operator=(const State&) = default;
State& operator=(State&&) = default;
bool operator==(const State& other) const {
return std::tie(x, y) == std::tie(other.x, other.y);
}
friend std::ostream& operator<<(std::ostream& os, const State& s) {
return os << "(" << s.x << "," << s.y << ")";
}
double x;
double y;
};
namespace std {
template <>
struct hash<State> {
size_t operator()(const State& s) const {
// http://boost.ez2learn.com/doc/html/hash/combine.html
size_t seed = 0;
boost::hash_combine(seed, s.x);
boost::hash_combine(seed, s.y);
return seed;
}
};
} // namespace std
struct TrainState {
std::vector<std::vector<int>> obs_map = std::vector<std::vector<int>>(
OBSERVATION_R,
std::vector<int>(OBSERVATION_R, 0)),
pos_map = std::vector<std::vector<int>>(
OBSERVATION_R,
std::vector<int>(OBSERVATION_R, 0)),
form_map = std::vector<std::vector<int>>(
OBSERVATION_R,
std::vector<int>(OBSERVATION_R, 0)),
goal_map = std::vector<std::vector<int>>(
OBSERVATION_R,
std::vector<int>(OBSERVATION_R, 0));
std::tuple<double, double, double> goal_vector;
};
enum class Action {
Up,
Down,
Left,
Right,
Wait,
};
std::ostream& operator<<(std::ostream& os, const Action& a) {
switch (a) {
case Action::Up:
os << "Up";
break;
case Action::Down:
os << "Down";
break;
case Action::Left:
os << "Left";
break;
case Action::Right:
os << "Right";
break;
case Action::Wait:
os << "Wait";
break;
}
return os;
}
class AstarEnvironment {
public:
AstarEnvironment(size_t dimx, size_t dimy,
std::unordered_set<State> obstacles, State goal)
: m_dimx(dimx),
m_dimy(dimy),
m_obstacles(std::move(obstacles)),
m_goal(std::move(goal)) // NOLINT
{}
int admissibleHeuristic(const State& s) {
return std::abs(s.x - m_goal.x) + std::abs(s.y - m_goal.y);
}
bool isSolution(const State& s) { return s == m_goal; }
void getNeighbors(const State& s,
std::vector<Neighbor<State, Action, int>>& neighbors) {
neighbors.clear();
State up(s.x, s.y + 1);
if (stateValid(up)) {
neighbors.emplace_back(Neighbor<State, Action, int>(up, Action::Up, 1));
}
State down(s.x, s.y - 1);
if (stateValid(down)) {
neighbors.emplace_back(
Neighbor<State, Action, int>(down, Action::Down, 1));
}
State left(s.x - 1, s.y);
if (stateValid(left)) {
neighbors.emplace_back(
Neighbor<State, Action, int>(left, Action::Left, 1));
}
State right(s.x + 1, s.y);
if (stateValid(right)) {
neighbors.emplace_back(
Neighbor<State, Action, int>(right, Action::Right, 1));
}
}
void onExpandNode(const State& /*s*/, int /*fScore*/, int /*gScore*/) {}
void onDiscover(const State& /*s*/, int /*fScore*/, int /*gScore*/) {}
public:
bool stateValid(const State& s) {
return s.x >= 0 && s.x < m_dimx && s.y >= 0 && s.y < m_dimy &&
m_obstacles.find(s) == m_obstacles.end();
}
private:
int m_dimx;
int m_dimy;
std::unordered_set<State> m_obstacles;
State m_goal;
};
class Environment {
public:
Environment(size_t dimx, size_t dimy, std::unordered_set<State> obstacles,
std::vector<State> states, std::vector<State> goals)
: m_dimx(dimx),
m_dimy(dimy),
m_obstacles(std::move(obstacles)),
m_poss(states),
m_starts(states),
m_goals(std::move(goals)) {
// std::cout << "Having built an environment containing "
// << m_obstacles.size()<< " Obstacles\t"
// << m_states.size()<<" Starting states\t"
// << m_goals.size()<<" Goal points\n";
holonomic_cost_map = std::vector<std::vector<std::vector<float>>>(
m_starts.size(),
std::vector<std::vector<float>>(m_dimx, std::vector<float>(m_dimy, 0)));
updateCostmap();
    if (m_poss.size() != m_goals.size())
      std::cout << "ERROR: start points size " << m_poss.size()
                << " does not equal goal points size " << m_goals.size();
// for (size_t j=dimy; j--;)
// {
// for (size_t i=0;i<dimx;i++)
// std::cout<<map[i][j]<<" ";
// std::cout<<std::endl;
// }
}
Environment(std::string inputFile) {
YAML::Node config = YAML::LoadFile(inputFile);
const auto& dim = config["map"]["dimensions"];
m_dimx = dim[0].as<int>();
m_dimy = dim[1].as<int>();
for (const auto& node : config["map"]["obstacles"]) {
m_obstacles.insert(State(node[0].as<int>(), node[1].as<int>()));
}
for (const auto& node : config["agents"]) {
const auto& start = node["start"];
const auto& goal = node["goal"];
m_starts.emplace_back(State(start[0].as<int>(), start[1].as<int>()));
m_poss.emplace_back(State(start[0].as<int>(), start[1].as<int>()));
m_goals.emplace_back(State(goal[0].as<int>(), goal[1].as<int>()));
}
holonomic_cost_map = std::vector<std::vector<std::vector<float>>>(
m_starts.size(),
std::vector<std::vector<float>>(m_dimx, std::vector<float>(m_dimy, 0)));
updateCostmap();
std::cout << "\nHaving built an environment from file: " << inputFile
<< "\nContaining " << m_obstacles.size() << " Obstacles "
<< m_starts.size() << " Starting points " << m_goals.size()
<< " Goal points\n";
}
Environment(const Environment&) = delete;
Environment& operator=(const Environment&) = delete;
struct compare_node {
bool operator()(const std::pair<State, double>& n1,
const std::pair<State, double>& n2) const {
return (n1.second > n2.second);
}
};
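  // updateCostmap() below runs a uniform-cost flood fill from every goal over
  // the 4-connected grid: holonomic_cost_map[i][x][y] becomes the step
  // distance from cell (x, y) to goal i, and obstacle cells are pinned to a
  // large constant (1000) afterwards.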
void updateCostmap() {
boost::heap::fibonacci_heap<std::pair<State, double>,
boost::heap::compare<compare_node>>
heap;
for (size_t i = 0; i < m_goals.size(); i++) {
heap.clear();
int goal_x = (int)m_goals[i].x;
int goal_y = (int)m_goals[i].y;
heap.push(std::make_pair(State(goal_x, goal_y), 0));
while (!heap.empty()) {
std::pair<State, double> node = heap.top();
heap.pop();
int x = node.first.x;
int y = node.first.y;
for (int dx = -1; dx <= 1; dx++)
for (int dy = -1; dy <= 1; dy++) {
if (abs(dx) == abs(dy)) continue;
int new_x = x + dx;
int new_y = y + dy;
if (new_x == goal_x && new_y == goal_y) continue;
if (new_x >= 0 && new_x < (int)m_dimx && new_y >= 0 &&
new_y < (int)m_dimy &&
holonomic_cost_map[i][new_x][new_y] == 0 &&
m_obstacles.find(State(new_x, new_y)) == m_obstacles.end()) {
// FIXME: +1
holonomic_cost_map[i][new_x][new_y] =
holonomic_cost_map[i][x][y] + 1;
heap.push(std::make_pair(State(new_x, new_y),
holonomic_cost_map[i][new_x][new_y]));
}
}
}
for (auto it = m_obstacles.begin(); it != m_obstacles.end(); it++) {
holonomic_cost_map[i][it->x][it->y] = 1000;
}
}
// for (size_t idx = 0; idx < m_goals.size(); idx++) {
// for (size_t i = 0; i < m_dimx; i++) {
// for (size_t j = 0; j < m_dimy; j++)
// std::cout << holonomic_cost_map[idx][i][j] << "\t";
// std::cout << std::endl;
// }
// std::cout << "----------------------\n";
// }
}
auto getEnviromentParam() {
return std::make_tuple(m_starts.size(), m_dimx, m_dimy);
}
std::vector<bool> isSolution(
std::vector<State> states = std::vector<State>()) {
if (states.empty()) {
// Using default input
states = m_poss;
}
std::vector<bool> result;
for (auto it = states.begin(); it != states.end(); ++it) {
      if (!(*it == m_goals.at(it - states.begin())))
result.emplace_back(false);
else
result.emplace_back(true);
}
return result;
}
double ProcrustesDistance(std::vector<State> states1,
std::vector<State> states2) {
// https://en.wikipedia.org/wiki/Procrustes_analysis
// Translation
// std::cout<<states1[0]<<" "<<states1[1]<<" "<<states2[0]<<"
// "<<states2[1]<<std::endl;
auto acc_x = [](int sum, State s) { return sum + s.x; };
auto acc_y = [](int sum, State s) { return sum + s.y; };
double mean1_x =
std::accumulate(states1.begin(), states1.end(), 0.0, acc_x) /
states1.size();
double mean1_y =
std::accumulate(states1.begin(), states1.end(), 0.0, acc_y) /
states1.size();
std::for_each(states1.begin(), states1.end(),
[&mean1_x, &mean1_y](State& s) {
s.x -= mean1_x;
s.y -= mean1_y;
});
double mean2_x =
std::accumulate(states2.begin(), states2.end(), 0.0, acc_x) /
states2.size();
double mean2_y =
std::accumulate(states2.begin(), states2.end(), 0.0, acc_y) /
states2.size();
std::for_each(states2.begin(), states2.end(),
[&mean2_x, &mean2_y](State& s) {
s.x -= mean2_x;
s.y -= mean2_y;
});
// Uniform scaling is NOT USED here
// Rotation
// std::cout<<states1[0]<<" "<<states1[1]<<" "<<states2[0]<<"
// "<<states2[1]<<std::endl;
double theta, numerator = 0.0, denominator = 0.0;
for (size_t i = 0; i < states1.size(); i++) {
numerator += (states2[i].x * states1[i].y - states2[i].y * states1[i].x);
denominator +=
(states2[i].x * states1[i].x + states2[i].y * states1[i].y);
}
theta = atan2(numerator, denominator);
for (size_t i = 0; i < states2.size(); i++) {
State temps;
temps.x = cos(theta) * states2[i].x - sin(theta) * states2[i].y;
temps.y = sin(theta) * states2[i].x + cos(theta) * states2[i].y;
states2[i] = temps;
}
// std::cout<<states1[0]<<" "<<states1[1]<<" "<<states2[0]<<"
// "<<states2[1]<<std::endl; Shape comparison
double d = 0;
for (size_t i = 0; i < states1.size(); i++) {
// std::cout<<"d"<<d;
d += pow((states1[i].x - states2[i].x), 2) +
pow((states1[i].y - states2[i].y), 2);
}
d = d < 1e-9 ? 0 : sqrt(d);
// std::cout << "Result:" << d <<std::endl;
return d;
}
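  // Note: two formations that differ only by a translation plus a rotation
  // about their centroid yield a distance of 0; scale differences are
  // deliberately NOT factored out (see the comment above).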
auto getStateRewards(bool valid) {
double formation_loss = ProcrustesDistance(m_poss, m_goals);
std::vector<bool> isGoal = isSolution(m_poss);
std::tuple<bool, std::vector<bool>, double> reward =
std::make_tuple(valid, isGoal, formation_loss);
std::vector<TrainState> tState;
for (auto it = m_poss.begin(); it != m_poss.end(); it++) {
TrainState tempState;
// obs_map
for (size_t i = 0; i < OBSERVATION_R; i++)
for (size_t j = 0; j < OBSERVATION_R; j++) {
State index(it->x + i - (int)OBSERVATION_R / 2,
it->y + j - (int)OBSERVATION_R / 2);
if (stateValid(index))
tempState.obs_map[i][j] = 0;
else
tempState.obs_map[i][j] = 1;
}
// pos_map
for (auto p_it = m_poss.begin(); p_it != m_poss.end(); p_it++) {
int index_x = p_it->x - it->x;
int index_y = p_it->y - it->y;
if (abs(index_x) <= OBSERVATION_R / 2 &&
abs(index_y) <= OBSERVATION_R / 2)
tempState.pos_map[index_x + (int)OBSERVATION_R / 2]
[index_y + (int)OBSERVATION_R / 2] =
p_it - m_poss.begin() + 1;
}
// cost_map
for (size_t i = 0; i < OBSERVATION_R; i++)
for (size_t j = 0; j < OBSERVATION_R; j++) {
State index(it->x + i - (int)OBSERVATION_R / 2,
it->y + j - (int)OBSERVATION_R / 2);
if (stateValid(index))
tempState.goal_map[i][j] =
holonomic_cost_map[it - m_poss.begin()][index.x][index.y];
else
tempState.goal_map[i][j] = 1000;
}
// other agent goal mark as not valid
for (auto p_it = m_goals.begin(); p_it != m_goals.end(); p_it++) {
if (it - m_poss.begin() != p_it - m_goals.begin()) {
if (m_poss[p_it - m_goals.begin()] == *p_it) {
// std::cout << " \033[33m WARNING :An agent " << it -
// m_poss.begin()
// << "has reaches it goal!\033[0m" << std::endl;
int index_x = p_it->x - it->x;
int index_y = p_it->y - it->y;
if (abs(index_x) <= OBSERVATION_R / 2 &&
abs(index_y) <= OBSERVATION_R / 2)
tempState.goal_map[index_x + (int)OBSERVATION_R / 2]
[index_y + (int)OBSERVATION_R / 2] = 3000;
}
}
}
// form_map
for (auto p_it = m_goals.begin(); p_it != m_goals.end(); p_it++) {
int index = it - m_poss.begin();
int index_x = p_it->x - m_goals[index].x;
int index_y = p_it->y - m_goals[index].y;
if (abs(index_x) <= OBSERVATION_R / 2 &&
abs(index_y) <= OBSERVATION_R / 2)
tempState.form_map[index_x + (int)OBSERVATION_R / 2]
[index_y + (int)OBSERVATION_R / 2] =
p_it - m_goals.begin() + 1;
}
// goal_vector
int index = it - m_poss.begin();
int goal_dx = m_goals[index].x - it->x;
int goal_dy = m_goals[index].y - it->y;
double magnitude = sqrt(pow(goal_dx, 2) + pow(goal_dy, 2));
if (magnitude < 1e-8)
tempState.goal_vector = std::make_tuple(0, 0, 0);
else
tempState.goal_vector =
std::make_tuple((double)goal_dx / magnitude,
(double)goal_dy / magnitude, magnitude);
tState.emplace_back(tempState);
}
// for (auto it = tState.begin(); it != tState.end(); it++) {
// std::cout << "\n--------------id:" << it - tState.begin()
// << "------------" << std::endl;
// for (size_t j = OBSERVATION_R; j--;) {
// for (size_t i = 0; i < OBSERVATION_R; i++)
// std::cout << it->obs_map[i][j] << " ";
// std::cout << std::endl;
// }
// std::cout << std::endl;
// for (size_t j = OBSERVATION_R; j--;) {
// for (size_t i = 0; i < OBSERVATION_R; i++)
// std::cout << it->pos_map[i][j] << " ";
// std::cout << std::endl;
// }
// std::cout << std::endl;
// for (size_t j = OBSERVATION_R; j--;) {
// for (size_t i = 0; i < OBSERVATION_R; i++)
// std::cout << it->goal_map[i][j] << "\t";
// std::cout << std::endl;
// }
// std::cout << std::endl;
// for (size_t j = OBSERVATION_R; j--;) {
// for (size_t i = 0; i < OBSERVATION_R; i++)
// std::cout << it->form_map[i][j] << "\t";
// std::cout << std::endl;
// }
// std::cout << std::endl;
// std::cout << "goal vector:" << std::get<0>(it->goal_vector) << " "
// << std::get<1>(it->goal_vector) << "@"
// << std::get<2>(it->goal_vector) << std::endl;
// }
return std::make_tuple(tState, reward);
}
auto getGlobalState() {
std::vector<std::vector<int>> obs_map = std::vector<std::vector<int>>(
m_dimx, std::vector<int>(m_dimy, 0)),
pos_map = std::vector<std::vector<int>>(
m_dimx, std::vector<int>(m_dimy, 0)),
goal_map = std::vector<std::vector<int>>(
m_dimx, std::vector<int>(m_dimy, 0));
for (size_t i = 0; i < m_dimx; i++)
for (size_t j = 0; j < m_dimy; j++) {
if (stateValid(State(i, j)))
obs_map[i][j] = 0;
else
obs_map[i][j] = 1;
}
for (auto it = m_poss.begin(); it != m_poss.end(); it++) {
pos_map[it->x][it->y] = it - m_poss.begin() + 1;
}
for (auto it = m_goals.begin(); it != m_goals.end(); it++) {
goal_map[it->x][it->y] = it - m_goals.begin() + 1;
}
// std::cout << "-------------------GLOBAL---------------------\n";
// for (size_t j = m_dimy; j--;) {
// for (size_t i = 0; i < m_dimx; i++) std::cout << obs_map[i][j] << "
// "; std::cout << std::endl;
// }
// std::cout << std::endl;
// for (size_t j = m_dimy; j--;) {
// for (size_t i = 0; i < m_dimx; i++) std::cout << pos_map[i][j] << "
// "; std::cout << std::endl;
// }
// std::cout << std::endl;
// for (size_t j = m_dimy; j--;) {
// for (size_t i = 0; i < m_dimx; i++) std::cout << goal_map[i][j] << "
// "; std::cout << std::endl;
// }
// std::cout << std::endl;
return std::make_tuple(obs_map, pos_map, goal_map);
}
auto reset() {
m_poss.clear();
m_poss = m_starts;
bool valid = true;
return getStateRewards(valid);
}
  /**
   * Update states via actions.
   *   actions:    input actions for this time stamp
   *   valid:      whether this update is valid (e.g. no collision with
   *               obstacles or other agents)
   *   nextStates: states after this update
   *   reward:     reward of this action
   **/
auto update(const std::vector<Action> actions) {
    if (actions.size() != m_poss.size())
      std::cout << "\033[1m\033[31m"
                << "WARNING: the input action size does NOT equal the agent "
                   "count!!\033[0m\n";
bool valid = true;
std::vector<State> nextStates;
nextStates.clear();
for (auto it = actions.begin(); it != actions.end(); it++) {
State tempState;
int index = it - actions.begin();
      switch (*it) {
case Action::Up:
tempState = State(m_poss[index].x, m_poss[index].y + 1);
break;
case Action::Down:
tempState = State(m_poss[index].x, m_poss[index].y - 1);
break;
case Action::Left:
tempState = State(m_poss[index].x - 1, m_poss[index].y);
break;
case Action::Right:
tempState = State(m_poss[index].x + 1, m_poss[index].y);
break;
case Action::Wait:
tempState = State(m_poss[index].x, m_poss[index].y);
break;
default:
          std::cout << "Warning: actions contain an unrecognized action type!\n";
break;
}
if (std::find(nextStates.begin(), nextStates.end(), tempState) !=
nextStates.end()
// vertex conflict
|| !stateValid(tempState)
// obstacle conflict
) {
valid = false;
}
// edge conflict
auto find_it =
std::find(nextStates.begin(), nextStates.end(), m_poss[index]);
if (find_it != nextStates.end()) {
int i = std::distance(nextStates.begin(), find_it);
if (tempState == m_poss[i]) valid = false;
}
nextStates.emplace_back(tempState);
} // end for
m_poss.clear();
m_poss = nextStates;
return getStateRewards(valid);
}
private:
bool stateValid(const State& s) {
return s.x >= 0 && s.x < m_dimx && s.y >= 0 && s.y < m_dimy &&
m_obstacles.find(s) == m_obstacles.end();
}
private:
size_t m_dimx;
size_t m_dimy;
std::unordered_set<State> m_obstacles;
std::vector<State> m_poss;
std::vector<State> m_starts;
std::vector<State> m_goals;
std::vector<std::vector<std::vector<float>>> holonomic_cost_map;
// std::vector< std::vector<int> > m_heuristic;
};
PYBIND11_MODULE(train_env, m) {
m.doc() =
"An environment for Reinforcement Learning to train multi-agent "
"formation problem.";
namespace py = pybind11;
py::class_<State>(m, "State")
.def(py::init<int, int>())
.def_readwrite("x", &State::x)
.def_readwrite("y", &State::y);
py::class_<TrainState>(m, "TrainState")
.def(py::init())
.def_readwrite("obs_map", &TrainState::obs_map)
.def_readwrite("pos_map", &TrainState::pos_map)
.def_readwrite("form_map", &TrainState::form_map)
.def_readwrite("goal_map", &TrainState::goal_map)
.def_readwrite("goal_vector", &TrainState::goal_vector);
py::enum_<Action>(m, "Action")
.value("Up", Action::Up)
.value("Down", Action::Down)
.value("Left", Action::Left)
.value("Right", Action::Right)
.value("Wait", Action::Wait)
.export_values();
py::class_<Neighbor<State, Action, int>>(m, "Neighbor")
.def(py::init<State, Action, int>());
py::class_<PlanResult<State, Action, int>>(m, "PlanResult")
.def(py::init())
.def_readwrite("cost", &PlanResult<State, Action, int>::cost)
.def_readwrite("fmin", &PlanResult<State, Action, int>::fmin)
.def_readwrite("states", &PlanResult<State, Action, int>::states)
.def_readwrite("actions", &PlanResult<State, Action, int>::actions);
  py::class_<AstarEnvironment>(m, "AstarEnvironment", "A* algorithm environment")
      .def(py::init<int, int, std::unordered_set<State>, State>(),
           "Init function of the A* environment")
.def("admissibleHeuristic", &AstarEnvironment::admissibleHeuristic)
.def("isSolution", &AstarEnvironment::isSolution)
.def("stateValid", &AstarEnvironment::stateValid);
py::class_<AStar<State, Action, int, AstarEnvironment>>(m, "AStar")
.def(py::init<AstarEnvironment&>())
.def("search", &AStar<State, Action, int, AstarEnvironment>::search);
py::class_<Environment>(m, "TrainEnv")
.def(py::init<int, int, std::unordered_set<State>, std::vector<State>,
std::vector<State>>())
.def(py::init<std::string>())
.def("getParam", &Environment::getEnviromentParam)
.def("getGlobalState", &Environment::getGlobalState)
.def("update", &Environment::update)
.def("reset", &Environment::reset)
.def("isSolution", &Environment::isSolution,
py::arg("states") = std::vector<State>());
}
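// Hypothetical Python usage sketch (assumes the extension builds as
// `train_env` and a YAML config like the one parsed above exists):
//   import train_env
//   env = train_env.TrainEnv("benchmark.yaml")
//   n_agents, dimx, dimy = env.getParam()
//   states, (valid, at_goal, form_loss) = env.reset()
//   states, (valid, at_goal, form_loss) = env.update(
//       [train_env.Action.Wait] * n_agents)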
# --- modules/handy.py (altanova/stuff, MIT) ---
import numpy as np
import pandas as pd
from math import modf
format_slash = '%d/%m/%Y %H:%M'
format_dash = '%Y-%m-%d %H:%M:%S'
# 1.10.19 0:25
format_dots = '%d.%m.%Y %H:%M:%S'
# 30-11-20 18:59
format_dash_short = '%d-%m-%y %H:%M'
# unix epoch
epoch = pd.Timestamp("1970-01-01")
# just a reminder how to convert from string to datetime column
# use this for columns: created and resolved
def to_datetime(df, input_column, output_column, fmt=format_dash, errors='coerce'):
df[output_column] = pd.to_datetime(df[input_column], format=fmt, errors=errors)
return df
# unix timestamp from DateTimeIndex
def to_unix(s):
return (s - epoch) // pd.Timedelta("1s")
# alternative implementation would be:
# return s.astype('int64') // int(1e9)
# series of pd.Timestamps -> DateTimeIndex of pd.Timestamps
def series_to_dateTimeIndex(s):
return pd.to_datetime(s.values)
# minute of the day, int [0, 1440) -> time of the day, float [0, 24)
def m2t(minofday):
    hour = minofday // 60
    minute = minofday % 60
    return hour + minute / 60
# time of the day, float [0, 24) -> minute of the day, int [0, 1440)
def t2m(timeofday):
    # the original unpacked math.modf here and then discarded the result
    return round(timeofday * 60)
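# Round-trip sanity check (illustrative): m2t(90) == 1.5 and t2m(1.5) == 90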
# requirement: dataframe must have 'created' and 'resolved' fields
def augment_columns(df):
df['delta'] = df['resolved'] - df['created']
df['delta_m'] = df.apply(lambda r: r['delta'].total_seconds() / 60, axis=1)
# time of day of ticket open (float)
df['tod'] = df.apply(lambda row: row['created'].hour + row['created'].minute / 60, axis=1)
df['mod'] = df.apply(lambda row: t2m(row['tod']), axis = 1)
df['weekday'] = df.apply(lambda row: row['created'].weekday(), axis=1)
df['hour'] = df.apply(lambda row: row['created'].hour, axis=1)
# time of week (float)
df['tow'] = df.weekday + df['tod'] / 24
df['weekhour'] = np.floor(df.tow * 24)
start = df.created.min()
# monday before our 1st incident
start_week_1 = monday_before(start)
# number of day (and week) from beginning of time, that is Monday before the first noted event
df['day_nr'] = df.apply(lambda r: (r.created - start_week_1).days, axis=1)
df['week_nr'] = df.day_nr // 7
return df
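# Illustrative use (assumes 'created'/'resolved' are already datetimes):
#   df = pd.DataFrame({'created':  pd.to_datetime(['2021-11-08 10:00']),
#                      'resolved': pd.to_datetime(['2021-11-08 11:30'])})
#   augment_columns(df)['delta_m'][0]  # -> 90.0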
# return Monday 00:00:00 before given moment
def monday_before(now):
monday = now - pd.Timedelta(now.weekday(), 'days')
# Monday 00:00:00
return pd.Timestamp(monday.date())
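# e.g. monday_before(pd.Timestamp('2021-11-12 15:05'))  # a Friday
#      -> Timestamp('2021-11-08 00:00:00')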
# return Monday 00:00:00 after given moment
def monday_after(now):
# trick: compute Monday before 1 week from now... it's the same.
return monday_before(now + pd.Timedelta(7, 'days'))
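# e.g. monday_after(pd.Timestamp('2021-11-12 15:05'))
#      -> Timestamp('2021-11-15 00:00:00')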
# use this to get a full-week span covering the entire period
# returns: Monday before, Monday after, number of weeks between
def outer_week_boundaries(series):
start, end = monday_before(series.min()), monday_after(series.max())
return start, end, (end - start).days / 7
def inner_week_boundaries(series):
start, end = monday_after(series.min()), monday_before(series.max())
return start, end, (end - start).days / 7
# return: array of weekhour histograms, median histogram, and average histogram
# exact number of days, including fraction of day (float)
def fractional_days(data_start, data_end):
delta = data_end - data_start
return delta.days + delta.seconds / (60 * 60 * 24)
# number of full 24-hour periods
def inner_days(data_start, data_end):
return (data_end - data_start).days
# number of days between midnight-before-first-record and midnight-after-last-record
def outer_days(data_start, data_end):
return (data_end.date() - data_start.date()).days + 1
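# e.g. from Fri 2021-11-12 18:00 to Sat 2021-11-13 06:00:
#   fractional_days -> 0.5, inner_days -> 0, outer_days -> 2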
def weekly_bin_edges(outer_start, howmany: int):
# add 1 for we count bin edges rather than bins
week = pd.Timedelta(7, 'days')
# this fails in previous versions of pandas
# return [outer_start + i * week for i in np.arange(howmany + 1)]
return [outer_start + i * week for i in range(howmany + 1)]
def daily_bin_edges(start, howmany: int):
# add 1 for we count bin edges rather than bins
day = pd.Timedelta(1, 'days')
# this fails in previous versions of pandas
# return [start.date() + i * day for i in np.arange(howmany + 1)]
return [start.date() + i * day for i in range(howmany + 1)]
class WeeklyStats:
def __init__(self, data) -> None:
self.total_records = len(data)
self.outer_start, self.outer_end, self.outer_weeks = outer_week_boundaries(data)
self.inner_start, self.inner_end, self.inner_weeks = inner_week_boundaries(data)
self.data_start, self.data_end = data.min(), data.max()
self.weekly_bins = weekly_bin_edges(self.outer_start, int(self.outer_weeks))
self.days = fractional_days(self.data_start, self.data_end)
self.outer_days = outer_days(self.data_start, self.data_end)
self.daily_bins = daily_bin_edges(self.data_start, int(self.outer_days))
self.weeks = self.days / 7
# to be implemented
# numpy histogram works with numbers only, that's why...
# to_timestamp = np.vectorize(lambda x: x.timestamp())
# to_timestamp = np.vectorize(lambda x: x.value)
# to_timestamp = np.vectorize(lambda x: x.total_seconds())
# ts_data = to_timestamp(data)
# self.week_values, _ = np.histogram(ts_data, bins=self.weekly_bins)
# self.fullweek_values = self.week_values[1:-1]
# self.weekly_minimum = self.fullweek_values.min()
# self.weekly_maximum = self.week_values.max()
# self.day_values, _ = np.histogram(ts_data, bins=self.daily_bins)
# self.fullday_values = self.day_values[1:-1]
# self.daily_minimum = self.fullday_values.min()
        # self.daily_maximum = self.day_values.max()
def describe_histogram(ws: WeeklyStats, week_values, day_values):
fullweek_values = week_values[1:-1]
fullday_values = day_values[1:-1]
print('Basic statistics:\n')
print('Total records:\t{}'.format(ws.total_records))
print('Histogram range (outer weeks):{:.0f}'.format(ws.outer_weeks))
start, end = ws.outer_start, ws.outer_end
print('start:\t{}\t{}\nend:\t{}\t{}'.format(start, start.day_name(), end, end.day_name()))
print('Data range:')
start, end = ws.data_start, ws.data_end
print('start:\t{}\t{}\nend:\t{}\t{}'.format(start, start.day_name(), end, end.day_name()))
print('Full weeks (inner weeks):{:.0f}'.format(ws.inner_weeks))
start, end = ws.inner_start, ws.inner_end
print('start:\t{}\t{}\nend:\t{}\t{}'.format(start, start.day_name(), end, end.day_name()))
print('Data stats:')
print('weeks: {:.1f}\trecords per week:{:.1f},\t weekly min:{},\t weekly max:{}'.
format(ws.weeks, ws.total_records / ws.weeks, int(min(fullweek_values)), int(max(week_values))))
print('days: {:.1f}\trecords per day:{:.1f},\t daily min:{},\t daily max:{}'.
format(ws.days, ws.total_records / ws.days, int(min(fullday_values)), int(max(day_values))))
    print('Note: the minima do not take into account the marginal (incomplete) weeks or days')
def draw_week_edges(axis, ws: WeeklyStats):
axis.axvline(x=ws.inner_start, color='r', linestyle='dashed', linewidth=2, label='inner (full) weeks range')
axis.axvline(x=ws.outer_start, color='b', linestyle='dashed', linewidth=2, label='outer (incomplete) weeks range')
axis.axvline(x=ws.inner_end, color='r', linestyle='dashed', linewidth=2)
axis.axvline(x=ws.outer_end, color='b', linestyle='dashed', linewidth=2)
axis.legend()
def weekly_hist(axis, data):
ws = WeeklyStats(data)
w = axis.hist(x=data, bins=ws.weekly_bins)
draw_week_edges(axis, ws)
return w
def daily_hist(axis, data):
ws = WeeklyStats(data)
draw_week_edges(axis, ws)
d = axis.hist(x=data, bins=ws.daily_bins, edgecolor='black')
return d
# example:
# round_up(345) -> 400
# round_up(83459) -> 90000
def round_up(x):
import math
if (x == 0):
return 0
order = 10 ** math.floor(np.log10(x))
# equal numbers
if ((x // order) == (x / order)):
return x
return order * ((x // order) + 1)
# example: tidy_bins([35, 72, 89], bins = 4)
# -> [0, 25, 50, 75, 100]
def tidy_bins(data, bins = 10):
maxbin = round_up(max(data))
binsize = maxbin // bins
bins = np.arange(0, maxbin, binsize)
return np.append(bins, maxbin)
// --- rokko/localized_matrix.hpp (wistaria/rokko, BSL-1.0) ---
/*****************************************************************************
*
* Rokko: Integrated Interface for libraries of eigenvalue decomposition
*
* Copyright (C) 2012-2015 Rokko Developers https://github.com/t-sakashita/rokko
*
* Distributed under the Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*
*****************************************************************************/
#ifndef ROKKO_LOCALIZED_MATRIX_HPP
#define ROKKO_LOCALIZED_MATRIX_HPP
#include <rokko/matrix_major.hpp>
#include <Eigen/Dense>
#include <iostream>
#include <boost/type_traits.hpp>
namespace rokko {
namespace detail {
template<typename MATRIX_MAJOR>
struct eigen3_matrix_major;
template<>
struct eigen3_matrix_major<rokko::matrix_row_major> {
static const int value = Eigen::RowMajor;
};
template<>
struct eigen3_matrix_major<rokko::matrix_col_major> {
static const int value = Eigen::ColMajor;
};
} // end namespace detail
template<typename T, typename MATRIX_MAJOR = rokko::matrix_col_major, int ROWS = Eigen::Dynamic, int COLS = Eigen::Dynamic>
class localized_matrix : public Eigen::Matrix<T, ROWS, COLS, detail::eigen3_matrix_major<MATRIX_MAJOR>::value> {
public:
typedef T value_type;
typedef MATRIX_MAJOR major_type;
typedef Eigen::Matrix<value_type, ROWS, COLS, detail::eigen3_matrix_major<major_type>::value> super_type;
typedef localized_matrix<value_type, major_type, ROWS, COLS> matrix_type;
localized_matrix() : super_type() {};
localized_matrix(int rows, int cols) : super_type(rows, cols) {};
template<typename U>
localized_matrix(U const& other) : super_type(other) {};
template<typename U>
matrix_type& operator=(U const& other) { super_type::operator=(other); return *this; }
int translate_l2g_row(int local_i) const { return local_i; }
int translate_l2g_col(int local_j) const { return local_j; }
int translate_g2l_row(int global_i) const { return global_i; }
int translate_g2l_col(int global_j) const { return global_j; }
int get_m_global() const { return super_type::rows(); }
int get_n_global() const { return super_type::cols(); }
int get_m_local() const { return super_type::rows(); }
int get_n_local() const { return super_type::cols(); }
bool is_gindex_myrow(int) const { return true; }
bool is_gindex_mycol(int) const { return true; }
bool is_gindex(int, int) const { return true; }
void set_local(int local_i, int local_j, value_type value) {
this->operator()(local_i, local_j) = value;
}
void update_local(int local_i, int local_j, value_type value) {
this->operator()(local_i, local_j) += value;
}
value_type get_local(int local_i, int local_j) const {
return this->operator()(local_i, local_j);
}
void set_global(int global_i, int global_j, value_type value) {
set_local(global_i, global_j, value);
}
void update_global(int global_i, int global_j, value_type value) {
update_local(global_i, global_j, value);
}
value_type get_global(int global_i, int global_j) {
return get_local(global_i, global_j);
}
value_type get_global_checked(int global_i, int global_j) {
return get_local(global_i, global_j);
}
template<class FUNC>
void generate(FUNC func) {
for(int local_i = 0; local_i < get_m_local(); ++local_i) {
for(int local_j = 0; local_j < get_n_local(); ++local_j) {
set_local(local_i, local_j, func(local_i, local_j));
}
}
}
void set_zeros() { super_type::setZero(); }
bool is_row_major() const { return boost::is_same<MATRIX_MAJOR, matrix_row_major>::value; }
bool is_col_major() const { return boost::is_same<MATRIX_MAJOR, matrix_col_major>::value; }
void print() const { std::cout << *this << std::endl; }
};
typedef localized_matrix<float> flmatrix;
typedef localized_matrix<double> dlmatrix;
typedef localized_matrix<std::complex<float> > clmatrix;
typedef localized_matrix<std::complex<double> > zlmatrix;
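// Usage sketch (illustrative, not part of the original header):
//   rokko::dlmatrix a(4, 4);
//   a.generate([](int i, int j) { return 1.0 / (i + j + 1); });  // Hilbert matrix
//   a.print();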
} // namespace rokko
#endif // ROKKO_LOCALIZED_MATRIX_HPP
(* --- Coq source: Cyclone semantics, LN infrastructure lemmas --- *)
(* Cyclone Semantics using TLC/LN in Coq Version 4 *)
(* "SAFE PROGRAMMING AT THE C LEVEL OF ABSTRACTION". Daniel Grossman, August 2003 *)
(* Lemmas for LN infrastructure *)
(* Brian Milnes 2016 *)
Set Implicit Arguments.
Require Export Cyclone_Type_Substitution Cyclone_LN_FV Cyclone_LN_LC_Body Cyclone_LN_Open_Close Cyclone_LN_Tactics.
(* TODO:
TOC
more reasonable lemma naming.
notation
applys- why did this get dropped? what does it do?
all: tactic.
ltac:(some_tactic) - gallina expression evaluates to the terms from the tactic.
Think about variable only substitution for some of the lemmas.
*)
(* Opening type variables in types. *)
Lemma open_rec_t_core:
forall t i j u v,
i <> j ->
(open_rec_t_t j v t) = (open_rec_t_t i u (open_rec_t_t j v t)) ->
t = (open_rec_t_t i u t).
Proof.
induction t; introv Neq Equ; inversion* Equ; simpls; fequals*.
case_nat*.
case_nat*.
Qed.
(* Opening type variables in terms. *)
Lemma open_rec_t_tm_v_core:
forall t i j u v,
i <> j ->
(open_rec_t_tm_v j v t) = (open_rec_t_tm_v i u (open_rec_t_tm_v j v t)) ->
t = (open_rec_t_tm_v i u t).
Proof.
induction t; introv Neq Equ; inversion* Equ; simpls*.
Qed.
Ltac inversion_on_matching_terms :=
match goal with
| H: (?C _) = (?C _) |- _ => inversion H
| H: (?C _ _) = (?C _ _) |- _ => inversion H
| H: (?C _ _ _) = (?C _ _ _) |- _ => inversion H
end.
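(* Example (hedged, for illustration only): for any constructor C of the
   syntax, a hypothesis H : (C a b) = (C c d) is inverted by this tactic to
   expose the argument equalities a = c and b = d, which fequals* can then
   discharge. *)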
Ltac open_rec_t_tm_x_core j' v0' v' :=
intros;
simpl in *;
fequals*;
simpl in *;
try inversion_on_matching_terms;
try solve[fequals*];
try apply open_rec_t_tm_v_core with (j:= j') (v:= v0'); try assumption;
apply open_rec_t_core with (j:= j') (v:= v'); try assumption.
(* BUG why can't I put this into the induction without j being bound? *)
(*
Definition Q :=
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_t_tm_st j v t) =
(open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t)).
Lemma open_rec_t_tm_st_core':
forall t i j u v,
i <> j ->
(open_rec_t_tm_st j v t) = (open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t).
Proof.
apply (St_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_t_tm_st j v t) =
(open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_t_tm_e j v t) =
(open_rec_t_tm_e i u (open_rec_t_tm_e j v t)) ->
t = (open_rec_t_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_t_tm_f j v t) =
(open_rec_t_tm_f i u (open_rec_t_tm_f j v t)) ->
t = (open_rec_t_tm_f i u t)));
open_rec_t_tm_x_core j v0 v.
Qed.
*)
Lemma open_rec_t_tm_st_core:
forall t i j u v,
i <> j ->
(open_rec_t_tm_st j v t) = (open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t).
Proof.
apply (St_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_t_tm_st j v t) =
(open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_t_tm_e j v t) =
(open_rec_t_tm_e i u (open_rec_t_tm_e j v t)) ->
t = (open_rec_t_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_t_tm_f j v t) =
(open_rec_t_tm_f i u (open_rec_t_tm_f j v t)) ->
t = (open_rec_t_tm_f i u t)));
open_rec_t_tm_x_core j v0 v.
Qed.
Lemma open_rec_t_tm_e_core:
forall t i j u v,
i <> j ->
(open_rec_t_tm_e j v t) = (open_rec_t_tm_e i u (open_rec_t_tm_e j v t)) ->
t = (open_rec_t_tm_e i u t).
Proof.
apply (E_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_t_tm_st j v t) = (open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_t_tm_e j v t) = (open_rec_t_tm_e i u (open_rec_t_tm_e j v t)) ->
t = (open_rec_t_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_t_tm_f j v t) = (open_rec_t_tm_f i u (open_rec_t_tm_f j v t)) ->
t = (open_rec_t_tm_f i u t)));
open_rec_t_tm_x_core j v0 v.
Qed.
Lemma open_rec_t_tm_f_core:
forall t i j u v,
i <> j ->
(open_rec_t_tm_f j v t) = (open_rec_t_tm_f i u (open_rec_t_tm_f j v t)) ->
t = (open_rec_t_tm_f i u t).
Proof.
apply (F_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_t_tm_st j v t) = (open_rec_t_tm_st i u (open_rec_t_tm_st j v t)) ->
t = (open_rec_t_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_t_tm_e j v t) = (open_rec_t_tm_e i u (open_rec_t_tm_e j v t)) ->
t = (open_rec_t_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_t_tm_f j v t) = (open_rec_t_tm_f i u (open_rec_t_tm_f j v t)) ->
t = (open_rec_t_tm_f i u t)));
open_rec_t_tm_x_core j v0 v.
Qed.
Lemma open_rec_v_tm_core:
forall t i j u v,
i <> j ->
(open_rec_t_tm j v t) = (open_rec_t_tm i u (open_rec_t_tm j v t)) ->
t = (open_rec_t_tm i u t).
Proof.
induction t; introv neq equ;
simpl;
fequals*;
inversion equ.
apply open_rec_t_tm_st_core with (j:= j) (v:= v); assumption.
apply open_rec_t_tm_e_core with (j:= j) (v:= v); assumption.
apply open_rec_t_tm_f_core with (j:= j) (v:= v); assumption.
Qed.
Lemma open_rec_v_tm_v_core:
forall t i j u v,
i <> j ->
(open_rec_v_tm_v j v t) = (open_rec_v_tm_v i u (open_rec_v_tm_v j v t)) ->
t = (open_rec_v_tm_v i u t).
Proof.
induction t; introv Neq Equ; inversion* Equ; simpls*.
case_nat*.
case_nat*.
Qed.
Lemma open_rec_v_tm_p_core:
forall t i j u v,
i <> j ->
(open_rec_v_tm_p j v t) = (open_rec_v_tm_p i u (open_rec_v_tm_p j v t)) ->
t = (open_rec_v_tm_p i u t).
Proof.
induction t; introv Neq Equ; inversion* Equ; simpls*.
case_nat*.
case_nat*.
Qed.
Ltac open_rec_v_tm_x_core j v0:=
try solve[intros;
simpl in *;
inversion_on_matching_terms;
fequals*;
try apply open_rec_v_tm_v_core with (j:= j) (v:= v0); auto;
try apply open_rec_v_tm_p_core with (j:= j) (v:= v0); auto].
Lemma open_rec_v_tm_st_core:
forall t i j u v,
i <> j ->
(open_rec_v_tm_st j v t) = (open_rec_v_tm_st i u (open_rec_v_tm_st j v t)) ->
t = (open_rec_v_tm_st i u t).
Proof.
apply (St_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_v_tm_st j v t) = (open_rec_v_tm_st i u (open_rec_v_tm_st j v t)) ->
t = (open_rec_v_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_v_tm_e j v t) = (open_rec_v_tm_e i u (open_rec_v_tm_e j v t)) ->
t = (open_rec_v_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_v_tm_f j v t) = (open_rec_v_tm_f i u (open_rec_v_tm_f j v t)) ->
t = (open_rec_v_tm_f i u t)));
open_rec_v_tm_x_core j v0.
Qed.
Lemma open_rec_v_tm_e_core:
forall t i j u v,
i <> j ->
(open_rec_v_tm_e j v t) = (open_rec_v_tm_e i u (open_rec_v_tm_e j v t)) ->
t = (open_rec_v_tm_e i u t).
Proof.
apply (E_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_v_tm_st j v t) = (open_rec_v_tm_st i u (open_rec_v_tm_st j v t)) ->
t = (open_rec_v_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_v_tm_e j v t) = (open_rec_v_tm_e i u (open_rec_v_tm_e j v t)) ->
t = (open_rec_v_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_v_tm_f j v t) = (open_rec_v_tm_f i u (open_rec_v_tm_f j v t)) ->
t = (open_rec_v_tm_f i u t)));
open_rec_v_tm_x_core j v0.
Qed.
Lemma open_rec_v_tm_f_core:
forall t i j u v,
i <> j ->
(open_rec_v_tm_f j v t) = (open_rec_v_tm_f i u (open_rec_v_tm_f j v t)) ->
t = (open_rec_v_tm_f i u t).
Proof.
apply (F_ind_mutual
(fun t : St =>
forall i j u v,
i <> j ->
(open_rec_v_tm_st j v t) = (open_rec_v_tm_st i u (open_rec_v_tm_st j v t)) ->
t = (open_rec_v_tm_st i u t))
(fun t : E =>
forall i j u v,
i <> j ->
(open_rec_v_tm_e j v t) = (open_rec_v_tm_e i u (open_rec_v_tm_e j v t)) ->
t = (open_rec_v_tm_e i u t))
(fun t : F =>
forall i j u v,
i <> j ->
(open_rec_v_tm_f j v t) = (open_rec_v_tm_f i u (open_rec_v_tm_f j v t)) ->
t = (open_rec_v_tm_f i u t)));
open_rec_v_tm_x_core j v0.
Qed.
(* Bug bindings of names of LibVar vs LibVarPath. *)
Lemma lc__open_rec_t_identity:
forall t,
  lc_t t -> forall k u, t = (open_rec_t_t k u t).
Proof.
introv lct.
induction lct; try intros k u; simpl; auto; try solve[congruence];
introv;
fequals*;
pick_fresh alpha;
(* lets: (open_rec_t_core t0). Lets is failing here. Bummer. *)
lets L: open_rec_t_core;
specialize (L t0 (S k0) 0 u (ftvar alpha));
auto.
Qed.
Ltac inversion_on_lc :=
match goal with
| H : lc_t_tm_st _ |- _ => inversions H
| H : lc_t_tm_e _ |- _ => inversions H
| H : lc_t_tm_f _ |- _ => inversions H
end.
Ltac lc_t_tm_x__open_rec_t_x_identity:=
try solve[intros;
inversion_on_lc;
simpl;
fequals*;
try solve[apply* lc__open_rec_t_identity]].
Lemma lc_t_tm_st__open_rec_t_st_identity:
forall t,
lc_t_tm_st t -> forall k u, t = (open_rec_t_tm_st k u t).
Proof.
apply (St_ind_mutual
(fun t : St =>
lc_t_tm_st t -> forall k u, t = (open_rec_t_tm_st k u t))
(fun t : E =>
lc_t_tm_e t -> forall k u, t = (open_rec_t_tm_e k u t))
(fun t : F =>
lc_t_tm_f t -> forall k u, t = (open_rec_t_tm_f k u t)));
lc_t_tm_x__open_rec_t_x_identity.
Qed.
Lemma lc_t_tm_e__open_rec_t_st_identity:
forall t,
lc_t_tm_e t -> forall k u, t = (open_rec_t_tm_e k u t).
Proof.
  apply (E_ind_mutual
(fun t : St =>
lc_t_tm_st t -> forall k u, t = (open_rec_t_tm_st k u t))
(fun t : E =>
lc_t_tm_e t -> forall k u, t = (open_rec_t_tm_e k u t))
(fun t : F =>
lc_t_tm_f t -> forall k u, t = (open_rec_t_tm_f k u t)));
lc_t_tm_x__open_rec_t_x_identity.
Qed.
Lemma lc_t_tm_f__open_rec_t_st_identity:
forall t,
lc_t_tm_f t -> forall k u, t = (open_rec_t_tm_f k u t).
Proof.
apply (F_ind_mutual
(fun t : St =>
lc_t_tm_st t -> forall k u, t = (open_rec_t_tm_st k u t))
(fun t : E =>
lc_t_tm_e t -> forall k u, t = (open_rec_t_tm_e k u t))
(fun t : F =>
lc_t_tm_f t -> forall k u, t = (open_rec_t_tm_f k u t)));
lc_t_tm_x__open_rec_t_x_identity.
Qed.
Lemma lc_t_tm__open_rec_t_identity:
forall t,
lc_t_tm t -> forall k u, t = (open_rec_t_tm k u t).
Proof.
destruct t; intros; simpl; fequals*; inversion H.
apply* lc_t_tm_st__open_rec_t_st_identity.
apply* lc_t_tm_e__open_rec_t_st_identity.
apply* lc_t_tm_f__open_rec_t_st_identity.
Qed.
(* Substitution identity given fresh variables. *)
Lemma subst_t_tv_t_fresh : forall x t u,
x \notin ftv_t t -> t = (subst_t_tv_t u x t).
Proof.
intros.
induction t; simpls*;
fequals*;
case_var*.
Qed.
Lemma subst_t_tv_t_v_fresh : forall x t u,
x \notin ftv_tm_v t -> t = (subst_t_tv_t_v u x t).
Proof.
induction t; simpls*.
Qed.
Ltac subst_t_tv_t_x_fresh :=
lets: subst_t_tv_t_v_fresh;
lets: subst_t_tv_t_fresh;
intros;
simpls*;
fequals*.
Lemma subst_t_tv_t_st_fresh : forall x u t,
x \notin ftv_tm_st t -> t = (subst_t_tv_t_st u x t).
Proof.
intros x u.
apply (St_ind_mutual
(fun t : St =>
x \notin ftv_tm_st t -> t = (subst_t_tv_t_st u x t))
(fun t : E =>
x \notin ftv_tm_e t -> t = (subst_t_tv_t_e u x t))
(fun t : F =>
x \notin ftv_tm_f t -> t = (subst_t_tv_t_f u x t)));
subst_t_tv_t_x_fresh.
Qed.
Lemma subst_t_tv_t_e_fresh : forall x u t,
x \notin ftv_tm_e t -> t = (subst_t_tv_t_e u x t).
Proof.
intros x u.
apply (E_ind_mutual
(fun t : St =>
x \notin ftv_tm_st t -> t = (subst_t_tv_t_st u x t))
(fun t : E =>
x \notin ftv_tm_e t -> t = (subst_t_tv_t_e u x t))
(fun t : F =>
x \notin ftv_tm_f t -> t = (subst_t_tv_t_f u x t)));
subst_t_tv_t_x_fresh.
Qed.
Lemma subst_t_tv_t_f_fresh : forall x u t,
x \notin ftv_tm_f t -> t = (subst_t_tv_t_f u x t).
Proof.
intros x u.
apply (F_ind_mutual
(fun t : St =>
x \notin ftv_tm_st t -> t = (subst_t_tv_t_st u x t))
(fun t : E =>
x \notin ftv_tm_e t -> t = (subst_t_tv_t_e u x t))
(fun t : F =>
x \notin ftv_tm_f t -> t = (subst_t_tv_t_f u x t)));
subst_t_tv_t_x_fresh.
Qed.
Lemma subst_t_tv_t_tm_fresh : forall x u t,
x \notin ftv_tm t -> t = (subst_t_tv_t_tm u x t).
Proof.
destruct t; intros; simpl; fequals*.
apply* subst_t_tv_t_st_fresh.
apply* subst_t_tv_t_e_fresh.
apply* subst_t_tv_t_f_fresh.
Qed.
(* Substitution for term variables in terms is the identity given a fresh variable. *)
Lemma subst_v_v_tm_v_fresh : forall x t u,
x \notin fv_tm_v t -> t = (subst_v_v_tm_v u x t).
Proof.
intros.
induction t.
all: simpls*.
case_var*.
Qed.
Ltac subst_v_v_tm_x_fresh :=
lets: subst_v_v_tm_v_fresh;
lets: subst_t_tv_t_fresh;
intros;
simpls*;
fequals*.
Lemma subst_v_v_tm_st_fresh : forall x u t,
x \notin fv_tm_st t -> t = (subst_v_v_tm_st u x t).
Proof.
intros x u.
apply (St_ind_mutual
(fun t : St =>
x \notin fv_tm_st t -> t = (subst_v_v_tm_st u x t))
(fun t : E =>
x \notin fv_tm_e t -> t = (subst_v_v_tm_e u x t))
(fun t : F =>
x \notin fv_tm_f t -> t = (subst_v_v_tm_f u x t)));
subst_v_v_tm_x_fresh.
Qed.
Lemma subst_v_v_tm_e_fresh : forall x u t,
x \notin fv_tm_e t -> t = (subst_v_v_tm_e u x t).
Proof.
intros x u.
apply (E_ind_mutual
(fun t : St =>
x \notin fv_tm_st t -> t = (subst_v_v_tm_st u x t))
(fun t : E =>
x \notin fv_tm_e t -> t = (subst_v_v_tm_e u x t))
(fun t : F =>
x \notin fv_tm_f t -> t = (subst_v_v_tm_f u x t)));
subst_v_v_tm_x_fresh.
Qed.
Lemma subst_v_v_tm_f_fresh : forall x u t,
x \notin fv_tm_f t -> t = (subst_v_v_tm_f u x t).
Proof.
intros x u.
apply (F_ind_mutual
(fun t : St =>
x \notin fv_tm_st t -> t = (subst_v_v_tm_st u x t))
(fun t : E =>
x \notin fv_tm_e t -> t = (subst_v_v_tm_e u x t))
(fun t : F =>
x \notin fv_tm_f t -> t = (subst_v_v_tm_f u x t)));
subst_v_v_tm_x_fresh.
Qed.
Lemma subst_v_v_tm_fresh : forall x u t,
x \notin fv_tm t -> t = (subst_v_v_tm u x t).
Proof.
destruct t; intros; simpl; fequals*.
apply* subst_v_v_tm_st_fresh.
apply* subst_v_v_tm_e_fresh.
apply* subst_v_v_tm_f_fresh.
Qed.
(* Substitution distributes over the open operation. *)
(* For types for type variable substitution. *)
Lemma subst_open_t :
forall x u t1 t2,
lc_t u ->
(subst_t_tv_t u x (open_t t2 t1)) =
(open_t (subst_t_tv_t u x t2) (subst_t_tv_t u x t1)).
Proof.
intros.
unfold open_t.
generalize 0.
induction t1; try solve[intros; simpl; fequals*].
intros; simpl; case_nat*.
intros.
simpl.
case_var*.
apply* lc__open_rec_t_identity.
Qed.
(* For types for type variables in terms. *)
(* This does not make sense because, in general, we cannot substitute terms
into term binders in Cyclone.
This is due to the inability to generally substitute a term for a variable
in p_e x p expressions.
Will it cause problems in the proofs? Perhaps.
Lemma subst_open_t_tm_st:
forall u alpha s1 s2,
lc_t u ->
(subst_t_tv_t_st u alpha (open_rec_t_tm_st 0 s2 s1)) =
(open_rec_t_tm_st 0 (subst_t_tv_t_st u alpha s2) (subst_t_tv_t_st u alpha s1)).
And neither does this make sense:
Lemma subst_open_t_tm_st:
forall x y s1 s2,
lc_t_tm_st (e_s (v_e y)) ->
(subst_v_v_tm_st x y (open_rec_v_tm_st 0 s2 s1)) =
(open_rec_v_tm_st 0 (subst_v_v_tm_st x y s2) (subst_v_v_tm_st x y s1)).
But both of these might work for variable-for-variable substitution?
*)
(** Substitution and open_var for distinct names commute. *)
Lemma subst_open_tvar:
forall (alpha beta : var) t e,
alpha <> beta ->
lc_t t ->
forall n,
(open_rec_t_tn (ftvar beta) (subst_t_tv_t t alpha e)) =
(subst_t_tv_t t alpha (open_rec_t_t n (ftvar beta) e)).
Proof.
introv Neq lctaut.
induction e; intros;
lets L: (lc__open_rec_t_identity lctaut);
simpls*; fequals*; simpls*.
case_nat*.
simpl.
case_var*.
case_var*.
Qed.
Lemma subst_open_tvar_term:
forall (alpha beta : var) t e,
alpha <> beta ->
lc_t t ->
forall n,
(open_rec_t_tm n (ftvar beta) (subst_t_tv_t_tm t alpha e)) =
(subst_t_tv_t_tm t alpha (open_rec_t_tm n (ftvar beta) e)).
Proof.
introv Neq lctaut.
induction e; intros.
lets L: (lc_t_tm__open_rec_t_identity lctaut).
simpls*; fequals*; simpls*.
case_nat*.
simpl.
case_var*.
case_var*.
Qed.
Lemma subst_open_evar:
forall (x y : var) t e,
x <> y ->
lc_term t ->
forall n,
(open_rec_t_t n (fvar y) (subst_t_tv_t t x e)) =
(subst_t_tv_t t x (open_rec_t_t n (fevar y) e)).
Proof.
introv Neq lctaut.
induction e; intros;
lets L: (lc__open_rec_t_identity lctaut);
simpls*; fequals*; simpls*.
case_nat*.
simpl.
case_var*.
case_var*.
Qed.
(** Opening up an abstraction of body t with a term u is the same as opening
up the abstraction with a fresh name x and then substituting u for x. *)
Lemma subst_intro_tau :
forall alpha t u,
alpha \notin (ftv_t t) ->
lc_t u ->
(open_rec_t_t 0 t u) = (subst_t_tv_t u alpha (open_rec_t_t 0 t (ftvar alpha))).
Proof.
introv Fr lctauu.
induction t; intros; simpls*; case_var*; fequals*; simpls*;
lets L : (lc__open_rec_t_identity lctauu 0);
symmetry;
auto.
Qed.
Lemma subst_intro : forall x t u,
x \notin (fv t) -> term u ->
t ^^ u = [x ~> u](t ^ x).
Proof.
introv Fr Wu. rewrite* subst_open.
rewrite* subst_fresh. simpl. case_var*.
Qed.
(** Tactic to permute subst and open var *)
Ltac cross :=
rewrite subst_open_var; try cross.
Tactic Notation "cross" "*" :=
cross; auto_star.
(* ********************************************************************** *)
(** ** Terms are stable through substitutions *)
(** Terms are stable by substitution *)
Lemma subst_term : forall t z u,
term u -> term t -> term ([z ~> u]t).
Proof.
induction 2; simpls*.
case_var*.
apply_fresh term_abs as y. rewrite* subst_open_var.
Qed.
Lemma subst_body : forall z u t,
body t -> term u -> body ([z ~> u]t).
Proof.
  unfold body. introv [L H]. exists (L \u \{z}).
intros. rewrite~ subst_open_var. apply* subst_term.
Qed.
Hint Resolve subst_term subst_body.
(* ********************************************************************** *)
(** ** Terms are stable through open *)
(** Conversion from locally closed abstractions and bodies *)
Lemma term_abs_to_body : forall t1,
term (trm_abs t1) -> body t1.
Proof.
intros. unfold body. inversion* H.
Qed.
Lemma body_to_term_abs : forall t1,
body t1 -> term (trm_abs t1).
Proof.
intros. inversion* H.
Qed.
Hint Resolve term_abs_to_body body_to_term_abs.
(** ** Opening a body with a term gives a term *)
Lemma open_tm : forall t u,
body t -> term u -> term (t ^^ u).
Proof.
intros. destruct H. pick_fresh y. rewrite* (@subst_intro y).
Qed.
Hint Resolve open_tm.
(* ********************************************************************** *)
(** ** Additional results on primitive operations *)
(** Open_var with fresh names is an injective operation *)
Lemma open_var_inj : forall x t1 t2,
x \notin (fv t1) -> x \notin (fv t2) ->
(t1 ^ x = t2 ^ x) -> (t1 = t2).
Proof.
intros x t1. unfold open. generalize 0.
induction t1; intro k; destruct t2; simpl; intros; inversion H1;
  try solve [ f_equal*
  | do 2 try case_nat; inversions* H1; try notin_false ].
Qed.
(** Close var commutes with open with some freshness conditions,
this is used in the proofs of [close_var_body] and [close_var_open] *)
Lemma close_var_rec_open : forall x y z t1 i j,
i <> j -> y <> x -> y \notin (fv t1) ->
{i ~> trm_fvar y} ({j ~> trm_fvar z} (close_var_rec j x t1) )
= {j ~> trm_fvar z} (close_var_rec j x ({i ~> trm_fvar y}t1) ).
Proof.
induction t1; simpl; intros; try solve [ f_equal* ].
do 2 (case_nat; simpl); try solve [ case_var* | case_nat* ].
case_var*. simpl. case_nat*.
Qed.
(** The variable closed over is fresh in the result of close_var *)
Lemma close_var_fresh : forall x t,
x \notin fv (close_var x t).
Proof.
introv. unfold close_var. generalize 0.
induction t; intros k; simpls; notin_simpl; auto.
case_var; simpls*.
Qed.
(** Close var is an operation returning a body of an abstraction *)
Lemma close_var_body : forall x t,
term t -> body (close_var x t).
Proof.
introv W. exists \{x}. intros y Fr.
unfold open, close_var. generalize 0. gen y.
induction W; intros y Fr k; simpls.
case_var; simpls*. case_nat*.
auto_star.
apply_fresh* term_abs as z.
unfolds open. rewrite* close_var_rec_open.
Qed.
(** Close var is the right inverse of open_var *)
Lemma close_var_open : forall x t,
term t -> t = (close_var x t) ^ x.
Proof.
introv W. unfold close_var, open. generalize 0.
induction W; intros k; simpls; f_equal*.
case_var*. simpl. case_nat*.
  let L := gather_vars in match goal with |- _ = ?t =>
destruct (var_fresh (L \u fv t)) as [y Fr] end.
apply* (@open_var_inj y).
unfolds open. rewrite* close_var_rec_open.
Qed.
(** An abstract specification of close_var, which packages the
result of the operation and all the properties about it. *)
Lemma close_var_spec : forall t x, term t ->
exists u, t = u ^ x /\ body u /\ x \notin (fv u).
Proof.
intros. exists (close_var x t). splits 3.
apply* close_var_open.
apply* close_var_body.
apply* close_var_fresh.
Qed.
|
{"author": "briangmilnes", "repo": "CycloneCoqSemantics", "sha": "190c0fc57d5aebfde244efb06a119f108de7a150", "save_path": "github-repos/coq/briangmilnes-CycloneCoqSemantics", "path": "github-repos/coq/briangmilnes-CycloneCoqSemantics/CycloneCoqSemantics-190c0fc57d5aebfde244efb06a119f108de7a150/4.1/Cyclone_LN_Lemmas.v"}
|
[STATEMENT]
lemma (in Corps) n_eq_val_eq_idealTr:
"\<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>);
\<forall>j \<le> n. ((\<nu>\<^bsub>K (P j)\<^esub>) x) \<le> ((\<nu>\<^bsub>K (P j)\<^esub>) y)\<rbrakk> \<Longrightarrow> Rxa (O\<^bsub>K P n\<^esub>) y \<subseteq> Rxa (O\<^bsub>K P n\<^esub>) x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
[PROOF STEP]
apply (subgoal_tac "\<forall>j \<le> n. valuation K (\<nu>\<^bsub>K (P j)\<^esub>)")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (case_tac "x = \<zero>\<^bsub>(O\<^bsub>K P n\<^esub>)\<^esub>",
simp add:zero_in_ring_n_pd_zero_K)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; \<zero> \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) \<zero> \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x = \<zero>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p \<zero>
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (simp add:value_of_zero)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; \<zero> \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. \<infinity> \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x = \<zero>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p \<zero>
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (subgoal_tac "y = \<zero>", simp,
drule_tac a = n in forall_spec, simp,
drule_tac a=n in forall_spec, simp)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; \<zero> \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); x = \<zero>; \<infinity> \<le> (\<nu>\<^bsub>K P n\<^esub>) y; valuation K (\<nu>\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> y = \<zero>
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (cut_tac inf_ge_any[of "(\<nu>\<^bsub>K (P n)\<^esub>) y"],
frule ale_antisym[of "(\<nu>\<^bsub>K (P n)\<^esub>) y" "\<infinity>"], assumption+)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; \<zero> \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); x = \<zero>; \<infinity> \<le> (\<nu>\<^bsub>K P n\<^esub>) y; valuation K (\<nu>\<^bsub>K P n\<^esub>); (\<nu>\<^bsub>K P n\<^esub>) y \<le> \<infinity>; (\<nu>\<^bsub>K P n\<^esub>) y = \<infinity>\<rbrakk> \<Longrightarrow> y = \<zero>
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (rule value_inf_zero, assumption+)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>distinct_pds K n P; \<zero> \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); x = \<zero>; \<infinity> \<le> (\<nu>\<^bsub>K P n\<^esub>) y; valuation K (\<nu>\<^bsub>K P n\<^esub>); (\<nu>\<^bsub>K P n\<^esub>) y \<le> \<infinity>; (\<nu>\<^bsub>K P n\<^esub>) y = \<infinity>\<rbrakk> \<Longrightarrow> y \<in> carrier K
2. \<lbrakk>distinct_pds K n P; \<zero> \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); x = \<zero>; \<infinity> \<le> (\<nu>\<^bsub>K P n\<^esub>) y; valuation K (\<nu>\<^bsub>K P n\<^esub>); (\<nu>\<^bsub>K P n\<^esub>) y \<le> \<infinity>; (\<nu>\<^bsub>K P n\<^esub>) y = \<infinity>\<rbrakk> \<Longrightarrow> (\<nu>\<^bsub>K P n\<^esub>) y = \<infinity>
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
4. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (simp add:mem_ring_n_pd_mem_K, assumption)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (frule ring_n_pd[of n P])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (subgoal_tac "\<forall>j\<le>n. 0 \<le> ((\<nu>\<^bsub>K (P j)\<^esub>) (y \<cdot>\<^sub>r (x\<^bsup>\<hyphen>K\<^esup>)))")
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (subgoal_tac "(y \<cdot>\<^sub>r (x\<^bsup>\<hyphen>K\<^esup>)) \<in> carrier (O\<^bsub>K P n\<^esub>)")
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>); y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)\<rbrakk> \<Longrightarrow> y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
4. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (cut_tac field_frac_mul[of "y" "x"],
frule Ring.rxa_in_Rxa[of "O\<^bsub>K P n\<^esub>" "x" "y \<cdot>\<^sub>r (x\<^bsup>\<hyphen>K\<^esup>)"], assumption+,
simp add:ring_n_pd_tOp_K_tOp[THEN sym],
frule Ring.principal_ideal[of "O\<^bsub>K P n\<^esub>" "x"], assumption+)
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>); y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>); y = y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<cdot>\<^sub>r\<^bsub>O\<^bsub>K P n\<^esub>\<^esub> x; y \<in> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x; ideal (O\<^bsub>K P n\<^esub>) ((O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x)\<rbrakk> \<Longrightarrow> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p y \<subseteq> (O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>); y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> y \<in> carrier K
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>); y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> x \<in> carrier K
4. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>); y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> x \<noteq> \<zero>
5. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)\<rbrakk> \<Longrightarrow> y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)
6. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
7. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (cut_tac Ring.ideal_cont_Rxa[of "O\<^bsub>K P n\<^esub>" "(O\<^bsub>K P n\<^esub>) \<diamondsuit>\<^sub>p x" "y"],
assumption+,
simp add:mem_ring_n_pd_mem_K,
simp add:mem_ring_n_pd_mem_K,
simp add:zero_in_ring_n_pd_zero_K)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)\<rbrakk> \<Longrightarrow> y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier (O\<^bsub>K P n\<^esub>)
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (frule Ring.rxa_in_Rxa[of "O\<^bsub>K P n\<^esub>" "x" "y \<cdot>\<^sub>r (x\<^bsup>\<hyphen>K\<^esup>)"], assumption+,
simp add:ring_n_pd_def Sr_def,
(erule conjE)+,
cut_tac field_is_ring, rule Ring.ring_tOp_closed, assumption+,
cut_tac invf_closed1[of x], simp, simp,
simp add:ring_n_pd_def Sr_def)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier K \<and> (\<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x); y \<in> carrier K \<and> (\<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) y); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>; Ring (K\<lparr>carrier := {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}, pop := \<lambda>x\<in>{x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}. restrict ((\<plusminus>) x) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}, mop := restrict (mop K) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}, tp := \<lambda>x\<in>{x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}. restrict ((\<cdot>\<^sub>r) x) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}\<rparr>); \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>); (if y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier K then restrict ((\<cdot>\<^sub>r) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x} else undefined) x \<in> K\<lparr>carrier := {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}, pop := \<lambda>x\<in>{x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}. restrict ((\<plusminus>) x) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}, mop := restrict (mop K) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}, tp := \<lambda>x\<in>{x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}. restrict ((\<cdot>\<^sub>r) x) {x \<in> carrier K. \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) x}\<rparr> \<diamondsuit>\<^sub>p x\<rbrakk> \<Longrightarrow> y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup> \<in> carrier K
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (cut_tac Ring.ring_tOp_closed, assumption+,
cut_tac field_is_ring, assumption+, simp+,
cut_tac invf_closed1[of x], simp, simp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>)\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
2. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (rule allI, rule impI, drule_tac a = j in forall_spec, assumption+,
cut_tac invf_closed1[of x], simp, erule conjE)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>j. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); j \<le> n; (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; x\<^bsup>\<hyphen> K\<^esup> \<in> carrier K; x\<^bsup>\<hyphen> K\<^esup> \<noteq> \<zero>\<rbrakk> \<Longrightarrow> 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) (y \<cdot>\<^sub>r x\<^bsup>\<hyphen> K\<^esup>)
2. \<And>j. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); j \<le> n; (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> x \<in> carrier K - {\<zero>}
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (subst val_t2p [where v="\<nu>\<^bsub>K P j\<^esub>"], simp,
rule mem_ring_n_pd_mem_K[of "n" "P" "y"], assumption+,
frule_tac x = j in spec, simp,
simp add:zero_in_ring_n_pd_zero_K)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>j. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>; Ring (O\<^bsub>K P n\<^esub>); j \<le> n; (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; x\<^bsup>\<hyphen> K\<^esup> \<in> carrier K; x\<^bsup>\<hyphen> K\<^esup> \<noteq> \<zero>\<rbrakk> \<Longrightarrow> 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) y + (\<nu>\<^bsub>K P j\<^esub>) (x\<^bsup>\<hyphen> K\<^esup>)
2. \<And>j. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); j \<le> n; (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> x \<in> carrier K - {\<zero>}
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (subst value_of_inv [where v="\<nu>\<^bsub>K P j\<^esub>"], simp,
simp add:ring_n_pd_def Sr_def, assumption+)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>j. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>; Ring (O\<^bsub>K P n\<^esub>); j \<le> n; (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y; x\<^bsup>\<hyphen> K\<^esup> \<in> carrier K; x\<^bsup>\<hyphen> K\<^esup> \<noteq> \<zero>\<rbrakk> \<Longrightarrow> 0 \<le> (\<nu>\<^bsub>K P j\<^esub>) y + - (\<nu>\<^bsub>K P j\<^esub>) x
2. \<And>j. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>); x \<noteq> \<zero>\<^bsub>O\<^bsub>K P n\<^esub>\<^esub>; Ring (O\<^bsub>K P n\<^esub>); j \<le> n; (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> x \<in> carrier K - {\<zero>}
3. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (frule_tac x = "(\<nu>\<^bsub>K (P j)\<^esub>) x" and y = "(\<nu>\<^bsub>K (P j)\<^esub>) y" in ale_diff_pos,
simp add:diff_ant_def,
simp add:mem_ring_n_pd_mem_K[of "n" "P" "x"] zero_in_ring_n_pd_zero_K)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>distinct_pds K n P; x \<in> carrier (O\<^bsub>K P n\<^esub>); y \<in> carrier (O\<^bsub>K P n\<^esub>); \<forall>j\<le>n. (\<nu>\<^bsub>K P j\<^esub>) x \<le> (\<nu>\<^bsub>K P j\<^esub>) y\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. valuation K (\<nu>\<^bsub>K P j\<^esub>)
[PROOF STEP]
apply (rule allI, rule impI,
simp add:distinct_pds_def, (erule conjE)+,
rule_tac P = "P j" in representative_of_pd_valuation, simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 13248, "file": "Valuation_Valuation2", "length": 20}
|
"""Util for analysis of SIS/backselect data."""
import collections
import numpy as np
import os
import torch
import inference_util
import sis_util
from sufficient_input_subsets import sis
# Function to sort filenames by image index in path.
SR_SORT = lambda s: int(os.path.basename(s).split('_')[-1].split('.')[0])
LoadSISResults = collections.namedtuple(
'LoadSISResults',
[
'sis_results',
'sis_image_idxs',
'sis_pred_class',
'sis_is_correct_class',
'sis_masked_images',
'original_confidences',
],
)
def backselect_mask_from_sis_result(sis_result, features_to_keep):
  """Builds a boolean mask keeping the `features_to_keep` features removed
  last in the backselect ordering."""
  backselect_mask = np.zeros(sis_result.mask.shape, dtype=bool)
  backselect_mask[sis._transform_index_array_into_indexer(
      sis_result.ordering_over_entire_backselect[-features_to_keep:])] = True
  return backselect_mask
def find_sis_from_backselect_result(sis_result, threshold):
# Assumes SIS exists (initial prediction >= threshold).
backselect_stack = list(zip(
sis_result.ordering_over_entire_backselect,
sis_result.values_over_entire_backselect,
))
sis_idxs = sis._find_sis_from_backselect(backselect_stack, threshold)
mask = ~(sis.make_empty_boolean_mask(sis_result.mask.shape))
mask[sis._transform_index_array_into_indexer(sis_idxs)] = True
new_sis_result = sis.SISResult(
sis=np.array(sis_idxs, dtype=np.int_),
ordering_over_entire_backselect=np.array(
sis_result.ordering_over_entire_backselect, dtype=np.int_),
values_over_entire_backselect=np.array(
sis_result.values_over_entire_backselect, dtype=np.float_),
mask=mask,
)
return new_sis_result
def load_sis_results(dataset, dataset_name, model, sis_results_dir,
fully_masked_image, sis_threshold, max_num=None):
"""Load data and create masks and masked images."""
sis_results = []
sis_image_idxs = []
sis_pred_class = []
sis_is_correct_class = []
sis_masked_images = []
original_confidences = []
num_images = len(dataset)
if max_num:
num_images = min(max_num, len(dataset))
for i in range(num_images):
image, label = dataset[i]
# Check if original prediction >= threshold.
original_preds = inference_util.predict(
model, image.unsqueeze(0).cuda(), add_softmax=True)
original_confidence = float(original_preds.max())
original_label = int(original_preds.argmax())
if original_confidence < sis_threshold:
continue # No SIS exists.
# Compute SIS from backselect data.
sis_file = os.path.join(
sis_results_dir, '%s_%d.npz' % (dataset_name, i))
backselect_sr = sis_util.load_sis_result(sis_file)
sis_result = find_sis_from_backselect_result(
backselect_sr, sis_threshold)
sis_masked_image = sis.produce_masked_inputs(
image.numpy(), fully_masked_image, [sis_result.mask])[0]
sis_results.append(sis_result)
sis_image_idxs.append(i)
sis_pred_class.append(original_label)
sis_is_correct_class.append((original_label == label))
sis_masked_images.append(sis_masked_image)
original_confidences.append(original_confidence)
sis_image_idxs = np.array(sis_image_idxs)
sis_pred_class = np.array(sis_pred_class)
sis_is_correct_class = np.array(sis_is_correct_class)
sis_masked_images = np.array(sis_masked_images)
original_confidences = np.array(original_confidences)
return LoadSISResults(
sis_results=sis_results,
sis_image_idxs=sis_image_idxs,
sis_pred_class=sis_pred_class,
sis_is_correct_class=sis_is_correct_class,
sis_masked_images=sis_masked_images,
original_confidences=original_confidences,
)
def load_backselect_subsets(dataset, dataset_name, pixels_to_keep,
sis_results_dir, fully_masked_image, max_num=None):
bs_masks = []
bs_masked_images = []
num_images = len(dataset)
if max_num:
num_images = min(max_num, len(dataset))
for i in range(num_images):
image, _ = dataset[i]
sis_file = os.path.join(
sis_results_dir, '%s_%d.npz' % (dataset_name, i))
sr = sis_util.load_sis_result(sis_file)
bs_mask = backselect_mask_from_sis_result(sr, pixels_to_keep)
img_masked = sis.produce_masked_inputs(
image.numpy(), fully_masked_image, [bs_mask])[0]
bs_masks.append(bs_mask)
bs_masked_images.append(img_masked)
bs_masks = np.array(bs_masks)
bs_masked_images = np.array(bs_masked_images)
return bs_masks, bs_masked_images
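
# --- Usage sketch (appended; every name below is hypothetical, not part of
# this module). Assumes a torch Dataset `val_dataset`, a trained `model` on
# GPU, a `fully_masked_image` baseline array, and backselect .npz files under
# `sis_dir`.
#
# loaded = load_sis_results(
#     dataset=val_dataset,
#     dataset_name='val',
#     model=model,
#     sis_results_dir=sis_dir,
#     fully_masked_image=fully_masked_image,
#     sis_threshold=0.99,
#     max_num=100,
# )
# print(len(loaded.sis_results), loaded.original_confidences.mean())
#
# bs_masks, bs_masked_images = load_backselect_subsets(
#     val_dataset, 'val', pixels_to_keep=10,
#     sis_results_dir=sis_dir, fully_masked_image=fully_masked_image,
#     max_num=100)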
|
{"hexsha": "e6e165bd1b989c69c5a73790d8de27f70d990d9d", "size": 4723, "ext": "py", "lang": "Python", "max_stars_repo_path": "sis_analysis_util.py", "max_stars_repo_name": "b-carter/overinterpretation", "max_stars_repo_head_hexsha": "211d25c83d97e3238109e5c611c1af696989d3b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-11-02T09:53:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T11:42:35.000Z", "max_issues_repo_path": "sis_analysis_util.py", "max_issues_repo_name": "b-carter/overinterpretation", "max_issues_repo_head_hexsha": "211d25c83d97e3238109e5c611c1af696989d3b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-30T17:33:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-30T17:33:11.000Z", "max_forks_repo_path": "sis_analysis_util.py", "max_forks_repo_name": "b-carter/overinterpretation", "max_forks_repo_head_hexsha": "211d25c83d97e3238109e5c611c1af696989d3b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-12-18T12:48:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T09:35:17.000Z", "avg_line_length": 33.4964539007, "max_line_length": 79, "alphanum_fraction": 0.6991319077, "include": true, "reason": "import numpy", "num_tokens": 1082}
|
from collections import namedtuple
import torch
from torch.autograd import Variable
import numpy as np
from const import *
def reps_pad(responses, max_len, evaluation):
    # Right-pad every response with PAD ids up to max_len.
    x = np.array([resp + [PAD] * (max_len - len(resp)) for resp in responses])
    if evaluation:
        # Note: no_grad() only affects operations on tensors that require
        # grad; for a freshly created tensor both branches coincide.
        with torch.no_grad():
            x = Variable(torch.from_numpy(x))
    else:
        x = Variable(torch.from_numpy(x))
    return x
def uttes_pad(utterances, max_cont_len, max_utte_len, evaluation):
pad_utte = [[PAD] * max_utte_len]
utterances = [[u + [PAD] * (max_utte_len - len(u))
for u in utte] for utte in utterances]
utterances = [pad_utte * (max_cont_len - len(utte)) +
utte for utte in utterances]
x = np.array(utterances)
if evaluation:
with torch.no_grad():
x = Variable(torch.from_numpy(x))
else:
x = Variable(torch.from_numpy(x))
return x
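# Illustration (hedged): suppose PAD == 0 (PAD is whatever integer id const.py
# defines). Then:
#   reps_pad([[7, 8]], max_len=4, evaluation=True)
#     -> tensor([[7, 8, 0, 0]])          # responses are right-padded
#   uttes_pad([[[5, 6]]], max_cont_len=2, max_utte_len=3, evaluation=True)
#     -> tensor([[[0, 0, 0],
#                 [5, 6, 0]]])           # contexts are left-padded with empty utterances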
class DataLoader(object):
def __init__(self,
utterances,
responses,
labels,
max_cont_len,
max_utte_len,
use_cuda,
evaluation=False,
bsz=64,
shuffle=True):
self.sents_size = len(utterances)
self.step = 0
self.stop_step = self.sents_size // bsz
self.bsz = bsz
self.use_cuda = use_cuda
self.evaluation = evaluation
self.max_cont_len = max_cont_len
self.max_utte_len = max_utte_len
self.utterances = np.asarray(utterances)
self.responses = np.asarray(responses)
self.labels = np.asarray(labels)
self.nt = namedtuple(
'dataloader', ['utterances', 'responses', 'labels'])
if shuffle:
self._shuffle()
def _shuffle(self):
indices = np.arange(self.utterances.shape[0])
np.random.shuffle(indices)
self.utterances = self.utterances[indices]
self.responses = self.responses[indices]
self.labels = self.labels[indices]
def __iter__(self):
return self
def __next__(self):
if self.step == self.stop_step:
self.step = 0
raise StopIteration()
start = self.step * self.bsz
bsz = min(self.bsz, self.sents_size - start)
self.step += 1
utterances = uttes_pad(
self.utterances[start:start + bsz], self.max_cont_len, self.max_utte_len, self.evaluation)
responses = reps_pad(
self.responses[start:start + bsz], self.max_utte_len, self.evaluation)
labels = Variable(torch.from_numpy(self.labels[start:start + bsz]))
if self.use_cuda:
utterances = utterances.cuda()
responses = responses.cuda()
labels = labels.cuda()
return self.nt._make([utterances, responses, labels])
if __name__ == '__main__':
data = torch.load('./data/corpus')
training_data = DataLoader(
data['train']['utterances'],
data['train']['responses'],
data['train']['labels'],
data['max_cont_len'],
data['max_utte_len'],
True, bsz=4, shuffle=False, evaluation=True)
    word2idx = data["dict"]["dict"]  # avoid shadowing the builtin `dict`
    idx2word = {v: k for k, v in word2idx.items()}
u, r, l = next(training_data)
print(u)
print(r)
print(l)
|
{"hexsha": "ca0f5e3767129199380baaf0decafeaf2dd27a44", "size": 3359, "ext": "py", "lang": "Python", "max_stars_repo_path": "retrieval-based-chatbots/data_loader.py", "max_stars_repo_name": "shinoyuki222/torch-light", "max_stars_repo_head_hexsha": "4799805d9bcae82a9f12a574dcf9fdd838c92ee9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 310, "max_stars_repo_stars_event_min_datetime": "2018-11-02T10:12:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:59:51.000Z", "max_issues_repo_path": "retrieval-based-chatbots/data_loader.py", "max_issues_repo_name": "shinoyuki222/torch-light", "max_issues_repo_head_hexsha": "4799805d9bcae82a9f12a574dcf9fdd838c92ee9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2018-11-08T10:09:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-30T08:54:33.000Z", "max_forks_repo_path": "retrieval-based-chatbots/data_loader.py", "max_forks_repo_name": "shinoyuki222/torch-light", "max_forks_repo_head_hexsha": "4799805d9bcae82a9f12a574dcf9fdd838c92ee9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 152, "max_forks_repo_forks_event_min_datetime": "2018-11-02T13:00:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T12:45:08.000Z", "avg_line_length": 28.9568965517, "max_line_length": 102, "alphanum_fraction": 0.5826138732, "include": true, "reason": "import numpy", "num_tokens": 787}
|
CS REAL FUNCTION DAW(XX)
DOUBLE PRECISION FUNCTION DAW(XX)
C----------------------------------------------------------------------
C
C This function program evaluates Dawson's integral,
C
C 2 / x 2
C -x | t
C F(x) = e | e dt
C |
C / 0
C
C for a real argument x.
C
C The calling sequence for this function is
C
C Y=DAW(X)
C
C The main computation uses rational Chebyshev approximations
C published in Math. Comp. 24, 171-178 (1970) by Cody, Paciorek
C and Thacher. This transportable program is patterned after the
C machine-dependent FUNPACK program DDAW(X), but cannot match that
C version for efficiency or accuracy. This version uses rational
C approximations that are theoretically accurate to about 19
C significant decimal digits. The accuracy achieved depends on the
C arithmetic system, the compiler, the intrinsic functions, and
C proper selection of the machine-dependent constants.
C
C*******************************************************************
C*******************************************************************
C
C Explanation of machine-dependent constants. Let
C
C XINF = largest positive machine number
C XMIN = the smallest positive machine number.
C EPS = smallest positive number such that 1+eps > 1.
C Approximately beta**(-p), where beta is the machine
C radix and p is the number of significant base-beta
C digits in a floating-point number.
C
C Then the following machine-dependent constants must be declared
C in DATA statements. IEEE values are provided as a default.
C
C XMAX = absolute argument beyond which DAW(X) underflows.
C XMAX = min(0.5/xmin, xinf).
C XSMALL = absolute argument below which DAW(X) may be represented
C by X. We recommend XSMALL = sqrt(eps).
C XLARGE = argument beyond which DAW(X) may be represented by
C 1/(2x). We recommend XLARGE = 1/sqrt(eps).
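C          For IEEE double precision (eps = 1.11D-16) these choices give
C          XSMALL = sqrt(eps) ~ 1.05D-08 and XLARGE = 1/sqrt(eps) ~ 9.49D+07,
C          the values used in the DATA statements below.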
C
C Approximate values for some important machines are
C
C beta p eps xmin xinf
C
C CDC 7600 (S.P.) 2 48 7.11E-15 3.14E-294 1.26E+322
C CRAY-1 (S.P.) 2 48 7.11E-15 4.58E-2467 5.45E+2465
C IEEE (IBM/XT,
C SUN, etc.) (S.P.) 2 24 1.19E-07 1.18E-38 3.40E+38
C IEEE (IBM/XT,
C SUN, etc.) (D.P.) 2 53 1.11D-16 2.23E-308 1.79D+308
C IBM 3033 (D.P.) 16 14 1.11D-16 5.40D-79 7.23D+75
C VAX 11/780 (S.P.) 2 24 5.96E-08 2.94E-39 1.70E+38
C (D.P.) 2 56 1.39D-17 2.94D-39 1.70D+38
C (G Format) (D.P.) 2 53 1.11D-16 5.57D-309 8.98D+307
C
C XSMALL XLARGE XMAX
C
C CDC 7600 (S.P.) 5.96E-08 1.68E+07 1.59E+293
C CRAY-1 (S.P.) 5.96E-08 1.68E+07 5.65E+2465
C IEEE (IBM/XT,
C SUN, etc.) (S.P.) 2.44E-04 4.10E+03 4.25E+37
C IEEE (IBM/XT,
C SUN, etc.) (D.P.) 1.05E-08 9.49E+07 2.24E+307
C IBM 3033 (D.P.) 3.73D-09 2.68E+08 7.23E+75
C VAX 11/780 (S.P.) 2.44E-04 4.10E+03 1.70E+38
C (D.P.) 3.73E-09 2.68E+08 1.70E+38
C (G Format) (D.P.) 1.05E-08 9.49E+07 8.98E+307
C
C*******************************************************************
C*******************************************************************
C
C Error Returns
C
C The program returns 0.0 for |X| > XMAX.
C
C Intrinsic functions required are:
C
C ABS
C
C
C Author: W. J. Cody
C Mathematics and Computer Science Division
C Argonne National Laboratory
C Argonne, IL 60439
C
C Latest modification: March 9, 1992
C
C----------------------------------------------------------------------
INTEGER I
CS REAL
DOUBLE PRECISION
1 FRAC,HALF,ONE,ONE225,P1,P2,P3,P4,Q1,Q2,Q3,Q4,SIX25,
2 SUMP,SUMQ,TWO5,W2,X,XX,Y,XLARGE,XMAX,XSMALL,ZERO
DIMENSION P1(10),P2(10),P3(10),P4(10),Q1(10),Q2(9),Q3(9),Q4(9)
C----------------------------------------------------------------------
C Mathematical constants.
C----------------------------------------------------------------------
CS DATA ZERO,HALF,ONE/0.0E0,0.5E0,1.0E0/,
CS 1 SIX25,ONE225,TWO5/6.25E0,12.25E0,25.0E0/
DATA ZERO,HALF,ONE/0.0D0,0.5D0,1.0D0/,
1 SIX25,ONE225,TWO5/6.25D0,12.25D0,25.0D0/
C----------------------------------------------------------------------
C Machine-dependent constants
C----------------------------------------------------------------------
CS DATA XSMALL/2.44E-04/, XLARGE/4.10E+03/, XMAX/4.25E+37/
DATA XSMALL/1.05D-08/, XLARGE/9.49D+07/, XMAX/2.24D+307/
C----------------------------------------------------------------------
C Coefficients for R(9,9) approximation for |x| < 2.5
C----------------------------------------------------------------------
CS DATA P1/-2.69020398788704782410E-12, 4.18572065374337710778E-10,
CS 1 -1.34848304455939419963E-08, 9.28264872583444852976E-07,
CS 2 -1.23877783329049120592E-05, 4.07205792429155826266E-04,
CS 3 -2.84388121441008500446E-03, 4.70139022887204722217E-02,
CS 4 -1.38868086253931995101E-01, 1.00000000000000000004E+00/
CS DATA Q1/ 1.71257170854690554214E-10, 1.19266846372297253797E-08,
CS 1 4.32287827678631772231E-07, 1.03867633767414421898E-05,
CS 2 1.78910965284246249340E-04, 2.26061077235076703171E-03,
CS 3 2.07422774641447644725E-02, 1.32212955897210128811E-01,
CS 4 5.27798580412734677256E-01, 1.00000000000000000000E+00/
DATA P1/-2.69020398788704782410D-12, 4.18572065374337710778D-10,
1 -1.34848304455939419963D-08, 9.28264872583444852976D-07,
2 -1.23877783329049120592D-05, 4.07205792429155826266D-04,
3 -2.84388121441008500446D-03, 4.70139022887204722217D-02,
4 -1.38868086253931995101D-01, 1.00000000000000000004D+00/
DATA Q1/ 1.71257170854690554214D-10, 1.19266846372297253797D-08,
1 4.32287827678631772231D-07, 1.03867633767414421898D-05,
2 1.78910965284246249340D-04, 2.26061077235076703171D-03,
3 2.07422774641447644725D-02, 1.32212955897210128811D-01,
4 5.27798580412734677256D-01, 1.00000000000000000000D+00/
C----------------------------------------------------------------------
C Coefficients for R(9,9) approximation in J-fraction form
C for x in [2.5, 3.5)
C----------------------------------------------------------------------
CS DATA P2/-1.70953804700855494930E+00,-3.79258977271042880786E+01,
CS 1 2.61935631268825992835E+01, 1.25808703738951251885E+01,
CS 2 -2.27571829525075891337E+01, 4.56604250725163310122E+00,
CS 3 -7.33080089896402870750E+00, 4.65842087940015295573E+01,
CS 4 -1.73717177843672791149E+01, 5.00260183622027967838E-01/
CS DATA Q2/ 1.82180093313514478378E+00, 1.10067081034515532891E+03,
CS 1 -7.08465686676573000364E+00, 4.53642111102577727153E+02,
CS 2 4.06209742218935689922E+01, 3.02890110610122663923E+02,
CS 3 1.70641269745236227356E+02, 9.51190923960381458747E+02,
CS 4 2.06522691539642105009E-01/
DATA P2/-1.70953804700855494930D+00,-3.79258977271042880786D+01,
1 2.61935631268825992835D+01, 1.25808703738951251885D+01,
2 -2.27571829525075891337D+01, 4.56604250725163310122D+00,
3 -7.33080089896402870750D+00, 4.65842087940015295573D+01,
4 -1.73717177843672791149D+01, 5.00260183622027967838D-01/
DATA Q2/ 1.82180093313514478378D+00, 1.10067081034515532891D+03,
1 -7.08465686676573000364D+00, 4.53642111102577727153D+02,
2 4.06209742218935689922D+01, 3.02890110610122663923D+02,
3 1.70641269745236227356D+02, 9.51190923960381458747D+02,
4 2.06522691539642105009D-01/
C----------------------------------------------------------------------
C Coefficients for R(9,9) approximation in J-fraction form
C for x in [3.5, 5.0]
C----------------------------------------------------------------------
CS DATA P3/-4.55169503255094815112E+00,-1.86647123338493852582E+01,
CS 1 -7.36315669126830526754E+00,-6.68407240337696756838E+01,
CS 2 4.84507265081491452130E+01, 2.69790586735467649969E+01,
CS 3 -3.35044149820592449072E+01, 7.50964459838919612289E+00,
CS 4 -1.48432341823343965307E+00, 4.99999810924858824981E-01/
CS DATA Q3/ 4.47820908025971749852E+01, 9.98607198039452081913E+01,
CS 1 1.40238373126149385228E+01, 3.48817758822286353588E+03,
CS 2 -9.18871385293215873406E+00, 1.24018500009917163023E+03,
CS 3 -6.88024952504512254535E+01,-2.31251575385145143070E+00,
CS 4 2.50041492369922381761E-01/
DATA P3/-4.55169503255094815112D+00,-1.86647123338493852582D+01,
1 -7.36315669126830526754D+00,-6.68407240337696756838D+01,
2 4.84507265081491452130D+01, 2.69790586735467649969D+01,
3 -3.35044149820592449072D+01, 7.50964459838919612289D+00,
4 -1.48432341823343965307D+00, 4.99999810924858824981D-01/
DATA Q3/ 4.47820908025971749852D+01, 9.98607198039452081913D+01,
1 1.40238373126149385228D+01, 3.48817758822286353588D+03,
2 -9.18871385293215873406D+00, 1.24018500009917163023D+03,
3 -6.88024952504512254535D+01,-2.31251575385145143070D+00,
4 2.50041492369922381761D-01/
C----------------------------------------------------------------------
C Coefficients for R(9,9) approximation in J-fraction form
C for |x| > 5.0
C----------------------------------------------------------------------
CS DATA P4/-8.11753647558432685797E+00,-3.84043882477454453430E+01,
CS 1 -2.23787669028751886675E+01,-2.88301992467056105854E+01,
CS 2 -5.99085540418222002197E+00,-1.13867365736066102577E+01,
CS 3 -6.52828727526980741590E+00,-4.50002293000355585708E+00,
CS 4 -2.50000000088955834952E+00, 5.00000000000000488400E-01/
CS DATA Q4/ 2.69382300417238816428E+02, 5.04198958742465752861E+01,
CS 1 6.11539671480115846173E+01, 2.08210246935564547889E+02,
CS 2 1.97325365692316183531E+01,-1.22097010558934838708E+01,
CS 3 -6.99732735041547247161E+00,-2.49999970104184464568E+00,
CS 4 7.49999999999027092188E-01/
DATA P4/-8.11753647558432685797D+00,-3.84043882477454453430D+01,
1 -2.23787669028751886675D+01,-2.88301992467056105854D+01,
2 -5.99085540418222002197D+00,-1.13867365736066102577D+01,
3 -6.52828727526980741590D+00,-4.50002293000355585708D+00,
4 -2.50000000088955834952D+00, 5.00000000000000488400D-01/
DATA Q4/ 2.69382300417238816428D+02, 5.04198958742465752861D+01,
1 6.11539671480115846173D+01, 2.08210246935564547889D+02,
2 1.97325365692316183531D+01,-1.22097010558934838708D+01,
3 -6.99732735041547247161D+00,-2.49999970104184464568D+00,
4 7.49999999999027092188D-01/
C----------------------------------------------------------------------
X = XX
IF (ABS(X) .GT. XLARGE) THEN
IF (ABS(X) .LE. XMAX) THEN
DAW = HALF / X
ELSE
DAW = ZERO
END IF
ELSE IF (ABS(X) .LT. XSMALL) THEN
DAW = X
ELSE
Y = X * X
IF (Y .LT. SIX25) THEN
C----------------------------------------------------------------------
C ABS(X) .LT. 2.5
C----------------------------------------------------------------------
SUMP = P1(1)
SUMQ = Q1(1)
DO 100 I = 2, 10
SUMP = SUMP * Y + P1(I)
SUMQ = SUMQ * Y + Q1(I)
100 CONTINUE
DAW = X * SUMP / SUMQ
ELSE IF (Y .LT. ONE225) THEN
C----------------------------------------------------------------------
C 2.5 .LE. ABS(X) .LT. 3.5
C----------------------------------------------------------------------
FRAC = ZERO
DO 200 I = 1, 9
200 FRAC = Q2(I) / (P2(I) + Y + FRAC)
DAW = (P2(10) + FRAC) / X
ELSE IF (Y .LT. TWO5) THEN
C----------------------------------------------------------------------
C 3.5 .LE. ABS(X) .LT. 5.0
C----------------------------------------------------------------------
FRAC = ZERO
DO 300 I = 1, 9
300 FRAC = Q3(I) / (P3(I) + Y + FRAC)
DAW = (P3(10) + FRAC) / X
ELSE
C----------------------------------------------------------------------
C 5.0 .LE. ABS(X) .LE. XLARGE
C----------------------------------------------------------------------
W2 = ONE / X / X
FRAC = ZERO
DO 400 I = 1, 9
400 FRAC = Q4(I) / (P4(I) + Y + FRAC)
FRAC = P4(10) + FRAC
DAW = (HALF + HALF * W2 * FRAC) / X
END IF
END IF
RETURN
C---------- Last line of DAW ----------
END
|
{"hexsha": "e352f1fd315c58e776901c5a38baf132484dc8ff", "size": 13200, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/f2cl/packages/toms/715/daw.f", "max_stars_repo_name": "sbwhitecap/clocc-hg", "max_stars_repo_head_hexsha": "f6cf2591ceef8a3a80e04da9b414cdf60a25a90f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/f2cl/packages/toms/715/daw.f", "max_issues_repo_name": "sbwhitecap/clocc-hg", "max_issues_repo_head_hexsha": "f6cf2591ceef8a3a80e04da9b414cdf60a25a90f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/f2cl/packages/toms/715/daw.f", "max_forks_repo_name": "sbwhitecap/clocc-hg", "max_forks_repo_head_hexsha": "f6cf2591ceef8a3a80e04da9b414cdf60a25a90f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6240601504, "max_line_length": 71, "alphanum_fraction": 0.5346969697, "num_tokens": 4772}
|
%
% Showcase file to demonstrate the abilities of kLabCourse-template.
%
%
\documentclass[ngerman]{kLCReprt}
\usepackage{blindtext}
\usepackage{kLCTitle}
\reportAuthor[Second Author]{First Author}
\reportAuthorMail[zweite.mail@mail.org]{email@mail.org}
\reportDate{25.02.2015}
\reportSubmissionDate{06.03.2015}
\reportExperiment{Number 14}
\reportTitle{Title of the Experiment}
\reportSupervisor{My Supervisor}
\bibliography{bib/bibliography.bib}
\begin{document}
\maketitle\thispagestyle{empty}
\cleardoublepage
\tableofcontents\thispagestyle{plain}
\cleardoublepage
\include{tex/levels}
\include{tex/maths}
\include{tex/misc}
\include{tex/font}
\appendix
\section{Grafiken und Tabellen}
\cleardoublepage
\printbibliography
\end{document}
% End of file
|
{"hexsha": "a150ee071a63739f704516ac71cd40bc415ca184", "size": 766, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "showcase.tex", "max_stars_repo_name": "kzoch/kLabCourse-template", "max_stars_repo_head_hexsha": "efaffbde3bea7ea826357398414b35bf54b934c2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "showcase.tex", "max_issues_repo_name": "kzoch/kLabCourse-template", "max_issues_repo_head_hexsha": "efaffbde3bea7ea826357398414b35bf54b934c2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "showcase.tex", "max_forks_repo_name": "kzoch/kLabCourse-template", "max_forks_repo_head_hexsha": "efaffbde3bea7ea826357398414b35bf54b934c2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.6829268293, "max_line_length": 68, "alphanum_fraction": 0.7963446475, "num_tokens": 235}
|
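# Overview (inferred from the definitions below, stated as a sketch): each
# helper maps a field from its near-boundary ("inner") expansion to the full
# ("outer") variable by restoring the leading powers of 1/u and the boundary
# data (xi, phi0); the *_u_* variants are the corresponding u-derivatives,
# e.g. A_u_inner_to_outer is d/du of A_inner_to_outer.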
@inline S_inner_to_outer(S_in, u, xi, phi0) =
1/u + xi - u * phi0*phi0/3 * (1 - u * xi) + u*u*u * S_in
@inline S_u_inner_to_outer(S_u_in, S_in, u, xi, phi0) =
-1/(u*u) - phi0*phi0/3 * (1 - u * xi) + u * xi * phi0*phi0/3 +
3 * u*u * S_in + u*u*u * S_u_in
@inline F_inner_to_outer(F_in, u) = u*u * F_in
@inline F_u_inner_to_outer(F_u_in, F_in, u) = 2 * u * F_in + u*u * F_u_in
@inline Sd_inner_to_outer(Sd_in, u, xi, phi0) =
1/(2*u*u) + xi/u + xi*xi/2 - phi0*phi0/6 + u*u * Sd_in
@inline Bd_inner_to_outer(Bd_in, u) = u*u*u * Bd_in
@inline phid_inner_to_outer(phid_in, u, phi0) =
-phi0/2 + u*u * phi0*phi0*phi0 * phid_in
@inline A_inner_to_outer(A_in, u, xi, phi0) =
1/(u*u) + 2*xi/u + xi*xi - 2/3 * phi0*phi0 + u*u * A_in
@inline A_u_inner_to_outer(A_u_in, A_in, u, xi, phi0) =
-2/(u*u*u) - 2*xi/(u*u) + 2*u * A_in + u*u * A_u_in
|
{"hexsha": "2fb0b57d1828fc825bb4f070abf81f8fc0d078ad", "size": 866, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/AdS5_3_1/inner_to_outer.jl", "max_stars_repo_name": "Mikel-Sanchez-Garitaonandia/Jecco.jl", "max_stars_repo_head_hexsha": "d1e030ed0e3534c6bbb7aeaba4e3904fc59a3c35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-09-08T23:37:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T06:19:01.000Z", "max_issues_repo_path": "src/AdS5_3_1/inner_to_outer.jl", "max_issues_repo_name": "Mikel-Sanchez-Garitaonandia/Jecco.jl", "max_issues_repo_head_hexsha": "d1e030ed0e3534c6bbb7aeaba4e3904fc59a3c35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-02T17:57:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-02T17:57:23.000Z", "max_forks_repo_path": "src/AdS5_3_1/inner_to_outer.jl", "max_forks_repo_name": "Mikel-Sanchez-Garitaonandia/Jecco.jl", "max_forks_repo_head_hexsha": "d1e030ed0e3534c6bbb7aeaba4e3904fc59a3c35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-02-26T15:37:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T08:46:55.000Z", "avg_line_length": 33.3076923077, "max_line_length": 73, "alphanum_fraction": 0.5912240185, "num_tokens": 394}
|
import numpy as np
import nibabel
import pytest
from nilearn._utils.testing import write_tmp_imgs
from nilearn.decomposition.dict_learning import DictLearning
from nilearn.decomposition.tests.test_canica import _make_canica_test_data
from nilearn.image import iter_img, get_data
from nilearn.input_data import NiftiMasker
from nilearn.decomposition.tests.test_multi_pca import _tmp_dir
def test_dict_learning():
data, mask_img, components, rng = _make_canica_test_data(n_subjects=8)
masker = NiftiMasker(mask_img=mask_img).fit()
mask = get_data(mask_img) != 0
flat_mask = mask.ravel()
dict_init = masker.inverse_transform(components[:, flat_mask])
dict_learning = DictLearning(n_components=4, random_state=0,
dict_init=dict_init,
mask=mask_img,
smoothing_fwhm=0., alpha=1)
dict_learning_auto_init = DictLearning(n_components=4, random_state=0,
mask=mask_img,
smoothing_fwhm=0., n_epochs=10,
alpha=1)
maps = {}
for estimator in [dict_learning,
dict_learning_auto_init]:
estimator.fit(data)
maps[estimator] = get_data(estimator.components_img_)
maps[estimator] = np.reshape(
np.rollaxis(maps[estimator], 3, 0)[:, mask],
(4, flat_mask.sum()))
masked_components = components[:, flat_mask]
for this_dict_learning in [dict_learning]:
these_maps = maps[this_dict_learning]
S = np.sqrt(np.sum(masked_components ** 2, axis=1))
S[S == 0] = 1
masked_components /= S[:, np.newaxis]
S = np.sqrt(np.sum(these_maps ** 2, axis=1))
S[S == 0] = 1
these_maps /= S[:, np.newaxis]
K = np.abs(masked_components.dot(these_maps.T))
recovered_maps = np.sum(K > 0.9)
assert(recovered_maps >= 2)
# Smoke test n_epochs > 1
dict_learning = DictLearning(n_components=4, random_state=0,
dict_init=dict_init,
mask=mask_img,
smoothing_fwhm=0., n_epochs=2, alpha=1)
dict_learning.fit(data)
def test_component_sign():
# Regression test
# We should have a heuristic that flips the sign of components in
# DictLearning to have more positive values than negative values, for
# instance by making sure that the largest value is positive.
data, mask_img, components, rng = _make_canica_test_data(n_subjects=2,
noisy=True)
for mp in components:
assert -mp.min() <= mp.max()
dict_learning = DictLearning(n_components=4, random_state=rng,
mask=mask_img,
smoothing_fwhm=0., alpha=1)
dict_learning.fit(data)
for mp in iter_img(dict_learning.components_img_):
mp = get_data(mp)
assert np.sum(mp[mp <= 0]) <= np.sum(mp[mp > 0])
def test_masker_attributes_with_fit():
# Test base module at sub-class
data, mask_img, components, rng = _make_canica_test_data(n_subjects=3)
# Passing mask_img
dict_learning = DictLearning(n_components=3, mask=mask_img, random_state=0)
dict_learning.fit(data)
assert dict_learning.mask_img_ == mask_img
assert dict_learning.mask_img_ == dict_learning.masker_.mask_img_
# Passing masker
masker = NiftiMasker(mask_img=mask_img)
dict_learning = DictLearning(n_components=3, mask=masker, random_state=0)
dict_learning.fit(data)
assert dict_learning.mask_img_ == dict_learning.masker_.mask_img_
dict_learning = DictLearning(mask=mask_img, n_components=3)
with pytest.raises(ValueError,
match="Object has no components_ attribute. "
"This is probably because "
"fit has not been called"):
dict_learning.transform(data)
    # Test that an error is raised when an empty list is provided.
with pytest.raises(ValueError,
match='Need one or more Niimg-like objects '
'as input, an empty list was given.'):
dict_learning.fit([])
# Test passing masker arguments to estimator
dict_learning = DictLearning(n_components=3,
target_affine=np.eye(4),
target_shape=(6, 8, 10),
mask_strategy='background')
dict_learning.fit(data)
def test_components_img():
data, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
n_components = 3
dict_learning = DictLearning(n_components=n_components, mask=mask_img)
dict_learning.fit(data)
components_img = dict_learning.components_img_
assert isinstance(components_img, nibabel.Nifti1Image)
check_shape = data[0].shape + (n_components,)
    assert components_img.shape == check_shape
def test_with_globbing_patterns_with_single_subject():
# single subject
data, mask_img, _, _ = _make_canica_test_data(n_subjects=1)
n_components = 3
dictlearn = DictLearning(n_components=n_components, mask=mask_img)
with write_tmp_imgs(data[0], create_files=True, use_wildcards=True) as img:
input_image = _tmp_dir() + img
dictlearn.fit(input_image)
components_img = dictlearn.components_img_
assert isinstance(components_img, nibabel.Nifti1Image)
# n_components = 3
check_shape = data[0].shape[:3] + (3,)
    assert components_img.shape == check_shape
def test_with_globbing_patterns_with_multi_subjects():
# multi subjects
data, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
n_components = 3
dictlearn = DictLearning(n_components=n_components, mask=mask_img)
with write_tmp_imgs(data[0], data[1], data[2], create_files=True,
use_wildcards=True) as img:
input_image = _tmp_dir() + img
dictlearn.fit(input_image)
components_img = dictlearn.components_img_
assert isinstance(components_img, nibabel.Nifti1Image)
# n_components = 3
check_shape = data[0].shape[:3] + (3,)
    assert components_img.shape == check_shape
|
{"hexsha": "0cd3d411573f6592ef5c3e2f00306487d4f18fd7", "size": 6375, "ext": "py", "lang": "Python", "max_stars_repo_path": "nilearn/decomposition/tests/test_dict_learning.py", "max_stars_repo_name": "chouhanaryan/nilearn", "max_stars_repo_head_hexsha": "e26312be96fe5c0211da28889ddd3ab1bd0ddc49", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-01T21:56:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-01T21:56:17.000Z", "max_issues_repo_path": "nilearn/decomposition/tests/test_dict_learning.py", "max_issues_repo_name": "chouhanaryan/nilearn", "max_issues_repo_head_hexsha": "e26312be96fe5c0211da28889ddd3ab1bd0ddc49", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-05-01T17:05:03.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-05T22:14:51.000Z", "max_forks_repo_path": "nilearn/decomposition/tests/test_dict_learning.py", "max_forks_repo_name": "chouhanaryan/nilearn", "max_forks_repo_head_hexsha": "e26312be96fe5c0211da28889ddd3ab1bd0ddc49", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-25T07:35:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T12:37:27.000Z", "avg_line_length": 41.9407894737, "max_line_length": 79, "alphanum_fraction": 0.6349803922, "include": true, "reason": "import numpy", "num_tokens": 1443}
|
# Poisson distribution
export Poisson
import Base
using SpecialFunctions: logfactorial
@parameterized Poisson(λ) ≪ CountingMeasure(ℤ[0:∞])
Base.eltype(::Type{P}) where {P<:Poisson} = Int
function logdensity(d::Poisson{(:λ,)}, y)
λ = d.λ
return y * log(λ) - λ - logfactorial(y)
end
function logdensity(d::Poisson{(:logλ,)}, y)
return y * d.logλ - exp(d.logλ) - logfactorial(y)
end
asparams(::Type{<:Poisson}, ::Val{:λ}) = asℝ₊
asparams(::Type{<:Poisson}, ::Val{:logλ}) = asℝ
sampletype(::Poisson) = Int
Base.rand(rng::AbstractRNG, T::Type, d::Poisson{(:λ,)}) = rand(rng, Dists.Poisson(d.λ))
Base.rand(rng::AbstractRNG, T::Type, d::Poisson{(:logλ,)}) = rand(rng, Dists.Poisson(exp(d.logλ)))
≪(::Poisson, ::IntegerRange{lo,hi}) where {lo, hi} = lo ≤ 0 && isinf(hi)
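# Usage sketch (hypothetical values; relies only on the definitions above):
#   d = Poisson(λ = 4.2)
#   logdensity(d, 3)    # == 3log(4.2) - 4.2 - logfactorial(3)
#   d2 = Poisson(logλ = log(4.2))
#   logdensity(d2, 3)   # same value via the log-parameterization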
|
{"hexsha": "a95d931da1635a5ab08a5bc5b1bd9e287e430f77", "size": 783, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/parameterized/poisson.jl", "max_stars_repo_name": "jw3126/MeasureTheory.jl", "max_stars_repo_head_hexsha": "419d2f2fc3cb27c9b1d969d2e05022f3a4a01f66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 274, "max_stars_repo_stars_event_min_datetime": "2020-09-24T13:34:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:36:38.000Z", "max_issues_repo_path": "src/parameterized/poisson.jl", "max_issues_repo_name": "jw3126/MeasureTheory.jl", "max_issues_repo_head_hexsha": "419d2f2fc3cb27c9b1d969d2e05022f3a4a01f66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 149, "max_issues_repo_issues_event_min_datetime": "2020-09-23T02:15:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T17:29:21.000Z", "max_forks_repo_path": "src/parameterized/poisson.jl", "max_forks_repo_name": "jw3126/MeasureTheory.jl", "max_forks_repo_head_hexsha": "419d2f2fc3cb27c9b1d969d2e05022f3a4a01f66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2020-09-24T13:34:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T23:30:31.000Z", "avg_line_length": 27.0, "max_line_length": 98, "alphanum_fraction": 0.6513409962, "num_tokens": 298}
|
import numpy as np
from PIL import ImageGrab
import cv2
import time
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
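# Example (sketch): `edges = auto_canny(gray)` picks the Canny thresholds as
# median*(1 - sigma) and median*(1 + sigma), so the default sigma=0.33
# brackets the median pixel intensity by roughly +/-33%.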
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)  # second pass: edge-detect the edge map itself
#processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
], np.int32)
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
processed_img = roi(processed_img, [vertices])
# more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# edges rho theta thresh # min length, max gap:
#lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 20, 15)
#draw_lines(processed_img,lines)
return processed_img
def roi(img, vertices):
#blank mask:
mask = np.zeros_like(img)
# fill the mask
cv2.fillPoly(mask, vertices, 255)
# now only show the area that is the mask
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lines(img,lines):
for line in lines:
coords = line[0]
cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 3)
time.sleep(10)
for i in range(101):
time.sleep(0.25)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
cv2.imwrite("{}.jpg".format(i), process_img(printscreen))
|
{"hexsha": "4a7a2579f4250a38824415ea4a9a931ae2462ff1", "size": 1990, "ext": "py", "lang": "Python", "max_stars_repo_path": "FilterTests.py", "max_stars_repo_name": "siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion", "max_stars_repo_head_hexsha": "42d79fc262d60ecc9eebbe0e77a1576a04979501", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FilterTests.py", "max_issues_repo_name": "siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion", "max_issues_repo_head_hexsha": "42d79fc262d60ecc9eebbe0e77a1576a04979501", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FilterTests.py", "max_forks_repo_name": "siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion", "max_forks_repo_head_hexsha": "42d79fc262d60ecc9eebbe0e77a1576a04979501", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8518518519, "max_line_length": 110, "alphanum_fraction": 0.6447236181, "include": true, "reason": "import numpy", "num_tokens": 556}
|
%% Copyright (C) 2016 Lagu
%% Copyright (C) 2016, 2018-2019, 2022 Colin B. Macdonald
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
%% -*- texinfo -*-
%% @documentencoding UTF-8
%% @deftypemethod @@sym {[@var{A}, @var{b}] =} equationsToMatrix (@var{eqns}, @var{vars})
%% @deftypemethodx @@sym {[@var{A}, @var{b}] =} equationsToMatrix (@var{eqns})
%% @deftypemethodx @@sym {[@var{A}, @var{b}] =} equationsToMatrix (@var{eq1}, @var{eq2}, @dots{})
%% @deftypemethodx @@sym {[@var{A}, @var{b}] =} equationsToMatrix (@var{eq1}, @dots{}, @var{v1}, @var{v2}, @dots{})
%% Convert set of linear equations to matrix form.
%%
%% In its simplest form, equations @var{eq1}, @var{eq2}, etc can be
%% passed as inputs:
%% @example
%% @group
%% syms x y z
%% [A, b] = equationsToMatrix (x + y == 1, x - y + 1 == 0)
%% @result{} A = (sym 2×2 matrix)
%%
%% ⎡1 1 ⎤
%% ⎢ ⎥
%% ⎣1 -1⎦
%%
%% @result{} b = (sym 2×1 matrix)
%%
%% ⎡1 ⎤
%% ⎢ ⎥
%% ⎣-1⎦
%% @end group
%% @end example
%% In this case, appropriate variables @emph{and their ordering} will be
%% determined automatically using @code{symvar} (@pxref{@@sym/symvar}).
%%
%% In some cases it is important to specify the variables as additional
%% inputs @var{v1}, @var{v2}, etc:
%% @example
%% @group
%% syms a
%% [A, b] = equationsToMatrix (a*x + y == 1, y - x == a)
%% @print{} ??? ... nonlinear...
%%
%% [A, b] = equationsToMatrix (a*x + y == 1, y - x == a, x, y)
%% @result{} A = (sym 2×2 matrix)
%%
%% ⎡a 1⎤
%% ⎢ ⎥
%% ⎣-1 1⎦
%%
%% @result{} b = (sym 2×1 matrix)
%%
%% ⎡1⎤
%% ⎢ ⎥
%% ⎣a⎦
%% @end group
%% @end example
%%
%% The equations and variables can also be passed as vectors @var{eqns}
%% and @var{vars}:
%% @example
%% @group
%% eqns = [x + y - 2*z == 0, x + y + z == 1, 2*y - z + 5 == 0];
%% [A, B] = equationsToMatrix (eqns, [x y])
%% @result{} A = (sym 3×2 matrix)
%%
%% ⎡1 1⎤
%% ⎢ ⎥
%% ⎢1 1⎥
%% ⎢ ⎥
%% ⎣0 2⎦
%%
%% B = (sym 3×1 matrix)
%%
%% ⎡ 2⋅z ⎤
%% ⎢ ⎥
%% ⎢1 - z⎥
%% ⎢ ⎥
%% ⎣z - 5⎦
%% @end group
%% @end example
%% @seealso{@@sym/solve}
%% @end deftypemethod
function [A, b] = equationsToMatrix(varargin)
% when Symbols are specified, this won't be used
s = findsymbols (varargin);
cmd = {'L, symvars = _ins'
'if not isinstance(L[-1], MatrixBase):'
' if isinstance(L[-1], Symbol):' % Symbol given, fill vars...
' vars = list()'
' for i in reversed(range(len(L))):'
' if isinstance(L[i], Symbol):'
' vars = [L.pop(i)] + vars'
' else:' % ... until we find a non-Symbol
' break'
' else:'
' vars = symvars'
'else:'
' if len(L) == 1:' % we have only a list of equations
' vars = symvars'
' else:'
' vars = L.pop(-1)'
'if len(L) == 1:' % might be matrix of eqns, don't want [Matrix]
' L = L[0]'
'vars = list(vars)'
'A, B = linear_eq_to_matrix(L, vars)'
'return True, A, B' };
for i = 1:length(varargin)
varargin{i} = sym (varargin{i});
end
[s, A, b] = pycall_sympy__ (cmd, varargin, s);
if ~s
error('Cannot convert to matrix; system may be nonlinear.');
end
end
%!test
%! syms x y z
%! [A, B] = equationsToMatrix ([x + y - z == 1, 3*x - 2*y + z == 3, 4*x - 2*y + z + 9 == 0], [x, y, z]);
%! a = sym ([1 1 -1; 3 -2 1; 4 -2 1]);
%! b = sym ([1; 3; -9]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! syms x y z
%! A = equationsToMatrix ([3*x + -3*y - 5*z == 9, 4*x - 7*y + -3*z == -1, 4*x - 9*y - 3*z + 2 == 0], [x, y, z]);
%! a = sym ([3 -3 -5; 4 -7 -3; 4 -9 -3]);
%! assert (isequal (A, a))
%!test
%! syms x y
%! [A, B] = equationsToMatrix ([3*x + 9*y - 5 == 0, -8*x - 3*y == -2]);
%! a = sym ([3 9; -8 -3]);
%! b = sym ([5; -2]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! % override symvar order
%! syms x y
%! [A, B] = equationsToMatrix ([3*x + 9*y - 5 == 0, -8*x - 3*y == -2], [y x]);
%! a = sym ([9 3; -3 -8]);
%! b = sym ([5; -2]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! syms x y z
%! [A, B] = equationsToMatrix ([x - 9*y + z == -5, -9*y*z == -5], [y, x]);
%! a = sym ([[-9 1]; -9*z 0]);
%! b = sym ([-5 - z; -5]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! syms x y
%! [A, B] = equationsToMatrix (-6*x + 4*y == 5, 4*x - 4*y - 5, x, y);
%! a = sym ([-6 4; 4 -4]);
%! b = sym ([5; 5]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! % vertical list of equations
%! syms x y
%! [A, B] = equationsToMatrix ([-6*x + 4*y == 5; 4*x - 4*y - 5], [x y]);
%! a = sym ([-6 4; 4 -4]);
%! b = sym ([5; 5]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! syms x y
%! [A, B] = equationsToMatrix (5*x == 1, y, x - 6*y - 7, y);
%! a = sym ([0; 1; -6]);
%! b = sym ([1 - 5*x; 0; -x + 7]);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!error <nonlinear>
%! syms x y
%! [A, B] = equationsToMatrix (x^2 + y^2 == 1, x - y + 1, x, y);
%!test
%! % single equation
%! syms x
%! [A, B] = equationsToMatrix (3*x == 2, x);
%! a = sym (3);
%! b = sym (2);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!test
%! % single equation w/ symvar
%! syms x
%! [A, B] = equationsToMatrix (3*x == 2);
%! a = sym (3);
%! b = sym (2);
%! assert (isequal (A, a))
%! assert (isequal (B, b))
%!error <unique>
%! syms x
%! equationsToMatrix (3*x == 2, [x x])
|
{"author": "cbm755", "repo": "octsympy", "sha": "c1ecd1e08f027d5101d0f4250dfc496aa98c8bcd", "save_path": "github-repos/MATLAB/cbm755-octsympy", "path": "github-repos/MATLAB/cbm755-octsympy/octsympy-c1ecd1e08f027d5101d0f4250dfc496aa98c8bcd/inst/@sym/equationsToMatrix.m"}
|
function E_slice_plot(E::ScalarField, dir, slice_location, t;
issliced=false,
save=false,
slice_dir=:x,
kwargs...)
if !issliced
E_slice = slice(E, slice_dir, slice_location)
else
E_slice = E
end
grid = getdomain(E_slice)
cl = ustrip(max(abs.(extrema(E_slice))...))
ts = sprint(show, t, context=:compact => true)
loc = sprint(show, slice_location, context=:compact => true)
xlabel, ylabel = filter(x->x≠string(slice_dir), ["x", "y", "z"])
plt = Plots.heatmap(grid..., E_slice;
                        title = "E$dir at t=$ts, $slice_dir=$loc",
xlabel, ylabel, colorbar_title = "E$dir",
seriescolor = :jet1,
aspect_ratio = 1,
framestyle = :box,
clims = (-cl, cl),
kwargs...)
    if save
        Plots.savefig(plt, "E$(dir)_$loc.png")
    end
return plt
end
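# Usage sketch (hypothetical arguments; `ustrip` above suggests Unitful
# quantities also work):
#   E_slice_plot(E, "y", 0.0, t; slice_dir = :x, save = true)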
|
{"hexsha": "95ccb7c39ebf4e4fdbe18d50bc61e0f6a237c58a", "size": 869, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/plots/fields.jl", "max_stars_repo_name": "ctp-fpub/SDFResultViewer.jl", "max_stars_repo_head_hexsha": "375a4f7a6cf87d32980b4af8a075c235d4e5ed3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-23T19:29:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-24T07:53:25.000Z", "max_issues_repo_path": "src/plots/fields.jl", "max_issues_repo_name": "ctp-fpub/SDFResultViewer.jl", "max_issues_repo_head_hexsha": "375a4f7a6cf87d32980b4af8a075c235d4e5ed3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-03-23T15:16:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-22T20:27:02.000Z", "max_forks_repo_path": "src/plots/fields.jl", "max_forks_repo_name": "ctp-fpub/SDFResultViewer.jl", "max_forks_repo_head_hexsha": "375a4f7a6cf87d32980b4af8a075c235d4e5ed3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5588235294, "max_line_length": 68, "alphanum_fraction": 0.5730724971, "num_tokens": 249}
|
import numpy as np
from scipy import stats
# ---------------------------
# Independent samples -------
# ---------------------------
def cles_ind(x1, x2):
"""Calc common language effect size
Interpret as the probability that a score sampled
at random from one distribution will be greater than
a score sampled from some other distribution.
Based on: http://psycnet.apa.org/doi/10.1037/0033-2909.111.2.361
:param x1: sample 1
:param x2: sample 2
:return: (float) common language effect size
"""
x1 = np.array(x1)
x2 = np.array(x2)
diff = x1[:, None] - x2
cles = max((diff < 0).sum(), (diff > 0).sum()) / diff.size
return cles
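# Example (sketch): cles_ind([1, 2, 3], [0, 0, 0]) == 1.0, since every
# pairwise difference is positive; the max() makes the result
# direction-agnostic, so swapping the samples also yields 1.0.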
def rbc_ind(x1, x2):
"""Calculate rank-biserial correlation coefficient
Output values range from [0, 1]; interpret as:
* Values closer to 0 are a weaker effect
* Values closer to 1 are a stronger effect
:param x1: sample 1
:param x2: sample 2
:return: (float) rank-biserial correlation coefficient
"""
n1 = x1.size
n2 = x2.size
u, _ = stats.mannwhitneyu(x1, x2)
rbc = 1 - (2 * u) / (n1 * n2)
return rbc
def calc_non_param_ci(x1, x2, alpha=0.05):
"""Calc confidence interval for 2 group median test
Process:
* Find all pairwise diffs
* Sort diffs
* Find appropriate value of k
* Choose lower bound from diffs as: diffs[k]
* Choose upper bound from diffs as: diffs[-k]
Based on: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2545906/
:param x1: sample 1
:param x2: sample 2
:param alpha: significance level
:return: (tuple) confidence interval bounds
"""
x1 = np.array(x1)
x2 = np.array(x2)
n1 = x1.size
n2 = x2.size
cv = stats.norm.ppf(1 - alpha / 2)
# Find pairwise differences for every datapoint in each group
diffs = (x1[:, None] - x2).flatten()
diffs.sort()
# For an approximate (1-a)% confidence interval first calculate K:
k = int(round(n1 * n2 / 2 - (cv * (n1 * n2 * (n1 + n2 + 1) / 12) ** 0.5)))
# The Kth smallest to the Kth largest of the n x m differences
# n1 and n2 should be > ~20
ci_lo = diffs[k]
ci_hi = diffs[-k]
return ci_lo, ci_hi
# ---------------------------
# Paired samples ------------
# ---------------------------
def cles_rel(x1, x2):
"""Calc common language effect size for paired samples
Interpret as the probability that a pair's difference (x1 - x2)
sampled at random will be greater than 0.
:param x1: sample 1
:param x2: sample 2
:return: (float) common language effect size
"""
x1 = np.array(x1)
x2 = np.array(x2)
diffs = x1 - x2
# Convert differences to 0.0, 0.5, or 1.0:
# * 0.0 if x1 < x2
# * 0.5 if x1 == x2
# * 1.0 if x1 > x2
diffs = np.where(diffs == 0.0, 0.5, diffs > 0)
# Take average of array with [0s, 0.5s, 1s]
# This indicates prob of pulling a random
# diff and it being greater than 0
return diffs.mean()
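# Example (sketch): cles_rel([1, 2, 3], [3, 2, 1]) == 0.5 -- one positive
# difference, one negative, and one tie counted as 0.5.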
def rbc_rel(x1, x2):
"""Calculate rank-biserial correlation coefficient for paired samples
Output values range from [-1, 1]; interpret as:
* Values closer to 1 indicate that x1 is larger
* Values closer to -1 indicate that x2 is larger
:param x1: sample 1
:param x2: sample 2
:return: (float) rank-biserial correlation coefficient
"""
x1 = np.array(x1)
x2 = np.array(x2)
diffs = x1 - x2
diffs = diffs[diffs != 0]
diff_ranks = stats.rankdata(abs(diffs))
rank_sum = diff_ranks.sum()
pos_rank_sum = np.sum((diffs > 0) * diff_ranks)
neg_rank_sum = np.sum((diffs < 0) * diff_ranks)
rbc = pos_rank_sum / rank_sum - neg_rank_sum / rank_sum
return rbc
|
{"hexsha": "0e1f99837a5e22e89d0ce86e21f967c8a123fd63", "size": 3749, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_exploration/non_param_effect_size.py", "max_stars_repo_name": "0-b1t/spotify_insights", "max_stars_repo_head_hexsha": "a3c50b728c83139c53a21582fe6b867152356b8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-07T23:54:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-07T23:54:27.000Z", "max_issues_repo_path": "data_exploration/non_param_effect_size.py", "max_issues_repo_name": "0-b1t/spotify_insights", "max_issues_repo_head_hexsha": "a3c50b728c83139c53a21582fe6b867152356b8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_exploration/non_param_effect_size.py", "max_forks_repo_name": "0-b1t/spotify_insights", "max_forks_repo_head_hexsha": "a3c50b728c83139c53a21582fe6b867152356b8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-19T17:09:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-19T17:09:04.000Z", "avg_line_length": 26.4014084507, "max_line_length": 78, "alphanum_fraction": 0.6006935183, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1158}
|
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// test_vector.cpp
// (C) Copyright 2002 Robert Ramey - http://www.rrsd.com .
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// should pass compilation and execution
#include <cstddef> // NULL
#include <fstream>
#include <string>
#include <cstdio> // remove
#include <boost/config.hpp>
#if defined(BOOST_NO_STDC_NAMESPACE)
namespace std{
using ::remove;
}
#endif
#include <I3Test.h>
#include <serialization/nvp.hpp>
#include <serialization/string.hpp>
template <typename TS /*test settings*/>
void test_string_4g(){
auto testfile = I3Test::testfile("test_string_4g");
// test huge string
size_t size = 0x1000000ee; // just over 4GB
std::string foo("bar");
{
std::string astring(size,'a');
typename TS::test_ostream os(testfile, TS::TEST_STREAM_FLAGS);
typename TS::test_oarchive oa(os, TS::TEST_ARCHIVE_FLAGS);
oa << icecube::serialization::make_nvp("astring", astring);
oa << icecube::serialization::make_nvp("foo", foo);
}
std::string astring2;
std::string foo2;
{
typename TS::test_istream is(testfile, TS::TEST_STREAM_FLAGS);
typename TS::test_iarchive ia(is, TS::TEST_ARCHIVE_FLAGS);
ia >> icecube::serialization::make_nvp("astring", astring2);
ia >> icecube::serialization::make_nvp("foo", foo2);
}
std::remove(testfile.c_str());
ENSURE(astring2.size() == size);
ENSURE(astring2[size-1] == 'a');
ENSURE(foo == foo2);
}
TEST_GROUP(string_4g)
#define TEST_SET(name) \
TEST(name ## _std_string_4g){ \
test_string_4g<test_settings>(); \
}
#define I3_ARCHIVE_TEST binary_archive.hpp
#include "select_archive.hpp"
TEST_SET(binary_archive)
#undef I3_ARCHIVE_TEST
#define I3_ARCHIVE_TEST text_archive.hpp
#include "select_archive.hpp"
TEST_SET(text_archive)
#undef I3_ARCHIVE_TEST
#define I3_ARCHIVE_TEST xml_archive.hpp
#include "select_archive.hpp"
TEST_SET(xml_archive)
#undef I3_ARCHIVE_TEST
#define I3_ARCHIVE_TEST portable_binary_archive.hpp
#include "select_archive.hpp"
TEST_SET(portable_binary_archive)
// EOF
|
{"hexsha": "8aa301945fddd0aad817596ed93dbc482cddd0eb", "size": 2295, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "serialization/private/test/test_string_4g.cpp", "max_stars_repo_name": "hschwane/offline_production", "max_stars_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-12-24T22:00:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-24T22:00:01.000Z", "max_issues_repo_path": "serialization/private/test/test_string_4g.cpp", "max_issues_repo_name": "hschwane/offline_production", "max_issues_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "serialization/private/test/test_string_4g.cpp", "max_forks_repo_name": "hschwane/offline_production", "max_forks_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-07-17T09:20:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T16:44:18.000Z", "avg_line_length": 27.3214285714, "max_line_length": 80, "alphanum_fraction": 0.6880174292, "num_tokens": 602}
|
[STATEMENT]
lemma real_sqrt_le_iff': "x \<ge> 0 \<Longrightarrow> y \<ge> 0 \<Longrightarrow> sqrt x \<le> y \<longleftrightarrow> x \<le> y ^ 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 \<le> x; 0 \<le> y\<rbrakk> \<Longrightarrow> (sqrt x \<le> y) = (x \<le> y\<^sup>2)
[PROOF STEP]
using real_le_lsqrt sqrt_le_D
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>0 \<le> ?x; 0 \<le> ?y; ?x \<le> ?y\<^sup>2\<rbrakk> \<Longrightarrow> sqrt ?x \<le> ?y
sqrt ?x \<le> ?y \<Longrightarrow> ?x \<le> ?y\<^sup>2
goal (1 subgoal):
1. \<lbrakk>0 \<le> x; 0 \<le> y\<rbrakk> \<Longrightarrow> (sqrt x \<le> y) = (x \<le> y\<^sup>2)
[PROOF STEP]
by blast
|
{"llama_tokens": 303, "file": null, "length": 2}
|
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from .utils import image_and_pickle
from .utils import exponential_moving_average
def line_graph(values, filename, plotsdir, smoothing=None, title='', xname='', yname='', color='blue'):
if smoothing is not None:
values = exponential_moving_average(values, smoothing)
fig, ax = plt.subplots()
x_range = np.array([int(i) for i in range(len(values))])
line, = plt.plot(x_range, values, color=color)
#if len(x_range) < 10:
# ax.set_xticks(x_range)
#else:
# ax.set_xticks([i for i in range(len(training_loss)) if i % 10 == 0])
#ax.set_yticks(np.linspace(0.0, 1.0, num=21))
plt.grid()
# labelling
plt.suptitle(title)
plt.xlabel(xname)
plt.ylabel(yname)
#plt.legend(loc="best")
imgdir = plotsdir
pkldir = os.path.join(plotsdir, 'pkl')
image_and_pickle(fig, filename, imgdir, pkldir)
plt.close(fig)
|
{"hexsha": "6ab36904ef8437a34e2d026fd305e08d35aeb50e", "size": 962, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/visualizing/line_graph.py", "max_stars_repo_name": "isaachenrion/jets", "max_stars_repo_head_hexsha": "59aeba81788d0741af448192d9dfb764fb97cf8d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-10-09T17:01:52.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-12T18:06:05.000Z", "max_issues_repo_path": "src/visualizing/line_graph.py", "max_issues_repo_name": "isaachenrion/jets", "max_issues_repo_head_hexsha": "59aeba81788d0741af448192d9dfb764fb97cf8d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2017-11-01T14:39:02.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-18T15:34:24.000Z", "max_forks_repo_path": "src/visualizing/line_graph.py", "max_forks_repo_name": "isaachenrion/jets", "max_forks_repo_head_hexsha": "59aeba81788d0741af448192d9dfb764fb97cf8d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-10-17T19:23:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-05T04:44:45.000Z", "avg_line_length": 32.0666666667, "max_line_length": 103, "alphanum_fraction": 0.6715176715, "include": true, "reason": "import numpy", "num_tokens": 269}
|
"""Event-based representation output interface."""
from operator import attrgetter, itemgetter
from typing import TYPE_CHECKING
import numpy as np
from numpy import ndarray
if TYPE_CHECKING:
from ..music import Music
def to_event_representation(
music: "Music",
use_single_note_off_event: bool = False,
use_end_of_sequence_event: bool = False,
encode_velocity: bool = False,
force_velocity_event: bool = True,
max_time_shift: int = 100,
velocity_bins: int = 32,
) -> ndarray:
"""Encode a Music object into event-based representation.
    The event-based representation represents music as a sequence of
events, including note-on, note-off, time-shift and velocity events.
The output shape is M x 1, where M is the number of events. The
values encode the events. The default configuration uses 0-127 to
    encode note-on events, 128-255 for note-off events, 256-355 for
time-shift events, and 356 to 387 for velocity events.
Parameters
----------
music : :class:`muspy.Music`
Music object to encode.
use_single_note_off_event : bool
Whether to use a single note-off event for all the pitches. If
True, the note-off event will close all active notes, which can
lead to lossy conversion for polyphonic music. Defaults to
False.
use_end_of_sequence_event : bool
Whether to append an end-of-sequence event to the encoded
sequence. Defaults to False.
encode_velocity : bool
Whether to encode velocities.
force_velocity_event : bool
Whether to add a velocity event before every note-on event. If
False, velocity events are only used when the note velocity is
changed (i.e., different from the previous one). Defaults to
True.
max_time_shift : int
        Maximum time shift (in ticks) to be encoded as a separate
event. Time shifts larger than `max_time_shift` will be
decomposed into two or more time-shift events. Defaults to 100.
velocity_bins : int
Number of velocity bins to use. Defaults to 32.
Returns
-------
ndarray, dtype=uint16, shape=(?, 1)
Encoded array in event-based representation.
"""
# Collect notes
notes = []
for track in music.tracks:
notes.extend(track.notes)
    # Raise an error if no notes are found
if not notes and not use_end_of_sequence_event:
raise RuntimeError("No notes found.")
# Sort the notes
notes.sort(key=attrgetter("time", "pitch", "duration", "velocity"))
# Compute offsets
offset_note_on = 0
offset_note_off = 128
offset_time_shift = 129 if use_single_note_off_event else 256
offset_velocity = offset_time_shift + max_time_shift
if use_end_of_sequence_event:
offset_eos = offset_velocity + velocity_bins
# Collect note-related events
note_events = []
last_velocity = -1
for note in notes:
# Velocity event
if encode_velocity:
if force_velocity_event or note.velocity != last_velocity:
note_events.append(
(
note.time,
offset_velocity
+ int(note.velocity * velocity_bins / 128),
)
)
last_velocity = note.velocity
# Note on event
note_events.append((note.time, offset_note_on + note.pitch))
# Note off event
if use_single_note_off_event:
note_events.append((note.end, offset_note_off))
else:
note_events.append((note.end, offset_note_off + note.pitch))
# Sort events by time
note_events.sort(key=itemgetter(0))
# Create a list for all events
events = []
# Initialize the time cursor
time_cursor = 0
# Iterate over note events
for time, code in note_events:
# If event time is after the time cursor, append tick shift
# events
        if time > time_cursor:
            div, mod = divmod(time - time_cursor, max_time_shift)
            for _ in range(div):
                events.append(offset_time_shift + max_time_shift - 1)
            # guard: a zero remainder would otherwise emit an out-of-range code
            if mod > 0:
                events.append(offset_time_shift + mod - 1)
            events.append(code)
            time_cursor = time
else:
events.append(code)
# Append the end-of-sequence event
if use_end_of_sequence_event:
events.append(offset_eos)
return np.array(events, np.uint16).reshape(-1, 1)
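# Usage sketch (assumes a `muspy.Music` object named `music`):
#   events = to_event_representation(music, encode_velocity=True)
#   events.shape  # (M, 1); with the default offsets, values fall in [0, 388)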
|
{"hexsha": "b9645875979571596642105b81a1057acad5b373", "size": 4516, "ext": "py", "lang": "Python", "max_stars_repo_path": "muspy/outputs/event.py", "max_stars_repo_name": "jeremyjordan/muspy", "max_stars_repo_head_hexsha": "160cdcad10ece0618c0a71e75c3370622e786d9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "muspy/outputs/event.py", "max_issues_repo_name": "jeremyjordan/muspy", "max_issues_repo_head_hexsha": "160cdcad10ece0618c0a71e75c3370622e786d9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "muspy/outputs/event.py", "max_forks_repo_name": "jeremyjordan/muspy", "max_forks_repo_head_hexsha": "160cdcad10ece0618c0a71e75c3370622e786d9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7384615385, "max_line_length": 72, "alphanum_fraction": 0.6490256864, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1004}
|
using ViewerGL
GL = ViewerGL
points = rand(50,3)
GL.VIEW([
GL.GLPoints(points),
GL.GLHull(points,GL.Point4d(1,1,1,0.2)),
GL.GLAxis(GL.Point3d(0,0,0),GL.Point3d(1,1,1))
]);
|
{"hexsha": "c406f77c9b8301112b4c1c07308048ec3f9ccc75", "size": 191, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/Convex.jl", "max_stars_repo_name": "cvdlab/ViewerGL.js", "max_stars_repo_head_hexsha": "ae28d7808699f9c34add4ad265b68a84bfa14842", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-07-25T23:07:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-05T18:38:20.000Z", "max_issues_repo_path": "examples/Convex.jl", "max_issues_repo_name": "cvdlab/ViewerGL.js", "max_issues_repo_head_hexsha": "ae28d7808699f9c34add4ad265b68a84bfa14842", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/Convex.jl", "max_forks_repo_name": "cvdlab/ViewerGL.js", "max_forks_repo_head_hexsha": "ae28d7808699f9c34add4ad265b68a84bfa14842", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2019-10-09T14:09:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T14:52:35.000Z", "avg_line_length": 19.1, "max_line_length": 52, "alphanum_fraction": 0.6178010471, "num_tokens": 75}
|
"""
AutoML : Round 0
__author__ : abhishek thakur
"""
import numpy as np
from libscores import *
from sklearn import ensemble, linear_model, preprocessing
from sklearn import decomposition, metrics, cross_validation
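# Note: `sklearn.cross_validation` is the pre-0.18 name of today's
# `sklearn.model_selection`, so this script requires an old scikit-learn;
# several of the imports above are unused below.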
np.set_printoptions(suppress=True)
train_data = np.loadtxt('cadata/cadata_train.data')
test_data = np.loadtxt('cadata/cadata_test.data')
valid_data = np.loadtxt('cadata/cadata_valid.data')
feat_type = np.loadtxt('cadata/cadata_feat.type', dtype = 'S20')
labels = np.loadtxt('cadata/cadata_train.solution')
train_data = np.nan_to_num(train_data)
test_data = np.nan_to_num(test_data)
valid_data = np.nan_to_num(valid_data)
clf = ensemble.GradientBoostingRegressor(verbose = 2, n_estimators = 500, max_depth = 5)
clf.fit(train_data, labels)
test_preds = clf.predict(test_data)
valid_preds = clf.predict(valid_data)
np.savetxt('res/cadata_test_001.predict', test_preds, '%1.5f')
np.savetxt('res/cadata_valid_001.predict', valid_preds, '%1.5f')
|
{"hexsha": "a4209063b2c71ab4e98944d34394bf9bd3a6a726", "size": 962, "ext": "py", "lang": "Python", "max_stars_repo_path": "Phase0/cadata_main.py", "max_stars_repo_name": "abhishekkrthakur/AutoML", "max_stars_repo_head_hexsha": "ffdd8709081e1daecc84e0bce6a21ea32f22eeba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2015-07-08T22:02:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-01T11:15:45.000Z", "max_issues_repo_path": "Phase0/cadata_main.py", "max_issues_repo_name": "abhishekkrthakur/AutoML", "max_issues_repo_head_hexsha": "ffdd8709081e1daecc84e0bce6a21ea32f22eeba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Phase0/cadata_main.py", "max_forks_repo_name": "abhishekkrthakur/AutoML", "max_forks_repo_head_hexsha": "ffdd8709081e1daecc84e0bce6a21ea32f22eeba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-05-13T07:06:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-12T09:50:48.000Z", "avg_line_length": 32.0666666667, "max_line_length": 88, "alphanum_fraction": 0.7827442827, "include": true, "reason": "import numpy", "num_tokens": 246}
|
"""
KeyGenerator
Can be used to generate a pair of matching secret and public keys. In addition, the `KeyGenerator`
provides functions to obtain relinearization keys (required after multiplication) and Galois keys
(needed for rotation).
See also: [`SecretKey`](@ref), [`PublicKey`](@ref), [`RelinKeys`](@ref)
"""
mutable struct KeyGenerator <: SEALObject
handle::Ptr{Cvoid}
function KeyGenerator(context::SEALContext)
handleref = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_Create1, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
context, handleref)
@check_return_value retval
return KeyGenerator(handleref[])
end
function KeyGenerator(handle::Ptr{Cvoid})
object = new(handle)
finalizer(destroy!, object)
return object
end
end
function destroy!(object::KeyGenerator)
if isallocated(object)
@check_return_value ccall((:KeyGenerator_Destroy, libsealc), Clong, (Ptr{Cvoid},), object)
sethandle!(object, C_NULL)
end
return nothing
end
function create_public_key!(destination::PublicKey, keygen::KeyGenerator)
keyptr = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_CreatePublicKey, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
keygen, false, keyptr)
@check_return_value retval
# Destroy previous key and reuse its container
destroy!(destination)
sethandle!(destination, keyptr[])
return nothing
end
function create_public_key(keygen::KeyGenerator)
keyptr = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_CreatePublicKey, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
keygen, true, keyptr)
@check_return_value retval
return PublicKey(keyptr[])
end
function secret_key(keygen::KeyGenerator)
keyptr = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_SecretKey, libsealc), Clong,
(Ptr{Cvoid}, Ref{Ptr{Cvoid}}),
keygen, keyptr)
@check_return_value retval
return SecretKey(keyptr[])
end
function create_relin_keys!(destination::RelinKeys, keygen::KeyGenerator)
keyptr = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_CreateRelinKeys, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
keygen, false, keyptr)
@check_return_value retval
# Destroy previous key and reuse its container
destroy!(destination)
sethandle!(destination, keyptr[])
return nothing
end
function create_relin_keys(keygen::KeyGenerator)
keyptr = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_CreateRelinKeys, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
keygen, true, keyptr)
@check_return_value retval
return RelinKeys(keyptr[])
end
function create_galois_keys!(destination::GaloisKeys, keygen::KeyGenerator)
keyptr = Ref{Ptr{Cvoid}}(C_NULL)
retval = ccall((:KeyGenerator_CreateGaloisKeysAll, libsealc), Clong,
(Ptr{Cvoid}, UInt8, Ref{Ptr{Cvoid}}),
keygen, false, keyptr)
@check_return_value retval
# Destroy previous key and reuse its container
destroy!(destination)
sethandle!(destination, keyptr[])
return nothing
end
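
# Example usage (sketch; assumes a `context::SEALContext` has already been
# constructed from suitable `EncryptionParameters` elsewhere in the package):
#
#   keygen = KeyGenerator(context)
#   secret = secret_key(keygen)
#   public = create_public_key(keygen)
#   relin  = create_relin_keys(keygen)
#
# The mutating variants (`create_public_key!` etc.) reuse an existing key
# container instead of allocating a new one.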
|
{"hexsha": "6dc656da83152469fead03318d278bdaf29f19cb", "size": 3247, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/keygenerator.jl", "max_stars_repo_name": "sloede/SEAL.jl", "max_stars_repo_head_hexsha": "68285d72db5c02d8ef692aa989bd02c651fd3ff2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-10-09T01:59:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T13:22:10.000Z", "max_issues_repo_path": "src/keygenerator.jl", "max_issues_repo_name": "krrutkow/SEAL.jl", "max_issues_repo_head_hexsha": "68285d72db5c02d8ef692aa989bd02c651fd3ff2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-07-03T06:04:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-12T19:53:59.000Z", "max_forks_repo_path": "src/keygenerator.jl", "max_forks_repo_name": "sloede/SEAL.jl", "max_forks_repo_head_hexsha": "68285d72db5c02d8ef692aa989bd02c651fd3ff2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-10-24T01:54:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-11T22:56:02.000Z", "avg_line_length": 30.3457943925, "max_line_length": 98, "alphanum_fraction": 0.6886356637, "num_tokens": 888}
|
PROGRAM A10Q2
!--
! A program to implement the `Sieve of Eratosthenes` algorithm
!--
IMPLICIT NONE
INTEGER, DIMENSION(1:4999) :: S, Sfinal
INTEGER :: i, j, p, pnew, nprimes
REAL :: size
PRINT *, "Determines the list of prime numbers from 0-5000 using the 'Sieve of Eratosthenes' method"
!- initialize the arrays
DO i = 2,5000
S(i - 1) = i
Sfinal(i-1) = 0
END DO
!- implement the algorithm
p = 2
pnew = 2
size = 5000
DO WHILE (SQRT(size) .ge. p)
DO i= 1,4999
IF (MOD(S(i),p) .eq. 0 .and. S(i) .ne. p) S(i) = 0
END DO
DO WHILE (pnew .eq. p)
DO i = 1,4999
IF (S(i) .ne. 0 .and. S(i) .gt. p .and. p .eq. pnew) pnew = S(i)
END DO
END DO
p = pnew
END DO
!- isolate the non zeros
j = 1
DO i=1,4999
IF (S(i) .ne. 0) THEN
Sfinal(j) = S(i)
j = j + 1
END IF
END DO
!- print the final result, 15 primes per line
nprimes = j - 1
DO i = 1, nprimes, 15
   PRINT '(15I5)', Sfinal(i:MIN(i + 14, nprimes))
END DO
END PROGRAM A10Q2
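!- Example: the first printed line is
!- 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47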
|
{"hexsha": "dc39dc4d61ef8d647ccd4d4c5547511689a26747", "size": 1090, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Assignment 10/A10Q2.f90", "max_stars_repo_name": "Chris-Drury/COMP3731", "max_stars_repo_head_hexsha": "59d70f4fe8354b7b50fd2911ec2d8e7aad8401bc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment 10/A10Q2.f90", "max_issues_repo_name": "Chris-Drury/COMP3731", "max_issues_repo_head_hexsha": "59d70f4fe8354b7b50fd2911ec2d8e7aad8401bc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment 10/A10Q2.f90", "max_forks_repo_name": "Chris-Drury/COMP3731", "max_forks_repo_head_hexsha": "59d70f4fe8354b7b50fd2911ec2d8e7aad8401bc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.9615384615, "max_line_length": 101, "alphanum_fraction": 0.5541284404, "num_tokens": 455}
|
# coding: utf-8
# In[51]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
video_path = "360.mp4"
p_frame_thresh = 300000 # You may need to adjust this threshold
cap = cv2.VideoCapture(video_path)
# Read the first frame.
ret, prev_frame = cap.read()
alto, ancho, canales = prev_frame.shape
def bata(a, b):
""" Bhattacharyya distance between distributions (lists of floats). """
if not len(a) == len(b):
raise ValueError("a and b must be of the same size")
return -math.log(sum((math.sqrt(u * w) for u, w in zip(a, b))))
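# Illustrative: bata([0.5, 0.5], [0.4, 0.6]) ~= 0.005; identical
# distributions give 0, and the value grows as they diverge.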
i=0
while ret:
ret, curr_frame = cap.read()
if ret:
diff = cv2.absdiff(curr_frame, prev_frame)
non_zero_count = np.count_nonzero(diff)
if non_zero_count > p_frame_thresh:
i=i+1
#vector=np.asarray([])
if i<300:
#cv2.imwrite(str(i)+".jpg",curr_frame)
gris_c=cv2.cvtColor(curr_frame,cv2.COLOR_BGR2HSV)
gris_p=cv2.cvtColor(prev_frame,cv2.COLOR_BGR2HSV)
for j in range(alto):
for k in range(ancho):
                        # read the HSV triple (the original indexed curr_frame, i.e. BGR)
                        h, s, v = gris_c[j, k]
                        print(h)
#x = bata(gris_c[0],gris_p[0])
#print(x)
#elVar=np.var(curr_frame)
#print(elVar)
                elPromedio = np.average(gris_c)  # 'gris' was undefined; use the HSV frame
                print(elPromedio)
#hist, bins = np.histogram(curr_frame,bins=256)
#plt.plot(bins[:-1],hist,c='blue')
#plt.plot(vector[::1],np.linespace(0,len(vector)))
prev_frame = curr_frame
|
{"hexsha": "aa81591877458ea4867a0fc611fab2bad7fa1e8b", "size": 1783, "ext": "py", "lang": "Python", "max_stars_repo_path": "oldies/futbol.py", "max_stars_repo_name": "jrodrigopuca/futbolpy", "max_stars_repo_head_hexsha": "4583e0ce3dfe1521e2f0b6f656407c640bbf3149", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "oldies/futbol.py", "max_issues_repo_name": "jrodrigopuca/futbolpy", "max_issues_repo_head_hexsha": "4583e0ce3dfe1521e2f0b6f656407c640bbf3149", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "oldies/futbol.py", "max_forks_repo_name": "jrodrigopuca/futbolpy", "max_forks_repo_head_hexsha": "4583e0ce3dfe1521e2f0b6f656407c640bbf3149", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4307692308, "max_line_length": 75, "alphanum_fraction": 0.5143017386, "include": true, "reason": "import numpy", "num_tokens": 418}
|
/* Copyright (C) 2019-2020 Thomas Jespersen, TKJ Electronics. All rights reserved.
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the MIT License
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the MIT License for further details.
*
* Contact information
* ------------------------------------------
* Thomas Jespersen, TKJ Electronics
* Web : http://www.tkjelectronics.dk
* e-mail : thomasj@tkjelectronics.dk
* ------------------------------------------
*/
#include <ros/ros.h>
#include <dynamic_reconfigure/server.h>
#include <boost/thread/recursive_mutex.hpp>
#include "std_msgs/String.h"
#include "geometry_msgs/Quaternion.h"
#include "geometry_msgs/Twist.h"
#include <tf/LinearMath/Quaternion.h>
#include <tf/transform_datatypes.h>
#include <string>
// For CLion to update/capture the updated parameter and message types, open the "build" folder and run "make"
/* Include generated Dynamic Reconfigure parameters */
#include <jetsoncar_driver/TestParametersConfig.h>
/* Include generated Services */
#include <jetsoncar_interfaces/AddTwoInts.h>
/* Include generated Message Types */
#include <jetsoncar_interfaces/Test.h>
void TestServiceClient(ros::NodeHandle &n);
dynamic_reconfigure::Server<jetsoncar_driver::TestParametersConfig> * serverPtr;
/* Initialize the parameters at once, otherwise the values will be random */
jetsoncar_driver::TestParametersConfig config;
jetsoncar_driver::TestParametersConfig prevConfig;
void paramChangeCallback(jetsoncar_driver::TestParametersConfig &config, uint32_t level) {
ROS_INFO("Reconfigure Request: %d %f %s %s %d",
config.int_param, config.double_param,
config.str_param.c_str(),
config.bool_param?"True":"False",
config.size);
if (config.int_param != prevConfig.int_param) ROS_INFO("int_param changed");
if (config.double_param != prevConfig.double_param) ROS_INFO("double_param changed");
if (config.str_param.compare(prevConfig.str_param) != 0) ROS_INFO("str_param changed");
if (config.bool_param != prevConfig.bool_param) ROS_INFO("bool_param changed");
if (config.size != prevConfig.size) ROS_INFO("size changed");
prevConfig = config;
}
bool add(jetsoncar_interfaces::AddTwoInts::Request &req,
jetsoncar_interfaces::AddTwoInts::Response &res)
{
res.sum = req.a + req.b;
ROS_INFO("request: x=%ld, y=%ld", (long int)req.a, (long int)req.b);
ROS_INFO("sending back response: [%ld]", (long int)res.sum);
// As a test, update int_param with the result
//ros::NodeHandle n("~");
//n.setParam("int_param", (int)res.sum);
config.int_param = res.sum;
serverPtr->updateConfig(config);
return true;
}
int main(int argc, char **argv) {
std::string nodeName = "test_node";
ros::init(argc, argv, nodeName.c_str());
ros::NodeHandle n("~"); // default/current namespace node handle
// Enable reconfigurable parameters - note that any parameters set on the node by roslaunch <param> tags will be seen by a dynamically reconfigurable node just as it would have been by a conventional node.
boost::recursive_mutex configMutex;
dynamic_reconfigure::Server<jetsoncar_driver::TestParametersConfig> server(configMutex);
dynamic_reconfigure::Server<jetsoncar_driver::TestParametersConfig>::CallbackType f;
    f = boost::bind(&paramChangeCallback, _1, _2);
server.getConfigDefault(prevConfig);
server.setCallback(f);
serverPtr = &server;
ROS_INFO_STREAM("config.str_param = " << prevConfig.str_param);
// Create service
ros::ServiceServer service = n.advertiseService("add_two_ints", add);
// Create custom message publisher
ros::Publisher pub = n.advertise<jetsoncar_interfaces::Test>("test", 1000);
jetsoncar_interfaces::Test testMsg;
testMsg.first_name = "Thomas";
testMsg.last_name = "Jespersen";
testMsg.age = 10;
testMsg.score = 99;
ros::Rate loop_rate(10); // 10 hz loop rate
while (ros::ok())
{
pub.publish(testMsg);
ros::spinOnce(); // walks the callback queue and calls registered callbacks for any outstanding events (incoming msgs, svc reqs, timers)
loop_rate.sleep();
}
}
// Note that the service call (client) cannot be performed from within the same node that provides the service
void TestServiceClient(ros::NodeHandle &n)
{
ros::ServiceClient client = n.serviceClient<jetsoncar_interfaces::AddTwoInts>("add_two_ints");
jetsoncar_interfaces::AddTwoInts srv;
srv.request.a = 10;
srv.request.b = 20;
if (client.call(srv))
{
ROS_INFO("Sum: %ld", (long int)srv.response.sum);
}
else
{
ROS_ERROR("Failed to call service add_two_ints");
}
}
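
// Example (sketch): with the node running, the service can also be exercised
// from the command line via `rosservice call /test_node/add_two_ints 10 20`,
// and the reconfigurable parameters tuned live with
// `rosrun rqt_reconfigure rqt_reconfigure`.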
|
{"hexsha": "1d27d218e3e0f579cc5ff603bf6f2b8e7a5a9a5c", "size": 4802, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ros/jetsoncar_driver/src/test_node.cpp", "max_stars_repo_name": "mindThomas/JetsonCar", "max_stars_repo_head_hexsha": "74636d4da1f7f71ca9f2315a1b2347393b081eda", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2020-11-09T08:52:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T15:18:36.000Z", "max_issues_repo_path": "ros/jetsoncar_driver/src/test_node.cpp", "max_issues_repo_name": "mindThomas/JetsonCar", "max_issues_repo_head_hexsha": "74636d4da1f7f71ca9f2315a1b2347393b081eda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ros/jetsoncar_driver/src/test_node.cpp", "max_forks_repo_name": "mindThomas/JetsonCar", "max_forks_repo_head_hexsha": "74636d4da1f7f71ca9f2315a1b2347393b081eda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-07-06T15:18:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T15:18:41.000Z", "avg_line_length": 35.8358208955, "max_line_length": 206, "alphanum_fraction": 0.7217825906, "num_tokens": 1181}
|
#%%
%load_ext autoreload
%autoreload 2
import jax
import jax.numpy as np
import numpy as onp
import distrax
import optax
import gym
from functools import partial
from env import Navigation2DEnv, Navigation2DEnv_Disc
import cloudpickle
import pathlib
import haiku as hk
from jax.config import config
config.update("jax_enable_x64", True)
config.update("jax_debug_nans", True) # break on nans
#%%
from utils import normal_log_density, sample_gaussian
from utils import disc_policy as policy
from utils import eval, init_policy_fcn, Disc_Vector_Buffer, discount_cumsum, \
tree_mean, mean_vmap_jit, sum_vmap_jit
env_name = 'Navigation2D'
env = Navigation2DEnv_Disc(max_n_steps=200) # maml debug env
n_actions = env.action_space.n
obs_dim = env.observation_space.shape[0]
print(f'[LOGGER] n_actions: {n_actions} obs_dim: {obs_dim}')
# value function / baseline
# https://github.com/rll/rllab/blob/master/rllab/baselines/linear_feature_baseline.py
def v_features(obs):
o = np.clip(obs, -10, 10)
l = len(o)
al = np.arange(l).reshape(-1, 1) / 100.0
return np.concatenate([o, o ** 2, al, al ** 2, al ** 3, np.ones((l, 1))], axis=1)
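# e.g. for obs of shape (T, d) this returns a (T, 2d + 4) feature matrix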
def v_fit(trajectories, feature_fcn=v_features, reg_coeff=1e-5):
featmat = np.concatenate([feature_fcn(traj['obs']) for traj in trajectories])
r = np.concatenate([traj['r'] for traj in trajectories])
for _ in range(5):
# solve argmin_x (F x = R) <-- unsolvable (F non-sqr)
# == argmin_x (F^T F x = F^T R) <-- solvable (sqr F^T F)
# where F = Features, x = Weights, R = rewards
_coeffs = np.linalg.lstsq(
featmat.T.dot(featmat) + reg_coeff * np.identity(featmat.shape[1]),
featmat.T.dot(r)
)[0]
if not np.any(np.isnan(_coeffs)):
return _coeffs, 0 # succ
reg_coeff *= 10
return np.zeros_like(_coeffs), 1 # err
# note: unused variant that assumes `traj` is a tuple of arrays; `sample`
# in the TRPO section below handles the dict trajectories produced by rollout
def sample_trajectory(traj, p):
traj_len = int(traj[0].shape[0] * p)
idxs = onp.random.choice(traj_len, size=traj_len, replace=False)
sampled_traj = jax.tree_map(lambda x: x[idxs], traj)
return sampled_traj
def rollout(env, p_params, rng):
buffer = Disc_Vector_Buffer(obs_dim, max_n_steps)
obs = env.reset()
for _ in range(max_n_steps):
rng, subkey = jax.random.split(rng, 2)
a, log_prob = policy(p_frwd, p_params, obs, subkey, False)
a = jax.lax.stop_gradient(a)
log_prob = jax.lax.stop_gradient(log_prob)
a = a.item()
obs2, r, done, _ = env.step(a)
buffer.push((obs, a, r, obs2, done, log_prob))
obs = obs2
if done: break
trajectory = buffer.contents()
return trajectory
#%%
# inner optim
@jax.jit
def _reinforce_loss(p_params, obs, a, adv):
pi = p_frwd(p_params, obs)
log_prob = distrax.Categorical(probs=pi).log_prob(a)
loss = -(log_prob * adv).sum()
return loss
reinforce_loss = sum_vmap_jit(_reinforce_loss, (None, 0, 0, 0))
reinforce_loss_grad = jax.jit(jax.value_and_grad(reinforce_loss))
@jax.jit
def sgd_step_int(params, grads, alpha):
sgd_update = lambda param, grad: param - alpha * grad
return jax.tree_multimap(sgd_update, params, grads)
@jax.jit
def sgd_step_tree(params, grads, alphas):
sgd_update = lambda param, grad, alpha: param - alpha * grad
return jax.tree_multimap(sgd_update, params, grads, alphas)
def sgd_step(params, grads, alpha):
step_fcn = sgd_step_int if type(alpha) in [int, float] else sgd_step_tree
return step_fcn(params, grads, alpha)
# %%
seed = onp.random.randint(1e5)
epochs = 500
eval_every = 1
max_n_steps = 100 # env._max_episode_steps
## TRPO
delta = 0.01
n_search_iters = 10
cg_iters = 10
gamma = 0.99
lmbda = 0.95
## MAML
task_batch_size = 40
train_n_traj = 20
eval_n_traj = 40
alpha = 0.1
damp_lambda = 0.01
rng = jax.random.PRNGKey(seed)
onp.random.seed(seed)
## model init
p_frwd, p_params = init_policy_fcn('discrete', env, rng)
## save path
model_path = pathlib.Path(f'./models/maml/{env_name}')
model_path.mkdir(exist_ok=True, parents=True)
# %%
task = env.sample_tasks(1)[0]
env.reset_task(task)
# %%
@jax.jit
def compute_advantage(W, traj):
# linear fcn predict
v_obs = v_features(traj['obs']) @ W
# baseline
adv = traj['r'] - v_obs
# normalize
adv = (adv - adv.mean()) / (adv.std() + 1e-8)
return adv.squeeze()
def maml_inner(p_params, env, rng, n_traj, alpha):
subkeys = jax.random.split(rng, n_traj)
trajectories = []
for i in range(n_traj):
traj = rollout(env, p_params, subkeys[i])
traj['r'] = discount_cumsum(traj['r'], discount=gamma)
trajectories.append(traj)
W = v_fit(trajectories)[0]
for i in range(len(trajectories)):
trajectories[i]['adv'] = compute_advantage(W, trajectories[i])
gradients = []
for traj in trajectories:
_, grad = reinforce_loss_grad(p_params, traj['obs'], traj['a'], traj['adv'])
gradients.append(grad)
grads = jax.tree_multimap(lambda *x: np.stack(x).sum(0), *gradients)
inner_params_p = sgd_step(p_params, grads, alpha)
return inner_params_p, W
def _trpo_policy_loss(p_params, obs, a, adv, old_log_prob):
pi = p_frwd(p_params, obs)
dist = distrax.Categorical(probs=pi)
ratio = np.exp(dist.log_prob(a) - old_log_prob)
loss = -(ratio * adv).sum()
return loss
trpo_policy_loss = mean_vmap_jit(_trpo_policy_loss, (None, *([0]*4)))
def maml_outer(p_params, env, rng):
subkeys = jax.random.split(rng, 3)
newp, W = maml_inner(p_params, env, subkeys[0], train_n_traj, 0.1)
traj = rollout(env, p_params, subkeys[1])
adv = compute_advantage(W, traj)
loss = trpo_policy_loss(newp, traj['obs'], traj['a'], adv, traj['log_prob'])
return loss, traj
(loss, traj), grads = jax.value_and_grad(maml_outer, has_aux=True)(p_params, env, rng)
loss
# grads = grad(maml_outer)
# compute natural gradient
# line search step
# %%
# note: depends on D_KL_probs and the TRPO helpers (gnh_vp, tree_mvp_dampen)
# defined in the "TRPO FCNS" cell below -- run that cell first
def _natural_gradient(params, grads, obs):
f = lambda w: p_frwd(w, obs)
rho = D_KL_probs
ngrad, _ = jax.scipy.sparse.linalg.cg(
tree_mvp_dampen(lambda v: gnh_vp(f, rho, params, v), damp_lambda),
grads, maxiter=cg_iters)
vec = lambda x: x.flatten()[:, None]
mat_mul = lambda x, y: np.sqrt(2 * delta / (vec(x).T @ vec(y)).flatten())
alpha = jax.tree_multimap(mat_mul, grads, ngrad)
return ngrad, alpha
natural_gradient = mean_vmap_jit(_natural_gradient, (None, 0))
ngrad, alpha = natural_gradient(p_params, grads, traj['obs'])
alpha
# %%
probs = p_frwd(p_params, traj['obs'][0])
probs
# %%
jax.hessian(D_KL_probs)(probs, np.array(onp.array([0.55, 0.1, 0.25, 0.1])))
#%%
### TRPO FCNS
from utils import gnh_vp, tree_mvp_dampen
def D_KL_probs(p1, p2):
d_kl = (p1 * (np.log(p1) - np.log(p2))).sum()
return d_kl
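# e.g. D_KL_probs(p, p) == 0 for any probability vector p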
def D_KL_probs_params(param1, param2, obs):
p1, p2 = p_frwd(param1, obs), p_frwd(param2, obs)
return D_KL_probs(p1, p2)
def sample(traj, p):
traj_len = int(traj['obs'].shape[0] * p)
idxs = onp.random.choice(traj_len, size=traj_len, replace=False)
sampled_traj = jax.tree_map(lambda x: x[idxs], traj)
return sampled_traj
import operator
tree_scalar_op = lambda op: lambda tree, arg2: jax.tree_map(lambda x: op(x, arg2), tree)
tree_scalar_divide = tree_scalar_op(operator.truediv)
tree_scalar_mult = tree_scalar_op(operator.mul)
# backtracking line-search (sketch: `batch_policy_loss`, the tensorboard
# `writer` and the epoch counter `e` are assumed to be defined in the session)
def line_search(alpha_start, init_loss, p_params, p_ngrad, rollout, n_iters, delta):
obs = rollout[0]
for i in np.arange(n_iters):
alpha = tree_scalar_divide(alpha_start, 2 ** i)
new_p_params = sgd_step_tree(p_params, p_ngrad, alpha)
new_loss = batch_policy_loss(new_p_params, rollout)
d_kl = jax.vmap(partial(D_KL_probs_params, new_p_params, p_params))(obs).mean()
if (new_loss < init_loss) and (d_kl <= delta):
writer.add_scalar('info/line_search_n_iters', i, e)
return new_p_params # new weights
writer.add_scalar('info/line_search_n_iters', -1, e)
return p_params # no new weights
|
{"hexsha": "916cb3d5f5d86cfe661925c45b9951221f6e545b", "size": 8100, "ext": "py", "lang": "Python", "max_stars_repo_path": "maml/maml.py", "max_stars_repo_name": "gebob19/rl_with_jax", "max_stars_repo_head_hexsha": "a30df06de3035c460e5339611974664a2130ca6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-08-31T22:35:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T23:07:36.000Z", "max_issues_repo_path": "maml/maml.py", "max_issues_repo_name": "gebob19/rl_with_jax", "max_issues_repo_head_hexsha": "a30df06de3035c460e5339611974664a2130ca6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maml/maml.py", "max_forks_repo_name": "gebob19/rl_with_jax", "max_forks_repo_head_hexsha": "a30df06de3035c460e5339611974664a2130ca6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6703296703, "max_line_length": 88, "alphanum_fraction": 0.6697530864, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 2461}
|
import os
import math
import shutil
import time
import collections
from pathlib import Path
import logging
import uuid
import numpy as np
from fmpy.fmi1 import FMU1Slave, FMU1Model
from fmpy.fmi2 import FMU2Slave, FMU2Model
from fmpy import read_model_description, extract
from energym.envs.env import Env
from energym.envs.utils.weather import EPW, MOS
from energym.envs.utils.kpi import KPI
from energym.spaces.dict import Dict
from energym.spaces.discrete import Discrete
from energym.spaces.box import Box
logger = logging.getLogger(__name__)
class EnvFMU(Env):
"""The FMU base class for Energym.
It encapsulates an environment whose simulation is performed in an FMU.
The methods step(), reset() and close() from Env are implemented here.
Attributes
----------
fmu_file : str
Full path to the FMU file
model_description : fmpy ModelDescription object
Encapsulated description of the model extracted from the FMU using
FMPy inspection methods
fmi_version : str
Version number of FMI, inspected inside the FMU. Should be '1.0'
or '2.0'
step_size : int or double
Simulation stepsize in seconds. Int for EnergyPlus, double for Modelica
weather : str
Indicates the used weather profile
is_fmu_initialized : bool
Flags the FMU initialization process
vrs : dict
Contains the variable names and their references
fmi_type : str
        The simulation type as specified by the FMU, either 'cosim' or 'modex'
start_time : int
Start of simulation time in seconds
stop_time : int
End of simulation time in seconds
kpis : KPI object
To track the KPI relevant metrics
input_space : dict
Contains controllable input variables
output_space : dict
Contains output variables
observation_history : list
Collects all observations of one simulation
unzipdir : str
Directory for extracting the FMU
fmu : FMU1Slave or FMU2Slave or FMU1Model or FMU2Model
Simulation object
time : int or double
Current simulation time (int for EnergyPlus, double for Modelica)
Methods
-------
initialize()
Initializes simulation object.
__build_input_space(input_specs)
Collects the inputs from the simulation object.
__build_output_space(output_specs)
Collects the outputs from the simulation object.
__initialize_fmu()
Initializes the FMU after instantiation.
get_inputs_names()
Retrieves list of inputs from model description.
get_outputs_names()
Retrieves list of outputs from model description.
get_date()
Gets the current simulation time.
step(inputs=None)
Advances the simulation one timestep.
print_kpis()
Prints the KPIs.
get_kpi(start_ind=0, end_ind=-1)
Retrieves the KPIs.
get_cumulative_kpi(phrase, kpi_type, out_type)
Retrieves the cumulative KPIs over multiple variables.
sample_random_action()
Samples random actions from the action space.
get_forecast(forecast_length = 24, **kwargs)
Generates a weather forecast of a given length.
look_for_weather_file(name = None)
Finds a weather file in the FMU.
post_process(list_rel_out, res, arrays=False)
Post-process output of FMPY.
reset()
Resets the simulation.
close()
Terminates the FMU and removes leftover folders.
"""
def __init__(
self,
model_path,
start_time,
stop_time,
step_size,
weather=None,
input_specs=None,
output_specs=None,
kpi_options=None,
default_path=True,
weather_file_path=None,
):
"""
Parameters
----------
model_path: str
Path to the fmu model file, relative inside the simulation folder
start_time: int
Begin of the simulation time in seconds in relation to the
beginning of the year
stop_time: int
End of the simulation time in seconds in relation to the
beginning of the year
step_size: double
Step size in second. May be chosen freely for some models (modelica),
or needs to be identical to model step size in other cases (EnergyPlus)
weather : EPW or MOS, optional
Specifies the used weather file, by default None
input_specs : dict, optional
Contains the model inputs, by default None
output_specs : dict, optional
Contains the model outputs, by default None
kpi_options : dict, optional
Dict to specify the tracked KPIs, by default None
default_path : bool
            Whether to use the default path or an absolute path in model_path
Raises
------
ValueError
            If the FMU supports neither co-simulation nor model exchange
"""
super().__init__()
if default_path:
self.fmu_file = self.energym_path / "simulation" / model_path
else:
self.fmu_file = model_path
self.model_description = read_model_description(self.fmu_file)
self.step_size = step_size
self.weather = weather
self.weather_file_path = weather_file_path
self.is_fmu_initialized = False
# Extract variables references
self.vrs = {}
for variable in self.model_description.modelVariables:
self.vrs[variable.name] = variable.valueReference
# detect fmi_version
self.fmi_version = self.model_description.fmiVersion
# detect FMI type
if self.model_description.coSimulation is not None:
self.fmi_type = "cosim"
elif self.model_description.modelExchange is not None:
self.fmi_type = "modex"
else:
raise ValueError("the type of FMU could not be identified")
# extract the FMU
self.start_time = start_time
self.stop_time = stop_time
self.input_specs = input_specs
# Fix inputs and outputs keys
if output_specs is not None:
self.output_keys = sorted(
[
p.name
for p in self.model_description.modelVariables
if p.name in list(output_specs.keys())
]
)
self.__build_output_space(output_specs)
else:
self.output_keys = sorted(
[
p.name
for p in self.model_description.modelVariables
if p.causality == "output"
]
)
if input_specs is not None:
self.input_keys = sorted(
[
p.name
for p in self.model_description.modelVariables
if p.name in list(input_specs.keys())
]
)
self.__build_input_space(input_specs)
else:
self.input_keys = sorted(
[
p.name
for p in self.model_description.modelVariables
if p.causality == "input"
]
)
self.kpis = KPI(kpi_options)
        # initialize FMU and spaces
self.initialize()
def __build_input_space(self, input_specs):
"""Collects the inputs from the simulation object.
The inputs have to be contained in input_specs but
not every key of the two needs to be an input to the specific model.
Parameters
----------
input_specs : dict
Contains possible control inputs from the model.
"""
input_array = self.get_inputs_names()
input_space_list = []
for act_name in input_array:
if act_name in input_specs:
act_specs = input_specs[act_name]
if act_specs["type"] == "scalar":
input_space_list += [
(
act_name,
Box(
low=act_specs["lower_bound"],
high=act_specs["upper_bound"],
shape=[1],
dtype=np.float32,
),
)
]
elif act_specs["type"] == "discrete":
input_space_list += [(act_name, Discrete(act_specs["size"]))]
else:
raise TypeError("Wrong type in INPUT_SPECS.")
else:
raise ValueError("Undefined Input {}".format(act_name))
self.input_space = Dict(spaces=input_space_list)
def __build_output_space(self, output_specs):
"""Collects the outputs from the simulation object.
The outputs have to be contained in output_specs, but not every
key needs to be an output to the specific model.
Parameters
----------
output_specs : dict
Contains possible outputs from the model.
"""
output_array = self.get_outputs_names()
output_space_list = []
for obs_name in output_array:
obs_specs = output_specs[obs_name]
if obs_specs["type"] == "scalar":
output_space_list += [
(
obs_name,
Box(
low=obs_specs["lower_bound"],
high=obs_specs["upper_bound"],
shape=[1],
dtype=np.float32,
),
)
]
elif obs_specs["type"] == "discrete":
output_space_list += [(obs_name, Discrete(obs_specs["size"]))]
self.output_space = Dict(spaces=output_space_list)
self.observation_history = []
def __initialize_fmu(self):
"""Initializes the FMU after instantiation."""
if self.fmi_version == "1.0":
self.fmu.initialize(tStart=self.start_time, stopTime=self.stop_time)
elif self.fmi_version == "2.0":
self.fmu.enterInitializationMode()
self.fmu.exitInitializationMode()
self.is_fmu_initialized = True
def initialize(self):
"""Initializes simulation object.
Instantiates FMPy FMUSalve1 or FMUSlave2 object based on FMI
version detected.
"""
init_time = str(time.time())[0:10]
random_id = str(uuid.uuid4().fields[-1])[:7]
fmu_path = os.path.join(self.runs_path, init_time + "_" + random_id)
os.mkdir(fmu_path)
self.unzipdir = extract(self.fmu_file, unzipdir=fmu_path)
weather_folder = Path(self.unzipdir) / "resources"
possible_weather_files = list(weather_folder.rglob("*.mos")) + list(
weather_folder.rglob("*.epw")
)
        # only swap in a custom weather file when one was actually provided;
        # otherwise the default file shipped inside the FMU is kept
        if self.weather_file_path is not None and possible_weather_files:
            weather_default_file_path = weather_folder / possible_weather_files[0]
            try:
                os.remove(weather_default_file_path)
                shutil.copy(self.weather_file_path, weather_default_file_path)
            except BaseException as e:
                logging.error(e)
                logging.error("Problem with the weather file handling")
# initialize
instance_name = "instance" + init_time
# model identifier
if self.fmi_type == "modex":
model_id = self.model_description.modelExchange.modelIdentifier
else:
model_id = self.model_description.coSimulation.modelIdentifier
kwargs = dict(
guid=self.model_description.guid,
unzipDirectory=self.unzipdir,
modelIdentifier=model_id,
instanceName=instance_name,
)
if self.fmi_version == "1.0":
if self.fmi_type == "cosim":
self.fmu = FMU1Slave(**kwargs)
else:
self.fmu = FMU1Model(**kwargs)
elif self.fmi_version == "2.0":
if self.fmi_type == "cosim":
self.fmu = FMU2Slave(**kwargs)
else:
self.fmu = FMU2Model(**kwargs)
self.fmu.instantiate(loggingOn=True)
if self.fmi_version == "2.0":
self.fmu.setupExperiment(startTime=self.start_time, stopTime=self.stop_time)
# Initialize time and the last_output values
self.time = self.start_time
def get_inputs_names(self):
"""Retrieves list of inputs from model description.
Returns
-------
input_keys : list of str
List with input names.
"""
return self.input_keys
def get_outputs_names(self):
"""Retrieves list of outputs from model description.
Returns
-------
output_keys : list of str
Variable names that specify outputs.
"""
        return self.output_keys
def get_date(self):
"""Gets the current simulation time.
Returns
-------
int
Minutes of the current simulation time
int
Hours of the current simulation time
int
Day of the current simulation time
int
Month of the current simulation time
"""
time_tuple = (2013, 1, 1, 0, 0, 0, 1, 1, 0)
base_time = time.mktime(time_tuple)
date = time.localtime(base_time + self.time)
return date[4], date[3], date[2], date[1]
def step(self, inputs=None):
"""Advances the simulation one timestep.
Applies input for current step, simulate the system in FMU and retrieves outputs.
Parameters
----------
inputs: dict
Inputs for the system. Keys are input names, values are iterables of input values.
If not defined, assumes no inputs required.
Returns
----------
outputs: dict
Outputs for the system.
"""
if inputs is None:
inputs = {}
        # Initialize the FMU if it has not been initialized yet
if not self.is_fmu_initialized:
self.__initialize_fmu()
# Inputs is a dictionary of arrays
res = []
# simulation loop
if bool(inputs):
inp_keys = sorted(list(inputs.keys()))
n_steps = len(inputs[inp_keys[0]])
non_inp_keys = set(self.input_keys) - set(list(inputs))
else:
n_steps = 1
inp_keys = []
non_inp_keys = self.input_keys
for p in range(n_steps):
key_list = []
input_list = []
for key in inp_keys:
key_list.append(self.vrs[key])
input_list.append(inputs[key][p])
for key in non_inp_keys:
key_list.append(self.vrs[key])
input_list.append(self.input_specs[key]["default"])
self.fmu.setReal(
key_list,
input_list,
)
# perform one step
self.fmu.doStep(
currentCommunicationPoint=self.time,
communicationStepSize=self.step_size,
)
# get the values
out_values = self.fmu.getReal([self.vrs[key] for key in self.output_keys])
# advance the time
self.time += self.step_size
# append the results
res.append((self.time, out_values))
output = self.post_process(self.output_keys, res, arrays=False)
self.kpis.add_observation(output)
return output
def print_kpis(self):
"""Prints the KPIs."""
kpi_summary = self.get_kpi()
for key in kpi_summary:
print(
"####################################################################"
)
kpi_name = kpi_summary[key]["name"]
kpi_type = kpi_summary[key]["type"]
kpi_val = kpi_summary[key]["kpi"]
print(
"Variable name: {}, kpi type: {}, kpi value: {}".format(
kpi_name, kpi_type, kpi_val
)
)
def get_kpi(self, start_ind=0, end_ind=-1):
"""Retrieves the KPIs.
For implementation details see the KPI class.
Parameters
----------
start_ind : int, optional
Index from where the KPI computation starts, by default 0
end_ind: int, optional
Index where the KPI computation ends, by default -1
Returns
-------
kpi_summary : dict
Dict containing all the tracked variables and their KPIs.
"""
return self.kpis.get_kpi(start_ind, end_ind)
def get_cumulative_kpi(self, names, kpi_type, out_type):
"""Retrieves the cumulative KPIs over multiple variables.
For implementation details see the KPI class.
Parameters
----------
names : list or str
List of variable names or common string to filter the variables.
kpi_type : str
One of the 4 KPI types to filter the variables.
out_type : str
Cumulative KPI type ("avg" or "sum").
Returns
-------
float or int
The computed KPI.
"""
return self.kpis.get_cumulative_kpi(names, kpi_type, out_type)
def sample_random_action(self):
"""Samples random actions from the action space.
Returns
-------
dict
Inputs with random values, within a specified range
"""
action = self.input_space.sample()
return dict(list(action.items()))
def get_forecast(self, forecast_length=24):
"""Generates a weather forecast of a given length.
Parameters
----------
forecast_length : int, optional
Number of timesteps that will be forecasted, by default 24
Returns
-------
forecast : dict
Forecasted values for default keys or ones specified in kwargs
"""
time_resolution = self.step_size / 60
hourly_steps = int(60 / time_resolution)
tot_length = math.ceil(forecast_length / hourly_steps) + 2
start_index = 0
forecast = {}
if isinstance(self.weather, EPW):
minute, hour, day, month = self.get_date()
start_index = int(minute / time_resolution)
forecast = self.weather.get_forecast(hour, day, month, tot_length)
elif isinstance(self.weather, MOS):
res = self.time % 3600
start_index = int(res / self.step_size)
forecast = self.weather.get_forecast(self.time - res, forecast_length)
forecast = self._interpolate_forecast(forecast, hourly_steps)
for key in forecast:
forecast[key] = forecast[key][start_index : forecast_length + start_index]
return forecast
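    # _interpolate_forecast example: hourly values [0, 4] with hourly_steps=4
    # expand to [0, 1, 2, 3, 4]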
def _interpolate_forecast(self, forecast, hourly_steps):
for key in forecast:
mod_list = forecast[key]
new_list = []
for i in range(len(mod_list) - 1):
for j in range(hourly_steps):
weight = j / hourly_steps
new_list.append(
(1 - weight) * mod_list[i] + weight * mod_list[i + 1]
)
new_list.append(mod_list[len(mod_list) - 1])
forecast[key] = new_list
return forecast
def look_for_weather_file(
self,
name=None,
generate_forecasts=True,
generate_forecast_method="perfect",
generate_forecast_keys=None,
):
"""Finds a weather file in the FMU.
Parameters
----------
name : str
Name of weather file
Raises
------
Exception
If no weather file/more than one weather file is found or the file has a wrong type
"""
weather_folder = Path(self.unzipdir) / "resources"
if name is None:
possible_weather_files = list(weather_folder.rglob("*.mos")) + list(
weather_folder.rglob("*.epw")
)
else:
possible_weather_files = list(weather_folder.rglob(name))
if len(possible_weather_files) == 0:
raise Exception("No weather file found in FMU")
elif len(possible_weather_files) > 1:
raise Exception(
"Found more than one weather file: {}. specify a name to select one.".format(
possible_weather_files
)
)
else:
wf = weather_folder / possible_weather_files[0]
if wf.suffix == ".mos":
self.weather = MOS()
self.weather.read(
wf,
generate_forecasts,
generate_forecast_method,
generate_forecast_keys,
)
elif wf.suffix == ".epw":
self.weather = EPW()
self.weather.read(
wf,
generate_forecasts,
generate_forecast_method,
generate_forecast_keys,
)
else:
raise Exception(
"File {} cannot be interpreted as a weather file".format(wf)
)
def post_process(self, list_rel_out, res, arrays=False):
"""Post-process output of FMPY.
Parameters
----------
list_rel_out : list of str
Output labels
res : list
Output of doStep FMPy method
arrays : bool, optional
If True, array output in processed structure, default is False
Returns
-------
dic_res: collections.OrderedDict
Dictionary with values of output for each key
"""
N = len(res)
dic_res = collections.OrderedDict()
position = {e: i for i, e in enumerate(list_rel_out)}
# Store time
if arrays:
dic_res["time"] = []
for key in list_rel_out:
dic_res[key] = []
for p in range(N):
for key in list_rel_out:
if arrays:
dic_res[key] += [res[p][1][position[key]]]
else:
dic_res[key] = res[p][1][position[key]]
if arrays:
dic_res["time"] += [res[p][0]]
else:
dic_res["time"] = res[p][0]
if arrays:
for key in dic_res:
dic_res[key] = np.asarray(dic_res[key]).flatten()
return dic_res
def reset(self):
"""Resets the simulation."""
self.close()
self.kpis.reset()
self.initialize()
def close(self, save=True):
"""Terminates the FMU and removes leftover folders."""
instance_name = self.fmu.instanceName
self.fmu.terminate()
self.fmu.freeInstance()
self.is_fmu_initialized = False
try:
shutil.rmtree(self.unzipdir)
except PermissionError as e:
logger.error(f"Folder could not be removed. {e}")
cwd = os.getcwd()
wd_sub_list = os.listdir(cwd)
if save:
for directory in wd_sub_list:
if instance_name in directory:
try:
shutil.move(
os.path.join(cwd, directory),
os.path.join(self.runs_path, directory),
)
except PermissionError as e:
logger.error(f"Folder could not be moved. {e}")
else:
for directory in wd_sub_list:
if instance_name in directory:
try:
shutil.rmtree(
os.path.join(cwd, directory),
)
except PermissionError as e:
logger.error(f"Folder could not be removed. {e}")
|
{"hexsha": "fadf99d44a0b596dc80caf1030f1af116edb4527", "size": 24264, "ext": "py", "lang": "Python", "max_stars_repo_path": "energym/envs/env_fmu.py", "max_stars_repo_name": "bsl546/energym", "max_stars_repo_head_hexsha": "0133ca7a19d21352a427e1913755e1ebf6fd8bb6", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-03-12T20:24:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T22:13:45.000Z", "max_issues_repo_path": "energym/envs/env_fmu.py", "max_issues_repo_name": "bsl546/energym", "max_issues_repo_head_hexsha": "0133ca7a19d21352a427e1913755e1ebf6fd8bb6", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-07-04T13:12:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-02T11:02:12.000Z", "max_forks_repo_path": "energym/envs/env_fmu.py", "max_forks_repo_name": "bsl546/energym", "max_forks_repo_head_hexsha": "0133ca7a19d21352a427e1913755e1ebf6fd8bb6", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-11-30T07:56:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T22:15:05.000Z", "avg_line_length": 33.3296703297, "max_line_length": 95, "alphanum_fraction": 0.5559676888, "include": true, "reason": "import numpy", "num_tokens": 4945}
|
module MessageRequest
export body_is_a_stream, body_was_streamed, setuseragent!, resource
import ..Layer, ..request
using ..IOExtras
using URIs
using ..Messages
import ..Messages: bodylength
import ..Headers
import ..Form, ..content_type
"""
"request-target" per https://tools.ietf.org/html/rfc7230#section-5.3
"""
resource(uri::URI) = string( isempty(uri.path) ? "/" : uri.path,
!isempty(uri.query) ? "?" : "", uri.query,
!isempty(uri.fragment) ? "#" : "", uri.fragment)
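# e.g. resource(URI("http://example.com/api/items?id=1#frag")) == "/api/items?id=1#frag"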
"""
request(MessageLayer, method, ::URI, headers, body) -> HTTP.Response
Construct a [`Request`](@ref) object and set mandatory headers.
"""
struct MessageLayer{Next <: Layer} <: Layer{Next} end
export MessageLayer
function request(::Type{MessageLayer{Next}},
method::String, url::URI, headers::Headers, body;
http_version=v"1.1",
target=resource(url),
parent=nothing, iofunction=nothing, kw...) where Next
defaultheader!(headers, "Host" => url.host)
if USER_AGENT[] !== nothing
defaultheader!(headers, "User-Agent" => USER_AGENT[])
end
if !hasheader(headers, "Content-Length") &&
!hasheader(headers, "Transfer-Encoding") &&
!hasheader(headers, "Upgrade")
l = bodylength(body)
if l != unknown_length
setheader(headers, "Content-Length" => string(l))
elseif method == "GET" && iofunction isa Function
setheader(headers, "Content-Length" => "0")
end
end
if !hasheader(headers, "Content-Type") && body isa Form && method == "POST"
# "Content-Type" => "multipart/form-data; boundary=..."
setheader(headers, content_type(body))
end
req = Request(method, target, headers, bodybytes(body);
parent=parent, version=http_version)
return request(Next, url, req, body; iofunction=iofunction, kw...)
end
const USER_AGENT = Ref{Union{String, Nothing}}("HTTP.jl/$VERSION")
"""
setuseragent!(x::Union{String, Nothing})
Set the default User-Agent string to be used in each HTTP request.
Can be manually overridden by passing an explicit `User-Agent` header.
Setting `nothing` will prevent the default `User-Agent` header from being passed.
"""
function setuseragent!(x::Union{String, Nothing})
USER_AGENT[] = x
return
end
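# e.g. `setuseragent!("MyApp/1.0")` replaces the default header value, while
# `setuseragent!(nothing)` suppresses the header entirely.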
bodylength(body) = unknown_length
bodylength(body::AbstractVector{UInt8}) = length(body)
bodylength(body::AbstractString) = sizeof(body)
bodylength(body::Form) = length(body)
bodylength(body::Vector{T}) where T <: AbstractString = sum(sizeof, body)
bodylength(body::Vector{T}) where T <: AbstractArray{UInt8,1} = sum(length, body)
bodylength(body::IOBuffer) = bytesavailable(body)
bodylength(body::Vector{IOBuffer}) = sum(bytesavailable, body)
const body_is_a_stream = UInt8[]
const body_was_streamed = bytes("[Message Body was streamed]")
bodybytes(body) = body_is_a_stream
bodybytes(body::Vector{UInt8}) = body
bodybytes(body::IOBuffer) = read(body)
bodybytes(body::AbstractVector{UInt8}) = Vector{UInt8}(body)
bodybytes(body::AbstractString) = bytes(body)
bodybytes(body::Vector) = length(body) == 1 ? bodybytes(body[1]) :
body_is_a_stream
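# Summary: `bodybytes` yields the raw payload when the whole body is available
# up front, and the `body_is_a_stream` sentinel otherwise.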
end # module MessageRequest
|
{"hexsha": "195318f588daadbb6e70fb301981c11a30137339", "size": 3310, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MessageRequest.jl", "max_stars_repo_name": "cmcaine/HTTP.jl", "max_stars_repo_head_hexsha": "7bf03e2f29b8a25eeffd7223a60a6352173fe1da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MessageRequest.jl", "max_issues_repo_name": "cmcaine/HTTP.jl", "max_issues_repo_head_hexsha": "7bf03e2f29b8a25eeffd7223a60a6352173fe1da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MessageRequest.jl", "max_forks_repo_name": "cmcaine/HTTP.jl", "max_forks_repo_head_hexsha": "7bf03e2f29b8a25eeffd7223a60a6352173fe1da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8421052632, "max_line_length": 81, "alphanum_fraction": 0.6589123867, "num_tokens": 802}
|
import numpy as np
import requests
from StringIO import StringIO
from matplotlib import image as img
import geopy
import yaml
# First hard-code what is needed for correct output of green_between()
class Map(object):
def __init__(self, latitude, longitude, satellite=True,
zoom=10, size=(400, 400), sensor=False):
base = "http://maps.googleapis.com/maps/api/staticmap?"
params = dict(
sensor=str(sensor).lower(),
zoom=zoom,
size="x".join(map(str, size)),
center=",".join(map(str, (latitude, longitude))),
style="feature:all|element:labels|visibility:off"
)
if satellite:
params["maptype"] = "satellite"
self.image = requests.get(base, params=params).content
# Fetch our PNG image data
self.pixels = img.imread(StringIO(self.image))
# Parse our PNG image as a numpy array
def green(self, threshold):
# Use NumPy to build an element-by-element logical array
greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
green = np.logical_and(greener_than_red, greener_than_blue)
return green
def count_green(self, threshold=1.1):
return np.sum(self.green(threshold))
class Greengraph(object):
def __init__(self, start, end):
self.start = start
self.end = end
self.geocoder = geopy.geocoders.GoogleV3(
domain="maps.google.co.uk")
def geolocate(self, place):
return self.geocoder.geocode(place,
exactly_one=False)[0][1]
def location_sequence(self, start, end, steps):
lats = np.linspace(start[0], end[0], steps)
longs = np.linspace(start[1], end[1], steps)
return np.vstack([lats, longs]).transpose()
def green_between(self, steps):
return [Map.Map(*location).count_green()
for location in self.location_sequence(
self.geolocate(self.start),
self.geolocate(self.end),
steps)]
# Now build fixtures method
def build_fixture(start, end, steps):
my_graph = Greengraph(start, end)
locations = my_graph.location_sequence(
my_graph.geolocate(my_graph.start),
my_graph.geolocate(my_graph.end),
steps)
green_counts = [None]*len(locations)
for i in range(0, len(locations)):
location = locations[i]
green_counts[i] = Map(*location).count_green()
start_location = my_graph.geolocate(my_graph.start)
end_location = my_graph.geolocate(my_graph.end)
return eval(str(dict(start=start, end=end, start_location=start_location, end_location=end_location,
green_counts=green_counts, steps=steps)))
# Write YAML file
with open('green_between_fixtures.yaml', 'w') as file_to_write:
file_to_write.write(yaml.dump([build_fixture('Paris', 'Chicago', 10)]))
file_to_write.write(yaml.dump([build_fixture('Matlab', 'Bangkok', 10)]))
file_to_write.write(yaml.dump([build_fixture('London', 'Bristol', 10)]))
|
{"hexsha": "893e9dac63057a71f53c6ef3c807c319bce02cf0", "size": 3269, "ext": "py", "lang": "Python", "max_stars_repo_path": "greengraph/tests/fixtures/generate_green_between_fixtures.py", "max_stars_repo_name": "ddervs/GreenGraph", "max_stars_repo_head_hexsha": "bb65e5d9f2a34686add644e4fa1851aabf82c3c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "greengraph/tests/fixtures/generate_green_between_fixtures.py", "max_issues_repo_name": "ddervs/GreenGraph", "max_issues_repo_head_hexsha": "bb65e5d9f2a34686add644e4fa1851aabf82c3c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "greengraph/tests/fixtures/generate_green_between_fixtures.py", "max_forks_repo_name": "ddervs/GreenGraph", "max_forks_repo_head_hexsha": "bb65e5d9f2a34686add644e4fa1851aabf82c3c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3571428571, "max_line_length": 104, "alphanum_fraction": 0.619149587, "include": true, "reason": "import numpy", "num_tokens": 744}
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HolidayCalendar definition."""
import collections
import datetime
import attr
import numpy as np
import tensorflow.compat.v2 as tf
from tf_quant_finance.experimental.dates import constants
from tf_quant_finance.experimental.dates import date_tensor as dt
from tf_quant_finance.experimental.dates import periods
_ORDINAL_OF_1_1_1970 = 719163
class HolidayCalendar(object):
"""Represents a holiday calendar.
Provides methods for manipulating the dates taking into account the holidays,
and the business day roll conventions. Weekends are treated as holidays.
"""
def __init__(
self,
weekend_mask=None,
holidays=None,
start_year=None,
end_year=None):
"""Initializer.
Args:
weekend_mask: Sequence of 7 elements, where "0" means work day and "1" -
day off. The first element is Monday. By default, no weekends are
applied. Some of the common weekend patterns are defined in
`dates.WeekendMask`.
Default value: None which maps to no weekend days.
holidays: Defines the holidays that are added to the weekends defined by
`weekend_mask`. Can be provided in following forms:
- Iterable of tuples containing dates in (year, month, day) format:
```python
holidays = [(2020, 1, 1), (2020, 12, 25),
(2021, 1, 1), (2021, 12, 24)]
```
- Iterable of datetime.date objects:
```python
holidays = [datetime.date(2020, 1, 1), datetime.date(2020, 12, 25),
datetime.date(2021, 1, 1), datetime.date(2021, 12, 24)]
```
- A numpy array of type np.datetime64:
```python
holidays = np.array(['2020-01-01', '2020-12-25', '2021-01-01',
'2020-12-24'], dtype=np.datetime64)
```
Note that it is necessary to provide holidays for each year, and also
adjust the holidays that fall on the weekends if required, like
2021-12-25 to 2021-12-24 in the example above. To avoid doing this
manually one can use AbstractHolidayCalendar from Pandas:
```python
from pandas.tseries.holiday import AbstractHolidayCalendar
from pandas.tseries.holiday import Holiday
from pandas.tseries.holiday import nearest_workday
class MyCalendar(AbstractHolidayCalendar):
rules = [
Holiday('NewYear', month=1, day=1, observance=nearest_workday),
Holiday('Christmas', month=12, day=25,
observance=nearest_workday)
]
calendar = MyCalendar()
          holidays_index = calendar.holidays(
start=datetime.date(2020, 1, 1),
end=datetime.date(2030, 12, 31))
holidays = np.array(holidays_index.to_pydatetime(), dtype="<M8[D]")
```
start_year: Integer giving the earliest year this calendar includes. If
`holidays` is specified, then `start_year` and `end_year` are ignored,
and the boundaries are derived from `holidays`. If `holidays` is `None`,
both `start_year` and `end_year` must be specified.
end_year: Integer giving the latest year this calendar includes. If
`holidays` is specified, then `start_year` and `end_year` are ignored,
and the boundaries are derived from `holidays`. If `holidays` is `None`,
both `start_year` and `end_year` must be specified.
"""
self._weekend_mask = np.array(weekend_mask or constants.WeekendMask.NONE)
self._holidays_np = _to_np_holidays_array(holidays)
start_year, end_year = _resolve_calendar_boundaries(self._holidays_np,
start_year, end_year)
self._dates_np = np.arange(
datetime.date(start_year, 1, 1), datetime.date(end_year + 1, 1, 1),
datetime.timedelta(days=1)).astype("<M8[D]")
self._ordinal_offset = datetime.date(start_year, 1, 1).toordinal()
# Precomputed tables. These are constant 1D Tensors, mapping each day in the
# [start_year, end_year] period to some quantity of interest, e.g. next
# business day. The tables should be indexed with
# `date.ordinal - self._offset`. All tables are computed lazily.
self._table_cache = _TableCache()
def is_business_day(self, date_tensor):
"""Returns a tensor of bools for whether given dates are business days."""
is_bus_day_table = self._compute_is_bus_day_table()
is_bus_day_int32 = self._gather(
is_bus_day_table,
date_tensor.ordinal() - self._ordinal_offset)
return tf.cast(is_bus_day_int32, dtype=tf.bool)
def roll_to_business_day(self, date_tensor, roll_convention):
"""Rolls the given dates to business dates according to given convention.
Args:
date_tensor: DateTensor of dates to roll from.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
"""
if roll_convention == constants.BusinessDayConvention.NONE:
return date_tensor
adjusted_ordinals_table = self._compute_rolled_dates_table(roll_convention)
ordinals_with_offset = date_tensor.ordinal() - self._ordinal_offset
adjusted_ordinals = self._gather(adjusted_ordinals_table,
ordinals_with_offset)
return dt.from_ordinals(adjusted_ordinals, validate=False)
def add_period_and_roll(self,
date_tensor,
period_tensor,
roll_convention=constants.BusinessDayConvention.NONE):
"""Adds given periods to given dates and rolls to business days.
The original dates are not rolled prior to addition.
Args:
date_tensor: DateTensor of dates to add to.
period_tensor: PeriodTensor broadcastable to `date_tensor`.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
"""
return self.roll_to_business_day(date_tensor + period_tensor,
roll_convention)
def add_business_days(self,
date_tensor,
num_days,
roll_convention=constants.BusinessDayConvention.NONE):
"""Adds given number of business days to given dates.
Note that this is different from calling `add_period_and_roll` with
PeriodType.DAY. For example, adding 5 business days to Monday gives the next
Monday (unless there are holidays on this week or next Monday). Adding 5
days and rolling means landing on Saturday and then rolling either to next
Monday or to Friday of the same week, depending on the roll convention.
If any of the dates in `date_tensor` are not business days, they will be
rolled to business days before doing the addition. If `roll_convention` is
`NONE`, and any dates are not business days, an exception is raised.
Args:
date_tensor: DateTensor of dates to advance from.
num_days: Tensor of int32 type broadcastable to `date_tensor`.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
"""
control_deps = []
if roll_convention == constants.BusinessDayConvention.NONE:
message = ("Some dates in date_tensor are not business days. "
"Please specify the roll_convention argument.")
is_bus_day = self.is_business_day(date_tensor)
control_deps.append(
tf.debugging.assert_equal(is_bus_day, True, message=message))
else:
date_tensor = self.roll_to_business_day(date_tensor, roll_convention)
with tf.compat.v1.control_dependencies(control_deps):
cumul_bus_days_table = self._compute_cumul_bus_days_table()
cumul_bus_days = self._gather(
cumul_bus_days_table,
date_tensor.ordinal() - self._ordinal_offset)
target_cumul_bus_days = cumul_bus_days + num_days
bus_day_ordinals_table = self._compute_bus_day_ordinals_table()
ordinals = self._gather(bus_day_ordinals_table, target_cumul_bus_days)
return dt.from_ordinals(ordinals, validate=False)
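# Illustrative sketch (the calendar instance name `cal` and the date variable
# are assumptions, not part of this file): with no holidays,
#   cal.add_business_days(monday_2020_01_06, 5)  # -> Monday 2020-01-13
# whereas adding a 5-day period and rolling with FOLLOWING first lands on
# Saturday 2020-01-11 and only then rolls forward to Monday 2020-01-13.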
def subtract_period_and_roll(
self,
date_tensor,
period_tensor,
roll_convention=constants.BusinessDayConvention.NONE):
"""Subtracts given periods from given dates and rolls to business days.
The original dates are not rolled prior to subtraction.
Args:
date_tensor: DateTensor of dates to subtract from.
period_tensor: PeriodTensor broadcastable to `date_tensor`.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
"""
minus_period_tensor = periods.PeriodTensor(-period_tensor.quantity(),
period_tensor.period_type())
return self.add_period_and_roll(date_tensor, minus_period_tensor,
roll_convention)
def subtract_business_days(
self,
date_tensor,
num_days,
roll_convention=constants.BusinessDayConvention.NONE):
"""Adds given number of business days to given dates.
Note that this is different from calling `subtract_period_and_roll` with
PeriodType.DAY. For example, subtracting 5 business days from Friday gives
the previous Friday (unless there are holidays in that week or on the previous
Friday). Subtracting 5 days and rolling means landing on Sunday and then
rolling either to Monday or to Friday, depending on the roll convention.
If any of the dates in `date_tensor` are not business days, they will be
rolled to business days before doing the subtraction. If `roll_convention`
is `NONE`, and any dates are not business days, an exception is raised.
Args:
date_tensor: DateTensor of dates to advance from.
num_days: Tensor of int32 type broadcastable to `date_tensor`.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting DateTensor.
"""
return self.add_business_days(date_tensor, -num_days, roll_convention)
def business_days_in_period(self, date_tensor, period_tensor):
"""Calculates number of business days in a period.
Includes the dates in `date_tensor`, but excludes final dates resulting from
addition of `period_tensor`.
Args:
date_tensor: DateTensor of starting dates.
period_tensor: PeriodTensor, should be broadcastable to `date_tensor`.
Returns:
An int32 Tensor with the number of business days in given periods that
start at given dates.
"""
return self.business_days_between(date_tensor, date_tensor + period_tensor)
def business_days_between(self, from_dates, to_dates):
"""Calculates number of business between pairs of dates.
For each pair, the initial date is included in the difference, and the final
date is excluded. If the final date is the same or earlier than the initial
date, zero is returned.
Args:
from_dates: DateTensor of initial dates.
to_dates: DateTensor of final dates, should be broadcastable to
`from_dates`.
Returns:
An int32 Tensor with the number of business days between the
corresponding pairs of dates.
"""
cumul_bus_days_table = self._compute_cumul_bus_days_table()
ordinals_1, ordinals_2 = from_dates.ordinal(), to_dates.ordinal()
ordinals_2 = tf.broadcast_to(ordinals_2, ordinals_1.shape)
cumul_bus_days_1 = self._gather(cumul_bus_days_table,
ordinals_1 - self._ordinal_offset)
cumul_bus_days_2 = self._gather(cumul_bus_days_table,
ordinals_2 - self._ordinal_offset)
return tf.math.maximum(cumul_bus_days_2 - cumul_bus_days_1, 0)
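# Semantics sketch: with no holidays, business_days_between from Monday
# 2020-01-06 (included) to Monday 2020-01-13 (excluded) is 5; swapping the
# two arguments yields 0 because the difference below is clamped at zero.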
def _compute_rolled_dates_table(self, roll_convention):
"""Computes and caches rolled dates table."""
already_computed = self._table_cache.rolled_dates.get(roll_convention, None)
if already_computed is not None:
return already_computed
roll_convention_np = _to_np_roll_convention(roll_convention)
holidays_arg = self._holidays_np
if holidays_arg is None:
holidays_arg = [] # np.busday_offset doesn't accept None
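# np.busday_offset expects a weekmask where 1 marks a business day, while
# self._weekend_mask uses 1 for weekend days, hence the inversion below.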
adjusted_np = np.busday_offset(
dates=self._dates_np,
offsets=0,
roll=roll_convention_np,
weekmask=1 - self._weekend_mask,
holidays=holidays_arg)
rolled_date_table = adjusted_np.astype(np.int32) + _ORDINAL_OF_1_1_1970
# To make tensor caching safe, lift the ops out of the current scope using
# tf.init_scope(). This allows e.g. to cache these tensors in one
# tf.function and reuse them in another tf.function.
with tf.init_scope():
rolled_date_table = tf.convert_to_tensor(rolled_date_table,
name="rolled_date_table")
self._table_cache.rolled_dates[roll_convention] = rolled_date_table
return rolled_date_table
def _compute_is_bus_day_table(self):
"""Computes and caches "is business day" table."""
if self._table_cache.is_bus_day is not None:
return self._table_cache.is_bus_day
is_bus_day_table = np.ones_like(self._dates_np, dtype=np.int32)
ordinals = np.arange(self._ordinal_offset,
self._ordinal_offset + len(is_bus_day_table))
# Apply week mask
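# Proleptic-Gregorian ordinal 1 (0001-01-01) falls on a Monday, so
# (ordinal - 1) % 7 maps each date to a weekday index 0=Monday .. 6=Sunday.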
week_days = (ordinals - 1) % 7
is_bus_day_table[self._weekend_mask[week_days] == 1] = 0
# Apply holidays
if self._holidays_np is not None:
holiday_ordinals = (
np.array(self._holidays_np, dtype=np.int32) + _ORDINAL_OF_1_1_1970)
is_bus_day_table[holiday_ordinals - self._ordinal_offset] = 0
with tf.init_scope():
is_bus_day_table = tf.convert_to_tensor(is_bus_day_table,
name="is_bus_day_table")
self._table_cache.is_bus_day = is_bus_day_table
return is_bus_day_table
def _compute_cumul_bus_days_table(self):
"""Computes and caches cumulative business days table."""
if self._table_cache.cumul_bus_days is not None:
return self._table_cache.cumul_bus_days
is_bus_day_table = self._compute_is_bus_day_table()
with tf.init_scope():
cumul_bus_days_table = tf.math.cumsum(is_bus_day_table, exclusive=True,
name="cumul_bus_days_table")
self._table_cache.cumul_bus_days = cumul_bus_days_table
return cumul_bus_days_table
def _compute_bus_day_ordinals_table(self):
"""Computes and caches rolled business day ordinals table."""
if self._table_cache.bus_day_ordinals is not None:
return self._table_cache.bus_day_ordinals
is_bus_day_table = self._compute_is_bus_day_table()
with tf.init_scope():
bus_day_ordinals_table = tf.cast(
tf.compat.v2.where(is_bus_day_table)[:, 0] + self._ordinal_offset,
tf.int32, name="bus_day_ordinals_table")
self._table_cache.bus_day_ordinals = bus_day_ordinals_table
return bus_day_ordinals_table
def _gather(self, table, indices):
message = "Went out of calendar boundaries!"
assert1 = tf.debugging.assert_greater_equal(indices, 0, message=message)
assert2 = tf.debugging.assert_less(indices, len(self._dates_np),
message=message)
with tf.compat.v1.control_dependencies([assert1, assert2]):
return tf.gather(table, indices)
def _to_np_holidays_array(holidays):
"""Converts holidays from any acceptable format to np.datetime64 array."""
if holidays is None:
return None
if isinstance(holidays, collections.abc.Iterable):
if all(isinstance(h, datetime.date) for h in holidays):
return np.array(list(holidays), "<M8[D]")
if all(isinstance(h, tuple) for h in holidays):
datetimes = [datetime.date(*t) for t in holidays]
return np.array(datetimes, "<M8[D]")
if isinstance(holidays, np.ndarray):
return holidays.astype("<M8[D]")
raise ValueError("Unrecognized format of holidays")
def _to_np_roll_convention(convention):
if convention == constants.BusinessDayConvention.FOLLOWING:
return "following"
if convention == constants.BusinessDayConvention.PRECEDING:
return "preceding"
if convention == constants.BusinessDayConvention.MODIFIED_FOLLOWING:
return "modifiedfollowing"
if convention == constants.BusinessDayConvention.MODIFIED_PRECEDING:
return "modifiedpreceding"
raise ValueError("Unrecognized convention: {}".format(convention))
def _resolve_calendar_boundaries(holidays_np, start_year, end_year):
if holidays_np is None:
if start_year is None or end_year is None:
raise ValueError("Please specify either holidays or both start_year and"
"end_year arguments")
return start_year, end_year
years = [date.year for date in holidays_np.astype(object)]
return np.min(years), np.max(years)
@attr.s
class _TableCache(object):
"""Cache of pre-computed tables."""
# Tables of rolled date ordinals keyed by BusinessDayConvention.
rolled_dates = attr.ib(factory=dict)
# Table with "1" on business days and "0" otherwise.
is_bus_day = attr.ib(default=None)
# Table with number of business days before each date. Starts with 0.
cumul_bus_days = attr.ib(default=None)
# Table with ordinals of each business day in the [start_year, end_year],
# in order.
bus_day_ordinals = attr.ib(default=None)
|
{"hexsha": "60fb14039f5e358bbe7152529716e95da31321bf", "size": 18283, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf_quant_finance/experimental/dates/holiday_calendar.py", "max_stars_repo_name": "rajflume/tf-quant-finance", "max_stars_repo_head_hexsha": "5cb9474f6f2e74617735d38ef26aaef28ce69aff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf_quant_finance/experimental/dates/holiday_calendar.py", "max_issues_repo_name": "rajflume/tf-quant-finance", "max_issues_repo_head_hexsha": "5cb9474f6f2e74617735d38ef26aaef28ce69aff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-14T12:34:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-27T09:13:08.000Z", "max_forks_repo_path": "tf_quant_finance/experimental/dates/holiday_calendar.py", "max_forks_repo_name": "rajflume/tf-quant-finance", "max_forks_repo_head_hexsha": "5cb9474f6f2e74617735d38ef26aaef28ce69aff", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0853932584, "max_line_length": 80, "alphanum_fraction": 0.6952907072, "include": true, "reason": "import numpy", "num_tokens": 4120}
|
import numpy as np
import tensorflow as tf
import random
import tensorflow.layers as layer
from collections import deque
import datetime
import time
from multiagent.environment import MultiAgentEnv
from multiagent.policy import InteractivePolicy
import multiagent.scenarios as scenarios
########################################
action_size = 5
load_model = False
train_mode = True
batch_size = 256
mem_maxlen = 50000
discount_factor = 0.99
learning_rate = 0.00025
run_episode = 10000
start_train_episode = 500
target_update_step = 5000
print_interval = 100
save_interval = 1000
epsilon_min = 0.1
softlambda = 0.9
date_time = str(datetime.date.today()) + '_' + \
str(datetime.datetime.now().hour) + '_' + \
str(datetime.datetime.now().minute) + '_' + \
str(datetime.datetime.now().second)
env_name = "simple_adverary.py"
save_path = "./saved_models/"+date_time+"_maddpg"
load_path = ""
numGoals = 3
###########################################
class Critic(object):
def __init__(self, state_size, action_size, input, action_input, other_action, model_name="Qmodel", agent_num=3, reuse=False):
self.state_size = state_size
self.action_size = action_size
self.agent_num = agent_num
# =================================
self.input = input
self.action_input = action_input
self.other_actions = other_action
# =================================
with tf.variable_scope(name_or_scope=model_name, reuse=reuse):
self.mlp1 = layer.dense(inputs=self.input, units=256, activation = tf.nn.leaky_relu)
self.concat_action = tf.concat([self.action_input, self.other_actions], axis=1)
self.concat = tf.concat([self.mlp1, self.concat_action], axis=1)
self.mlp2 = layer.dense(inputs=self.concat, units=256, activation = tf.nn.leaky_relu)
self.mlp3 = layer.dense(inputs=self.mlp2, units=512, activation = tf.nn.leaky_relu)
self.mlp4 = layer.dense(inputs=self.mlp3, units=512, activation = tf.nn.leaky_relu)
self.Q_Out = layer.dense(self.mlp4, units=1, activation=None)
self.q_predict = self.Q_Out
self.critic_optimizer = tf.train.AdamOptimizer(learning_rate)
class Actor(object):
def __init__(self, state_size, action_size, input, model_name="Pimodel"):
self.agent_num = 3
self.state_size = state_size
self.action_size = action_size
# =================================
self.input = input
# =================================
with tf.variable_scope(name_or_scope=model_name):
self.mlp1 = layer.dense(inputs=self.input, units=512, activation = tf.nn.leaky_relu)
self.mlp2 = layer.dense(inputs=self.mlp1, units=512, activation = tf.nn.leaky_relu)
self.mlp3 = layer.dense(inputs=self.mlp2, units=512, activation = tf.nn.leaky_relu)
self.mlp4 = layer.dense(inputs=self.mlp3, units=512, activation = tf.nn.leaky_relu)
self.Pi_Out = layer.dense(self.mlp4, units=self.action_size, activation=tf.nn.tanh)
self.pi_predict = self.Pi_Out
self.actor_optimizer = tf.train.AdamOptimizer(learning_rate)
class MADDPGAgent(object):
def __init__(self, agent_num, state_size, action_size, idx):
# (1) "actor" : agent in reinforcement learning
# (2) "critic" : helps the actor decide what actions to reinforce during training.
# Traditionally, the critic tries to predict the value (i.e. the reward we expect to get in the future) of an action in a particular state s(t)
# predicted value from critic is used to update the actor policy
# Using the critic's value as a baseline for updates is more stable than directly using the reward, which can vary considerably
# since high variance in the reward makes the updates noisy
# In MADDPG, we enhance our critics so they can access the observations and actions of all the agents.
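# Concretely, each agent's critic below receives (own state, own action,
# all other agents' actions); see the tf.concat calls in the Critic class.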
# Default Environment Information =====
self.state_size = state_size
self.action_size = action_size
self.agent_num = agent_num
# =====================================
# Experience Buffer ===================
self.memory = deque(maxlen=mem_maxlen)
self.batch_size = batch_size
# =====================================
# Placeholder ============================================================================
self.input = tf.placeholder(shape=[None, self.state_size], dtype=tf.float32)
self.action_input = tf.placeholder(shape=[None, self.action_size], dtype=tf.float32)
self.other_actions = tf.placeholder(shape=[None, self.action_size * (self.agent_num-1)], dtype=tf.float32)
self.target_Q = tf.placeholder(shape=[None,1],dtype=tf.float32)
self.reward = tf.placeholder(shape=[None,1], dtype=tf.float32)
# ========================================================================================
self.actor = Actor(self.state_size, self.action_size, self.input, "Pimodel_" + idx)
self.critic = Critic(self.state_size, self.action_size, self.input, self.action_input, self.other_actions, "Qmodel_" + idx, self.agent_num, reuse=False)
'''
critic_value = Critic(self.state_size, self.action_size, self.input, self.actor.pi_predict, self.other_actions, "Qmodel_" + idx, self.agent_num, reuse=True).q_predict
self.action_gradients = tf.gradients(critic_predict.q_predict, self.actor.pi_predict)[0]
self.actor_gradients = tf.gradients(self.actor.pi_predict, actor_var, -self.action_gradients)
self.grads_and_vars = list(zip(self.actor_gradients, actor_var))
self.actor_train = self.actor.actor_optimizer.apply_gradients(self.grads_and_vars)
'''
actor_var = [i for i in tf.trainable_variables() if ("Pimodel_" + idx) in i.name]
action_Grad = tf.gradients(self.critic.q_predict, self.action_input)
self.policy_Grads = tf.gradients(ys=self.actor.pi_predict, xs=actor_var, grad_ys=action_Grad)
for idx, grads in enumerate(self.policy_Grads):
self.policy_Grads[idx] = -grads / batch_size
self.actor_train = self.actor.actor_optimizer.apply_gradients(zip(self.policy_Grads, actor_var))
self.critic_loss = tf.reduce_mean(tf.square(self.target_Q - self.critic.q_predict))
self.critic_train = self.critic.critic_optimizer.minimize(self.critic_loss)
def train_actor(self, state, action, other_action, sess):
sess.run(self.actor_train,
{self.input: state, self.action_input : action, self.other_actions: other_action})
def train_critic(self, state, action, other_action, target, sess):
sess.run(self.critic_train,
{self.input: state, self.action_input: action, self.other_actions: other_action, self.target_Q: target})
def action(self, state, sess):
return sess.run(self.actor.pi_predict, {self.input: state})
def Q(self, state, action, other_action, sess):
return sess.run(self.critic.q_predict,
{self.input: state, self.action_input: action, self.other_actions: other_action})
|
{"hexsha": "ecbd4eeb8c2fe3d3698258b5b65ffbbedfe0dde6", "size": 7254, "ext": "py", "lang": "Python", "max_stars_repo_path": "maddpg.py", "max_stars_repo_name": "170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment", "max_stars_repo_head_hexsha": "db927493c9291686ca11033caf528be9c7a86058", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-09-03T08:41:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T09:40:35.000Z", "max_issues_repo_path": "maddpg.py", "max_issues_repo_name": "170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment", "max_issues_repo_head_hexsha": "db927493c9291686ca11033caf528be9c7a86058", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-09-18T01:48:31.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-18T02:11:47.000Z", "max_forks_repo_path": "maddpg.py", "max_forks_repo_name": "170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment", "max_forks_repo_head_hexsha": "db927493c9291686ca11033caf528be9c7a86058", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5, "max_line_length": 174, "alphanum_fraction": 0.6444720154, "include": true, "reason": "import numpy", "num_tokens": 1618}
|
[STATEMENT]
lemma harm_pos: "n > 0 \<Longrightarrow> harm n > (0 :: 'a :: {real_normed_field,linordered_field})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < n \<Longrightarrow> (0::'a) < harm n
[PROOF STEP]
unfolding harm_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < n \<Longrightarrow> (0::'a) < (\<Sum>k = 1..n. inverse (of_nat k))
[PROOF STEP]
by (intro sum_pos) simp_all
|
{"llama_tokens": 170, "file": null, "length": 2}
|
//| This file is a part of the sferes2 framework.
//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC)
//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr
//|
//| This software is a computer program whose purpose is to facilitate
//| experiments in evolutionary computation and evolutionary robotics.
//|
//| This software is governed by the CeCILL license under French law
//| and abiding by the rules of distribution of free software. You
//| can use, modify and/ or redistribute the software under the terms
//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the
//| following URL "http://www.cecill.info".
//|
//| As a counterpart to the access to the source code and rights to
//| copy, modify and redistribute granted by the license, users are
//| provided only with a limited warranty and the software's author,
//| the holder of the economic rights, and the successive licensors
//| have only limited liability.
//|
//| In this respect, the user's attention is drawn to the risks
//| associated with loading, using, modifying and/or developing or
//| reproducing the software by the user in light of its specific
//| status of free software, that may mean that it is complicated to
//| manipulate, and that also therefore means that it is reserved for
//| developers and experienced professionals having in-depth computer
//| knowledge. Users are therefore encouraged to load and test the
//| software's suitability as regards their requirements in conditions
//| enabling the security of their systems and/or data to be ensured
//| and, more generally, to use and operate it in the same conditions
//| as regards security.
//|
//| The fact that you are presently reading this means that you have
//| had knowledge of the CeCILL license and that you accept its terms.
#ifndef GEN_CMAES_HPP_
#define GEN_CMAES_HPP_
#ifdef EIGEN3_ENABLED
#include <iostream>
#include <cmath>
#include <limits>
#include <boost/foreach.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/static_assert.hpp>
#include <Eigen/Core>
#include <Eigen/QR>
#include <sferes/dbg/dbg.hpp>
#include <sferes/stc.hpp>
#include <sferes/misc.hpp>
namespace sferes {
namespace gen {
// this class requires Eigen3 (libeigen3-dev)
// REFERENCE:
// Hansen, N. and S. Kern (2004). Evaluating the CMA Evolution
// Strategy on Multimodal Test Functions. Eighth International
// Conference on Parallel Problem Solving from Nature PPSN VIII,
// Proceedings, pp. 282-291, Berlin: Springer.
// (http://www.bionik.tu-berlin.de/user/niko/ppsn2004hansenkern.pdf)
template<int Size, typename Params, typename Exact = stc::Itself>
class Cmaes : public stc::Any<Exact> {
public:
typedef Params params_t;
typedef Cmaes<Size, Params, Exact> this_t;
typedef Eigen::Matrix<float, Size, 1> vector_t;
typedef Eigen::Matrix<float, Size, Size> matrix_t;
SFERES_CONST size_t es_size = Size;
Cmaes() : _arx(vector_t::Zero()) { }
void random() {
}
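// mutate() draws z ~ N(0, I) and sets x = xmean + sigma * B * D * z, i.e. a
// sample from N(xmean, sigma^2 * C) with covariance C = B * D^2 * B^T
// (B: eigenvector matrix of C, D: diagonal matrix of sqrt-eigenvalues).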
void mutate(const vector_t& xmean,
float sigma,
const matrix_t& B,
const matrix_t& D) {
for (size_t i = 0; i < Size; ++i)
_arz[i] = misc::gaussian_rand<float>();
_arx = xmean + sigma * (B * D * _arz);
}
float data(size_t i) const {
assert(i < _arx.size());
return _arx[i];
}
const vector_t& data() const {
return _arx;
}
const vector_t& arx() const {
return _arx;
}
const vector_t& arz() const {
return _arz;
}
size_t size() const {
return Size;
}
template<typename Archive>
void save(Archive& a, const unsigned version) const {
std::vector<float> v(Size);
for (size_t i = 0; i < Size; ++i)
v[i] = _arx[i];
a & BOOST_SERIALIZATION_NVP(v);
}
template<typename Archive>
void load(Archive& a, const unsigned version) {
std::vector<float> v;
a & BOOST_SERIALIZATION_NVP(v);
assert(v.size() == Size);
for (size_t i = 0; i < Size; ++i)
_arx[i] = v[i];
}
BOOST_SERIALIZATION_SPLIT_MEMBER();
protected:
vector_t _arx, _arz;
};
} // gen
} // sferes
#else
#warning Eigen3 is disabled -> no CMAES
#endif
#endif
|
{"hexsha": "916f456a7dbd58c4f0733ae91f1006d2e32fda3b", "size": 4426, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "sferes/sferes/gen/cmaes.hpp", "max_stars_repo_name": "Evolving-AI-Lab/innovation-engine", "max_stars_repo_head_hexsha": "58c7fcc3cbe3d6f8f59f87d95bdb5f2302f425ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31.0, "max_stars_repo_stars_event_min_datetime": "2015-09-20T03:03:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T06:50:20.000Z", "max_issues_repo_path": "sferes/sferes/gen/cmaes.hpp", "max_issues_repo_name": "Evolving-AI-Lab/innovation-engine", "max_issues_repo_head_hexsha": "58c7fcc3cbe3d6f8f59f87d95bdb5f2302f425ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2016-08-11T07:24:50.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-17T01:19:57.000Z", "max_forks_repo_path": "sferes/sferes/gen/cmaes.hpp", "max_forks_repo_name": "Evolving-AI-Lab/innovation-engine", "max_forks_repo_head_hexsha": "58c7fcc3cbe3d6f8f59f87d95bdb5f2302f425ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2015-11-15T01:52:25.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-11T23:42:58.000Z", "avg_line_length": 32.7851851852, "max_line_length": 72, "alphanum_fraction": 0.6617713511, "num_tokens": 1139}
|
import os
import sys
import click
import cv2
import numpy as np
from utils.dataset.data_provider import load_annoataion
@click.command()
@click.option('--input', '-i', default='data/dataset/mlt_cmt')
@click.option('--name', '-n')
def process(input, name):
im_fn = os.path.join(input, "image", name)
im = cv2.imread(im_fn)
h, w, c = im.shape
im_info = np.array([h, w, c]).reshape([1, 3])
fn, _ = os.path.splitext(name)
txt_fn = os.path.join(input, "label", fn + '.txt')
if not os.path.exists(txt_fn):
print("Ground truth for image {} not exist!".format(im_fn))
return
bbox = load_annoataion(txt_fn)
if len(bbox) == 0:
print("Ground truth for image {} empty!".format(im_fn))
return
for p in bbox:
cv2.rectangle(im, (p[0], p[1]), (p[2], p[3]), color=(
0, 0, 255), thickness=1, lineType=cv2.LINE_AA)
cv2.imshow(name, im)
cv2.waitKey(0)
if __name__ == '__main__':
process()
|
{"hexsha": "43b1705a88887d093a67451c1bfac49a06b12c70", "size": 980, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_split.py", "max_stars_repo_name": "deeplearningvn/text-detection", "max_stars_repo_head_hexsha": "63f9ec1766b8d66f33e6d073f5ae577402d45b19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-28T09:52:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-28T09:52:18.000Z", "max_issues_repo_path": "test_split.py", "max_issues_repo_name": "deeplearningvn/text-detection", "max_issues_repo_head_hexsha": "63f9ec1766b8d66f33e6d073f5ae577402d45b19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:16:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T14:50:45.000Z", "max_forks_repo_path": "test_split.py", "max_forks_repo_name": "deeplearningvn/text-detection", "max_forks_repo_head_hexsha": "63f9ec1766b8d66f33e6d073f5ae577402d45b19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7894736842, "max_line_length": 67, "alphanum_fraction": 0.6102040816, "include": true, "reason": "import numpy", "num_tokens": 288}
|
using Test
using QBase
@testset "./src/channels.jl" begin
@testset "repalacer_channel()" begin
@testset "simple qubit examples" begin
ρ = State([1 0;0 0])
σ = State([0 0;0 1])
r = replacer_channel(ρ, σ, 0.5)
@test r isa State
@test r == [0.5 0;0 0.5]
end
@testset "errors" begin
ρ = State([1 0;0 0])
σ = State([0 0;0 1])
@test_throws DomainError replacer_channel(ρ, σ, -0.1)
@test_throws DomainError replacer_channel(ρ, σ, 1.1)
@test_throws DomainError replacer_channel(
State([1 0;0 0]), State([0 0 0;0 1 0;0 0 0]), 0.5
)
end
end
@testset "depolarizing_channel()" begin
@testset "simple State examples" begin
ρ0 = State([1 0;0 0])
ρ0_out1 = depolarizing_channel(ρ0, 1)
@test ρ0_out1 isa State
@test ρ0_out1 == ρ0
ρ0_out2 = depolarizing_channel(ρ0, 0)
@test ρ0_out2 isa State
@test ρ0_out2 == [1/2 0;0 1/2]
ρ0_out3 = depolarizing_channel(ρ0, 0.5)
@test ρ0_out3 isa State
@test ρ0_out3 == [3/4 0;0 1/4]
end
@testset "simple State examples" begin
ρ = State([0.5 0 0.5;0 0 0;0.5 0 0.5])
ρ_out1 = depolarizing_channel(ρ, 1)
@test ρ_out1 isa State
@test ρ_out1 == ρ
ρ_out2 = depolarizing_channel(ρ, 0.5)
@test ρ_out2 isa State
@test ρ_out2 == [(1/4+1/6) 0 1/4;0 1/6 0;1/4 0 (1/4+1/6)]
end
@testset "errors" begin
ρ = State([1 0;0 0])
@test_throws DomainError depolarizing_channel(ρ, 1.1)
@test_throws DomainError depolarizing_channel(ρ, -0.1)
end
end
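# The expected outputs above follow ρ' = μ*ρ + (1-μ)*I/d for dimension d,
# e.g. 0.5*[1 0;0 0] + 0.5*I/2 == [3/4 0;0 1/4].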
@testset "erasure_channel()" begin
@testset "simple State examples" begin
ρ = State([1 0;0 0])
ρ_out1 = erasure_channel(ρ, 1)
@test ρ_out1 isa State
@test ρ_out1 == [1 0 0;0 0 0;0 0 0]
ρ_out2 = erasure_channel(ρ, 0)
@test ρ_out2 isa State
@test ρ_out2 == [0 0 0;0 0 0;0 0 1]
ρ_out3 = erasure_channel(ρ, 0.5)
@test ρ_out3 isa State
@test ρ_out3 == [0.5 0 0;0 0 0;0 0 0.5]
end
@testset "simple State examples" begin
ρ = State([0.5 0 0.5;0 0 0;0.5 0 0.5])
ρ_out1 = erasure_channel(ρ, 1)
@test ρ_out1 isa State
@test ρ_out1 == [0.5 0 0.5 0;0 0 0 0;0.5 0 0.5 0;0 0 0 0]
ρ_out2 = erasure_channel(ρ, 0.5)
@test ρ_out2 isa State
@test ρ_out2 == [1/4 0 1/4 0;0 0 0 0;1/4 0 1/4 0;0 0 0 1/2]
end
@testset "errors" begin
ρ = State([1 0;0 0])
@test_throws DomainError erasure_channel(ρ, 1.1)
@test_throws DomainError erasure_channel(ρ, -0.1)
end
end
end
|
{"hexsha": "a6e0c8b1d91763d3eaa501fb8f3c58e5d48dbfec", "size": 2705, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/unit/channels.jl", "max_stars_repo_name": "ChitambarLab/QBase.jl", "max_stars_repo_head_hexsha": "cb30a84b784c61abdae8b007e1de691f3ccd4e4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-05T01:27:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-05T01:27:24.000Z", "max_issues_repo_path": "test/unit/channels.jl", "max_issues_repo_name": "ChitambarLab/QBase.jl", "max_issues_repo_head_hexsha": "cb30a84b784c61abdae8b007e1de691f3ccd4e4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-06-15T00:20:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T15:59:20.000Z", "max_forks_repo_path": "test/unit/channels.jl", "max_forks_repo_name": "ChitambarLab/QBase.jl", "max_forks_repo_head_hexsha": "cb30a84b784c61abdae8b007e1de691f3ccd4e4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.9380530973, "max_line_length": 67, "alphanum_fraction": 0.5589648799, "num_tokens": 1112}
|
from thyme.trajectories import Trajectories
from thyme.trajectory import Trajectory
from thyme.filters.distance import e_filter
from thyme.filters.energy import sort_e
from thyme.routines.dist_plots.energy import multiple_plots as multiple_plots_e
from thyme.parsers.vasp import pack_folder_trj, get_childfolders, write
from thyme.routines.folders import parse_folders_trjs
from ase.atoms import Atoms
import numpy as np
import logging
logging.basicConfig(
filename=f"new_poscar.log", filemode="w", level=logging.DEBUG, format="%(message)s"
)
logging.getLogger().addHandler(logging.StreamHandler())
def main():
trjs = Trajectories.from_file("all_data.pickle")
trjs = trjs.remerge(preserve_order=False)
trjs.save("all_data_merged.pickle")
mineT = Trajectory()
for name, trj in trjs.alltrjs.items():
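# Keep only frames whose total energy lies within 20 energy units of this
# trajectory's minimum; all higher-energy frames are dropped before writing.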
mine = np.min(trj.total_energy)
keep_id = np.where(trj.total_energy < (mine + 20))[0]
remove = np.where(trj.total_energy >= (mine + 20))[0]
print("ditch", trj.total_energy[remove])
trj.include_frames(keep_id)
print("keep", trj.total_energy)
write("result_pos/" + name, trj)
# trj.include_frames(keep_id)
if __name__ == "__main__":
main()
|
{"hexsha": "3772251b6178ea14af6d8f9346653f7e8213c77f", "size": 1236, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/formate_au_vasp/new_poscar.py", "max_stars_repo_name": "nw13slx/thyme", "max_stars_repo_head_hexsha": "b2a16aa1e6b0701adcfd2bd146f85b5c46b35254", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/formate_au_vasp/new_poscar.py", "max_issues_repo_name": "nw13slx/thyme", "max_issues_repo_head_hexsha": "b2a16aa1e6b0701adcfd2bd146f85b5c46b35254", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/formate_au_vasp/new_poscar.py", "max_forks_repo_name": "nw13slx/thyme", "max_forks_repo_head_hexsha": "b2a16aa1e6b0701adcfd2bd146f85b5c46b35254", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6923076923, "max_line_length": 87, "alphanum_fraction": 0.7265372168, "include": true, "reason": "import numpy", "num_tokens": 310}
|
# @testset "AABB" begin
# c = SVector(99.0, 99.0, 99.0)
# e = SVector(1.0, 2.0, 3.0)
#
# aabb = AABB(c, e)
#
# @test aabb isa AABB
# @test aabb.c == c
# @test aabb.e == e
# @test AABB(aabb) == aabb
# end
@testset "OBB" begin
c = SVector(99.0, 99.0, 99.0)
e = SVector(1.0, 2.0, 3.0)
R = rand(RotMatrix{3})[:,:]
obb = OBB(c, e, R)
# aabb = AABB(c, e)
# obb_via_aabb = OBB(aabb)
@test obb isa OBB
@test obb.c == c
@test obb.e == e
@test obb.R == R
# @test obb_via_aabb.R == I
# @test aabb == AABB(aabb, aabb)
# @test obb_via_aabb == OBB(obb_via_aabb, obb_via_aabb)
end
|
{"hexsha": "5084b22615ed10845f422c1a7f1fa03b958e3d2f", "size": 651, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_obb/test_box_types.jl", "max_stars_repo_name": "UnofficialJuliaMirror/PressureFieldContact.jl-1a2887a7-38b6-5ebe-8978-3a02049ebf6f", "max_stars_repo_head_hexsha": "55d7b6f85771465de112171c609ba529f852ddef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-06-24T23:58:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T03:45:36.000Z", "max_issues_repo_path": "test/test_obb/test_box_types.jl", "max_issues_repo_name": "UnofficialJuliaMirror/PressureFieldContact.jl-1a2887a7-38b6-5ebe-8978-3a02049ebf6f", "max_issues_repo_head_hexsha": "55d7b6f85771465de112171c609ba529f852ddef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-05-05T02:48:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-08T16:22:45.000Z", "max_forks_repo_path": "test/test_obb/test_box_types.jl", "max_forks_repo_name": "UnofficialJuliaMirror/PressureFieldContact.jl-1a2887a7-38b6-5ebe-8978-3a02049ebf6f", "max_forks_repo_head_hexsha": "55d7b6f85771465de112171c609ba529f852ddef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:29:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:51:13.000Z", "avg_line_length": 20.34375, "max_line_length": 59, "alphanum_fraction": 0.5130568356, "num_tokens": 293}
|
# Copyright (c) Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import scipy.signal as sig
from unlock.state.state import UnlockState
class TimeScopeState(UnlockState):
def __init__(self, channels=1, fs=256, duration=2):
super(TimeScopeState, self).__init__()
self.n_channels = channels
self.fs = fs
self.duration = duration
self.n_samples = self.duration * self.fs
self.cursor = 0
self.traces = np.zeros((self.n_samples, self.n_channels))
self.yscale = 1
self.yshift = np.zeros(self.n_channels)
self.refresh_rate = 1/20.0
self.elapsed = 0
self.state_change = False
def get_state(self):
update = self.state_change
if self.state_change:
self.state_change = False
return update, self.cursor, self.traces, self.yshift, self.yscale
def process_command(self, command):
if command.delta is not None:
self.elapsed += command.delta
if self.elapsed >= self.refresh_rate:
self.state_change = True
self.elapsed = 0
if not command.is_valid():
return
samples = command.matrix[:, 0:self.n_channels]
s = samples.shape[0]
idx = np.arange(self.cursor, self.cursor+s) % self.n_samples
self.traces[idx] = samples
last_cursor = self.cursor
self.cursor += s
self.cursor %= self.n_samples
# compute auto-scaling parameters
if self.cursor < last_cursor:
max = np.max(self.traces)
scale = np.round(0.5*(max - np.min(self.traces)), 2)
shift = np.max(self.traces, axis=0) - scale
if scale != 0:
#if 0.9*self.yscale < 100.0 / scale < 1.1*self.yscale:
# pass
#else:
self.yscale = 100.0 / scale
#if 0.9*self.yshift < shift < 1.1*self.yshift:
# pass
#else:
self.yshift = shift
class FrequencyScopeState(UnlockState):
def __init__(self, channels=1, fs=256, duration=2, nfft=None,
freq_range=None, display_channels=None, labels=None):
super(FrequencyScopeState, self).__init__()
self.n_channels = channels
self.fs = fs
self.duration = duration
self.nfft = nfft
self.labels = labels
self.n_samples = self.duration * self.fs
self.data = np.zeros((self.n_samples, self.n_channels))
if self.nfft is None:
self.nfft = self.n_samples
self.fft_bin_width = fs / self.nfft
self.fft_bins = self.fft_bin_width*np.arange(self.nfft/2 + 1)
if freq_range is None:
freq_range = (0, fs/2)
assert freq_range[0] >= 0 and freq_range[1] <= fs / 2
self.freq_begin = freq_range[0]
self.freq_end = freq_range[1]
self.trace_begin = int(np.floor(self.freq_begin / self.fft_bin_width))
self.trace_end = int(np.ceil(self.freq_end / self.fft_bin_width)) + 1
self.trace = np.zeros(self.trace_end - self.trace_begin)
self.display_channels = display_channels
if display_channels is None:
self.display_channels = [0]
self.refresh_rate = 1/20.0
self.elapsed = 0
self.state_change = False
#self.test_signal = np.zeros(self.data.shape)
#for i in range(self.n_channels):
# self.test_signal[:,i] = 300000000*np.sin((12+i)*2*np.pi*np.arange(0, self.duration, 1/self.fs))
def get_state(self):
update = self.state_change
if self.state_change:
self.state_change = False
return update, self.trace
def process_command(self, command):
if command.delta is not None:
self.elapsed += command.delta
if self.elapsed >= self.refresh_rate:
self.state_change = True
self.elapsed = 0
if not command.is_valid():
return
samples = command.matrix[:, 0:self.n_channels]
s = samples.shape[0]
self.data = np.roll(self.data, -s, axis=0)
#self.test_signal = np.roll(self.test_signal, -s, axis=0)
self.data[-s:] = samples #+ self.test_signal[-s:]
_, psd = sig.periodogram(self.data[:, self.display_channels], fs=self.fs, nfft=self.nfft,
axis=0)
#fft = np.abs(np.fft.rfft(self.data[:, [0]], n=self.nfft, axis=0))
self.trace = psd[self.trace_begin:self.trace_end]
self.trace /= np.max(self.trace)
def change_display_channel(self, change):
if len(self.display_channels) > 1:
return
new_chan = self.display_channels[0] + change
if new_chan >= self.n_channels:
new_chan = self.n_channels - 1
elif new_chan < 0:
new_chan = 0
self.display_channels[0] = new_chan
|
{"hexsha": "4c84a1d377388be208625380e0896ccd5672ca2e", "size": 6541, "ext": "py", "lang": "Python", "max_stars_repo_path": "unlock/state/scope_state.py", "max_stars_repo_name": "NeuralProsthesisLab/unlock", "max_stars_repo_head_hexsha": "0c4d95abdab288d3e657ca2db867b06f755f26ff", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-05-05T01:08:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T21:50:07.000Z", "max_issues_repo_path": "unlock/state/scope_state.py", "max_issues_repo_name": "NeuralProsthesisLab/unlock", "max_issues_repo_head_hexsha": "0c4d95abdab288d3e657ca2db867b06f755f26ff", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-05-21T01:02:50.000Z", "max_issues_repo_issues_event_max_datetime": "2015-05-21T16:03:43.000Z", "max_forks_repo_path": "unlock/state/scope_state.py", "max_forks_repo_name": "NeuralProsthesisLab/unlock", "max_forks_repo_head_hexsha": "0c4d95abdab288d3e657ca2db867b06f755f26ff", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-05-21T12:38:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T15:47:58.000Z", "avg_line_length": 39.1676646707, "max_line_length": 108, "alphanum_fraction": 0.6214646079, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1495}
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that defines common transformations that can be applied when the dataset
is loaded.
"""
# Imports
import collections
import numpy as np
from torchvision import transforms
class RandomFlipDimensions(object):
""" Apply a random mirror flip for all axes with a defined probability.
"""
def __init__(self, ndims, proba, with_channels=True):
""" Initilaize the class.
Parameters
----------
ndims: int
the number of dimensions.
proba: float
apply flip on each axis with this probability [0 - 1].
with_channels: bool, default True
if set expect the array to contain the channels in first dimension.
"""
if proba < 0 or proba > 1:
raise ValueError("The probabilty must be in [0 - 1].")
self.ndims = ndims
self.proba = proba
self.with_channels = with_channels
def _random_flip(self):
""" Generate a random axes flip.
"""
axis = []
for dim in range(self.ndims):
if np.random.choice([True, False], p=[self.proba, 1 - self.proba]):
axis.append(dim)
return tuple(axis)
def __call__(self, arr):
""" Flip an array axes randomly.
Parameters
----------
arr: np.array
an input array.
Returns
-------
flip_arr: np.array
the fliped array.
"""
if self.with_channels:
data = []
flip = self._random_flip()
for _arr in arr:
data.append(np.flip(_arr, axis=flip))
return np.asarray(data)
else:
return np.flip(arr, axis=self._random_flip())
class Offset(object):
""" Apply an intensity offset (shift and scale) on input channels.
"""
def __init__(self, nb_channels, factor):
""" Initilaize the class.
Parameters
----------
nb_channels: int
the number of channels.
factor: float
the offset scale factor [0 - 1].
"""
if factor < 0 or factor > 1:
raise ValueError("The offset factor must be in [0 - 1].")
self.nb_channels = nb_channels
self.factor = factor
def _random_offset(self):
""" Generate a random offset factor.
"""
return (2 * self.factor * np.random.random(self.nb_channels) +
(1 - self.factor))
def __call__(self, arr):
""" Normalize an array.
Parameters
----------
arr: np.array
an input array.
Returns
-------
offset_arr: np.array
the rescaled array.
"""
assert len(arr) == self.nb_channels
mean_scale_factors = self._random_offset()
std_scale_factors = self._random_offset()
data = []
for _arr, _mfactor, _sfactor in zip(
arr, mean_scale_factors, std_scale_factors):
logical_mask = (_arr != 0)
mean = _arr[logical_mask].mean()
std = _arr[logical_mask].std()
data.append((_arr - (mean * _mfactor)) / (std * _sfactor))
return np.asarray(data)
class Padding(object):
""" A class to pad an image.
"""
def __init__(self, shape, nb_channels=1, fill_value=0):
""" Initialize the instance.
Parameters
----------
shape: list of int
the desired shape.
nb_channels: int, default 1
the number of channels.
fill_value: int or list of int, default 0
the value used to fill the array, if a list is given, use the
specified value on each channel.
"""
self.shape = shape
self.nb_channels = nb_channels
self.fill_value = fill_value
if self.nb_channels > 1 and not isinstance(self.fill_value, list):
self.fill_value = [self.fill_value] * self.nb_channels
elif isinstance(self.fill_value, list):
assert len(self.fill_value) == self.nb_channels
def __call__(self, arr):
""" Fill an array to fit the desired shape.
Parameters
----------
arr: np.array
an input array.
Returns
-------
fill_arr: np.array
the zero padded array.
"""
if len(arr.shape) - len(self.shape) == 1:
data = []
for _arr, _fill_value in zip(arr, self.fill_value):
data.append(self._apply_padding(_arr, _fill_value))
return np.asarray(data)
elif len(arr.shape) - len(self.shape) == 0:
return self._apply_padding(arr, self.fill_value)
else:
raise ValueError("Wrong input shape specified!")
def _apply_padding(self, arr, fill_value):
""" See Padding.__call__().
"""
orig_shape = arr.shape
padding = []
for orig_i, final_i in zip(orig_shape, self.shape):
shape_i = final_i - orig_i
half_shape_i = shape_i // 2
if shape_i % 2 == 0:
padding.append((half_shape_i, half_shape_i))
else:
padding.append((half_shape_i, half_shape_i + 1))
for cnt in range(len(arr.shape) - len(padding)):
padding.append((0, 0))
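# Example: padding a (60, 60) array to shape (64, 64) yields
# padding = [(2, 2), (2, 2)]; an odd gap puts the extra element at the end.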
fill_arr = np.pad(arr, padding, mode="constant",
constant_values=fill_value)
return fill_arr
class Downsample(object):
""" A class to downsample an array.
"""
def __init__(self, scale, with_channels=True):
""" Initialize the instance.
Parameters
----------
scale: int
the downsampling scale factor in all directions.
with_channels: bool, default True
if set expect the array to contain the channels in first dimension.
"""
self.scale = scale
self.with_channels = with_channels
def __call__(self, arr):
""" Downsample an array to fit the desired shape.
Parameters
----------
arr: np.array
an input array
Returns
-------
down_arr: np.array
the downsampled array.
"""
if self.with_channels:
data = []
for _arr in arr:
data.append(self._apply_downsample(_arr))
return np.asarray(data)
else:
return self._apply_downsample(arr)
def _apply_downsample(self, arr):
""" See Downsample.__call__().
"""
slices = []
for cnt, orig_i in enumerate(arr.shape):
if cnt == 3:
break
slices.append(slice(0, orig_i, self.scale))
down_arr = arr[tuple(slices)]
return down_arr
|
{"hexsha": "76c48a1b83c25501b6100aa3c536b96e50cf7da3", "size": 7228, "ext": "py", "lang": "Python", "max_stars_repo_path": "pynet/transforms.py", "max_stars_repo_name": "HChegraoui/pynet", "max_stars_repo_head_hexsha": "3e26f7992e5b6954f637e3a68e4766f3886e2ce9", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pynet/transforms.py", "max_issues_repo_name": "HChegraoui/pynet", "max_issues_repo_head_hexsha": "3e26f7992e5b6954f637e3a68e4766f3886e2ce9", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pynet/transforms.py", "max_forks_repo_name": "HChegraoui/pynet", "max_forks_repo_head_hexsha": "3e26f7992e5b6954f637e3a68e4766f3886e2ce9", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3697478992, "max_line_length": 79, "alphanum_fraction": 0.5394299945, "include": true, "reason": "import numpy", "num_tokens": 1561}
|
"""
Mathematica (Wolfram Alpha): integral_(-2)^2 sqrt(4 - x^2) (1./2 + x^3 cos(x/2)) dx = 3.14159
python workouts/integration_examples/pi_problem.py > outputs/integration_examples/pi_problem.log
"""
from qmcpy import *
from numpy import *
from time import time
def pi_problem(abs_tol=.01):
t0 = time()
distribution = Sobol(dimension=1, seed=7)
measure = Lebesgue(distribution, lower_bound=-2, upper_bound=2)
integrand = CustomFun(measure, lambda x: sqrt(4 - x**2) * (1. / 2 + x**3 * cos(x / 2)))
solution,data = CubQMCSobolG(integrand, abs_tol=abs_tol, n_max=2**30).integrate()
password = str(solution).replace('.', '')[:10]
t_delta = time() - t0
return password,t_delta,data
if __name__ == '__main__':
password,time,data = pi_problem(abs_tol=4e-10) # give 10 significant figures of accuracy
print("Password:", password) # 3141592653
print('CPU time: %.2f sec'%time) # around 75 seconds
print('\n'+'~'*100+'\n\n%s'%str(data))
|
{"hexsha": "bea1776df1b51c7e0397faabff096f990122e056", "size": 978, "ext": "py", "lang": "Python", "max_stars_repo_path": "workouts/integration_examples/pi_problem.py", "max_stars_repo_name": "kachiann/QMCSoftware", "max_stars_repo_head_hexsha": "0ed9da2f10b9ac0004c993c01392b4c86002954c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-18T08:14:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T08:14:32.000Z", "max_issues_repo_path": "workouts/integration_examples/pi_problem.py", "max_issues_repo_name": "kachiann/QMCSoftware", "max_issues_repo_head_hexsha": "0ed9da2f10b9ac0004c993c01392b4c86002954c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "workouts/integration_examples/pi_problem.py", "max_forks_repo_name": "kachiann/QMCSoftware", "max_forks_repo_head_hexsha": "0ed9da2f10b9ac0004c993c01392b4c86002954c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.75, "max_line_length": 94, "alphanum_fraction": 0.6687116564, "include": true, "reason": "from numpy", "num_tokens": 306}
|
#T# improper fractions can be converted to and from mixed numbers
#T# to work with improper fractions and mixed numbers, the sympy package is used
import sympy
#T# create an improper fraction with the Rational constructor
num1 = sympy.Rational(7, 4) # 7/4
#T# the p, q attributes of a rational number contain the numerator and the denominator respectively
int1 = num1.p # 7 #| numerator
int2 = num1.q # 4 #| denominator
#T# to convert an improper fraction into a mixed number, the integer quotient and the remainder must be calculated
int3 = int1 // int2 # 1 #| quotient
int4 = int1 % int2 # 3 #| remainder
#T# a mixed number is written as quotient remainder/denominator
str1 = f'{int3} {int4}/{int2}' # '1 3/4'
#T# to convert a mixed number into an improper fraction, the quotient is added to remainder/denominator
num2 = int3 + int4/int2 # 1.75
num2 = sympy.Rational(str(int3 + int4/int2)) # 7/4
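#T# as a check, the mixed number converted back equals the original improper fraction
bool1 = (num2 == num1) # True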
|
{"hexsha": "1ec559d4bf2a65d76f220e6c8f12c738f5583700", "size": 944, "ext": "py", "lang": "Python", "max_stars_repo_path": "Math/A01_Arithmetics_basics/Programs/S02_2/Improper_fractions.py", "max_stars_repo_name": "Polirecyliente/SGConocimiento", "max_stars_repo_head_hexsha": "560b08984236d7a10f50c6b5e6fb28844193d81b", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Math/A01_Arithmetics_basics/Programs/S02_2/Improper_fractions.py", "max_issues_repo_name": "Polirecyliente/SGConocimiento", "max_issues_repo_head_hexsha": "560b08984236d7a10f50c6b5e6fb28844193d81b", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Math/A01_Arithmetics_basics/Programs/S02_2/Improper_fractions.py", "max_forks_repo_name": "Polirecyliente/SGConocimiento", "max_forks_repo_head_hexsha": "560b08984236d7a10f50c6b5e6fb28844193d81b", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9090909091, "max_line_length": 120, "alphanum_fraction": 0.7086864407, "include": true, "reason": "import sympy", "num_tokens": 270}
|
import pandas as pd
import numpy as np
import operator
import collections
import logging
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
class StatisticEstimator:
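# A lookup-table classifier: it memorizes how often each exact feature tuple
# maps to each label, predicts the majority label per tuple, and returns -1
# for feature tuples never seen during training.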
def __init__(self):
self.counts = collections.defaultdict(lambda: collections.defaultdict(int))
self.total = 0
def fit(self, data, targets):
n = data.shape[0]
for i in range(n):
key = tuple(data.iloc[i])
target = targets.iloc[i,0]
self.counts[key][target] += 1
self.total += n
def predict(self, data):
predictions = []
n = data.shape[0]
for i in range(n):
key = tuple(data.iloc[i])
props = self.counts[key]
if len(props) != 0:
val = max(props.items(), key=operator.itemgetter(1))[0]
else:
val = -1
predictions.append(val)
return np.array(predictions)
# This class is totally unfinished and unusable
class CitibikeEstimator:
def __init__(self):
self.precise_estimator = StatisticEstimator()
self.general_estimator = StatisticEstimator()
def fit(self, data, targets):
self.precise_estimator.fit(data, targets)
self.general_estimator.fit(data[["start station id"]], targets)
def predict(self, data):
precise_preds = self.precise_estimator.predict(data)
general_preds = self.general_estimator.predict(data[["start station id"]])
# TODO
return general_preds
### Preprocessing
def add_time(df):
# Create hour and weekday from starttime
df["date"] = pd.to_datetime(df["starttime"])
df["hour"] = df["date"].dt.hour
df["weekday"] = df["date"].dt.weekday
def with_clusters(data):
clusters = pd.read_csv("../datasets/clustered_stations.csv")
data = pd.merge(data, clusters[["id", "cluster"]], left_on="start station id", right_on="id")
data = pd.merge(data, clusters[["id", "cluster"]], left_on="end station id", right_on="id", suffixes=("", " end"))
return data
### GENERAL TEST METHODS
def test_estimator(df, featuresCols, labelsCols):
train_df, test_df = train_test_split(df, test_size=0.2)
train_features = train_df[featuresCols]
train_labels = train_df[labelsCols]
test_features = test_df[featuresCols]
test_labels = test_df[labelsCols]
logging.info("Dataset preprocessed!")
# Test estimator
estimator = StatisticEstimator()
estimator.fit(train_features, train_labels)
logging.info("Training done!")
test_predictions = estimator.predict(test_features)
accuracy = metrics.accuracy_score(test_labels.values.reshape(-1), test_predictions)
print("Precision score = %f" % (accuracy,))
### TESTS
def test_next_station(df):
# Create hour and weekday from starttime
df["date"] = pd.to_datetime(df["starttime"])
df["hour"] = df["date"].dt.hour
df["weekday"] = df["date"].dt.weekday
featuresCols = ["hour", "weekday", "start station id"]
labelsCols = ["end station id"]
test_estimator(df, featuresCols, labelsCols)
def test_clusters(data):
data = with_clusters(data)
labels = data["cluster"]
predictions = data["cluster end"]
accuracy = metrics.accuracy_score(labels.values.reshape(-1), predictions.values.reshape(-1))
print("Precision score = %f" % (accuracy,))
def test_id_to_clusters(data):
add_time(data)
data = with_clusters(data)
featuresCols = ["start station id"]
labelsCols = ["cluster end"]
test_estimator(data, featuresCols, labelsCols)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
df = pd.read_csv("../datasets/201801.csv")
logging.info("Dataset loaded!")
test_id_to_clusters(df)
|
{"hexsha": "e75b70fd08d942971c2a3d89f77f45adc20696d6", "size": 4062, "ext": "py", "lang": "Python", "max_stars_repo_path": "citibike/stat_models.py", "max_stars_repo_name": "MaxenceHanin/5SDBD-Integ-E3", "max_stars_repo_head_hexsha": "dbf85386451b420da4132b69341155332cf56a53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-31T09:24:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-31T09:24:23.000Z", "max_issues_repo_path": "citibike/stat_models.py", "max_issues_repo_name": "MaxenceHanin/5SDBD-Integ-E3", "max_issues_repo_head_hexsha": "dbf85386451b420da4132b69341155332cf56a53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "citibike/stat_models.py", "max_forks_repo_name": "MaxenceHanin/5SDBD-Integ-E3", "max_forks_repo_head_hexsha": "dbf85386451b420da4132b69341155332cf56a53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2083333333, "max_line_length": 118, "alphanum_fraction": 0.6395864106, "include": true, "reason": "import numpy", "num_tokens": 922}
|
import torch
import numpy as np
from models.stylegan3.networks_stylegan3 import Generator
from utils.common import make_transform
class Expander:
def __init__(self, G: Generator):
self.G = G
def generate_expanded_image(self, ws=None, all_s=None, landmark_t=None,
pixels_right=0, pixels_left=0, pixels_top=0, pixels_bottom=0):
assert landmark_t is not None, "Expected to receive landmarks transforms! Received None!"
images = []
transforms = Expander._get_transforms(self.G.img_resolution, pixels_right, pixels_left, pixels_top, pixels_bottom)
for t in transforms:
if t is not None:
self.G.synthesis.input.transform = torch.from_numpy(landmark_t @ t).float().cuda()
with torch.no_grad():
img = self.G.synthesis(ws, all_s)
else:
img = None
images.append(img)
merged_image = Expander._merge_images(images, self.G.img_resolution, pixels_right, pixels_left, pixels_top, pixels_bottom)
return merged_image
@staticmethod
def _get_transforms(res, pixels_right, pixels_left, pixels_top, pixels_bottom):
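"""Build the nine translations used for expansion: identity for the center, one per edge, and one per corner. Entries are None where no expansion is requested on that side; all non-None transforms are inverted before being returned."""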
identity_transform = make_transform((0, 0), 0)
transform_left = Expander._get_transform_single_edge(res, edge="left", num_pixels=pixels_left)
transform_right = Expander._get_transform_single_edge(res, edge="right", num_pixels=pixels_right)
transform_top = Expander._get_transform_single_edge(res, edge="top", num_pixels=pixels_top)
transform_bottom = Expander._get_transform_single_edge(res, edge="bottom", num_pixels=pixels_bottom)
transform_top_left = Expander._get_transform_corner(res, corner="top_left", num_pixels_hor=pixels_left, num_pixels_ver=pixels_top)
transform_top_right = Expander._get_transform_corner(res, corner="top_right", num_pixels_hor=pixels_right, num_pixels_ver=pixels_top)
transform_bottom_left = Expander._get_transform_corner(res, corner="bottom_left", num_pixels_hor=pixels_left, num_pixels_ver=pixels_bottom)
transform_bottom_right = Expander._get_transform_corner(res, corner="bottom_right", num_pixels_hor=pixels_right, num_pixels_ver=pixels_bottom)
transforms = [identity_transform, transform_left, transform_top, transform_right, transform_bottom,
transform_top_left, transform_top_right, transform_bottom_right, transform_bottom_left]
for i in range(len(transforms)):
if transforms[i] is not None:
transforms[i] = np.linalg.inv(transforms[i])
return transforms
@staticmethod
def _get_transform_single_edge(res, edge, num_pixels):
if num_pixels == 0:
return None
if edge == "left":
return make_transform((num_pixels / res, 0), 0)
if edge == "right":
return make_transform((-num_pixels / res, 0), 0)
if edge == "top":
return make_transform((0, num_pixels / res), 0)
if edge == "bottom":
return make_transform((0, -num_pixels / res), 0)
else:
raise ValueError("Invalid edge for transform")
@staticmethod
def _get_transform_corner(res, corner, num_pixels_hor, num_pixels_ver):
if num_pixels_hor == 0 or num_pixels_ver == 0:
return None
if corner == "top_left":
return make_transform((num_pixels_hor / res, num_pixels_ver / res), 0)
if corner == "top_right":
return make_transform((-num_pixels_hor / res, num_pixels_ver / res), 0)
if corner == "bottom_left":
return make_transform((num_pixels_hor / res, -num_pixels_ver / res), 0)
if corner == "bottom_right":
return make_transform((-num_pixels_hor / res, -num_pixels_ver / res), 0)
else:
raise ValueError("Invalid corner for transform")
@staticmethod
def _merge_images(images, res, pixels_right, pixels_left, pixels_top, pixels_bottom):
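"""Stitch the center render and the edge/corner renders into one canvas of shape (N, 3, pixels_top + res + pixels_bottom, pixels_left + res + pixels_right), copying from each render only the strip it contributes."""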
result_image = torch.zeros(images[0].shape[0], 3, pixels_top + res + pixels_bottom, pixels_left + res + pixels_right).cuda()
# center
result_image[:, :, pixels_top:pixels_top + res, pixels_left:pixels_left + res] = images[0]
if pixels_left > 0:
result_image[:, :, pixels_top:pixels_top + res, :pixels_left] = images[1][:, :, :, 0:pixels_left]
if pixels_top > 0:
result_image[:, :, :pixels_top, pixels_left:pixels_left + res] = images[2][:, :, 0:pixels_top, :]
if pixels_right > 0:
result_image[:, :, pixels_top:pixels_top + res, pixels_left + res:] = images[3][:, :, :, res - pixels_right:]
if pixels_bottom > 0:
result_image[:, :, pixels_top + res:, pixels_left:pixels_left + res] = images[4][:, :, res - pixels_bottom:, :]
if pixels_top > 0 and pixels_left > 0:
result_image[:, :, :pixels_top, :pixels_left] = images[5][:, :, :pixels_top, :pixels_left]
if pixels_top > 0 and pixels_right > 0:
result_image[:, :, :pixels_top, res + pixels_left:] = images[6][:, :, :pixels_top, res - pixels_right:]
if pixels_bottom > 0 and pixels_right > 0:
result_image[:, :, res + pixels_top:, res + pixels_left:] = images[7][:, :, res - pixels_bottom:, res - pixels_right:]
if pixels_bottom > 0 and pixels_left > 0:
result_image[:, :, res + pixels_top:, :pixels_left] = images[8][:, :, res - pixels_bottom:, :pixels_left]
return result_image
|
{"hexsha": "9e2edc43a08d2656ef1613c46db1dcbe4e8f17b4", "size": 5569, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/fov_expansion.py", "max_stars_repo_name": "ohhagr/stylegan3-editing-environment", "max_stars_repo_head_hexsha": "5f3602f4a6bb8036511b35aacc9b332d0ca5fa58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 347, "max_stars_repo_stars_event_min_datetime": "2022-01-31T18:36:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:08:39.000Z", "max_issues_repo_path": "utils/fov_expansion.py", "max_issues_repo_name": "ohhagr/stylegan3-editing-environment", "max_issues_repo_head_hexsha": "5f3602f4a6bb8036511b35aacc9b332d0ca5fa58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2022-02-13T20:21:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T12:20:57.000Z", "max_forks_repo_path": "utils/fov_expansion.py", "max_forks_repo_name": "ohhagr/stylegan3-editing-environment", "max_forks_repo_head_hexsha": "5f3602f4a6bb8036511b35aacc9b332d0ca5fa58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2022-02-02T23:18:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:16:26.000Z", "avg_line_length": 51.0917431193, "max_line_length": 150, "alphanum_fraction": 0.6555934638, "include": true, "reason": "import numpy", "num_tokens": 1294}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep CIFAR classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import argparse
import sys
import tensorflow as tf
from cifar10_data import get_data_set
trainingImages, trainingLabels, _ = get_data_set()
testingImages, testingLabels, _ = get_data_set("test")
def deepnn(X):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
X: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard CIFAR image.
Returns:
A tuple (y, keepProb). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keepProb is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there are three here, since CIFAR
# images are RGB -- it would be 1 for a grayscale image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(X, [-1, 32, 32, 3])
# First convolutional layer - maps the 3-channel RGB image to 32 feature maps.
with tf.name_scope('conv1'):
W_conv1 = weight_variable([5, 5, 3, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 rounds of downsampling, our 32x32 image
# is down to 8x8x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'):
W_fc1 = weight_variable([8 * 8 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 8 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
with tf.name_scope('Dropout'):
keepProb = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keepProb)
# Map the 1024 features to 10 classes, one for each CIFAR-10 category
with tf.name_scope('fc2'):
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
Yconv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return Yconv, keepProb
def conv2d(X, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(X):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(X, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(_):
# Training parameters
maxEpochs = FLAGS.maxEpochs
batchSize = FLAGS.batchSize
testStep = FLAGS.testStep
# Network parameters
n_input = 32*32*3 # CIFAR data input (img shape: 32*32*3)
n_classes = 10 # CIFAR-10 total classes
# Create the model
X = tf.placeholder(tf.float32, [None, n_input], name="input")
Y = tf.placeholder(tf.float32, [None, n_classes], name="output")
# Build the graph for the deep net
Yconv, keepProb = deepnn(X)
# Define loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
logits=Yconv, labels=Y))
# Define optimizer
with tf.name_scope('adam_optimizer'):
train_op = tf.train.AdamOptimizer().minimize(loss, name="train")
# Define accuracy
prediction = tf.equal(tf.argmax(Yconv, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32), name="test")
# Create a summary to monitor cross_entropy tensor
tf.summary.scalar("loss", loss)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
# Initializing the variables
init = tf.variables_initializer(tf.global_variables(), name='init')
with tf.Session() as sess:
# Session Init
sess.run(init)
# Logger Init
summaryWriter = tf.summary.FileWriter(FLAGS.logDir, graph=sess.graph)
# Training
for step in range( maxEpochs ):
# Get CIFAR training data
randidx = np.random.randint(len(trainingImages), size=batchSize)
batchImage = trainingImages[randidx]
batchLabel = trainingLabels[randidx]
# Test training model for every testStep
if step % testStep == 0:
# Run accuracy op & summary op to get accuracy & training progress
acc, summary = sess.run( [ accuracy, merged_summary_op ], \
feed_dict={ X: testingImages, Y: testingLabels, keepProb: 1.0})
# Write accuracy to log file
summaryWriter.add_summary(summary, step)
# Print accuracy
print('step %d, training accuracy %f' % (step, acc))
# Run training op
train_op.run(feed_dict={X: batchImage, Y: batchLabel, keepProb: 0.5})
# Write TF model
tf.train.write_graph(sess.graph_def,
'./',
'cifar10_dnn.pb', as_text=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--logDir', type=str, default='/tmp/tensorflow_logs/cifar10/deepnet',
help='Training progress data directory')
parser.add_argument('--batchSize', type=int, default=50,
help='Training batch size')
parser.add_argument('--maxEpochs', type=int, default=10000,
help='Maximum training steps')
parser.add_argument('--testStep', type=int, default=100,
help='Test model accuracy for every testStep iterations')
FLAGS, unparsed = parser.parse_known_args()
# Program entry
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
{"hexsha": "631dfdb05b7891c16abac3670b582806cd4c200b", "size": 7546, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/examples/train_cifar10/gen_tf_dnn.py", "max_stars_repo_name": "supernovaremnant/TensorflowOpenCL-GPU", "max_stars_repo_head_hexsha": "8d443c9afb49064c4bdf68fc0c5217986077d24c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow/examples/train_cifar10/gen_tf_dnn.py", "max_issues_repo_name": "supernovaremnant/TensorflowOpenCL-GPU", "max_issues_repo_head_hexsha": "8d443c9afb49064c4bdf68fc0c5217986077d24c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/examples/train_cifar10/gen_tf_dnn.py", "max_forks_repo_name": "supernovaremnant/TensorflowOpenCL-GPU", "max_forks_repo_head_hexsha": "8d443c9afb49064c4bdf68fc0c5217986077d24c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3564356436, "max_line_length": 93, "alphanum_fraction": 0.6500132521, "include": true, "reason": "import numpy", "num_tokens": 1899}
|
#!/usr/bin/env python
'''Utility for testing the equivalence of SINEX files, based on comparison of values in the SOLUTION/ESTIMATE blocks. Ignores other blocks and header info.
The functionality is based on the assert_frame_equal method (https://pandas.pydata.org/docs/reference/api/pandas.testing.assert_frame_equal.html)'''
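# Example invocation (file names here are hypothetical):
#   python diffsnx.py -i solution1.snx -o solution2.snx -a 1E-4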
import argparse
import os as _os
import numpy as _np
import logging
import sys as _sys
from gn_lib.gn_io.sinex import _read_snx_solution
from gn_lib.gn_io.trace import diff2msg
def parse_arguments():
parser = argparse.ArgumentParser(description='Compares the content of two sinex files, specifically the SOLUTION/ESTIMATE blocks and returns errors on difference.')
parser.add_argument('-i', '--snx1', type=file_path,help='path to sinex file (.snx/.ssc). Can be compressed with LZW (.Z)')
parser.add_argument('-o', '--snx2', type=file_path,help='path to another sinex file')
parser.add_argument('-a', '--atol', type=float,help='absolute tolerance',default=1E-4)
parser.add_argument('-p', '--passthrough', action='store_true',help='passthrough: return 0 (success) even if the comparison fails')
return parser.parse_args()
def file_path(path):
if _os.path.isfile(path):
return path
else:
raise argparse.ArgumentTypeError(f"{path} is not a valid path")
def diffsnx(snx1_path,snx2_path,atol,passthrough):
'''Compares two sinex files. '''
logging.getLogger().setLevel(logging.INFO)
logging.info(f':diffsnx testing of {_os.path.basename(snx1_path)}')
snx1_df = _read_snx_solution(path_or_bytes=snx1_path).unstack(0)
snx2_df = _read_snx_solution(path_or_bytes=snx2_path).unstack(0)
bad_snx_vals = diff2msg(snx1_df - snx2_df,tol=atol)
if bad_snx_vals is not None:
logging.warning(msg=f':diffsnx found estimates diffs above the tolerance ({atol:.1E}):\n{bad_snx_vals.to_string()}\n')
logging.error(msg = ':diffsnx test failed\n')
if not passthrough:
_sys.exit(-1)
else:
logging.error(msg = ':diffsnx returning 0 as passthrough enabled\n')
return 0
else: logging.info(f':diffsnx [OK] estimates diffs within {atol:.1E} tolerance')
logging.info(':diffsnx [ALL OK]')
if __name__ == "__main__":
parsed_args = parse_arguments()
diffsnx(snx1_path=parsed_args.snx1,snx2_path=parsed_args.snx2,atol=parsed_args.atol,passthrough=parsed_args.passthrough)
|
{"hexsha": "625e4cb2cd3564358dd38a018e8bde5c4ab675c8", "size": 2405, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/diffsnx.py", "max_stars_repo_name": "umma-zannat/ginan", "max_stars_repo_head_hexsha": "a4d1a3bb8696267f23d26e8c6a2f6080b87bb494", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T15:14:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T15:14:55.000Z", "max_issues_repo_path": "scripts/diffsnx.py", "max_issues_repo_name": "umma-zannat/ginan", "max_issues_repo_head_hexsha": "a4d1a3bb8696267f23d26e8c6a2f6080b87bb494", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/diffsnx.py", "max_forks_repo_name": "umma-zannat/ginan", "max_forks_repo_head_hexsha": "a4d1a3bb8696267f23d26e8c6a2f6080b87bb494", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-12T15:15:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T15:15:12.000Z", "avg_line_length": 46.25, "max_line_length": 168, "alphanum_fraction": 0.7255717256, "include": true, "reason": "import numpy", "num_tokens": 632}
|
# To run this script:
# pkg> build GoogleSheets
println("Instalation of GoogleSheets python package dependencies")
using PyCall
@pyimport pip
pip.main(["install","google-api-python-client","google-auth-httplib2","google-auth-oauthlib"])
|
{"hexsha": "e0736c46245426a25165d9853ea9d972bdcdcc01", "size": 239, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/build.jl", "max_stars_repo_name": "Sixzero/GoogleSheets.jl", "max_stars_repo_head_hexsha": "bffecf8110143c8f10dfbc3ca554c3793d81917c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-05-13T09:09:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T21:48:01.000Z", "max_issues_repo_path": "deps/build.jl", "max_issues_repo_name": "Sixzero/GoogleSheets.jl", "max_issues_repo_head_hexsha": "bffecf8110143c8f10dfbc3ca554c3793d81917c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-12-19T06:08:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T16:44:00.000Z", "max_forks_repo_path": "deps/build.jl", "max_forks_repo_name": "Sixzero/GoogleSheets.jl", "max_forks_repo_head_hexsha": "bffecf8110143c8f10dfbc3ca554c3793d81917c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-07T13:40:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-07T13:40:37.000Z", "avg_line_length": 26.5555555556, "max_line_length": 94, "alphanum_fraction": 0.7740585774, "num_tokens": 60}
|
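# Plots.jl recipe for an AbstractLattice: draws the sites as a scatter series
# and the bonds (skipping periodic wrap-around bonds) as line segments,
# for both 2D and 3D lattices.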
@recipe function plot(l::AbstractLattice; bondcolor=:grey)
markershape --> :circle # if markershape is unset, default it to :circle
markercolor --> :black
markersize --> 5
grid --> false
axis --> false
legend --> false
if ndims(l) == 3
showaxis --> false
end
# plot sites
allsites = sites(l)
@series begin
seriestype := :scatter
x = [point(s)[1] for s in allsites]
y = [point(s)[2] for s in allsites]
if ndims(l) == 3
z = [point(s)[3] for s in allsites]
x,y,z
else
x,y
end
end
# plot bonds
for b in bonds(l)
all(iszero.(b.wrap)) || continue
@series begin
seriestype := :line
linestyle := :solid
color := bondcolor
# get the coordinates of the site the bond starts from
from_point = point(allsites[from(b)])
x1 = from_point[1]
y1 = from_point[2]
# get the coordinates to where the bond is pointing
to_point = point(allsites[to(b)])
x2 = to_point[1]
y2 = to_point[2]
if ndims(l) == 3
z1 = from_point[3]
z2 = to_point[3]
[x1,x2], [y1,y2], [z1,z2]
else
[x1,x2], [y1,y2]
end
end
end
end
|
{"hexsha": "7e0924199b0ee38a3b94907cd75e315bd2afa3f3", "size": 1392, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/recipes.jl", "max_stars_repo_name": "crstnbr/LatPhysPlottingPlots.jl", "max_stars_repo_head_hexsha": "119e1779e96860c430beb81243e58d97075b02ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/recipes.jl", "max_issues_repo_name": "crstnbr/LatPhysPlottingPlots.jl", "max_issues_repo_head_hexsha": "119e1779e96860c430beb81243e58d97075b02ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/recipes.jl", "max_forks_repo_name": "crstnbr/LatPhysPlottingPlots.jl", "max_forks_repo_head_hexsha": "119e1779e96860c430beb81243e58d97075b02ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3090909091, "max_line_length": 75, "alphanum_fraction": 0.4762931034, "num_tokens": 385}
|
(** Binary trees the nodes of which are labelled with type A *)
Section Some_type_A.
Variable A: Type.
Inductive tree : Type :=
| leaf
| node (label: A)(left_son right_son : tree).
Inductive subtree (t:tree) : tree -> Prop :=
| subtree1 : forall t' (x:A), subtree t (node x t t')
| subtree2 : forall (t':tree) (x:A), subtree t (node x t' t).
Theorem well_founded_subtree : well_founded subtree.
Proof.
intros t; induction t as [ | x t1 IHt1 t2 IHt2].
- split; inversion 1.
- split; intros y Hsub; inversion_clear Hsub; assumption.
Qed.
(** Alternate arithmetic proof
Using several lemmas in library Wellfounded, we use tree size
as a measure for proving well_foundedness
*)
Require Import Omega
Inverse_Image Wellfounded.Inclusion Wf_nat.
Fixpoint size (t:tree) : nat :=
match t with leaf => 1
| node _ t1 t2 => 1 + size t1 + size t2
end.
Lemma subtree_smaller : forall (t t': tree), subtree t t' -> size t < size t'.
Proof.
inversion 1;simpl;omega.
Qed.
Lemma well_founded_subtree' : well_founded subtree.
Proof.
apply wf_incl with (fun t t' => size t < size t').
intros x y Hxy; now apply subtree_smaller.
apply wf_inverse_image; apply lt_wf.
Qed.
End Some_type_A.
|
{"author": "raduom", "repo": "coq-art", "sha": "092a8df8e74d7d7a90a2405e4eacf902e528d83a", "save_path": "github-repos/coq/raduom-coq-art", "path": "github-repos/coq/raduom-coq-art/coq-art-092a8df8e74d7d7a90a2405e4eacf902e528d83a/ch15_general_recursion/SRC/btreewf.v"}
|
import numpy as np
import csv
import sys
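# Converts a sparse COO-style Hi-C contact list (lines of "x y count") into a
# dense tab-separated matrix, prepending chrom/start/end columns to each row.
# Usage: python convert_coo_to_full_matrix2.py <input_coo> <output_tsv> <chrN> <bin_size>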
fname=str(sys.argv[1])
ofname=str(sys.argv[2])
chrom = str(sys.argv[3])
bin_size=int(sys.argv[4])
chr_size = {'chr1':249250621,
'chr2':243199373,
'chr3':198022430,
'chr4':191154276,
'chr5':180915260,
'chr6':171115067,
'chr7':159138663,
'chr8':146364022,
'chr9':141213431,
'chr10':135534747,
'chr11':135006516,
'chr12':133851895,
'chr13':115169878,
'chr14':107349540,
'chr15':102531392,
'chr16':90354753,
'chr17':81195210,
'chr18':78077248,
'chr19':59128983,
'chr20':63025520,
'chr21':48129895,
'chr22':51304566,
'chrX':155270560}
with open(fname) as f:
file=f.readlines()
file=[x.strip() for x in file]
d = {}
for line in file:
x,y,n = line.split()
d[x,y] = n
get_max_coord = []
for key in d:
get_max_coord.append(int(key[1]))
chr_max_value = max(get_max_coord) + bin_size
query = list(np.arange(0,chr_max_value,bin_size))
list_of_list = []
for i in query:
list1 = []
col3 = i+bin_size
list1.extend([chrom, i, col3])
for j in query:
keytuple = (str(i),str(j))
if keytuple in d:
if(i==j):
list1.append(0)
else:
list1.append(d[keytuple])
else:
reverse_keytuple = (str(j),str(i))
if reverse_keytuple in d:
if(i==j):
list1.append(0)
else:
list1.append(d[reverse_keytuple])
else:
list1.append(0)
list_of_list.append(list1)
list_of_list[-1][2] = chr_size[chrom]
with open(ofname,"wb") as csvfile:
writer = csv.writer(csvfile,delimiter="\t")
writer.writerows(list_of_list)
|
{"hexsha": "a6ec33a13052a71dbc92bad2f33036c8ea43032a", "size": 1578, "ext": "py", "lang": "Python", "max_stars_repo_path": "convert_coo_to_full_matrix2.py", "max_stars_repo_name": "tharvesh/preprocess_script", "max_stars_repo_head_hexsha": "a52d56442c4038a1af567c83773972f10078294e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "convert_coo_to_full_matrix2.py", "max_issues_repo_name": "tharvesh/preprocess_script", "max_issues_repo_head_hexsha": "a52d56442c4038a1af567c83773972f10078294e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "convert_coo_to_full_matrix2.py", "max_forks_repo_name": "tharvesh/preprocess_script", "max_forks_repo_head_hexsha": "a52d56442c4038a1af567c83773972f10078294e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.243902439, "max_line_length": 49, "alphanum_fraction": 0.6432192649, "include": true, "reason": "import numpy", "num_tokens": 541}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Problem statement
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\clearpage
\section{Problem Statement}
\label{sec:problem}
%% intro
In this section I will describe the store backend system examined in this thesis (later: \textit{system})
and elaborate on the problems that I was solving in this project (later: \textit{project}).
I will also describe the users of the project and what the needs of
each of them were, as discovered through the requirements engineering work I did.
%% description of the store
%% ingestion system
%% workflows, processors, parallelism
%% description of the terms used
\subsection{The store systems}
The system being investigated in this thesis was the Microsoft Windows store.
The store sells digital \emph{products} such as games and applications.
Each product belongs to a \emph{publisher} such as an independent developer or a game company.
A product is \emph{submitted} by a publisher as a digital application package.
Before it is \emph{published} to the store catalog, it needs to be \emph{ingested},
which means the package is verified and processed through multiple steps.
After the package has been ingested and the necessary information has been collected,
it can be published, which also involves a pipeline of steps.
These steps together form an ingestion-publishing \emph{workflow} that consists of multiple \emph{activities} (steps).
Completion of each of these activities is logged as an \emph{event}.
The list of events for a single submission forms a \emph{trace}.
These terms will be used throughout this thesis so they shall be formally defined as follows:
% describe: workflow, trace (submission), product (bigid), publisher, event
\begin{description}[style=nextline]
\item[Product]
Single digital application package being processed in the system.
\item[Publisher]
Independent developer or a company submitting a product that they own into the store.
\item[Activity]
Single atomic step of the workflow, where some part of the product is processed.
\item[Processor]
Part of the distributed system executing a specific activity.
\item[Workflow]
Abstract description of the whole pipeline consisting of a number of sequential and parallel activities. It contains the whole set of activities and their dependencies.
\item[Submission]
Single execution of the workflow for a single product. Equates to a single \emph{case} in the process discovery model.
\item[Event]
Log entry documenting the time when a specific execution of an activity has finished or changed status.
\item[Trace]
Set of events describing a current or past submission.
A trace is tied to a specific product from a specific publisher and has information about all the activities and their execution times.
\item[System]
The distributed store backend system as a whole, with all the processors and other parts involved.
\item[Project]
The new part of the system implemented in this thesis.
\label{desc:termdefinitions}
\end{description}
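For illustration, a single trace could look like the following. The activity
names, identifiers, and timestamps are hypothetical and invented for this
example; they are not the actual activities of the store workflow.
\begin{verbatim}
2018-03-01T12:00:04Z submission=123 activity=PackageValidation status=Completed
2018-03-01T12:00:06Z submission=123 activity=MetadataExtraction status=Completed
2018-03-01T12:07:31Z submission=123 activity=ContentScan status=Completed
\end{verbatim}
If two of these activities executed in parallel, their order within the trace
would not be guaranteed.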
%% logging
The workflows are processed in the distributed system as a pipeline of steps.
Multiple concurrent submissions are in progress at any moment of time.
Furthermore, many steps of the workflow are independent of each other.
Thus, a single submission can have multiple steps in progress at the same time.
However, to continue the workflow all the parallel steps need to complete.
An ``aggregator'' step waits for the parallel steps to finish and only then proceeds to the next step.
Figure \ref{fig:workflowexample} illustrates this.
The different steps of the workflow have different lengths that depend on, among other things,
the processing needed, the characteristics of the package, and the current workload of the system.
Because of these uncertainties, traces from two different submissions may differ from each other.
The order in which the parallel steps finish is unknown, as described in section \ref{sec:eventtheory}.
\begin{figure}[htb]
\centering \includegraphics[width=0.9\linewidth]{gfx/figures/workflow.pdf}
\caption{A workflow with sequential, parallel, and aggregator steps}
\label{fig:workflowexample}
\end{figure}
When each step completes, an event is generated.
These events are collected from the different processors into a single database.
Each step is associated with a timestamp and all the available metadata.
The system works by a ``best effort'' delivery.
This means the delivery of the events to the database can fail and thus be delayed.
Furthermore, the distributed system involves multiple machines in multiple locations.
This results in variance of seconds to minutes in the clocks of the systems.
The clock variance is directly seen as noise in the timestamps of the events.
Because of the described parallelism and the uncertainties mentioned, the overall state of the system is difficult to describe at any given time.
Looking at the raw log data is also challenging because of the volume.
At the time of writing, the system produced on average 15~000 events per hour with peak times averaging in the 30~000 range.
Event filtering and visualization is necessary to find the relevant data from the noise.
\subsection{Requirements engineering}
%% description of users
% engineers, developers, working on ingestion
% first party and third party publisher release managers
% managers, PMs
At the beginning of my work, the project requirements were not clear.
To understand the needs of the users, I conducted requirements engineering work.
In my research I discovered five user groups relevant to the project:
\begin{description}
\item[Developers] are the Microsoft software engineers working on the ingestion and publishing system.
\item[Managers] are the Microsoft developer leads and program managers who coordinate the developers' time and what they are working on.
\item[Publishers] are the independent creators, companies, and other third parties submitting the product packages to the store.
\item[Release managers] are Microsoft employees who have been assigned to be a contact for the largest publishing companies who develop the high-profile ``triple-A'' games and applications.
\item[Manual reviewers] are the people working for Microsoft that do the manual steps of the product validation when necessary. This includes, for example, checking a submission for fraud or inappropriate graphics or language.
\end{description}
The needs for each user group are covered in table \ref{tab:userneeds}.
I used the ``user story'' format for documenting the needs \cite{cohn2004user}.
%% description that user needs were not fully known so they need engineering
The project was done in two cycles, with requirements engineering work done at the beginning of both cycles. See section \ref{sec:timeline} for the project timeline.
The user needs found at the start of the second cycle were related mostly to the presentation and the user interface, so they did not affect the main structure of the project significantly.
%% description of initial user needs
% which?
\begin{table}[htb]
\begin{center}
\begin{tabularx}{\linewidth}{| X |}
\hline
\textbf{As a developer I want...} \newline
- to see the current status of a submission so that I can investigate issues with a single submission or product.\newline
- to see the shape of the store workflows so that I can find issues in the dependencies.\newline
- to see statistics about the activities so that I can write reports to my superiors.\newline
- to be able to customize what I see so that I can have exactly the information I care about.\newline
- to be notified of any delays in the workflow so that I can investigate issues faster.\newline
- to be notified about big issues distinctively so that I can prioritize my work.
\\
\hline
\textbf{As a manager I want...} \newline
- to have statistics of the workflows so that I can report the system performance and improvements over time to my superiors.
\\
\hline
\textbf{As a publisher I want...} \newline
- to be able to know estimated times for submission completion so that I can schedule my work day.\newline
- to be able to inquire about the status of my submissions so that I can escalate any issues that arise
\\
\hline
\textbf{As a release manager I want...} \newline
- to see the detailed status of a single product or submission so that I track it and report any issues to my superiors. \newline
- to see the big picture status for all submissions related to a single product or publisher quickly so that I can save time. \newline
- to be notified about completion of crucial steps of the workflow or any issues so that I can schedule and prioritize my work. \newline
- to be able to customize what I see so that I do not see information about products that I do not own.
\\
\hline
\textbf{As a manual reviewer I want...} \newline
- to be notified in advance when a product is heading towards a manual review so that I can schedule my work day.
\\
\hline
\end{tabularx}
\end{center}
\caption{Initial user needs found in January}
\label{tab:userneeds}
\end{table}
%% business requirements
%% need to integrate with an existing system
%% confidentiality, integrity
%% working with partners
The project also involved business requirements from Microsoft.
The major two requirements were integration with existing systems and following confidentiality requirements.
The project was required to integrate with the existing store backend systems.
The event collection and storage were already handled by a system called \textbf{Jury}, which is an interface to browse the products in the store and see diagnostics information from the log database.
The existing system stores the events in a database and allows the user to query the events with a SQL-like query language.
The results were shown as a list of rows with the matching events and timestamps (see figure \ref{fig:plaineventlog}).
The project was to integrate with this querying system to load the events from the database.
The events and the product metadata in the system contained confidential information.
There were mainly two terms used to classify confidential data: \textit{Medium Business Intelligence} (MBI) and \textit{Personally Identifiable Information} (PII).
In practice, it meant that any MBI-classified information related to a product should only be shown to the publisher or the partner who owns the product.
For example, the product events should only be available to the publisher who owns the product.
However, the developers working at Microsoft should be able to see all MBI-classified information.
Any PII-classified information should be considered private and should not be visible through the interface developed in this thesis.
%% challenges
%% user needs not fully understood
%% unknown parallelism
%% unsupervised learning and adaptation
%% both real time and statistical data are needed
There were several challenges discovered in the beginning of the project.
The system implemented in the project should be unsupervised.
This means that the system should use past data to build an understanding of the workflow without the need for a user such as an engineer to supply any knowledge beforehand.
The system should adapt to any changes in the workflow automatically.
The distributed workflows contain unknown parallelism that must be detected automatically.
Furthermore, the distributed system contains noise in the timestamps, which further complicates the parallelism detection.
In addition, the system should be able to provide two different kinds of information.
The workflow models and statistics should be built based on a long-term aggregate discovered from several days' worth of data,
while the system should also show the real time data for the current ongoing submissions.
These two kinds of information should both be utilized in the visualization.
Lastly, requirements engineering was necessary to discover the user needs.
This is why an iterative process was set up.
Section \ref{sec:timeline} contains a detailed description of this process.
%% needs discovered at meetings?
%% conclusion, key goals
To recap, the key goals for the solution developed in this thesis are the ability to dynamically adapt to changes in the workflow,
the ability for the user to customize the information they need, and decoupling the solution from the specific workflow steps of the store.
The project needs to use \emph{unsupervised learning} to build workflow models, show \emph{real-time event data} to the user based on the model,
and use the models and the real-time data to show predictions of future events.
|
{"hexsha": "7ca11c5812dc4204c546890224fd3fe2f9445309", "size": 12681, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sec3_problem.tex", "max_stars_repo_name": "Sentri/Master-s-Thesis", "max_stars_repo_head_hexsha": "09846d109a45a07e28641891ee3b651228fc049e", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sec3_problem.tex", "max_issues_repo_name": "Sentri/Master-s-Thesis", "max_issues_repo_head_hexsha": "09846d109a45a07e28641891ee3b651228fc049e", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sec3_problem.tex", "max_forks_repo_name": "Sentri/Master-s-Thesis", "max_forks_repo_head_hexsha": "09846d109a45a07e28641891ee3b651228fc049e", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.110619469, "max_line_length": 226, "alphanum_fraction": 0.7880293352, "num_tokens": 2608}
|
//
// Copyright (c) 2020 Richard Hodges (hodges.r@gmail.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/madmongo1/webclient
//
// This project was made possible with the generous support of:
// The C++ Alliance (https://cppalliance.org/)
// Jetbrains (https://www.jetbrains.com/)
//
// Talk to us on Slack (https://cppalliance.org/slack/)
//
// Many thanks to Vinnie Falco for continuous mentoring and support
//
#include <boost/webclient/async/future.hpp>
#include <boost/asio/io_context.hpp>
#include <catch2/catch.hpp>
TEST_CASE("boost::webclient::async::future")
{
using namespace boost::webclient;
net::io_context ioc;
auto p = async::promise< std::string >();
SECTION("promise met before future taken")
{
p.set_value("Hello");
auto f = p.get_future();
f.async_wait([](async::future_result_type<std::string> s) {
REQUIRE(s.has_value());
CHECK(s.value() == "Hello");
});
ioc.run();
}
auto f = p.get_future();
SECTION("value available prior to wait")
{
p.set_value("Hello");
f.async_wait([](async::future_result_type<std::string> s) {
REQUIRE(s.has_value());
CHECK(s.value() == "Hello");
});
ioc.run();
}
SECTION("value available after wait")
{
f.async_wait([](async::future_result_type<std::string> s) {
REQUIRE(s.has_value());
CHECK(s.value() == "Hello");
});
p.set_value("Hello");
ioc.run();
}
SECTION("error available prior to wait")
{
p.set_error(net::error::operation_aborted);
f.async_wait([](async::future_result_type<std::string> s) {
REQUIRE(s.has_error());
CHECK(s.error().message() == "Operation canceled");
});
ioc.run();
}
SECTION("error available after wait")
{
f.async_wait([](async::future_result_type<std::string> s) {
REQUIRE(s.has_error());
CHECK(s.error().message() == "Operation canceled");
});
p.set_error(net::error::operation_aborted);
ioc.run();
}
SECTION("broken promise after wait")
{
f.async_wait([](async::future_result_type<std::string> s) {
REQUIRE(s.has_error());
CHECK(s.error().message() == "Operation canceled");
});
p = async::promise< std::string >();
ioc.run();
}
}
|
{"hexsha": "48624a351941a436b8a9d8a9fdc9a92cccf84b54", "size": 2620, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/boost/webclient/async/future.spec.cpp", "max_stars_repo_name": "madmongo1/webclient", "max_stars_repo_head_hexsha": "7eb52899443a76ced83b6f286b0e0d688f02fc65", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-06-12T02:22:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-23T14:18:01.000Z", "max_issues_repo_path": "src/boost/webclient/async/future.spec.cpp", "max_issues_repo_name": "madmongo1/webclient", "max_issues_repo_head_hexsha": "7eb52899443a76ced83b6f286b0e0d688f02fc65", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-06-12T02:29:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-18T10:07:05.000Z", "max_forks_repo_path": "src/boost/webclient/async/future.spec.cpp", "max_forks_repo_name": "madmongo1/webclient", "max_forks_repo_head_hexsha": "7eb52899443a76ced83b6f286b0e0d688f02fc65", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4368932039, "max_line_length": 79, "alphanum_fraction": 0.5778625954, "num_tokens": 615}
|
% To compile this document
% graphics.off();rm(list=ls());library('knitr');knit('EDA-lab.Rnw'); for(i in 1:2) system('R CMD pdflatex EDA-lab.tex')
%detach(bodyfat);
% extract R-code
% purl('EDA-lab.Rnw')
%setwd("/Volumes/Macintosh Storage/Users/jbinder/Dropbox/Docs/Teaching/isb101/Visualization in R/Tutorial")
\documentclass{article}\usepackage[]{graphicx}\usepackage[usenames,dvipsnames]{color}
%% maxwidth is the original width if it is less than linewidth
%% otherwise use linewidth (to make sure the graphics do not exceed the margin)
\makeatletter
\def\maxwidth{ %
\ifdim\Gin@nat@width>\linewidth
\linewidth
\else
\Gin@nat@width
\fi
}
\makeatother
\definecolor{fgcolor}{rgb}{0.345, 0.345, 0.345}
\newcommand{\hlnum}[1]{\textcolor[rgb]{0.686,0.059,0.569}{#1}}%
\newcommand{\hlstr}[1]{\textcolor[rgb]{0.192,0.494,0.8}{#1}}%
\newcommand{\hlcom}[1]{\textcolor[rgb]{0.678,0.584,0.686}{\textit{#1}}}%
\newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}}%
\newcommand{\hlstd}[1]{\textcolor[rgb]{0.345,0.345,0.345}{#1}}%
\newcommand{\hlkwa}[1]{\textcolor[rgb]{0.161,0.373,0.58}{\textbf{#1}}}%
\newcommand{\hlkwb}[1]{\textcolor[rgb]{0.69,0.353,0.396}{#1}}%
\newcommand{\hlkwc}[1]{\textcolor[rgb]{0.333,0.667,0.333}{#1}}%
\newcommand{\hlkwd}[1]{\textcolor[rgb]{0.737,0.353,0.396}{\textbf{#1}}}%
\usepackage{framed}
\makeatletter
\newenvironment{kframe}{%
\def\at@end@of@kframe{}%
\ifinner\ifhmode%
\def\at@end@of@kframe{\end{minipage}}%
\begin{minipage}{\columnwidth}%
\fi\fi%
\def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep
\colorbox{shadecolor}{##1}\hskip-\fboxsep
% There is no \\@totalrightmargin, so:
\hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}%
\MakeFramed {\advance\hsize-\width
\@totalleftmargin\z@ \linewidth\hsize
\@setminipage}}%
{\par\unskip\endMakeFramed%
\at@end@of@kframe}
\makeatother
\definecolor{shadecolor}{rgb}{.97, .97, .97}
\definecolor{messagecolor}{rgb}{0, 0, 0}
\definecolor{warningcolor}{rgb}{1, 0, 1}
\definecolor{errorcolor}{rgb}{1, 0, 0}
\newenvironment{knitrout}{}{} % an empty environment to be redefined in TeX
\usepackage{alltt}
\RequirePackage{/Library/Frameworks/R.framework/Versions/3.1/Resources/library/BiocStyle/sty/Bioconductor}
\AtBeginDocument{\bibliographystyle{/Library/Frameworks/R.framework/Versions/3.1/Resources/library/BiocStyle/sty/unsrturl}}
\title{EDA--lab}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage{mathpazo}
%\usepackage{enumerate}
\usepackage{soul}
\usepackage{cases}
\setlength{\parindent}{0cm}
\author{Janos Binder$^1$ \\[1em]European Molecular Biology Laboratory (EMBL),\\ Heidelberg, Germany\\
\texttt{$^1$janos.binder@embl.de}}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\begin{document}
\maketitle
\tableofcontents
\section{Acknowledgements}
This tutorial is based on the material of Bernd Klaus (\href{http://www-huber.embl.de/users/klaus/main.html}{http://www-huber.embl.de/users/klaus/main.html)}.
\section{Required Packages and other Preparations} \label{sec:prep}
%
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{load}\hlstd{(}\hlkwd{url}\hlstd{(}\hlstr{"http://www-huber.embl.de/users/klaus/BasicR/seqZyx.rda"}\hlstd{))}
\hlkwd{library}\hlstd{(}\hlstr{"TeachingDemos"}\hlstd{)}
\hlkwd{data}\hlstd{(golub,} \hlkwc{package} \hlstd{=} \hlstr{"multtest"}\hlstd{)}
\hlkwd{library}\hlstd{(biomaRt)}
\hlkwd{library}\hlstd{(reshape2)}
\hlkwd{library}\hlstd{(ggplot2)}
\hlkwd{library}\hlstd{(plyr)}
\hlkwd{library}\hlstd{(xlsx)}
\hlkwd{library}\hlstd{(vioplot)}
\end{alltt}
\end{kframe}
\end{knitrout}
%
Should you get error messages, you can also install the packages quickly:
%
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{source}\hlstd{(}\hlstr{"http://bioconductor.org/biocLite.R"}\hlstd{)}
\hlstd{packs} \hlkwb{<-} \hlkwd{c}\hlstd{(}\hlstr{"TeachingDemos"}\hlstd{,} \hlstr{"multtest"}\hlstd{,} \hlstr{"biomaRt"}\hlstd{,}
\hlstr{"reshape2"}\hlstd{,} \hlstr{"ggplot2"}\hlstd{,} \hlstr{"plyr"}\hlstd{,} \hlstr{"xlsx"}\hlstd{,} \hlstr{"vioplot"}\hlstd{)}
\hlkwd{biocLite}\hlstd{(packs)}
\end{alltt}
\end{kframe}
\end{knitrout}
%
\section{Introduction}
In this lab, a few essential methods are given to display and visualize data.
They quickly answer questions like: How are my data distributed? How can the frequencies
of nucleotides from a gene be visualized? Are there outliers in my data?
Does the distribution of my data resemble that of a bell--shaped curve? Are
there differences between gene expression values taken from two groups of
patients?\\
The most important central tendencies (mean, median) are defined and
illustrated together with the most important measures of spread (standard
deviation, variance, inter quartile range, and median absolute deviation).
We also introduce \CRANpkg{ggplot2} a package to produce elegant graphics
for data analysis.
\section{Base graphics and ggplot2}
The package \Rpackage{ggplot2} is one of the most commonly used graphics packages for \R.
It is an alternative to the basic graphic system of \R, which is limited in various
aspects. In this lab we will use basic graphics in \R \vspace{2pt} and corresponding
\Rpackage{ggplot2} plots side by side.
\Rpackage{ggplot2} is meant to be an implementation of the Grammar of Graphics,
developed by L. Wilkinson, hence gg--plot.
The basic notion is that there is a grammar to the composition of graphical components in
statistical graphics, and by directly controlling that grammar, you can generate
a large set of carefully constructed graphics tailored to your particular needs.
The central concept of the approach is that
plots convey information through various aspects of their aesthetics.
Aesthetics are mappings from the data to something you can visually perceive.
Some aesthetics that plots use are:
\begin{itemize}
\item x position
\item y position
\item size of elements
\item shape of elements
\item color of elements
\end{itemize}
The elements in a plot are geometric shapes, like
\begin{itemize}
\item points
\item lines
\item line segments
\item bars
\item text
\end{itemize}
Some of these geometries have their own particular aesthetics. For instance:
\begin{itemize}
\item points
\begin{itemize}
\item point shape
\item point size
\end{itemize}
\item lines
\begin{itemize}
\item line type
\item line weight
\end{itemize}
\item bars
\begin{itemize}
\item y minimum
\item y maximum
\item fill color
\item outline color
\end{itemize}
\item text
\begin{itemize}
\item label value
\end{itemize}
\end{itemize}
Each component is added to the plot as a layer, hence you might start with
a simple mapping of the raw data to the x-- and y--axes, creating a scatterplot.
A second layer may then be added by coloring the points according to the
group they belong to, and so on.
There are other aspects of these graphics that you can adjust,
like the scaling of the aesthetics and the positions of the geometries.
The values represented in the plot are the product of various statistics. If you just plot the
raw data, you can think of each point representing the identity statistic.
Many bar charts represent the mean or the median statistic. Histograms are bar charts
where the bars represent the binned count or density statistics and so on.
\subsection{Building a Plot Layer by Layer}
There's a quick plotting function in \Rpackage{ggplot2} called \Rfunction{qplot()}
which is meant to be similar to the \Rfunction{plot()} function from base graphics.
You can do a lot with \Rfunction{qplot()}, but it can be better to approach
the package from the layering syntax.
All \Rpackage{ggplot2} plots begin with the function \Rfunction{ggplot()}.
\Rfunction{ggplot()} takes two primary
arguments, \Robject{data} is the data frame containing the data to be plotted
and \Rfunction{aes( )} are the aesthetic mappings to pass on to the plot elements.
As you can see, the second argument, \Rfunction{aes()}, isn't a normal argument, but another
function. Since we'll never use \Rfunction{aes()} as a separate function, it might be best
to think of it as a special way to pass a list of arguments to the plot.
The first step in creating a plot is to add one or more layers. Let's start with
the iris data set as an example. Note that \Rpackage{ggplot2} always requires the specification
of the data frame from which the variables used in the plot are drawn.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{summary}\hlstd{(iris)}
\end{alltt}
\begin{verbatim}
#> Sepal.Length Sepal.Width Petal.Length Petal.Width Species
#> Min. :4.30 Min. :2.00 Min. :1.00 Min. :0.1 setosa :50
#> 1st Qu.:5.10 1st Qu.:2.80 1st Qu.:1.60 1st Qu.:0.3 versicolor:50
#> Median :5.80 Median :3.00 Median :4.35 Median :1.3 virginica :50
#> Mean :5.84 Mean :3.06 Mean :3.76 Mean :1.2
#> 3rd Qu.:6.40 3rd Qu.:3.30 3rd Qu.:5.10 3rd Qu.:1.8
#> Max. :7.90 Max. :4.40 Max. :6.90 Max. :2.5
\end{verbatim}
\begin{alltt}
\hlstd{p} \hlkwb{<-} \hlkwd{ggplot}\hlstd{(iris,} \hlkwd{aes}\hlstd{(Sepal.Length, Sepal.Width) )}
\end{alltt}
\end{kframe}
\end{knitrout}
If you just type \Rcode{p} or \Rcode{print(p)}, you'll get back a warning saying
that the plot lacks any layers. With the \Rfunction{ggplot()} function,
we've set up a plot which is going to draw from the iris data,
the Sepal.Length variable will be mapped to the x--axis, and the Sepal.Width
variable is going to be mapped to the y--axis.
However, we have not determined which kind of geometric object will represent the data.
Let's add points, for a scatterplot.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{p} \hlopt{+} \hlkwd{geom_point}\hlstd{()}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot-ex2}
\end{knitrout}
Alternatively, this plot could have been produced with \Rfunction{qplot}.
Additionally, you can map color to the species.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{qplot}\hlstd{(Sepal.Length, Sepal.Width,} \hlkwc{data} \hlstd{= iris,} \hlkwc{color} \hlstd{= Species)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot-ex-qplot}
\end{knitrout}
We clearly see that the setosa plants have different Sepal.Length/Sepal.Width
relationship compared to the other two species. The full documentation for \Rpackage{ggplot2} can be found at \url{http://docs.ggplot2.org/current/}.
Apart from mapping data to aesthetics, \Rpackage{ggplot2}
can handle statistical transformations of the data, i.e. easily create all
the nice exploratory graphics we will look at below.
We will explore one of these transformations by adding a regression line
to the data of each of the three plant species as a third layer.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ggsmooth} \hlkwb{<-} \hlstd{(}\hlkwd{qplot}\hlstd{(Sepal.Length, Sepal.Width,} \hlkwc{data} \hlstd{= iris,} \hlkwc{color} \hlstd{= Species)}
\hlopt{+} \hlkwd{stat_smooth}\hlstd{(}\hlkwc{method} \hlstd{=} \hlstr{"lm"}\hlstd{))}
\hlstd{ggsmooth}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot-ex-smoother}
\end{knitrout}
The command \Rfunction{stat\textunderscore smooth} first adds a statistical transformation
to the existing data and then plots it using a certain geometry, in this
case a special "smooth" geometry that is tailored to the plotting of regression
fits. You can obtain the statistical transformations by looking at the saved
plot and extracting the appropriate sublist.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{transformed.data} \hlkwb{<-} \hlkwd{as.list}\hlstd{(}\hlkwd{print}\hlstd{(ggsmooth))}\hlopt{$}\hlstd{data[[}\hlnum{2}\hlstd{]]}
\end{alltt}
\end{kframe}
\end{knitrout}
Thus, you could also map the transformed data differently than the default geometry
does. This, however, does not make much sense in this case.
\subsection{Some biological data}
Here we're looking at some real biological data: different doses of HGF (a cytokine)
were applied to cells and the downstream effects on the phosphorylation of target
proteins were assessed by recording a time--course signal in different conditions.
This data and the exercise ideas were provided by Lars Velten (Steinmetz lab).
We first load the dataset.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{proteins}\hlkwb{<-}\hlkwd{read.csv}\hlstd{(}\hlstr{"http://www-huber.embl.de/users/klaus/BasicR/proteins.csv"}\hlstd{)[,}\hlopt{-}\hlnum{1}\hlstd{]}
\hlkwd{head}\hlstd{(proteins)}
\end{alltt}
\begin{verbatim}
#> Condition min Target Signal Sigma
#> 1 10ng/mL HGF 0 pAKT -6.71e+08 96749276
#> 2 10ng/mL HGF 5 pAKT 5.68e+08 97144057
#> 3 10ng/mL HGF 10 pAKT 1.05e+09 97215659
#> 4 10ng/mL HGF 20 pAKT -3.23e+08 97055787
#> 5 10ng/mL HGF 30 pAKT -9.00e+08 96908309
#> 6 10ng/mL HGF 60 pAKT 3.17e+08 96731346
\end{verbatim}
\begin{alltt}
\hlstd{proteins_pMek} \hlkwb{<-} \hlkwd{subset}\hlstd{(proteins, proteins}\hlopt{$}\hlstd{Target} \hlopt{==} \hlstr{"pMEK"}\hlstd{)}
\hlstd{proteins_pMek_sub} \hlkwb{<-} \hlkwd{subset}\hlstd{(proteins_pMek, proteins_pMek}\hlopt{$}\hlstd{Condition} \hlopt{==} \hlstr{"10ng/mL HGF"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
We can start simply by looking only at the first condition
of the "pMEK" protein target for now. We simply
produce a line plot of the signal across time. In this plot
we use the data \Robject{proteins\textunderscore pMek\textunderscore sub},
map \Robject{min}
to the x--axis and \Robject{Signal} to the y--axis, and use a
line as the geometry. Additional modifications are added to the plot
in the exercise.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{proteins_pMek_sub}
\end{alltt}
\begin{verbatim}
#> Condition min Target Signal Sigma
#> 103 10ng/mL HGF 0 pMEK 3.38e+08 31005696
#> 104 10ng/mL HGF 5 pMEK -2.09e+08 31400418
#> 105 10ng/mL HGF 10 pMEK 7.20e+07 31199120
#> 106 10ng/mL HGF 20 pMEK -2.76e+08 31015194
#> 107 10ng/mL HGF 30 pMEK 8.70e+08 31140886
#> 108 10ng/mL HGF 60 pMEK 2.42e+08 31040783
#> 109 10ng/mL HGF 120 pMEK 4.31e+07 31072343
\end{verbatim}
\begin{alltt}
\hlkwd{qplot}\hlstd{(min, Signal,} \hlkwc{data} \hlstd{= proteins_pMek_sub,} \hlkwc{geom} \hlstd{=} \hlstr{"line"}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/plot-protein-data}
\begin{kframe}\begin{alltt}
\hlcom{# or}
\hlcom{#ggplot(proteins_pMek_sub, aes(min, Signal)) + geom_line()}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: Simple ggplot usage}
Using the data \Robject{proteins\textunderscore pMek\textunderscore sub},
do the following
\begin{enumerate}[label=(\emph{\alph*})]
\item Use points as a geometry instead of lines
\item Use both lines and points
\item Add errorbars (\Rfunction{geom\textunderscore errorbar}) to the plot.
This requires further aesthetics:
\Robject{ymax} and \Robject{ymin}.
The estimated error is stored in the variable \Robject{Sigma}.
\end{enumerate}
We can also easily plot all conditions for the protein pMEK by
mapping color to the experimental condition.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{qplot}\hlstd{(min, Signal,} \hlkwc{data} \hlstd{= proteins_pMek,} \hlkwc{geom} \hlstd{=} \hlstr{"line"}\hlstd{,} \hlkwc{color} \hlstd{= Condition)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/plot-protein-data-2}
\begin{kframe}\begin{alltt}
\hlcom{# or}
\hlcom{#ggplot(proteins_pMek, aes(min, Signal, color = Condition) ) + geom_line()}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsection{Setting axis limits}
Another important aspect of data display is the limits of the
x-- and y--axis. You can change these limits by using \Rfunction{xlim()}
and \Rfunction{ylim()}. However, this does not merely restrict the view:
data points outside the limits are removed before plotting.
This essentially results in plotting only a subset
of the data, excluding values outside of the
limits. In order to actually "zoom--in" without excluding data points
one has to use the \Rfunction{coord\textunderscore cartesian()} command. In the
code below, we first limit the y--axis to 1e9, excluding all
other data points; then we zoom in while keeping all of the data.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlcom{## limit scale}
\hlstd{(}\hlkwd{qplot}\hlstd{(min, Signal,} \hlkwc{data} \hlstd{= proteins_pMek,} \hlkwc{geom} \hlstd{=} \hlstr{"line"}\hlstd{,} \hlkwc{color} \hlstd{= Condition)}
\hlopt{+} \hlkwd{ylim}\hlstd{(}\hlkwd{c}\hlstd{(}\hlnum{0}\hlstd{,}\hlnum{1e9}\hlstd{)))}
\end{alltt}
{\ttfamily\noindent\color{warningcolor}{\#>\ \ Warning: Removed 4 rows containing missing values (geom\_path).}}\end{kframe}
\includegraphics[width=\maxwidth]{figure/limitng_vs_zooming1}
\begin{kframe}\begin{alltt}
\hlcom{## zoom in, keeping all data}
\hlstd{(}\hlkwd{qplot}\hlstd{(min, Signal,} \hlkwc{data} \hlstd{= proteins_pMek,} \hlkwc{geom} \hlstd{=} \hlstr{"line"}\hlstd{,} \hlkwc{color} \hlstd{= Condition)}
\hlopt{+} \hlkwd{coord_cartesian}\hlstd{(}\hlkwc{ylim} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{0}\hlstd{,} \hlnum{1e9}\hlstd{)))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/limitng_vs_zooming2}
\end{knitrout}
\subsection{Faceting}
\Rpackage{ggplot2} also allows you to easily split plots according to a factor
variable, plotting parts of the data in different panels. Returning
to the iris data, we can easily plot different species in different
panels using \Rfunction{facet\textunderscore wrap()}. This is
the simplest faceting function, splitting according to one
factor only.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ggsmooth} \hlopt{+} \hlkwd{facet_wrap}\hlstd{(} \hlopt{~} \hlstd{Species)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/iris-panels}
\end{knitrout}
The splitting is defined in a formula notation:
yfactor \textasciitilde xfactor. Factors can also be combined,
e.g. yfactor \textasciitilde xfactor\textunderscore 1 + xfactor\textunderscore 2.
Faceting can also be used with \Rfunction{qplot()}; there you always have
to specify a two--sided formula. If you only want to use a single
splitting factor, you can use the dot notation:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{(}\hlkwd{qplot}\hlstd{(Sepal.Length, Sepal.Width,} \hlkwc{data} \hlstd{= iris,}
\hlkwc{color} \hlstd{= Species,} \hlkwc{facets} \hlstd{= .} \hlopt{~} \hlstd{Species))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/iris-panels_qplot}
\end{knitrout}
\subsubsection*{Exercise: ggplot faceting}
Using the data \Robject{proteins\textunderscore pMek},
produce a plot split by the experimental condition factor using
\Rfunction{facet\textunderscore wrap()}.
\subsection{Melting and casting data frames}
The data table we loaded was already suitable for the plots we wanted
to produce since every line represents exactly one observation.
However, this is not necessarily the case and we might want to represent
the different time points by different columns, not just a single one.
In time series analysis parlance our current data would be in "long" format,
but we might want to transform it into a "wide" format, with a separate
column for every time point. The package \CRANpkg{reshape2} allows
you to do this. \Rpackage{ggplot2} usually requires "long" formats, which can be
obtained by using the function \Rfunction{melt}. Thus a "molten"
data frame in \Rpackage{ggplot2} corresponds to a "long" format. "Wide" formats
can be computed using the function \Rfunction{dcast}. As an example
we will now represent every time point of our data frame as a separate
column.
Note that the wide format is only compatible with a single numerical target
variable, so we only include \Robject{Signal} as a value variable here. The
casting grid is defined by a formula: the variables on the left of the
tilde identify the rows of the result, while the variables on the right
side of the tilde are spread out into separate columns.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{proteins_cast} \hlkwb{<-} \hlkwd{dcast}\hlstd{(proteins, Target} \hlopt{+} \hlstd{Condition} \hlopt{~} \hlstd{min ,}
\hlkwc{value.var} \hlstd{=} \hlstr{"Signal"}\hlstd{)}
\hlkwd{head}\hlstd{(proteins_cast)}
\end{alltt}
\begin{verbatim}
#> Target Condition 0 5 10 20 30 45 60
#> 1 pAKT 10ng/mL HGF -6.71e+08 5.68e+08 1.05e+09 -3.23e+08 -9.00e+08 NA 3.17e+08
#> 2 pAKT 40ng/mL HGF -7.59e+08 3.32e+08 1.95e+08 1.29e+08 7.43e+07 NA 1.01e+09
#> 3 pAKT 40ng/mL HGF + AKTi -2.03e+08 1.62e+08 -2.41e+08 NA -4.69e+08 NA -3.31e+08
#> 4 pAKT 40ng/mL HGF + MEKi 7.30e+08 6.57e+08 8.73e+08 NA 1.39e+09 NA 7.10e+08
#> 5 pAKT 80ng/mL HGF -3.09e+08 8.36e+08 8.91e+08 1.12e+09 9.08e+08 3.8e+08 -6.95e+08
#> 6 pERK1 10ng/mL HGF 2.77e+08 2.37e+08 -3.36e+08 -1.48e+08 7.06e+08 NA 2.85e+07
#> 120
#> 1 -4.69e+08
#> 2 -7.95e+08
#> 3 5.92e+08
#> 4 -2.21e+08
#> 5 -2.16e+08
#> 6 5.52e+08
\end{verbatim}
\end{kframe}
\end{knitrout}
We can now melt the data frame again. For melting, you have to specify
id variables and/or measured variables. Id variables identify a single
line in the molten data frame and measured variables represent the
measurements. If you only supply one of them, it is assumed that
all the other variables belong to the other group. Columns
that are in neither of the groups are discarded.
A factor column is then added to indicate to which
former column a measurement belongs; in our case this is the
time point. Another useful function is \Rfunction{arrange},
which allows you to reorder the data frame according
to certain columns.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{proteins_molten} \hlkwb{<-} \hlkwd{melt}\hlstd{(proteins_cast,} \hlkwc{id.vars} \hlstd{=} \hlkwd{c}\hlstd{(}\hlstr{"Target"}\hlstd{,} \hlstr{"Condition"}\hlstd{),}
\hlkwc{variable.name}\hlstd{=}\hlstr{"Time"}\hlstd{,} \hlkwc{value.name}\hlstd{=} \hlstr{"Signal"}\hlstd{)}
\hlstd{proteins_molten} \hlkwb{<-}\hlkwd{arrange}\hlstd{(proteins_molten, Condition)}
\hlkwd{head}\hlstd{(proteins_molten)}
\end{alltt}
\begin{verbatim}
#> Target Condition Time Signal
#> 1 pAKT 10ng/mL HGF 0 -6.71e+08
#> 2 pERK1 10ng/mL HGF 0 2.77e+08
#> 3 pERK2 10ng/mL HGF 0 -1.91e+08
#> 4 pMEK 10ng/mL HGF 0 3.38e+08
#> 5 pAKT 10ng/mL HGF 5 5.68e+08
#> 6 pERK1 10ng/mL HGF 5 2.37e+08
\end{verbatim}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: complex ggplot example}
Use the data frame \Robject{proteins} to produce
a plot of the time courses split by the experimental
target and colored according to the experimental
conditions. Add error bars to your plot.
%ldply(list(value = proteins[,1:4], value = proteins[,c(1:3,5)] ))
\section{Univariate Data Display}
In order to study the distribution of data, various visualization methods are
available. We will look at some of them in the following.
\subsection{Frequency Table and Barplot}
Discrete data occur when the values naturally fall into categories. A frequency
table simply gives the number of occurrences within a category.
\subsubsection*{Example: Nucleotide Frequencies }
A gene consists of a sequence of nucleotides $\{A, C, G, T \}$.
The number of each nucleotide can be displayed in a frequency table. This
will be illustrated by an exon of the Zyxin gene, which plays an important role in cell
adhesion. You can use the Bioconductor package \Rpackage{biomaRt} to query
various genomic databases. The code below (not evaluated here) was originally used to retrieve the
exon sequence. The command \Rfunction{strsplit} splits the sequence into
its single letter parts. We already loaded the sequence at the beginning of
the lab.
%
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ensembl}\hlkwb{=} \hlkwd{useMart}\hlstd{(}\hlstr{"ensembl"}\hlstd{,}\hlkwc{dataset}\hlstd{=}\hlstr{"hsapiens_gene_ensembl"}\hlstd{)}
\hlstd{seqZyx} \hlkwb{<-} \hlkwd{getSequence}\hlstd{(}\hlkwc{id} \hlstd{=} \hlstr{"ENSG00000159840"}\hlstd{,} \hlkwc{type} \hlstd{=} \hlstr{"ensembl_gene_id"}\hlstd{,}
\hlkwc{mart} \hlstd{= ensembl,} \hlkwc{seqType} \hlstd{=} \hlstr{"gene_exon"}\hlstd{)[}\hlnum{1}\hlstd{,]}
\hlstd{seqZyx} \hlkwb{<-} \hlkwd{strsplit}\hlstd{(}\hlkwd{as.character}\hlstd{(seqZyx[}\hlnum{1}\hlstd{,}\hlnum{1}\hlstd{]),} \hlkwc{split} \hlstd{=} \hlkwd{character}\hlstd{(}\hlnum{0}\hlstd{))}
\hlstd{seqZyx} \hlkwb{<-} \hlstd{seqZyx[[}\hlnum{1}\hlstd{]]}
\hlkwd{save}\hlstd{(seqZyx,} \hlkwc{file} \hlstd{=} \hlstr{"seqZyx.rda"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
%
A (frequency) table, a corresponding barplot and a pie chart
can be produced by the following commands.
%
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{table}\hlstd{(seqZyx)} \hlcom{## table}
\end{alltt}
\begin{verbatim}
#> seqZyx
#> A C G T
#> 29 94 70 30
\end{verbatim}
\begin{alltt}
\hlkwd{prop.table}\hlstd{(}\hlkwd{table}\hlstd{(seqZyx))} \hlcom{## frequency table}
\end{alltt}
\begin{verbatim}
#> seqZyx
#> A C G T
#> 0.130 0.422 0.314 0.135
\end{verbatim}
\begin{alltt}
\hlkwd{barplot}\hlstd{(}\hlkwd{table}\hlstd{(seqZyx))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/Zyx-table}
\end{knitrout}
In \Rpackage{ggplot2} we first have to transform our data into an
appropriate \Robject{data.frame}. We then map the bases to
the x--axis and add binning by using the bar geometry.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ggBarplot} \hlkwb{=} \hlkwd{ggplot}\hlstd{(} \hlkwc{data} \hlstd{=} \hlkwd{data.frame}\hlstd{(}\hlkwc{bases} \hlstd{=} \hlkwd{factor}\hlstd{(seqZyx)),} \hlkwd{aes}\hlstd{(}\hlkwc{x} \hlstd{= bases))}
\hlstd{ggBarplot} \hlkwb{=} \hlstd{ggBarplot} \hlopt{+} \hlkwd{xlab}\hlstd{(}\hlstr{""}\hlstd{)} \hlopt{+} \hlkwd{geom_bar}\hlstd{()}
\hlstd{ggBarplot}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/Zyx-table-ggplot2}
\end{knitrout}
%
In order to plot a pie chart with \Rpackage{ggplot2}, we first have
to create a stacked bar plot by mapping the bases to the fill
of the bars and then switch to polar coordinates.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{pie}\hlstd{(}\hlkwd{table}\hlstd{(seqZyx))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/Zyx-pie1}
\begin{kframe}\begin{alltt}
\hlstd{ggPie} \hlkwb{=} \hlkwd{ggplot}\hlstd{(} \hlkwc{data} \hlstd{=} \hlkwd{data.frame}\hlstd{(}\hlkwc{bases} \hlstd{=} \hlkwd{factor}\hlstd{(seqZyx)),} \hlkwd{aes}\hlstd{(}\hlkwc{x} \hlstd{=} \hlkwd{factor}\hlstd{(}\hlnum{1}\hlstd{),} \hlkwc{fill} \hlstd{= bases))}
\hlstd{ggPie} \hlkwb{=} \hlstd{ggPie} \hlopt{+} \hlkwd{xlab}\hlstd{(}\hlkwa{NULL}\hlstd{)} \hlopt{+} \hlkwd{geom_bar}\hlstd{()} \hlopt{+} \hlkwd{scale_x_discrete}\hlstd{(}\hlkwc{breaks}\hlstd{=}\hlkwa{NULL}\hlstd{)}
\hlstd{ggPie} \hlopt{+} \hlkwd{coord_polar}\hlstd{(}\hlkwc{theta} \hlstd{=} \hlstr{"y"}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/Zyx-pie2}
\end{knitrout}
%
\subsection{Scatterplots and Stripcharts}
An elementary method to visualize data is the so--called scatterplot,
which simply plots two sets of data against each other or, if applied to a single
data set, plots each data point of the set according to its index. \\
A stripchart is similar to a scatterplot, but plots all data points of a single
data set horizontally or vertically. This is particularly useful in combination with a factor
that distinguishes members of different experimental conditions or patient groups.
\subsubsection*{Example: Visualizing Gene CCND3}
Many visualization methods will be illustrated by the Golub
microarray data on two subtypes of leukemia. We shall concentrate on the expression values of gene
"CCND3 Cyclin D3", which are collected in row 1042 of the data matrix
\Robject{golub}. To plot the data values one can simply use \Rfunction{plot(golub[1042,])}. In
the resulting plot the vertical axis gives the size of the expression
values and the horizontal axis the index of the patients. This is a scatterplot
of a single data set. \\
It can be observed
that the values for patients 28 to 38 are somewhat lower but, indeed, the
picture is not very clear because the groups are not plotted separately.
To produce two adjacent stripcharts, one for the ALL and one for the
AML patients, we use a factor called \Robject{gol.fac} which
separates the two classes of patients.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{plot}\hlstd{(golub[}\hlnum{1042}\hlstd{,])}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/plotCCND3}
\end{knitrout}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{gol.fac} \hlkwb{<-} \hlkwd{factor}\hlstd{(golub.cl,}\hlkwc{levels}\hlstd{=}\hlnum{0}\hlopt{:}\hlnum{1}\hlstd{,} \hlkwc{labels}\hlstd{=} \hlkwd{c}\hlstd{(}\hlstr{"ALL"}\hlstd{,}\hlstr{"AML"}\hlstd{))}
\hlkwd{stripchart}\hlstd{(golub[}\hlnum{1042}\hlstd{,]} \hlopt{~} \hlstd{gol.fac,} \hlkwc{method}\hlstd{=}\hlstr{"jitter"}\hlstd{,} \hlkwc{vertical} \hlstd{=} \hlnum{TRUE}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/stripchartCCND3}
\end{knitrout}
From the resulting figure it can be observed that the CCND3
expression values of the ALL patients tend to be larger
than those of the AML patients. The option \Robject{jitter} will ``smear''
the data points a little bit. In \Rpackage{ggplot2} these plots can be created
like this:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{CCND3} \hlkwb{<-} \hlkwd{data.frame}\hlstd{(}\hlkwc{expCCND3} \hlstd{= golub[}\hlnum{1042}\hlstd{,],} \hlkwc{patients} \hlstd{= gol.fac)}
\hlstd{ggScatter} \hlkwb{=} \hlkwd{qplot}\hlstd{(}\hlkwd{seq_along}\hlstd{(expCCND3), expCCND3,} \hlkwc{data} \hlstd{= CCND3,} \hlkwc{geom} \hlstd{=}\hlstr{"point"}\hlstd{)}
\hlstd{ggScatter} \hlopt{+} \hlkwd{xlab}\hlstd{(}\hlstr{"Index"}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot2scatter1}
\begin{kframe}\begin{alltt}
\hlstd{ggStrip} \hlkwb{=} \hlkwd{ggplot}\hlstd{(CCND3,} \hlkwd{aes}\hlstd{(}\hlkwc{x}\hlstd{=patients,} \hlkwc{y}\hlstd{=expCCND3))}
\hlstd{ggStrip} \hlopt{+} \hlkwd{geom_point}\hlstd{(}\hlkwc{position} \hlstd{=} \hlkwd{position_jitter}\hlstd{(}\hlkwc{w} \hlstd{=} \hlnum{.0}\hlstd{,} \hlkwc{h} \hlstd{=} \hlnum{.30}\hlstd{))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot2scatter2}
\end{knitrout}
The ``jittering'' in \Rpackage{ggplot2} occurs after the plot has been created
and the data has been mapped to aesthetics; that is why it is called a ``position
adjustment''. Note that the degree of jittering can be controlled
explicitly in \Rpackage{ggplot2}.
\subsection{Histograms}
Another method to visualize data is to divide the range of data values into
a number of intervals and to plot the frequency per interval as a bar. Such
a plot is called a histogram.
\subsubsection*{Example: Histogram of CCND3}
A histogram of the expression values of gene CCND3
of the acute lymphoblastic leukemia patients can be produced as follows:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{hist}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/histCCND3}
\end{knitrout}
The function \Rfunction{hist} divides the data into 5 intervals of width
0.5. Observe that one value is small and the
other ones are more or less symmetrically distributed around the mean. The number
of bins can be adjusted via the \Robject{breaks} option, and choosing
\Robject{prob = TRUE} will give the relative frequency of each bin on
the $y$--axis instead of the absolute frequencies. \\
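A brief sketch of both options (not evaluated here):
\begin{verbatim}
## more bins, relative frequencies on the y-axis
hist(golub[1042, gol.fac == "ALL"], breaks = 10, prob = TRUE)
\end{verbatim}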
\Rpackage{ggplot2} uses a histogram (binning) as the default geometry if only x--values are
supplied, so creating a histogram is straightforward. By default it uses the range of
the data divided by 30 as the binwidth; this can easily be adjusted. Here, we
produce a barplot giving the absolute counts per bin and then a classical histogram
giving the relative frequencies.
In the code below, \Rfunction{aes(y = ..density..)} means
that we map the y--coordinate to the relative frequency
estimate as returned by the \Rfunction{stat\textunderscore bin}
statistical transformation used by the \Rfunction{geom\textunderscore histogram}
function.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{CCND3.ALL} \hlkwb{=} \hlkwd{data.frame}\hlstd{(}\hlkwc{CCND3.ALL} \hlstd{= golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlstd{ggplotAbsFreq} \hlkwb{=} \hlkwd{qplot}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{],} \hlkwc{binwidth} \hlstd{=} \hlnum{0.5}\hlstd{)}
\hlstd{ggplotHist} \hlkwb{=} \hlkwd{ggplot}\hlstd{(CCND3.ALL,} \hlkwd{aes}\hlstd{(}\hlkwc{x}\hlstd{=CCND3.ALL ))}
\hlstd{ggplotHist} \hlkwb{=} \hlstd{ggplotHist} \hlopt{+} \hlkwd{geom_histogram}\hlstd{(}\hlkwd{aes}\hlstd{(}\hlkwc{y} \hlstd{= ..density..),} \hlkwc{binwidth} \hlstd{=} \hlnum{0.5}\hlstd{)}
\hlstd{ggplotAbsFreq}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot2hist1}
\begin{kframe}\begin{alltt}
\hlstd{ggplotHist}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot2hist2}
\end{knitrout}
\subsection{Kernel density estimates}
Kernel density estimates are a method to turn the histogram
into a smooth density estimate. Given data
$x_1, \dotsc, x_n$ and a constant called "bandwidth" $h$,
the kernel density estimate is given by:
\begin{center}
\[
\hat f(x)= \frac{1}{n}\sum_{i=1}^{n} \frac{1}{h}K\left( \frac{x-x_i}{h} \right)
\]
\end{center}
Typical kernels are:
\begin{itemize}
\item Bisquare kernel: $K(u) = \frac{15}{16}(1-u^2)^2 \; \text{ for } u \in [-1,1] \; \text{and} \; 0 \text{ otherwise }$
\item Gau{\ss} kernel: $K(u) = \frac{1}{\sqrt{2\pi}} \exp\Biggl(-\frac{1}{2} u^2 \Biggr) \; \text{ for } \; u \in \mathbb{R}$
\end{itemize}
They are readily computed using the \R \hspace{2pt} function \Rfunction{density} or the appropriate
ggplot statistical transformation, which uses this function.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{CCND3.ALL} \hlkwb{=} \hlkwd{data.frame}\hlstd{(}\hlkwc{CCND3.ALL} \hlstd{= golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{qplot}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{],} \hlkwc{geom} \hlstd{=} \hlstr{"density"}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggplot2density}
\end{knitrout}
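The corresponding base \R \hspace{2pt} sketch, using the default Gaussian
kernel and an automatically chosen bandwidth, would be:
\begin{verbatim}
plot(density(golub[1042, gol.fac == "ALL"]))
\end{verbatim}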
Since we do not have many data points, the kernel density estimate
gives a somewhat different impression than the histogram. Also note
that kernel density estimates do not respect the boundaries of the
data set by default.
\subsection{Boxplots}
It is always possible to sort $n$ data values to have increasing order $x_1 \leq
x_2 \leq \dots \leq x_n$, where $x_1$ is the smallest, $x_2$ the second
smallest, etc.
Let $x_{0.25}$ be a number for which it holds that 25\% of the data values
$x_1 , \dotsc, x_n$ are smaller. That is, 25\% of the data values
lie to the left of the number $x_{0.25}$, which is why it is called the first quartile or the 25th percentile. \\
The second quartile is the value $x_{0.5}$ such that 50\% of the data values are
smaller. It is also called the median.
Similarly, the third quartile or 75th percentile is the value $x_{0.75}$ such
that 75\% of the data is smaller.\\
A popular method to display data is by drawing a box around the first and the
third quartile, a bold line segment for the median, and
smaller line segments (whiskers) for the smallest and
the largest data values. Such a data display is known as a box--and--whisker
plot or simply boxplot.
\subsubsection*{Example: Boxplot of CCND3}
A view on the distribution of the expression values of the
ALL and the AML patients on gene CCND3 can be obtained by
constructing two separate boxplots adjacent to one another. To produce such
a plot the factor \Robject{gol.fac} is again very useful.
%
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot}\hlstd{(golub[}\hlnum{1042}\hlstd{,]} \hlopt{~} \hlstd{gol.fac)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/boxplotCCND3}
\end{knitrout}
%
In \Rpackage{ggplot2} a boxplot can be created exactly like a stripchart;
we just have to change the geometry of the plot:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ggBox}\hlkwb{=} \hlkwd{ggplot}\hlstd{(CCND3,} \hlkwd{aes}\hlstd{(}\hlkwc{x}\hlstd{=patients,} \hlkwc{y}\hlstd{=expCCND3,} \hlkwc{color} \hlstd{= patients))}
\hlstd{ggBox} \hlopt{+} \hlkwd{geom_boxplot}\hlstd{()}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggBox}
\end{knitrout}
From the position of the boxes, it can be observed that the gene
expression values for ALL are larger than those for AML. Furthermore, since
the two sub-boxes around the median are more or less equally wide, the data
are quite symmetrically distributed around the median. The quantiles can
be computed with the function \Rfunction{quantile}. By default the minimum,
the three quartiles and the maximum are returned; other quantiles can be
obtained by specifying the \Robject{probs} parameter:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{quantile}\hlstd{(golub[}\hlnum{1042}\hlstd{,])}
\end{alltt}
\begin{verbatim}
#> 0% 25% 50% 75% 100%
#> -0.743 1.043 1.813 2.049 2.766
\end{verbatim}
\begin{alltt}
\hlcom{## custom quantiles}
\hlkwd{quantile}\hlstd{(golub[}\hlnum{1042}\hlstd{,],} \hlkwc{probs} \hlstd{=} \hlkwd{c}\hlstd{(}\hlnum{0}\hlstd{,} \hlnum{0.1}\hlstd{,} \hlnum{0.5}\hlstd{))}
\end{alltt}
\begin{verbatim}
#> 0% 10% 50%
#> -0.743 0.484 1.813
\end{verbatim}
\end{kframe}
\end{knitrout}
Outliers are data values lying far apart from the pattern set by the
majority of the data values. The implementation in \R \hspace{2pt} of the (modified)
boxplot draws such outlier points separately as small circles. A data point
$x$ is defined as an outlier point if
\begin{gather*}
x < x_{0.25} - 1.5 \cdot (x_{0.75} - x_{0.25} )
\intertext{or}
x > x_{0.75} + 1.5 \cdot (x_{0.75} - x_{0.25} )
\end{gather*}
From the boxplot above it can be observed that there are outliers among the gene
expression values of the ALL patients. These are the smaller values 0.45827 and
1.10546, and the largest value 2.76610. The AML expression values have one
outlier with value -0.74333.
To define extreme outliers, the factor 1.5 is increased to 3.0.
These numbers can be conveniently extracted with the
\Rfunction{boxplot.stats} function:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{boxplot.stats}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac} \hlopt{==} \hlstr{"ALL"}\hlstd{])}
\end{alltt}
\begin{verbatim}
#> $stats
#> [1] 1.28 1.80 1.93 2.18 2.59
#>
#> $n
#> [1] 27
#>
#> $conf
#> [1] 1.81 2.04
#>
#> $out
#> [1] 2.766 1.105 0.458
\end{verbatim}
\begin{alltt}
\hlcom{### outliers are in the "out" element of the list }
\end{alltt}
\end{kframe}
\end{knitrout}
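As a sketch, the fences behind this rule can also be computed by hand; for
extreme outliers one would pass \Robject{coef = 3} to \Rfunction{boxplot.stats}:
\begin{verbatim}
x   <- golub[1042, gol.fac == "ALL"]
q   <- unname(quantile(x, c(0.25, 0.75)))
iqr <- q[2] - q[1]
c(lower = q[1] - 1.5 * iqr, upper = q[2] + 1.5 * iqr)
\end{verbatim}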
\subsection{Violin plots}
A violin plot is a combination of a boxplot and a kernel density estimate.
Specifically, instead of straight borders for the boxes, a kernel density estimate
is displayed.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{vioplot}\hlstd{(}\hlkwd{split}\hlstd{(golub[}\hlnum{1042}\hlstd{,], gol.fac)[[}\hlnum{1}\hlstd{]],} \hlkwd{split}\hlstd{(golub[}\hlnum{1042}\hlstd{,], gol.fac)[[}\hlnum{2}\hlstd{]],}
\hlkwc{names} \hlstd{=} \hlkwd{c}\hlstd{(}\hlstr{"ALL"}\hlstd{,} \hlstr{"AML"}\hlstd{))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/Violin_plot_example1}
\begin{kframe}\begin{alltt}
\hlstd{ggBox} \hlopt{+} \hlkwd{geom_violin}\hlstd{()}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/Violin_plot_example2}
\end{knitrout}
\subsection{Empirical Cumulative Distribution Function}
Another way to visualize data is the empirical cumulative distribution function
(ecdf). It is an estimator for the actual cumulative distribution function,
which gives the probability of observing values less than or equal to a given value $t$.
Especially if you have a large number
of data points, it is a useful visualization tool for your data.
For every number $t$ the ecdf $\hat F_n(t)$ is the fraction of data points
that are smaller than or equal to $t$:
\begin{gather*}
\hat F_n(t) = \frac{ \mbox{number of elements in the sample} \leq t}{n}
= \frac{1}{n} \sum_{i=1}^n \mathbf{1}\{x_i \le t\}.
\end{gather*}
Obviously, just as the cdf, the ecdf will always start at 0 and end up at
1. You can easily read off quantiles from the ecdf, since the $y$--axis
represents the quantiles of the data.
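As a small sketch, the object returned by \Rfunction{ecdf} is itself a
function and can be evaluated directly:
\begin{verbatim}
Fn <- ecdf(golub[1042, ])
Fn(1.8)   ## fraction of expression values <= 1.8, close to 0.5
\end{verbatim}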
\subsubsection*{Example: ECDF of CCND3}
From the plot of the ecdf of CCND3, it can be seen that the
median of the overall data is roughly 1.8. The steep increase of the ecdf for
values greater than the median shows that larger values are more common
in the data set.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{plot}\hlstd{(}\hlkwd{ecdf}\hlstd{(golub[}\hlnum{1042}\hlstd{, ]) ,} \hlkwc{main} \hlstd{=} \hlstr{"ecdf of CCND3"}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ecdfCCND3}
\end{knitrout}
In \Rpackage{ggplot2} there exists a specialized statistic which performs
the calculation necessary to obtain the ecdf. It is also easy to split the
ecdf calculation by a factor; here we clearly see that for CCND3,
the expression for ALL patients is systematically higher than the expression for
AML patients.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ggECDF}\hlkwb{=} \hlkwd{ggplot}\hlstd{(CCND3,} \hlkwd{aes}\hlstd{(}\hlkwc{x}\hlstd{=expCCND3,} \hlkwc{color} \hlstd{= patients))}
\hlstd{ggECDF} \hlopt{+} \hlkwd{stat_ecdf}\hlstd{(}\hlkwc{geom} \hlstd{=} \hlstr{"step"}\hlstd{,} \hlkwc{size} \hlstd{=} \hlnum{2}\hlstd{)} \hlopt{+} \hlkwd{ylab}\hlstd{(}\hlstr{"Frequency"}\hlstd{)}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggecdfCCND3}
\end{knitrout}
\subsection{Quantile--Quantile (QQ) Plot}
A method to compare the (e)cdf of a data set to another (e)cdf is the
so--called quantile--quantile (Q--Q) plot. In such a plot, usually the quantiles
of the data set are displayed against the corresponding quantiles of
the normal distribution (bell--shaped). Hence, the empirical cdf of the
data is compared to the (corresponding) normal cdf. \\
A straight line is added representing points which
correspond exactly to the quantiles of a corresponding normal distribution.
By observing the extent to which the points appear on the line, it can be evaluated to
what degree the data are normally distributed. That is, the closer the
values appear to the line, the more likely it is that the data are
normally distributed. To produce a Q--Q plot of the ALL gene expression values
of CCND3 one may use the following code \\
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{qqnorm}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{qqline}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/QQCCND3}
\end{knitrout}
From the resulting figure it can be observed that most of the data points
are on or near the straight line, while a few others are further away.
This is an expected behavior for gene expression data. The above example
illustrates a case where the degree of non--normality
is moderate so that a clear conclusion cannot be drawn. \\
The QQ--plot can also be used to compare the distributions of
two data sets to one another by plotting their quantiles against each other.
In order to do this, you can call the function \Rfunction{qqplot}. A comparison
of the distributions / ecdfs of AML and ALL gene expression values is given by:
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{qqplot}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{], golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"AML"}\hlstd{])}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/QQCCND3vs}
\end{knitrout}
As we can see, the quantiles lie roughly on a straight line, which indicates that
the general shapes of the two distributions agree. However, they might have
different means and standard deviations.
In \Rpackage{ggplot2} there exists a specialized statistic which performs
the calculation necessary to obtain the QQ--plot. The \Rfunction{qqline} equivalent is
a bit harder to obtain: we have to extract the augmented data frame returned
by the \Rfunction{stat\textunderscore qq} function and add a linear smoother.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{ggQQ} \hlkwb{=} \hlkwd{ggplot}\hlstd{(CCND3.ALL,} \hlkwd{aes}\hlstd{(}\hlkwc{sample}\hlstd{=CCND3.ALL))} \hlopt{+} \hlkwd{stat_qq}\hlstd{()}
\hlcom{#ggQQ }
\hlstd{tp} \hlkwb{=} \hlkwd{cbind}\hlstd{(CCND3.ALL,} \hlkwd{as.data.frame}\hlstd{((}\hlkwd{print}\hlstd{(ggQQ)}\hlopt{$}\hlstd{data)[[}\hlnum{1}\hlstd{]][,} \hlkwd{c}\hlstd{(} \hlstr{"sample"}\hlstd{,} \hlstr{"theoretical"}\hlstd{)]))}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggQQplotCCND31}
\begin{kframe}\begin{alltt}
\hlstd{ggQQ} \hlopt{+} \hlkwd{stat_smooth}\hlstd{(}\hlkwc{mapping} \hlstd{=} \hlkwd{aes}\hlstd{(}\hlkwc{x} \hlstd{= theoretical,} \hlkwc{y} \hlstd{= sample),} \hlkwc{data} \hlstd{= tp,} \hlkwc{method} \hlstd{=} \hlstr{"lm"}\hlstd{,} \hlkwc{se} \hlstd{= F )}
\end{alltt}
\end{kframe}
\includegraphics[width=\maxwidth]{figure/ggQQplotCCND32}
\end{knitrout}
\section{Descriptive Statistics}
There exist various ways to describe the central tendency as well as the spread
of data. In particular, the central tendency can be described by the mean or
the median, and the spread by the variance, standard deviation, interquartile
range, or median absolute deviation. These will be defined and illustrated.
\subsection{Measures of Central Tendency}
The most important descriptive statistics for central tendency are the mean
and the median. The sample mean of the data values $x_1, \dotsc, x_n$ is defined
as:
\begin{gather*}
\overline x =\frac{1}{n} \sum_{i=1}^n x_{i}
= \frac{1}{n} \left( x_{1} + \dotso + x_{n}\right).
\end{gather*}
Thus the sample mean is simply the average of the $n$ data values. Since it
is the sum of all data values divided by the sample size, a few extreme data
values may largely influence its size. In other words, the mean is not robust
against outliers. \\
The median is defined as the second quartile or the 50th percentile, and
is denoted by $x_{0.50}$. When the data are symmetrically distributed around the
mean, then the mean and the median are equal. Since extreme data values
do not influence the size of the median, it is very robust against outliers.
Robustness is important in biological applications because data are frequently contaminated
by extreme or otherwise influential data values.
\subsubsection*{Example: Mean and Median of CCND3}
To compute the mean and median of the ALL expression
values of gene CCND3 Cyclin D3 consider the following.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{mean}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\end{alltt}
\begin{verbatim}
#> [1] 1.89
\end{verbatim}
\begin{alltt}
\hlkwd{median}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\end{alltt}
\begin{verbatim}
#> [1] 1.93
\end{verbatim}
\end{kframe}
\end{knitrout}
Note that the mean and the median do not differ much so that the distribution
seems to be quite symmetric.
\subsection{Measures of Spread}
The most important measures of spread are the standard deviation, the interquartile range,
and the median absolute deviation. The standard deviation
is the square root of the sample variance, which is defined as
\begin{gather*}
s^2 =\frac{1}{n - 1} \sum_{i=1}^n (x_{i} - \overline x)^{2}
= \frac{1}{n - 1} \left( (x_{1} - \overline x)^{2} + \dotso + (x_{n} - \overline x)^{2}\right).
\end{gather*}
Hence, it is essentially the average of the squared differences between the data values
and the sample mean. The sample standard deviation $s$ is the square root
of the sample variance and may be interpreted as the typical distance of the data
values to the mean. The variance and the standard deviation are not robust
against outliers. \\
The interquartile range is defined as the difference between the third and
the first quartile, that is $x_{0.75} - x_{0.25}$. It can be computed by the function
\Rfunction{IQR(x)}. More specifically, for (approximately) normally distributed data
the value \Rfunction{IQR(x)/1.349} is a robust estimator of
the standard deviation. \\
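A quick sketch illustrating this on simulated standard normal data, where
both estimates should be close to 1:
\begin{verbatim}
z <- rnorm(10000)
c(sd = sd(z), robust = IQR(z) / 1.349)
\end{verbatim}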
The median absolute deviation (MAD) is defined as
a constant times the median of the absolute deviations of the data from the
median. In \R \hspace{2pt} it is computed by the
function \Rfunction{mad}, defined as the median of the sequence
$|x_1 - x_{0.5} |, \dotsc, |x_n - x_{0.5} |$
multiplied by the constant 1.4826. It equals the standard deviation in case
the data come from a bell--shaped (normal) distribution.
Because the interquartile range and the median absolute deviation are based
on quantiles, they are robust against outliers.
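As a sketch, \Rfunction{mad} can be reproduced directly from this definition:
\begin{verbatim}
x <- golub[1042, gol.fac == "ALL"]
1.4826 * median(abs(x - median(x)))   ## same value as mad(x)
\end{verbatim}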
\subsubsection*{Example: Measures of Spread for CCND3} These measures of spread
for the ALL expression values of gene CCND3 can be computed as follows.
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlkwd{sd}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\end{alltt}
\begin{verbatim}
#> [1] 0.491
\end{verbatim}
\begin{alltt}
\hlkwd{IQR}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])} \hlopt{/} \hlnum{1.349}
\end{alltt}
\begin{verbatim}
#> [1] 0.284
\end{verbatim}
\begin{alltt}
\hlkwd{mad}\hlstd{(golub[}\hlnum{1042}\hlstd{, gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\end{alltt}
\begin{verbatim}
#> [1] 0.368
\end{verbatim}
\end{kframe}
\end{knitrout}
Due to the three outliers (cf.\ the boxplot above) the standard deviation is larger
than the scaled interquartile range and the median absolute deviation. That is, the
absolute differences with respect to the median are somewhat smaller than
the root of the squared differences.
\subsubsection*{Exercise: Illustration of Mean and Standard Deviation}
\begin{enumerate}[label=(\emph{\alph*})]
\item Compute the mean and the standard deviation for 1, 1.5, 2, 2.5, 3.
\item Compute the mean and the standard deviation for 1, 1.5, 2, 2.5, 30.
\item Comment on the differences.
\end{enumerate}
\subsubsection*{Exercise: Plotting Gene Expressions of CCND3}
Use the gene expressions from "CCND3" of Golub collected in row
1042 of the object golub.
\begin{enumerate}[label=(\emph{\alph*})]
\item Produce a stripchart for the gene expressions separately
for the ALL as well as for the AML patients. Hint: Use a factor
for appropriate separation.
\item Rotate the plot to a vertical position and keep it that way for the
questions to come.
\item Color the ALL expressions red and AML blue. Hint: Use the \Robject{col}
parameter.
\item Add a title to the plot. Hint: Use \Robject{title}.
\item Change the boxes into stars. Hint: Use the \Robject{pch} parameter.
\end{enumerate}
\subsubsection*{Exercise: Plotting Gene Expressions using \CRANpkg{ggplot2}}
In this exercise we will plot the gene expressions for a couple
of genes in the golub data set in a single figure, using a separate
panel for each gene.
\begin{enumerate}[label=(\emph{\alph*})]
\item Turn the golub data set into a
data frame with genes in the columns. Add the
group descriptor \Robject{gol.fac} as an additional
column and turn it into a factor.
\item Select a random sample of 6 genes from the golub
data. HINT: Use the function \Rfunction{sample} to do this.
\item Melt the resulting data frame containing the selected genes
in such a way that all the gene expression values are in
a single column.
\item Use this data frame and \CRANpkg{ggplot2} to produce
a pdf file containing boxplots separated per patient group for each
of the randomly selected genes. Also add the raw data points
to the plot.
\end{enumerate}
\subsubsection*{Exercise: Comparing Normality for Two Genes}
Consider the gene expression values in row 790 and 66 of the Golub data.
\begin{enumerate}[label=(\emph{\alph*})]
\item Produce a boxplot for the expression values of the ALL patients
and comment on the differences. Are there outliers?
\item Produce a QQ-plot and formulate a hypothesis about the normality of the genes.
\item Compute the mean and the median for the expression values of
the ALL patients and compare these. Do this for both genes.
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Answers to Exercises}
\subsubsection*{Exercise: Simple ggplot usage }
\begin{enumerate}[label=(\emph{\alph*})]
\item Use points as a geometry instead of lines
\item Use both lines and points
\item Add errorbars (\Rfunction{geom\textunderscore errorbar}) to the plot.
This requires further aesthetics:
\Robject{ymax} and \Robject{ymin}.
The estimated error is stored in the variable \Robject{Sigma}.
\end{enumerate}
\subsubsection*{Solution: Simple ggplot usage }
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlcom{#a}
\hlcom{##########################################################}
\hlstd{plot_1} \hlkwb{<-} \hlkwd{ggplot}\hlstd{(}\hlkwd{aes}\hlstd{(} \hlkwc{x} \hlstd{= min,} \hlkwc{y} \hlstd{= Signal ),} \hlkwc{data} \hlstd{= proteins_pMek_sub)}
\hlstd{plot_1} \hlkwb{<-} \hlstd{plot_1} \hlopt{+} \hlkwd{geom_point}\hlstd{()}
\hlstd{plot_1} \hlkwb{<-} \hlstd{plot_1} \hlopt{+} \hlkwd{xlab}\hlstd{(}\hlstr{"Time [min]"}\hlstd{)} \hlopt{+} \hlkwd{ylab}\hlstd{(}\hlstr{"pMEK Signal"}\hlstd{)}
\hlcom{#b}
\hlcom{##########################################################}
\hlstd{plot_1} \hlkwb{<-} \hlstd{plot_1} \hlopt{+} \hlkwd{geom_line}\hlstd{()}
\hlcom{#c}
\hlcom{##########################################################}
\hlstd{plot_1} \hlkwb{<-} \hlstd{plot_1} \hlopt{+} \hlkwd{geom_errorbar}\hlstd{(}\hlkwd{aes}\hlstd{(}\hlkwc{ymax} \hlstd{= Signal}\hlopt{+}\hlnum{2}\hlopt{*}\hlstd{Sigma,}
\hlkwc{ymin} \hlstd{= Signal}\hlopt{-}\hlnum{2}\hlopt{*}\hlstd{Sigma))}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: ggplot faceting}
Using the data \Robject{proteins\textunderscore pMek},
produce a plot split by the experimental condition factor using
\Rfunction{facet\textunderscore wrap()}.
\subsubsection*{Solution: ggplot faceting}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{(}\hlkwd{qplot}\hlstd{(min, Signal,} \hlkwc{data} \hlstd{= proteins_pMek,} \hlkwc{geom} \hlstd{=} \hlstr{"line"}\hlstd{,} \hlkwc{color} \hlstd{= Condition)}
\hlopt{+} \hlkwd{facet_wrap}\hlstd{(} \hlopt{~} \hlstd{Condition)} \hlopt{+} \hlkwd{geom_errorbar}\hlstd{(}\hlkwd{aes}\hlstd{(}\hlkwc{ymax} \hlstd{= Signal}\hlopt{+}\hlnum{2}\hlopt{*}\hlstd{Sigma,}
\hlkwc{ymin} \hlstd{= Signal}\hlopt{-}\hlnum{2}\hlopt{*}\hlstd{Sigma)))}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: complex ggplot example}
Use the data \Robject{proteins} to produce
a plot of the time courses split by the experimental
target and colored according to the experimental
conditions. Add error bars to your plot.
\subsubsection*{Solution: complex ggplot example}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{(}\hlkwd{qplot}\hlstd{(min, Signal,} \hlkwc{data} \hlstd{= proteins,} \hlkwc{geom} \hlstd{=} \hlstr{"line"}\hlstd{,} \hlkwc{color} \hlstd{= Condition)}
\hlopt{+} \hlkwd{facet_wrap}\hlstd{(} \hlopt{~} \hlstd{Target,} \hlkwc{ncol} \hlstd{=} \hlnum{2}\hlstd{)} \hlopt{+} \hlkwd{geom_errorbar}\hlstd{(}\hlkwd{aes}\hlstd{(}\hlkwc{ymax} \hlstd{= Signal}\hlopt{+}\hlnum{2}\hlopt{*}\hlstd{Sigma,}
\hlkwc{ymin} \hlstd{= Signal}\hlopt{-}\hlnum{2}\hlopt{*}\hlstd{Sigma)))}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: Illustration of Mean and Standard Deviation}
\begin{enumerate}[label=(\emph{\alph*})]
\item Compute the mean and the standard deviation for 1, 1.5, 2, 2.5, 3.
\item Compute the mean and the standard deviation for 1, 1.5, 2, 2.5, 30.
\item Comment on the differences.
\end{enumerate}
\subsubsection*{Solution: Illustration of Mean and Standard Deviation}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlcom{#Use x<- c(1,1.5,2,2.5,3) and mean(x) and sd(x) to obtain}
\hlcom{#that the mean is 2 and the standard deviation is 0.79}
\hlcom{#(b) Now the mean is 7.4 and the standard deviation dramatically increased}
\hlcom{#(c) The outlier increased the mean as well as the standard deviation.}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: Plotting Gene Expressions of CCND3}
Use the gene expressions from "CCND3" of Golub collected in row
1042 of the object golub.
\begin{enumerate}[label=(\emph{\alph*})]
\item Produce a stripchart for the gene expressions separately
for the ALL as well as for the AML patients. Hint: Use a factor
for appropriate separation.
\item Rotate the plot to a vertical position and keep it that way for the
questions to come.
\item Color the ALL expressions red and AML blue. Hint: Use the \Robject{col}
parameter.
\item Add a title to the plot. Hint: Use \Robject{title}.
\item Change the boxes into stars. Hint: Use the \Robject{pch} parameter.
\end{enumerate}
\subsubsection*{Solution: Plotting Gene Expressions of CCND3}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlstd{gol.fac} \hlkwb{<-} \hlkwd{factor}\hlstd{(golub.cl,}\hlkwc{levels}\hlstd{=}\hlnum{0}\hlopt{:}\hlnum{1}\hlstd{,} \hlkwc{labels}\hlstd{=} \hlkwd{c}\hlstd{(}\hlstr{"ALL"}\hlstd{,}\hlstr{"AML"}\hlstd{))}
\hlkwd{stripchart}\hlstd{(golub[}\hlnum{1042}\hlstd{,]} \hlopt{~} \hlstd{gol.fac,}\hlkwc{method}\hlstd{=}\hlstr{"jitter"}\hlstd{)}
\hlkwd{stripchart}\hlstd{(golub[}\hlnum{1042}\hlstd{,]} \hlopt{~} \hlstd{gol.fac,}\hlkwc{method}\hlstd{=}\hlstr{"jitter"}\hlstd{,}\hlkwc{vertical} \hlstd{=} \hlnum{TRUE}\hlstd{)}
\hlkwd{stripchart}\hlstd{(golub[}\hlnum{1042}\hlstd{,]} \hlopt{~} \hlstd{gol.fac,}\hlkwc{method}\hlstd{=}\hlstr{"jitter"}\hlstd{,}\hlkwc{col}\hlstd{=}\hlkwd{c}\hlstd{(}\hlstr{"red"}\hlstd{,} \hlstr{"blue"}\hlstd{),}
\hlkwc{vertical} \hlstd{=} \hlnum{TRUE}\hlstd{)}
\hlkwd{stripchart}\hlstd{(golub[}\hlnum{1042}\hlstd{,]} \hlopt{~} \hlstd{gol.fac,}\hlkwc{method}\hlstd{=}\hlstr{"jitter"}\hlstd{,}\hlkwc{col}\hlstd{=}\hlkwd{c}\hlstd{(}\hlstr{"red"}\hlstd{,} \hlstr{"blue"}\hlstd{),} \hlkwc{pch}\hlstd{=}\hlstr{"*"}
\hlstd{,}\hlkwc{vertical} \hlstd{=} \hlnum{TRUE}\hlstd{)}
\hlkwd{title}\hlstd{(}\hlstr{"CCND3 Cyclin D3 expression value for ALL and AML patients"}\hlstd{)}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: Plotting Gene Expressions using \CRANpkg{ggplot2}}
In this exercise we will plot the gene expressions for a couple
of genes in the golub data set in a single figure, using a separate
panel for each gene.
\begin{enumerate}[label=(\emph{\alph*})]
\item Turn the golub data set into a
data frame with genes in the columns. Add the
group descriptor \Robject{gol.fac} as an additional
column and turn it into a factor.
\item Select a random sample of 6 genes from the golub
data. HINT: Use the function \Rfunction{sample} to do this.
\item Melt the resulting data frame containing the selected genes
in such a way that all the gene expression values are in
a single column.
\item Use this data frame and \CRANpkg{ggplot2} to produce
a pdf file containing boxplots separated per patient group for each
of the randomly selected genes. Also add the raw data points
to the plot.
\end{enumerate}
\subsubsection*{Solution: Plotting Gene Expressions using \CRANpkg{ggplot2}}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlcom{#a}
\hlcom{###############################################################}
\hlstd{golub.df} \hlkwb{=} \hlkwd{as.data.frame}\hlstd{(}\hlkwd{cbind}\hlstd{(}\hlkwd{t}\hlstd{(golub), gol.fac))}
\hlstd{golub.df}\hlopt{$}\hlstd{gol.fac} \hlkwb{=} \hlkwd{as.factor}\hlstd{(golub.df}\hlopt{$}\hlstd{gol.fac)}
\hlcom{#b}
\hlcom{###############################################################}
\hlstd{rand.sample} \hlkwb{<-} \hlkwd{c}\hlstd{(}\hlkwd{sample}\hlstd{(}\hlkwd{dim}\hlstd{(golub)[}\hlnum{1}\hlstd{],}\hlnum{6}\hlstd{),}\hlnum{3052}\hlstd{)}
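\hlcom{## column 3052 is the gol.fac column, kept so melt() can use it as the id}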
\hlcom{#c}
\hlcom{###############################################################}
\hlstd{dataForPlot} \hlkwb{=} \hlkwd{melt}\hlstd{(golub.df[, rand.sample],}
\hlkwc{id} \hlstd{=} \hlstr{'gol.fac'}\hlstd{)}
\hlcom{#d}
\hlcom{###############################################################}
\hlkwd{pdf}\hlstd{(}\hlstr{"boxplots.pdf"}\hlstd{,} \hlkwc{width} \hlstd{=} \hlnum{14}\hlstd{,} \hlkwc{height} \hlstd{=} \hlnum{10}\hlstd{)}
\hlstd{p} \hlkwb{<-} \hlkwd{qplot}\hlstd{(gol.fac, value,} \hlkwc{data} \hlstd{= dataForPlot)} \hlopt{+}
\hlkwd{geom_boxplot}\hlstd{(}\hlkwd{aes}\hlstd{(}\hlkwc{fill} \hlstd{= gol.fac))} \hlopt{+}
\hlkwd{facet_wrap}\hlstd{(}\hlopt{~} \hlstd{variable)} \hlopt{+}
\hlkwd{geom_point}\hlstd{(}\hlkwc{colour} \hlstd{=} \hlstr{'black'}\hlstd{,} \hlkwc{alpha} \hlstd{=} \hlnum{0.5}\hlstd{)}
\hlstd{p}
\hlkwd{dev.off}\hlstd{()}
\end{alltt}
\end{kframe}
\end{knitrout}
\subsubsection*{Exercise: Comparing Normality for Two Genes}
Consider the gene expression values in row 790 and 66 of the Golub data.
\begin{enumerate}[label=(\emph{\alph*})]
\item Produce a boxplot for the expression values of the ALL patients
and comment on the differences. Are there outliers?
\item Produce a QQ-plot and formulate a hypothesis about the normality of the genes.
\item Compute the mean and the median for the expression values of
the ALL patients and compare these. Do this for both genes.
\end{enumerate}
\subsubsection*{Solution: Comparing Normality for Two Genes}
\begin{knitrout}
\definecolor{shadecolor}{rgb}{0.969, 0.969, 0.969}\color{fgcolor}\begin{kframe}
\begin{alltt}
\hlcom{#Comparing two genes}
\hlcom{#(a) Use }
\hlkwd{boxplot}\hlstd{(golub[}\hlnum{66}\hlstd{,]}\hlopt{~}\hlstd{gol.fac)}
\hlkwd{dev.new}\hlstd{()}
\hlkwd{boxplot}\hlstd{(golub[}\hlnum{790}\hlstd{,]}\hlopt{~}\hlstd{gol.fac)}
\hlcom{#to observe that 790 has three}
\hlcom{#outliers and 66 has no outlier.}
\hlcom{# (dev.new() opens a new graphical window)}
\hlcom{#(b) Use }
\hlkwd{qqnorm}\hlstd{(golub[}\hlnum{66}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{qqline}\hlstd{(golub[}\hlnum{66}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{dev.new}\hlstd{()}
\hlkwd{qqnorm}\hlstd{(golub[}\hlnum{790}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{qqline}\hlstd{(golub[}\hlnum{790}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlcom{#to observe that nearly all values of 66 are on the line, whereas for}
\hlcom{#790 the three outliers are way off the normality line. Hypothesis:}
\hlcom{#The expression values of 66 are normally distributed, but those of}
\hlcom{#row 790 are not.}
\hlcom{#(c) Use }
\hlkwd{mean}\hlstd{(golub[}\hlnum{66}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{median}\hlstd{(golub[}\hlnum{66}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlcom{#and}
\hlkwd{mean}\hlstd{(golub[}\hlnum{790}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlkwd{median}\hlstd{(golub[}\hlnum{790}\hlstd{,gol.fac}\hlopt{==}\hlstr{"ALL"}\hlstd{])}
\hlcom{#The mean (-1.174024) is larger than the median (-1.28137) due to}
\hlcom{#outliers on the right hand side. For the gene in row 66 the mean is}
\hlcom{#1.182503 and the median 1.23023. The differences are smaller.}
\end{alltt}
\end{kframe}
\end{knitrout}
\end{document}
|
{"hexsha": "5c2d3b3a83a06bb749fdfbf38388816ad04cb320", "size": 66724, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "R/Visualization/Visualization in R/Tutorial/EDA-lab.tex", "max_stars_repo_name": "rolandkrause/isb101", "max_stars_repo_head_hexsha": "40be2fa62a6ca986e4ed9f1833382b2c10478039", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/Visualization/Visualization in R/Tutorial/EDA-lab.tex", "max_issues_repo_name": "rolandkrause/isb101", "max_issues_repo_head_hexsha": "40be2fa62a6ca986e4ed9f1833382b2c10478039", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/Visualization/Visualization in R/Tutorial/EDA-lab.tex", "max_forks_repo_name": "rolandkrause/isb101", "max_forks_repo_head_hexsha": "40be2fa62a6ca986e4ed9f1833382b2c10478039", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7285803627, "max_line_length": 269, "alphanum_fraction": 0.7166836521, "num_tokens": 23240}
|
import unittest
import numpy as np
from spartan import expr
from spartan.util import Assert
from spartan import util
import test_common
TEST_SIZE = 50
class TestReduce(test_common.ClusterTest):
def test_sum_3d(self):
x = expr.arange((TEST_SIZE, TEST_SIZE, TEST_SIZE), dtype=np.int64)
nx = np.arange(TEST_SIZE * TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE, TEST_SIZE))
for axis in [None, 0, 1, 2]:
y = x.sum(axis)
val = y.glom()
Assert.all_eq(val, nx.sum(axis))
def test_sum_2d(self):
    x = expr.arange((TEST_SIZE, TEST_SIZE), dtype=np.int64)
    nx = np.arange(TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE))
for axis in [None, 0, 1]:
y = x.sum(axis)
val = y.glom()
Assert.all_eq(val, nx.sum(axis))
def test_sum_1d(self):
    x = expr.arange((TEST_SIZE,), dtype=np.int64)
    nx = np.arange(TEST_SIZE, dtype=np.int64)
y = x.sum()
val = y.glom()
Assert.all_eq(val, nx.sum())
def test_argmin_1d(self):
    x = expr.arange((TEST_SIZE,), dtype=np.int64)
    nx = np.arange(TEST_SIZE, dtype=np.int64)
y = x.argmin()
val = y.glom()
Assert.all_eq(val, nx.argmin())
def test_argmin_2d(self):
for axis in [1]: #[None, 0, 1]:
      x = expr.arange((TEST_SIZE, TEST_SIZE), dtype=np.int64)
      nx = np.arange(TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE))
y = x.argmin(axis=axis)
val = expr.glom(y)
Assert.all_eq(val, nx.argmin(axis=axis))
def test_argmin_3d(self):
x = expr.arange((TEST_SIZE, TEST_SIZE, TEST_SIZE), dtype=np.int64)
nx = np.arange(TEST_SIZE * TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE, TEST_SIZE))
for axis in [None, 0, 1, 2]:
y = x.argmin(axis)
val = y.glom()
Assert.all_eq(val, nx.argmin(axis))
def test_argmax_1d(self):
    x = expr.arange((TEST_SIZE,), dtype=np.int64)
    nx = np.arange(TEST_SIZE, dtype=np.int64)
y = x.argmax()
val = y.glom()
Assert.all_eq(val, nx.argmax())
def test_argmax_2d(self):
for axis in [1]: #[None, 0, 1]:
      x = expr.arange((TEST_SIZE, TEST_SIZE), dtype=np.int64)
      nx = np.arange(TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE))
y = x.argmax(axis=axis)
val = expr.glom(y)
Assert.all_eq(val, nx.argmax(axis=axis))
def test_argmax_3d(self):
x = expr.arange((TEST_SIZE, TEST_SIZE, TEST_SIZE), dtype=np.int64)
nx = np.arange(TEST_SIZE * TEST_SIZE * TEST_SIZE, dtype=np.int64).reshape((TEST_SIZE, TEST_SIZE, TEST_SIZE))
for axis in [None, 0, 1, 2]:
y = x.argmax(axis)
val = y.glom()
Assert.all_eq(val, nx.argmax(axis))
def test_simple_sum(self):
def _(axis):
util.log_info('Testing sum over axis %s', axis)
a = expr.ones((TEST_SIZE, TEST_SIZE)) + expr.ones((TEST_SIZE, TEST_SIZE))
b = a.sum(axis=axis)
Assert.all_eq(b.glom(), 2 * np.ones((TEST_SIZE, TEST_SIZE)).sum(axis))
_(axis=0)
_(axis=1)
_(axis=None)
def test_count_nonzero(self):
x = expr.ones((TEST_SIZE,))
Assert.eq(expr.count_nonzero(x).glom(), TEST_SIZE)
x = expr.zeros((TEST_SIZE,))
Assert.eq(expr.count_nonzero(x).glom(), 0)
def test_count_zero(self):
x = expr.ones((TEST_SIZE,))
Assert.eq(expr.count_zero(x).glom(), 0)
x = expr.zeros((TEST_SIZE,))
Assert.eq(expr.count_zero(x).glom(), TEST_SIZE)
if __name__ == '__main__':
# x = TestReduce(methodName='test_simple_sum')
# x.setUpClass()
# for i in range(100):
# x.setUp()
# x.test_simple_sum()
unittest.main()
|
{"hexsha": "60c539ec18f729b6747b16a427c8253dcc2c2d4b", "size": 3631, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_reduce.py", "max_stars_repo_name": "GabrielWen/spartan", "max_stars_repo_head_hexsha": "ce3bf7f2bb551d7f996a1884acef819b620cc854", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 156, "max_stars_repo_stars_event_min_datetime": "2015-01-10T21:54:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-17T14:13:57.000Z", "max_issues_repo_path": "tests/test_reduce.py", "max_issues_repo_name": "GabrielWen/spartan", "max_issues_repo_head_hexsha": "ce3bf7f2bb551d7f996a1884acef819b620cc854", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2015-01-05T16:34:18.000Z", "max_issues_repo_issues_event_max_datetime": "2015-12-11T08:12:28.000Z", "max_forks_repo_path": "tests/test_reduce.py", "max_forks_repo_name": "GabrielWen/spartan", "max_forks_repo_head_hexsha": "ce3bf7f2bb551d7f996a1884acef819b620cc854", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2015-01-10T21:55:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-14T08:09:34.000Z", "avg_line_length": 31.3017241379, "max_line_length": 112, "alphanum_fraction": 0.6317818783, "include": true, "reason": "import numpy", "num_tokens": 1069}
|
using myJuliaUtils
using Test
using LinearAlgebra
using DelimitedFiles
using Statistics
# Testing both lag functions
@testset "testing lag functions" begin
# if vector
T = 100
P = rand(1:T - 1)
a = collect(1.0:1.0:T)
b = lag0(a, P)
@test b[1:P] == zeros(P)
@test T - P == b[end]
# if matrix
a = reshape(a, T, 1)
P = round(Int, 1 + rand() * T - 1)
b = lag0(a, P)
@test b[1:P,1] == zeros(P)
@test T - P == b[end,1]
a = [ones(T, 1) a]
P = round(Int, 1 + rand() * T - 1)
b = lag0(a, P)
@test b[1:P,2] == zeros(P)
@test T - P == b[end,2]
end
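# Note (inferred from the checks above): lag0(a, P) shifts its input down by P
# rows and zero-pads the first P entries, so b[end] equals a[end - P].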
@testset "testing function preparexy()" begin
T = 100
P = rand(1:T - 1)
a = randn(T)
cnst = rand() > 0.5
Y, X = preparexy(a, P, cnst)
# for univariate
i = rand(1:T - P)
z = reverse(a[i:i + P - 1])
if cnst
push!(z, 1.)
end
@test X[i,:] == z
@test Y[i] == a[i + P]
# for multivariate
N = rand(1:20)
a = randn(T, N)
Y, X = preparexy(a, P, cnst)
nT, nX = size(X)
i = rand(1:T - P)
z = Vector(vec(reverse(a[i:i + P - 1,:], dims = 1)'))
if cnst
push!(z, 1.)
end
@test nT == T - P
@test nX == P * N + cnst
@test X[i,:] == z
@test Y[i,:] == a[i + P,:]
end
# testing companion form function
@testset "testing companion form functions" begin
betta = [2 3;4 5]
n = 2
p = 1
c = false
@test betta == companionf(betta, n, p, c)
bettaC = [betta ones(n, 1)]
c = true
@test betta == companionf(bettaC, n, p, c)
c = false
p = 2
betta = [2 3 4 5;6 7 8 9]
betta_comp = [betta;1 0 0 0; 0 1 0 0]
@test betta_comp == companionf(betta, n, p, c)
bettaC = [betta ones(n, 1)]
c = true
@test betta_comp == companionf(bettaC, n, p, c)
betta = [1.]
n = 1
p = 1
c = false
@test reshape(betta, 1, 1) == companionf(betta, n, p, c)
n = 2
p = 1
c = false
@test_throws ArgumentError companionf(betta, n, p, c)
n = 1
p = 2
c = false
@test_throws ArgumentError companionf(betta, n, p, c)
n = 1
p = 1
c = true
@test_throws ArgumentError companionf(betta, n, p, c)
end
# Testing stability function
@testset "testing stability functions" begin
N = round(Int, 1 + rand() * 100)
# A = randn(N, N)
# A = A'A
A = genPDmat(N)
S, Q = eigen(A)
S[end] = 1.0
A = Q * Diagonal(S) * Q'
@test stabcheck(A, N, 1, false)
@test stabcheckC(A, N, 1, false)
a = findall(x->x >= 1.0, S)
b = length(a)
S[a] = ones(b) - rand(b)
A = Q * Diagonal(S) * Q'
@test stabcheck(A, N, 1, false) == false
@test stabcheckC(A, N, 1, false) == false
A = [
0.516090886728283 0.0724233649560897 0.0816329971852510 0.143587089009963 -0.00458107252041236
0.0724233649560897 0.844836851975328 0.176990845416178 -0.0777482352998716 -0.0921908525982972
0.0816329971852510 0.176990845416178 0.671194897897464 -0.00975809606339381 -0.0331333053292128
0.143587089009963 -0.0777482352998717 -0.00975809606339381 0.797277510951764 -0.218043826201017
-0.00458107252041236 -0.0921908525982972 -0.0331333053292128 -0.218043826201017 0.554361340591299
];
@test stabcheck(A, size(A, 1), 1, false)
@test stabcheckC(A, size(A, 1), 1, false)
A = [
0.500514390972542 -0.0693316507737557 0.103250184760922 0.157734480886660 0.0498798880488583
-0.0693316507737557 0.586863186538567 0.00546111896077394 -0.0714652123701029 0.153497763972517
0.103250184760922 0.00546111896077393 0.487346580847464 -0.0744321935787715 -0.0521727141915581
0.157734480886660 -0.0714652123701029 -0.0744321935787715 0.566439153014101 0.0799567015086364
0.0498798880488583 0.153497763972517 -0.0521727141915581 0.0799567015086364 0.603872518537494
];
@test stabcheck(A, size(A, 1), 1, false) == false
@test stabcheckC(A, size(A, 1), 1, false) == false
# AR(2) stability check
Nvar = 1;
LAGS = 2;
c = false;
reps = 50000
BETTAt = ones(LAGS, reps);
for i = 1:reps
chk = 1
while chk == true
global BETTAt[:,i] = randn(LAGS, 1)
chk = stabcheck(BETTAt[:,i]', Nvar, LAGS, c)
end
end
@test all([BETTAt[2,:] .< BETTAt[1,:] .+ 1. BETTAt[2,:] .< -BETTAt[1,:] .+ 1. BETTAt[2,:] .> -1. ])
# some other small tests
@test stabcheck(1., 1, 1, false) == true
@test stabcheck([1.], 1, 1, false) == true
@test stabcheck(ones(1, 1), 1, 1, false) == true
@test stabcheck([1. randn()], 1, 1, true) == true
@test stabcheckC(1., 1, 1, false) == true
@test stabcheckC([1.], 1, 1, false) == true
@test stabcheckC([1. randn()], 1, 1, true) == true
@test stabcheck(1, 1, 1, false) == true
@test stabcheck([1 randn()], 1, 1, true) == true
@test stabcheckC(1, 1, 1, false) == true
@test stabcheckC([1 randn()], 1, 1, true) == true
end
# test for normal pdf
x = [-2.,-1.,0.,1.,2.]
y1 = [0.0539909665131881, 0.241970724519143, 0.398942280401433, 0.241970724519143, 0.0539909665131881]
mu = 2.
sigma = 1.
y2 = [0.000133830225764885, 0.00443184841193801, 0.0539909665131881, 0.241970724519143, 0.398942280401433]
@testset "testing normal pdf function" begin
@test normpdf.(x) ≈ y1
@test normpdf.(x, mu, sigma) ≈ y2
end
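# Note: the reference values above are the standard normal density,
# normpdf(x) = exp(-x^2 / 2) / sqrt(2π), e.g. normpdf(0.) ≈ 0.398942280401433.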
# testing ols function
path = dirname(@__FILE__)
# path = "/Users/zymantas/TVPVARPkg/test/"
data = readdlm("$path/TestOls/data2test.txt", ',');
B2check = readdlm("$path/TestOls/B2check.txt", ',');
stats2check = readdlm("$path/TestOls/stats2check.txt", ',');
covb2check = readdlm("$path/TestOls/covb2check.txt", ',');
y = data[:,1]
x = [ones(length(y)) data[:,2:end]]
r = ols1(y, x)
@testset "testing OLS function" begin
@test r.bhat ≈ B2check
@test r.sigbhat ≈ covb2check
@test r.R2[1] ≈ stats2check[1]
@test r.sig2hat[1] ≈ stats2check[2]
end
# testing quantileArr function
a = randn(rand(1:20), rand(1:20), rand(1:20))
@testset "testing quantileArr function" begin
@test quantileArr(a, [0.5], 1) ≈ median(a, dims = 1)
@test quantileArr(a, [0.5], 2) ≈ median(a, dims = 2)
@test quantileArr(a, [0.5], 3) ≈ median(a, dims = 3)
end
@testset "testing nan mean" begin
@test nanmean([1., 2., NaN]) == 1.5
@test nanmean([1. 2.; NaN 3.]) == 2.0
@test nanmean([1. 2.; NaN 3.], 2) ≈ [1.5; 3. ]
@test nanmean([1. 2.; NaN 3.], 1) ≈ [1.0 2.5 ]
a = [1. 2.; NaN 3.]
a = cat(a, [1. NaN; 7. 3.], dims = 3)
@test nanmean(a, 3) ≈ [1. 2.; 7. 3.]
a = randn(4, 3, 5)
@test mean(a;dims = 1) ≈ nanmean(a, 1)
@test mean(a;dims = 2) ≈ nanmean(a, 2)
@test mean(a;dims = 3) ≈ nanmean(a, 3)
end
@testset "testing inbetween" begin
b = randn(2, 2, 2)
b[:,:,1] = [1 2; 3 4]
b[:,:,2] = b[:,:,1] .+ 2
a = [3 5; 9 5]
@test inbetween(a, b) == [true false; false true]
T = rand(1:1000)
n = rand(1:1000)
a = randn(T, n)
b = randn(T, n, 2)
int = abs(randn())
b[:,:,2] = b[:,:,1] .+ int
a = b[:,:,1] .+ int ./ 2
@test all(inbetween(a, b))
end
@testset "testing sumsqr" begin
a = randn(rand(1:100))
@test sumsqr(a) ≈ a'a
@test sumsqr(a) ≈ sum(a.^2)
a = randn(rand(1:100), rand(1:100))
@test sumsqr(a) ≈ sum(a.^2)
end
@testset "testing getmultdiag!" begin
n = rand(1:100)
A = rand() .+ randn(n, n)
B = rand() .+ randn(n, n)
v = similar(A, n)
getmultdiag!(v, A, B)
@test v ≈ diag(A * B)
end
@testset "testing transf1" begin
a = transf1To(0.2, 0., 1.)
@test 0.2 ≈ transf1Back(a, 0., 1.)
n = rand(1:100)
a = randn(n)
aup = a .+ abs.(randn(n))
adown = a .- abs.(randn(n))
b = transf1To.(a, adown, aup)
@test a ≈ transf1Back.(b, adown, aup)
end
@testset "testing regMat2PD!" begin
for i = 1:100
a = genPDmat()
b = copy(a)
regMat2PD!(a)
@test a ≈ b
@test isapprox(a,b; atol=1e-10)
end
end
|
{"hexsha": "0b124edb4b9430fed61be8c433a1262dc8f2ee37", "size": 7920, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/TestingExtfunc.jl", "max_stars_repo_name": "zymbuzz/myJuliaUtils", "max_stars_repo_head_hexsha": "bd18eff600ca8a5052575706dc3a4b494128c4a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/TestingExtfunc.jl", "max_issues_repo_name": "zymbuzz/myJuliaUtils", "max_issues_repo_head_hexsha": "bd18eff600ca8a5052575706dc3a4b494128c4a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/TestingExtfunc.jl", "max_forks_repo_name": "zymbuzz/myJuliaUtils", "max_forks_repo_head_hexsha": "bd18eff600ca8a5052575706dc3a4b494128c4a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7980456026, "max_line_length": 106, "alphanum_fraction": 0.5723484848, "num_tokens": 3299}
|
import numpy as np
import matplotlib.pyplot as plt
import random
import seaborn as sns
sns.set_style("white")
from scipy.stats import norm
import time
from math import sqrt, log, exp, pi
from random import uniform
size = 500
set1 = np.random.normal(loc = 1, scale = 0.1, size = size)
set2 = np.random.normal(loc = 1.5, scale = 0.1, size = size)
set3 = np.random.normal(loc = 2, scale = 0.2, size = size)
p1 = 0.25
p2 = 0.5
p3 = 0.25
dset = np.array(random.sample(list(set1), int(p1*size)) + random.sample(list(set2), int(p2*size)) + random.sample(list(set3), int(p3*size)))
set4 = np.random.normal(loc = 1, scale = 0.3, size = size)
set5 = np.random.normal(loc = 1.5, scale = 0.4, size = size)
set6 = np.random.normal(loc = 2, scale = 0.3, size = size)
dset2 = np.array(random.sample(list(set4), int(p1*size)) + random.sample(list(set5), int(p2*size)) + random.sample(list(set6), int(p3*size)))
set4.sort()
plt.plot(set4, norm.pdf(set4,1,0.3))
set5.sort()
plt.plot(set5, norm.pdf(set5,1.5,0.4))
set6.sort()
plt.plot(set6, norm.pdf(set6,2,0.3))
plt.show()
sns.distplot(dset)
plt.show()
data = dset2
class Gaussian:
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def pdf(self, datum):
u = (datum - self.mu) / abs(self.sigma)
y = (1 / (sqrt(2 * pi) * abs(self.sigma))) * exp(-u * u / 2)
return y
def __repr__(self):
return 'Gaussian({0:4.6}, {1:4.6})'.format(self.mu, self.sigma)
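# Quick sanity check (a sketch, not part of the original pipeline): the
# hand-rolled Gaussian.pdf should agree with scipy's norm.pdf.
assert abs(Gaussian(0.0, 1.0).pdf(0.0) - norm.pdf(0.0)) < 1e-12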
best_single = Gaussian(np.mean(data), np.std(data))
x = np.linspace(-6, 8, 200)
g_single = norm(best_single.mu, best_single.sigma).pdf(x)
class GaussianMixture:
def __init__(self, data, mu_min=min(data), mu_max=max(data), sigma_min=.1, sigma_max=1, mix1=.25, mix2=.5):
self.data = data
self.one = Gaussian(uniform(mu_min, mu_max),
uniform(sigma_min, sigma_max))
self.two = Gaussian(uniform(mu_min, mu_max),
uniform(sigma_min, sigma_max))
self.three = Gaussian(uniform(mu_min, mu_max),
uniform(sigma_min, sigma_max))
self.mix1 = mix1
self.mix2 = mix2 #mix3 can be calculated as 1-(mix1+mix2)
#calculates Expectation
    def Estep(self):
        self.loglike = 0.  # log-likelihood of the data under the current parameters
        for datum in self.data:
            # unnormalized responsibilities: mixture-weighted densities
            wp1 = self.one.pdf(datum) * (self.mix1)
            wp2 = self.two.pdf(datum) * (self.mix2)
            wp3 = self.three.pdf(datum) * (1 - self.mix1 - self.mix2)
            den = wp1 + wp2 + wp3
            # accumulate the log-likelihood before normalizing; after the
            # division the three weights always sum to 1, so log(1) == 0
            self.loglike += log(den)
            wp1 /= den
            wp2 /= den
            wp3 /= den
            yield (wp1, wp2, wp3)
#performs Maximization
    def Mstep(self, weights):
        (left, mid, rigt) = zip(*weights)
        one_den = sum(left)
        two_den = sum(mid)
        three_den = sum(rigt)
        # responsibility-weighted means
        self.one.mu = sum(w * d / one_den for (w, d) in zip(left, self.data))
        self.two.mu = sum(w * d / two_den for (w, d) in zip(mid, self.data))
        self.three.mu = sum(w * d / three_den for (w, d) in zip(rigt, self.data))
        # responsibility-weighted standard deviations
        self.one.sigma = sqrt(sum(w * ((d - self.one.mu) ** 2)
                                  for (w, d) in zip(left, self.data)) / one_den)
        self.two.sigma = sqrt(sum(w * ((d - self.two.mu) ** 2)
                                  for (w, d) in zip(mid, self.data)) / two_den)
        self.three.sigma = sqrt(sum(w * ((d - self.three.mu) ** 2)
                                    for (w, d) in zip(rigt, self.data)) / three_den)
        # mixing weights are the average responsibilities; mix3 is implicit
        self.mix1 = one_den / len(self.data)
        self.mix2 = two_den / len(self.data)
    def iterate(self, N=1, verbose=False):
        # run N expectation-maximization updates on this mixture
        for _ in range(N):
            self.Mstep(self.Estep())
def pdf(self, x):
return (self.mix1) * self.one.pdf(x) + (self.mix2) * self.two.pdf(x) + (1 - self.mix1 - self.mix2) * self.three.pdf(x)
def __repr__(self):
return 'GaussianMixture({0}, {1}, {2}, {3}, {4}, {5})'.format(self.one, self.two, self.three, self.mix1, self.mix2, 1 - self.mix1 - self.mix2)
def __str__(self):
return 'Mixture: {0}, {1}, {2}, {3}, {4}, {5})'.format(self.one, self.two, self.three, self.mix1, self.mix2, 1 - self.mix1 - self.mix2)
start_time = time.time()
n_iterations = 5
best_mix = None
best_loglike = float('-inf')
mix = GaussianMixture(data)
for _ in range(n_iterations):
mix.iterate(verbose=True)
if mix.loglike > best_loglike:
best_loglike = mix.loglike
best_mix = mix
n_iterations = 40
n_random_restarts = 500
best_mix = None
best_loglike = float('-inf')
for _ in range(n_random_restarts):
mix = GaussianMixture(data)
for _ in range(n_iterations):
mix.iterate()
if mix.loglike > best_loglike:
best_loglike = mix.loglike
best_mix = mix
print (time.time() - start_time)
print (best_loglike)
sns.distplot(data, bins=20, kde=False, norm_hist=True)
g_both = [best_mix.pdf(e) for e in x]
plt.plot(x, g_both, label='gaussian mixture');
plt.show()
print (best_mix)
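# For reference (a sketch assuming scikit-learn is available; not used above):
# from sklearn.mixture import GaussianMixture as SkGMM
# sk = SkGMM(n_components=3).fit(data.reshape(-1, 1))
# print(sk.means_.ravel(), np.sqrt(sk.covariances_).ravel(), sk.weights_)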
|
{"hexsha": "f126d726fe9f62d3b745225c6039cd6cb312e0f5", "size": 4959, "ext": "py", "lang": "Python", "max_stars_repo_path": "gmm.py", "max_stars_repo_name": "prakhardogra921/Clustering-using-Kmeans-Cmeans-and-GMM", "max_stars_repo_head_hexsha": "7fce1527a5362313aa45e5fd41f5369af3087cba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gmm.py", "max_issues_repo_name": "prakhardogra921/Clustering-using-Kmeans-Cmeans-and-GMM", "max_issues_repo_head_hexsha": "7fce1527a5362313aa45e5fd41f5369af3087cba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gmm.py", "max_forks_repo_name": "prakhardogra921/Clustering-using-Kmeans-Cmeans-and-GMM", "max_forks_repo_head_hexsha": "7fce1527a5362313aa45e5fd41f5369af3087cba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.06, "max_line_length": 150, "alphanum_fraction": 0.5894333535, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1493}
|
From VST Require Import floyd.proofauto.
From CertiGC Require Import model.GCGraph.
From CertiGC Require Import vst.ast.env_graph_gc.
From CertiGC Require Import vst.clightgen.gc.
From CertiGC Require Import vst.cmodel.constants.
From CertiGC Require Import vst.cmodel.spatial_gcgraph.
From CertiGC Require Import vst.spec.gc_spec.
Lemma body_resume: semax_body Vprog Gprog f_resume resume_spec.
Proof.
start_function.
unfold thread_info_rep, heap_struct_rep. Intros.
forward. unfold fun_info_rep. forward. 1: entailer!. rewrite Znth_0_cons.
replace_SEP 1 (fun_info_rep rsh f_info fi) by (unfold fun_info_rep; entailer!).
forward_if True.
- forward; entailer!.
- remember (ti_heap_p t_info). rewrite (data_at_isptr sh heap_type).
Intros. exfalso. destruct t_info. simpl in *. subst. contradiction.
- Intros. destruct (heap_head__cons (ti_heap t_info)) as [hs [hl [? ?]]].
rewrite H1, <- H2, map_cons.
destruct (gt_gs_compatible _ _ H _ (heapgraph_has_gen__O _)) as [H3 H4 H5 HH6].
simpl in H3, H4, H5, HH6.
assert (isptr (space_base (heap_head (ti_heap t_info)))). {
rewrite H2. unfold nth_space in H3. rewrite H1 in H3. simpl in H3.
rewrite <- H3. apply generation_base__isptr. } unfold space_tri at 1. do 2 forward.
rewrite Znth_0_cons.
destruct (space_base (heap_head (ti_heap t_info))) eqn:? ; try contradiction.
forward_if (fun_word_size f_info <= space_capacity hs).
+ unfold denote_tc_samebase. simpl. entailer!.
+ unfold all_string_constants. Intros.
forward_call ((gv ___stringlit_10),
(map init_data2byte (gvar_init v___stringlit_10)), rsh).
exfalso; assumption.
+ forward. entailer!.
unfold sem_sub_pp in H7. destruct eq_block in H7; [|easy]; simpl in H7.
inv_int i. clear -H7. remember (heap_head (ti_heap t_info)) as h.
rewrite ptrofs_add_repr, ptrofs_sub_repr, Z.add_comm, Z.add_simpl_r in H7.
simpl in H7. unfold Ptrofs.divs in H7.
first [rewrite (Ptrofs.signed_repr 8) in H7 by rep_lia |
rewrite (Ptrofs.signed_repr 4) in H7 by rep_lia].
rewrite Ptrofs.signed_repr in H7 by (apply space_limit__signed_range).
unfold WORD_SIZE in H7. rewrite Z.mul_comm, Z.quot_mul in H7 by lia.
first [rewrite ptrofs_to_int64_repr in H7 by easy |
rewrite ptrofs_to_int_repr in H7]. hnf in H7.
remember (if Archi.ptr64 then
(Int64.ltu (Int64.repr (space_capacity h - space_remembered h))
(Int64.repr (fun_word_size f_info))) else
(Int.ltu (Int.repr (space_capacity h - space_remembered h))
(Int.repr (fun_word_size f_info)))) as comp.
cbv [Archi.ptr64] in Heqcomp.
rewrite <- Heqcomp in H7.
destruct comp eqn:? ; simpl in H7 ; try congruence.
symmetry in Heqcomp.
match goal with
| H : Int64.ltu _ _ = false |- _ => apply ltu64_repr_false in H
| H : Int.ltu _ _ = false |- _ => apply ltu_repr_false in H
end.
{ pose proof (space_remembered__lower_bound h). lia. }
{
first [apply space_limit__range | apply word_size_range].
}
{
first [apply space_limit__range | apply word_size_range].
}
+ rewrite <- Heqv in *.
red in H0.
rewrite H0 in H5.
unfold heapgraph_block_size_prev in H5. simpl in H5. unfold nth_space in H5.
rewrite H1 in H5. simpl in H5. rewrite <- H2 in H5.
replace_SEP 4
(heap_struct_rep sh
(
( space_base (heap_head (ti_heap t_info)),
( offset_val (WORD_SIZE * space_allocated (heap_head (ti_heap t_info))) (space_base (heap_head (ti_heap t_info))),
( offset_val (WORD_SIZE * (space_capacity (heap_head (ti_heap t_info)) - space_remembered (heap_head (ti_heap t_info)))) (space_base (heap_head (ti_heap t_info)))
, offset_val (WORD_SIZE * space_capacity (heap_head (ti_heap t_info))) (space_base (heap_head (ti_heap t_info)))
)))
::
map space_tri hl
)
(ti_heap_p t_info)) by
(unfold heap_struct_rep; entailer!).
do 2 forward.
unfold before_gc_thread_info_rep. rewrite !heap_struct_rep_eq. rewrite <- H5.
replace (WORD_SIZE * 0)%Z with 0 by lia.
rewrite !isptr_offset_val_zero by assumption. entailer!. rewrite H1. simpl tl.
assert (12 = Zlength (map space_tri hl) + 1). {
pose proof (heap_spaces__size (ti_heap t_info)). rewrite MAX_SPACES_eq in H2.
rewrite <- H2, H1, Zlength_cons, Zlength_map. lia. } rewrite !H2.
rewrite !data_at_tarray_split_1 by reflexivity. cancel.
do 2 (unfold_data_at (data_at _ _ _ _)). cancel.
Qed.
|
{"author": "CertiGraph", "repo": "CertiGC", "sha": "ec0183449d5e7dc66d33c9bc2dd5759de0ebd877", "save_path": "github-repos/coq/CertiGraph-CertiGC", "path": "github-repos/coq/CertiGraph-CertiGC/CertiGC-ec0183449d5e7dc66d33c9bc2dd5759de0ebd877/theories/CertiGC/vst/verif/verif_resume.v"}
|
import sys
from abc import ABC, abstractmethod
import glob
import os
import random
import shutil
import statistics
from imagecorruptions import corrupt
import cv2
import numpy as np
import pickle
from tqdm import tqdm
from mean_average_precision import MetricBuilder
import yoloPredictor
import imageDifferenceCalculator
class SampleSelector(ABC):
"""Abstract class to select samples"""
def __init__(self, inputdir, outputdir, trainImages=None, trainImagesPool=None, seed=42):
"""
:param inputdir: directory with images to choose from
:param outputdir: where list of selected images is located
:param trainImagesPool: if a training pool has already been created, it can be reused here
"""
random.seed(seed) # make experiments repeatable
print(f"using seed {seed}")
self.inputdir = inputdir
self.outputdir = outputdir
if not trainImagesPool:
self.trainImagesPool = []
self.findImages(self.inputdir) # fill list with potential training images
self.trainImages = [] # labeled training images
else:
self.trainImagesPool = trainImagesPool
self.trainImages = trainImages
@abstractmethod
def selectSamples(self, amount=100):
pass
    def findImages(self, dir):
        datasets = [x[0] for x in os.walk(dir)]  # a list of all subdirectories (including root directory)
        for d in datasets:
            # accumulate matches from every subdirectory into the pool
            self.trainImagesPool += glob.glob(f"{d}/*.png", recursive=True)
            self.trainImagesPool += glob.glob(f"{d}/*.PNG", recursive=True)
            self.trainImagesPool += glob.glob(f"{d}/*.jpg", recursive=True)
            self.trainImagesPool += glob.glob(f"{d}/*.JPG", recursive=True)
def writeSamplesToFile(self):
""""
writes a list of the used samples to a file
"""
samples = "\n".join(self.trainImages)
# with open(os.path.join(self.outputdir, "train.txt", "w")) as f:
# f.write(samples)
with open(os.path.join(self.outputdir, "train.txt"), "w") as f:
f.write(samples)
def copyFiles(self, toBeCopied):
"""
This is currently not used, because we just define it via the train.txt
Double check if you want to use self.outputdir as the directory to copy images to if you use it
Copy n files to training folder
:param toBeCopied: list of files from the pool to be copied into training directory
:return: None
"""
for source in toBeCopied:
# shutil.copy(source, self.outputdir)
labelFile = source.replace(".png", ".txt").replace(".jpg", ".txt")\
.replace(".PNG", ".txt").replace(".JPG", ".txt")
shutil.copy(labelFile, self.outputdir)
destination = os.path.join(self.outputdir, os.path.basename(source))
self.trainImages.append(destination)
class RandomSampleSelector(SampleSelector):
"""
Select samples randomly.
This is the baseline to compare other approaches to.
"""
def __init__(self, inputdir, outputdir, trainImages=None, trainImagesPool=None, seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool, seed=seed)
def selectSamples(self, amount=100):
"""
selects samples randomly from the pool
:param amount: amount of images to add to pool
:return: current train images, pool of remaining images
"""
# selectedSamples = []
# for i in range(amount):
# sample = random.choice(self.trainImagesPool)
# selectedSamples.append(sample)
# self.trainImagesPool.remove(sample)
if amount > len(self.trainImagesPool): # make sure this doesn't crash at the end
amount = len(self.trainImagesPool)
random.shuffle(self.trainImagesPool)
selectedSamples = self.trainImagesPool[:amount]
self.trainImages.extend(selectedSamples)
if amount < len(self.trainImagesPool):
self.trainImagesPool = self.trainImagesPool[amount:]
else:
self.trainImagesPool = [] # there are no images left
# self.copyFiles(selectedSamples)
self.writeSamplesToFile()
return self.trainImages, self.trainImagesPool, selectedSamples
class meanConfidenceSelector(SampleSelector):
"""
Select the samples which (on average) had the lowest confidences over all predictions
Questions to evaluate:
how do we treat no predictions?
too unsure for a prediction -> should be included?
no objects -> not important?
at first ignore them, use them later to prevent false negatives?
"""
def __init__(self, inputdir, outputdir, trainImages=None, trainImagesPool=None, mode="mean", seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool, seed=seed)
# we can't load the weights here, because we need new ones after the next training
self.mode = mode
def selectSamples(self, amount=100):
"""
selects samples based on the mean confidences from the pool
:param amount: amount of images to add to pool
:return: current train images, pool of remaining images
"""
if amount > len(self.trainImagesPool): # make sure this doesn't crash at the end
amount = len(self.trainImagesPool)
yolo = yoloPredictor.yoloPredictor() # load weights here, because after sampling new weights are trained
predictionConfidences = []
discarded_samples = []
print("Selecting samples based on confidences:")
for path in tqdm(self.trainImagesPool):
boxes = yolo.predict(path)
# boxes = [[x1, y1, x2, y2, confidence, class]]
# store one average confidence value per image
if self.mode == "mean":
if len(boxes) > 0:
confidences = [cfd[4] for cfd in boxes]
meanConfidence = statistics.mean(confidences)
predictionConfidences.append([meanConfidence, path])
else:
discarded_samples.append(path)
# prefer images with no bounding boxes, because objects are very common in our domain
elif self.mode == "mean_with_no_boxes":
if len(boxes) == 0:
meanConfidence = 0
else:
confidences = [cfd[4] for cfd in boxes]
meanConfidence = statistics.mean(confidences)
predictionConfidences.append([meanConfidence, path])
elif self.mode == "median":
if len(boxes) > 0:
confidences = [cfd[4] for cfd in boxes]
median = statistics.median(confidences)
predictionConfidences.append([median, path])
else:
discarded_samples.append(path)
elif self.mode == "min":
if len(boxes) > 0:
confidences = [cfd[4] for cfd in boxes]
minConfidence = min(confidences)
predictionConfidences.append([minConfidence, path])
else:
discarded_samples.append(path)
elif self.mode == "lowest_max" or self.mode == "max":
if len(boxes) > 0:
confidences = [cfd[4] for cfd in boxes]
maxConfidence = max(confidences)
predictionConfidences.append([maxConfidence, path])
else:
discarded_samples.append(path)
sortedPredictions = sorted(predictionConfidences) # sort the list so we can take the first #amount items
if self.mode == "max":
# reverse the list, now the highest predictions are at the start of the list
sortedPredictions = sortedPredictions[::-1]
new_train_images = []
for image in sortedPredictions[:amount]:
self.trainImagesPool.remove(image[1]) # remove about to be labeled images from pool
self.trainImages.append(image[1])
new_train_images.append(image[1])
while len(new_train_images) < amount:
sample = random.choice(discarded_samples)
self.trainImagesPool.remove(sample)
self.trainImages.append(sample)
new_train_images.append(sample)
print("Had to use a previously discarded sample")
self.writeSamplesToFile()
return self.trainImages, self.trainImagesPool, new_train_images
class BoundingBoxAmountSelector(SampleSelector):
"""
Select the samples which had the most or least bounding box predictions
"""
def __init__(self, inputdir, outputdir,trainImages=None, trainImagesPool=None, mode="most", seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool,seed=seed)
# we can't load the weights here, because we need new ones after the next training
self.mode = mode
def selectSamples(self, amount=100):
"""
selects samples based on the amount of predicted bounding boxes from the pool
:param amount: amount of images to add to pool
:return: current train images, pool of remaining images
"""
if amount > len(self.trainImagesPool): # make sure this doesn't crash at the end
amount = len(self.trainImagesPool)
yolo = yoloPredictor.yoloPredictor() # load weights here, because after sampling new weights are trained
predictionConfidences = []
print("Selecting samples based on amount of bounding boxes:")
for path in tqdm(self.trainImagesPool):
boxes = yolo.predict(path)
length = len(boxes)
predictionConfidences.append([length, path])
new_train_images = []
sortedPredictions = sorted(predictionConfidences) # sort the list so we can take the first #amount items
if self.mode == "least":
for image in sortedPredictions[:amount]:
self.trainImagesPool.remove(image[1]) # remove about to be labeled images from pool
self.trainImages.append(image[1])
new_train_images.append(image[1])
if self.mode == "most":
for image in sortedPredictions[len(sortedPredictions)-amount:]:
self.trainImagesPool.remove(image[1]) # remove about to be labeled images from pool
self.trainImages.append(image[1])
new_train_images.append(image[1])
self.writeSamplesToFile()
return self.trainImages, self.trainImagesPool, new_train_images
class noiseSelector(SampleSelector):
""""
Apply noise to the image and compare how much the prediction changes
The intuition is that a large difference in prediction means uncertain initial prediction
"""
def __init__(self, inputdir, outputdir,trainImages=None, trainImagesPool=None, mode="", seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool, seed=seed)
self.mode = mode
self.number_of_classes = 6
def selectSamples(self, amount=100):
"""
selects samples by first applying noise and then comparing the predictions
:param amount: amount of images to add to pool
:return: current train images, pool of remaining images
"""
if amount > len(self.trainImagesPool): # make sure this doesn't crash at the end
amount = len(self.trainImagesPool)
yolo = yoloPredictor.yoloPredictor() # load weights here, because after sampling new weights are trained
print("Selecting samples after applying noise to the samples:")
differences = []
for path in tqdm(self.trainImagesPool):
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# get prediction from uncorrupted image
init_boxes = yolo.predict(path)
# apply corruption
if self.mode == "gaussian_mean_difference" or self.mode == "gaussian_map_mean":
gaussian_noised_image = corrupt(img, corruption_name="gaussian_noise", severity=1)
gaussian_boxes = yolo.predictFromLoadedImage(gaussian_noised_image)
elif self.mode == "motion_mean_difference" or self.mode == "motion_blur_map_mean":
motion_blurred_image = corrupt(img, corruption_name="motion_blur", severity=3)
motion_boxes = yolo.predictFromLoadedImage(motion_blurred_image)
if self.mode == "gaussian_mean_difference":
difference = self.calc_confidence_difference(init_boxes, gaussian_boxes, "mean")
differences.append([difference[0], path])
elif self.mode == "gaussian_map_mean" or self.mode == "motion_blur_map_mean":
# map library expects for ground truth:
# [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
# pytorchyolo returns
# # [[x1, y1, x2, y2, confidence, class]]
# so we need to delete confidence and have class there
# and set 0, 0 for difficult and crowd
init_map = []
for pred in init_boxes:
minx, miny, maxx, maxy = self.min_and_max_xy_values(pred)
init_map.append([int(minx), int(miny), int(maxx), int(maxy), pred[5], 0, 0])
init_map = np.array(init_map)
# map library expects for detection:
# [xmin, ymin, xmax, ymax, class_id, confidence]
# -> we need to swap class and confidence
if self.mode == "gaussian_map_mean":
gauss_map = []
for pred in gaussian_boxes:
minx, miny, maxx, maxy = self.min_and_max_xy_values(pred)
gauss_map.append([int(minx), int(miny), int(maxx), int(maxy), pred[5], pred[4]])
gauss_map = np.array(gauss_map)
noised_map = gauss_map
if self.mode == "motion_blur_map_mean":
motion_blur_map = []
for pred in motion_boxes:
minx, miny, maxx, maxy = self.min_and_max_xy_values(pred)
motion_blur_map.append([int(minx), int(miny), int(maxx), int(maxy), pred[5], pred[4]])
motion_blur_map = np.array(motion_blur_map)
noised_map = motion_blur_map
metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=False,
num_classes=self.number_of_classes)
metric_fn.add(noised_map, init_map)
map_value = metric_fn.value(iou_thresholds=0.5)['mAP']
differences.append([map_value, path])
        sorted_differences = sorted(differences)  # sort the [score, path] pairs together so we can take the first #amount items
print(f"There are {len(sorted_differences)} images in the sorted differences")
# reverse list so we can just take the first #amount samples
# this way the images with the biggest difference should be at the start
if self.mode == "gaussian_mean_difference":
sorted_differences = sorted_differences[::-1]
new_train_images = []
for image in sorted_differences[:amount]:
self.trainImagesPool.remove(image[1]) # remove about to be labeled images from pool
self.trainImages.append(image[1])
new_train_images.append(image[1])
self.writeSamplesToFile()
print(f"{len(self.trainImages)} used train images right now. \n"
f"{len(self.trainImagesPool)} images left in trainImagesPool")
return self.trainImages, self.trainImagesPool, new_train_images
def min_and_max_xy_values(self, prediction):
"""
This is probably unnecessary, but ensure that the order is correct
:param prediction: yolo prediction
:return: minx, miny, maxx, maxy
"""
minx = min(prediction[0], prediction[2])
maxx = max(prediction[0], prediction[2])
miny = min(prediction[1], prediction[3])
maxy = max(prediction[1], prediction[3])
return minx, miny, maxx, maxy
def calc_confidence_difference(self, first_preds, second_preds, mode):
"""
first_preds: initial predictions of neural network with no noise
second_preds: predictions after noise was applied to the image
mode: select which values should be compared. options are: mean. maybe in the future: median, max, min
returns: mean of differences, list of differences
"""
predictions = [first_preds, second_preds]
confidences_by_class = []
for i, pred in enumerate(predictions):
# add an empty list to the confidences in which we then put the confidences of our predictions
confidences_by_class.append([])
# ball, goalpost, robot, L-, T-, X-intersection are the classes in this order
for object_class in range(0, self.number_of_classes):
confidences = []
if len(pred) > 0:
for detection in pred:
if detection[5] == object_class:
confidences.append(detection[4])
if mode == "mean":
if len(confidences) == 0:
confidences_by_class[i].append(0)
else:
confidences_by_class[i].append(np.mean(confidences))
else:
# this could be any number between the min yolo threshold we use and 0
# for simplicity we assume it to be 0
confidences_by_class[i].append(0)
results = []
for i in range(len(confidences_by_class[0])):
# calculate difference between e.g. confidence in two ball predictions
# use absolute numbers, because we don't care which was more confident
results.append(abs(confidences_by_class[0][i] - confidences_by_class[1][i]))
return np.mean(results), results
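# Worked example for calc_confidence_difference (illustrative numbers only):
# with two classes, per-class mean confidences of [0.9, 0.2] before noise and
# [0.7, 0.4] after give absolute differences [0.2, 0.2] and a returned mean of 0.2.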
class DifferenceSampleSelector(SampleSelector):
"""
Select samples by comparing them. No ground truth data is required.
"""
def __init__(self, inputdir, outputdir, trainImages=None, trainImagesPool=None, mode=None, seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool, seed=seed)
self.sampler = imageDifferenceCalculator.Image2Vector(trainImagesPool)
def selectSamples(self, amount=100, cluster_amount=10):
"""
selects samples from the pool
:param amount: amount of images to add to pool
:param cluster_amount: split input images into how many cluster?
:return: current train images, pool of remaining images
"""
if amount > len(self.trainImagesPool): # make sure this doesn't crash at the end
amount = len(self.trainImagesPool)
self.sampler.image_list = self.trainImagesPool
self.sampler.generate_all_image_vectors()
images_by_cluster = self.sampler.images_by_cluster(cluster_amount)
current_cluster = 0
added_images = 0
print("\n")
        # clusters that turn out empty are skipped below with a warning
new_train_images = []
while added_images < amount:
if len(images_by_cluster[current_cluster]) == 0:
print(f"No image found for cluster {current_cluster}")
current_cluster = (current_cluster + 1) % cluster_amount
continue
sample = random.choice(images_by_cluster[current_cluster])
self.trainImages.append(sample)
new_train_images.append(sample)
self.trainImagesPool.remove(sample)
images_by_cluster[current_cluster].remove(sample)
added_images += 1
current_cluster = (current_cluster + 1) % cluster_amount
print(f"Adding image {added_images}/{amount}", end='\r')
print("\n")
self.writeSamplesToFile()
return self.trainImages, self.trainImagesPool, new_train_images
class VAEBasedSelector(SampleSelector):
"""
Select samples by checking the VAE values. No ground truth data is required.
"""
def __init__(self, inputdir, outputdir, trainImages=None, trainImagesPool=None, mode=None, seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool, seed=seed)
self.sampler = imageDifferenceCalculator.Vae(trainImagesPool)
def selectSamples(self, amount=100):
"""
selects samples from the pool
:param amount: amount of images to add to pool
:param cluster_amount: split input images into how many cluster?
:return: current train images, pool of remaining images
"""
if amount > len(self.trainImagesPool): # make sure this doesn't crash at the end
amount = len(self.trainImagesPool)
self.sampler.image_list = self.trainImagesPool # TODO
new_train_images = []
# using integer division to safeguard cases where % 10 !=0
error_images_amount = amount // 10
difference_images_amount = amount - error_images_amount
high_error = self.sampler.get_high_error_samples()
# random can't select from a set
high_error = list(high_error)
error_value = 1.64
# select high error images
while len(new_train_images) < error_images_amount:
if len(high_error) == 0:
error_value -= 0.01
print("reduced error value")
high_error = self.sampler.get_high_error_samples(error_value)
# random can't select from a set
high_error = list(high_error)
new_sample = random.choice(high_error)
if new_sample in self.trainImagesPool and new_sample not in new_train_images:
new_train_images.append(new_sample)
self.trainImagesPool.remove(new_sample)
high_error.remove(new_sample)
else:
high_error.remove(new_sample) # still remove, even if we can't add it to make loop faster
# loop pseudo code to select difference based images:
# call with x distance
# check length after discarding images not in imagespool
# take x samples randomly (or run again with larger distance?)
# redo with smaller distance if too short
latent_distance = 50
while True:
# ensure this doesn't go into an endless loop
if latent_distance <= 0:
high_difference = self.trainImagesPool
print()
print("VAE selection discarded, because we had to use a distance of 0")
print()
break
high_difference = self.sampler.get_very_different_samples(latent_distance_to_prune=latent_distance)
samples_to_be_removed = []
# discard images that are not in the pool
for sample in high_difference:
if sample not in self.trainImagesPool or sample in new_train_images: # if it was added by high error
samples_to_be_removed.append(sample)
for sample in samples_to_be_removed:
high_difference.remove(sample)
if len(high_difference) < difference_images_amount:
latent_distance -= 5
else:
break
while len(new_train_images) < amount:
sample = random.choice(high_difference)
if sample not in new_train_images:
new_train_images.append(sample)
self.trainImagesPool.remove(sample)
high_difference.remove(sample)
self.trainImages.extend(new_train_images)
self.writeSamplesToFile()
return self.trainImages, self.trainImagesPool, new_train_images
class LearningLoss(SampleSelector):
"""
    Select samples by having a separate neural network attached to the main network, which learns to predict loss.
"""
def __init__(self, inputdir, outputdir, trainImages=None, trainImagesPool=None, mode=None, seed=42):
super().__init__(inputdir, outputdir, trainImages=trainImages, trainImagesPool=trainImagesPool, seed=seed)
def selectSamples(self, amount=100):
"""
selects samples based on the learned loss from the pool
:param amount: amount of images to add to pool
:return: current train images, pool of remaining images
"""
# import active learning utils
# load network pickle
# net - loaded net
# active cycle - anything > 0 (only important because first samples are selected randomly)
# rand_state - give current random state
# unlabeled idx - list of each id in train images pool?
# dataset - Any pytorch dataset, which has member function "get_image_path".
# device - "cuda"
# count - amount
# subset_factor - size of subsets... 1?
from active_learning.active_learning.active_learning_utils import choose_indices_loss_prediction_active_learning
import LearningLossDataset
dataset = LearningLossDataset.LearningLossdataset(self.trainImagesPool)
with open("active_model.pickle", "rb") as f:
model = pickle.load(f)
rand_state = np.random
selected_sample_ids = choose_indices_loss_prediction_active_learning(net=model, active_cycle=1, rand_state=rand_state,
unlabeled_idx=list(range(len(dataset))), dataset=dataset,
device="cuda", count=amount, subset_factor=100000000)
selected_samples = []
for sample in selected_sample_ids[0]:
sample_path = dataset.get_image_path(sample)
selected_samples.append(sample_path)
self.trainImages.append(sample_path)
self.trainImagesPool.remove(sample_path)
self.writeSamplesToFile()
return self.trainImages, self.trainImagesPool, selected_samples
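# A minimal usage sketch (hypothetical paths and amounts; retraining between
# cycles is project-specific and omitted here):
#
#   selector = meanConfidenceSelector("data/images", "data", mode="min")
#   for cycle in range(5):
#       trainImages, pool, newSamples = selector.selectSamples(amount=100)
#       # ... retrain the YOLO model on trainImages before the next cycle ...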
if __name__ == "__main__":
a = RandomSampleSelector("/homes/15hagge/deepActiveLearning/PyTorch-YOLOv3/data/custom/images",
"/homes/15hagge/deepActiveLearning/PyTorch-YOLOv3/data/custom")
a.selectSamples()
|
{"hexsha": "9141e0871f5d0559623d5c00fac5b1ce291cbefc", "size": 26941, "ext": "py", "lang": "Python", "max_stars_repo_path": "SelectSamples.py", "max_stars_repo_name": "johagge/DeepActiveLearning", "max_stars_repo_head_hexsha": "937b82710f7fa3c6c8e165ab0dc0f4d4d770499d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SelectSamples.py", "max_issues_repo_name": "johagge/DeepActiveLearning", "max_issues_repo_head_hexsha": "937b82710f7fa3c6c8e165ab0dc0f4d4d770499d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SelectSamples.py", "max_forks_repo_name": "johagge/DeepActiveLearning", "max_forks_repo_head_hexsha": "937b82710f7fa3c6c8e165ab0dc0f4d4d770499d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8960817717, "max_line_length": 126, "alphanum_fraction": 0.6274080398, "include": true, "reason": "import numpy", "num_tokens": 5602}
|
#!/usr/bin/python
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|.|c|o|m|.|t|w|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# Copyright (c) 2017, raspberrypi.com.tw
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# color_space.py
# Change color space from RGB to Gray, and HSV
#
# Author : sosorry
# Date : 08/30/2016
# Usage : python color_space.py
import cv2
import numpy as np
image = cv2.imread("lena256rgb.jpg")
cv2.imshow("Normal", image)
cv2.waitKey(0)
# Convert BGR to Gray
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray", gray)
cv2.waitKey(0)
# Threshold the gray image to binary image
# ...
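# A minimal sketch (not in the original): Otsu's method picks the threshold
# automatically; pass a fixed value such as 127 and drop THRESH_OTSU if preferred
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow("Binary", binary)
cv2.waitKey(0)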
# Convert BGR to HSV
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cv2.imshow("HSV", hsv)
cv2.waitKey(0)
# Convert HSV back to BGR
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow("BGR", bgr)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
{"hexsha": "7283d6a7f29feba3528da9d81ae3e66ca0b50dde", "size": 949, "ext": "py", "lang": "Python", "max_stars_repo_path": "camera-opencv/01-color_space/color_space.py", "max_stars_repo_name": "ReemDAlsh/camera-python-opencv", "max_stars_repo_head_hexsha": "6adb12b682907554645211217e970480685347b0", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2017-08-28T02:42:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-12T02:11:05.000Z", "max_issues_repo_path": "camera-opencv/01-color_space/color_space.py", "max_issues_repo_name": "ReemDAlsh/camera-python-opencv", "max_issues_repo_head_hexsha": "6adb12b682907554645211217e970480685347b0", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-02-09T08:50:00.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-09T08:50:00.000Z", "max_forks_repo_path": "camera-opencv/01-color_space/color_space.py", "max_forks_repo_name": "ReemDAlsh/camera-python-opencv", "max_forks_repo_head_hexsha": "6adb12b682907554645211217e970480685347b0", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2017-08-17T10:25:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-16T11:07:13.000Z", "avg_line_length": 21.5681818182, "max_line_length": 72, "alphanum_fraction": 0.6491043203, "include": true, "reason": "import numpy", "num_tokens": 300}
|
#BSD 3-Clause License
#
#Copyright (c) 2021, Florent Audonnet
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from datetime import datetime
import openvr
from simple_arm import utils
import pyquaternion as pyq
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Pose, Point, Quaternion, Twist, Vector3
from std_msgs.msg import Bool
from collections import defaultdict
# https://gist.github.com/awesomebytes/75daab3adb62b331f21ecf3a03b3ab46
def from_controller_state_to_dict(pControllerState):
# docs: https://github.com/ValveSoftware/openvr/wiki/IVRSystem::GetControllerState
d = {}
d['unPacketNum'] = pControllerState.unPacketNum
# on trigger .y is always 0.0 says the docs
d['trigger'] = pControllerState.rAxis[1].x
# 0.0 on trigger is fully released
# -1.0 to 1.0 on joystick and trackpads
d['trackpad_x'] = pControllerState.rAxis[0].x
d['trackpad_y'] = pControllerState.rAxis[0].y
# These are published and always 0.0
# for i in range(2, 5):
# d['unknowns_' + str(i) + '_x'] = pControllerState.rAxis[i].x
# d['unknowns_' + str(i) + '_y'] = pControllerState.rAxis[i].y
d['ulButtonPressed'] = pControllerState.ulButtonPressed
d['ulButtonTouched'] = pControllerState.ulButtonTouched
# To make easier to understand what is going on
# Second bit marks menu button
d['menu_button'] = bool(pControllerState.ulButtonPressed >> 1 & 1)
# 32 bit marks trackpad
d['trackpad_pressed'] = bool(pControllerState.ulButtonPressed >> 32 & 1)
d['trackpad_touched'] = bool(pControllerState.ulButtonTouched >> 32 & 1)
# third bit marks grip button
d['grip_button'] = bool(pControllerState.ulButtonPressed >> 2 & 1)
# System button can't be read, if you press it
# the controllers stop reporting
return d
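# Example (illustrative values): with ulButtonPressed == (1 << 32) | (1 << 2),
# the returned dict reports trackpad_pressed=True and grip_button=True, while
# menu_button stays False because bit 1 is clear.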
class VrPublisher(Node):
def __init__(self, openvr_system, buttons=False):
super().__init__('vr_publisher')
timer_period = 0.1
self.timer = self.create_timer(timer_period, self.timer_callback)
self.devices = defaultdict(list)
self.system = openvr_system
self.poses = []
self.publishers_dict = {}
self.prev_time = datetime.now()
self.point = None
self.velocity = Vector3()
self.ang_velocity = Vector3()
self.rot = None
self.buttons = buttons
def timer_callback(self):
self.poses = self.system.getDeviceToAbsoluteTrackingPose(
openvr.TrackingUniverseStanding, 0, self.poses)
########
# # ETrackedDeviceClass = ENUM_TYPE
# # TrackedDeviceClass_Invalid = ENUM_VALUE_TYPE(0)
# # TrackedDeviceClass_HMD = ENUM_VALUE_TYPE(1)
# # TrackedDeviceClass_Controller = ENUM_VALUE_TYPE(2)
# # TrackedDeviceClass_GenericTracker = ENUM_VALUE_TYPE(3)
# # TrackedDeviceClass_TrackingReference = ENUM_VALUE_TYPE(4)
# # TrackedDeviceClass_DisplayRedirect = ENUM_VALUE_TYPE(5)
# # TrackedDeviceClass_Max = ENUM_VALUE_TYPE(6)
########
# # TrackedControllerRole_LeftHand = 1, // Tracked device associated with the left hand
# # TrackedControllerRole_RightHand = 2, // Tracked device associated with the right hand
########
for idx, controller in enumerate(self.poses):
# Needed as the order of the devices may change ( based on which thing got turned on first)
if not self.system.isTrackedDeviceConnected(idx):
continue
if self.system.getTrackedDeviceClass(idx) == 1 and len(self.devices["hmd"]) <= 1:
self.devices["hmd"].append(("hmd", controller))
elif self.system.getTrackedDeviceClass(idx) == 2 and len(self.devices["controller"]) <= 2:
controller_role = self.system.getControllerRoleForTrackedDeviceIndex(
idx)
hand = ""
if (controller_role == 1):
hand = "LeftHand"
if controller_role == 2:
hand = "RightHand"
self.devices["controller"].append((hand, controller))
for key, device in self.devices.items():
for idx, (name, el) in enumerate(device):
if key == "controller":
result, pControllerState = self.system.getControllerState(
idx)
if result:
d = from_controller_state_to_dict(pControllerState)
name = f"{key}/{name}/trigger"
msg = Bool()
msg.data = d["trigger"] > 0.0
self.publish(name, msg, Bool)
pose = utils.convert_to_quaternion(
el.mDeviceToAbsoluteTracking)
point = Point()
point.x = pose[0][0]
point.y = pose[0][1]
point.z = pose[0][2]
time = datetime.now()
dtime = (time-self.prev_time).total_seconds()
if self.point is not None:
self.velocity.x = (point.x - self.point.x) / dtime
self.velocity.y = (point.y - self.point.y) / dtime
self.velocity.z = (point.z - self.point.z) / dtime
self.point = point
rot = Quaternion()
q1 = pyq.Quaternion(pose[1])
q1 = q1.normalised
rot.w = q1[0]
rot.x = q1[1]
rot.y = q1[2]
rot.z = q1[3]
if self.rot is not None:
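                    # finite-difference quaternion rate (standard approximation):
                    # omega ~= 2 * (q1 - q0) / dt * q1^-1; the vector part of
                    # velQuater holds the angular velocity components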
diffQuater = q1 - self.rot
conjBoxQuater = q1.inverse
velQuater = ((diffQuater * 2.0) / dtime) * conjBoxQuater
self.ang_velocity.x = velQuater[1]
self.ang_velocity.y = velQuater[2]
self.ang_velocity.z = velQuater[3]
# print(self.ang_velocity)
self.rot = q1
msg = Pose()
msg.orientation = rot
msg.position = point
name = f"{key}/{name}"
self.publish(name, msg, Pose)
vel = Twist()
if self.velocity.x == 0.0 and self.velocity.y == 0.0 and self.velocity.z == 0.0 and \
self.ang_velocity.x == 0.0 and self.ang_velocity.y == 0.0 and self.ang_velocity.z == 0.0:
continue
vel.linear = self.velocity
vel.angular = self.ang_velocity
name += "/vel"
                    self.publish(name, vel, Twist)
        # advance the reference time once per callback so the next finite
        # difference spans one timer period rather than the time since start-up
        self.prev_time = datetime.now()
def publish(self, name, value, type):
pub = self.publishers_dict.get(name)
if pub is None:
pub = self.create_publisher(type, name, 10)
self.publishers_dict[name] = pub
pub.publish(value)
def main(buttons=False, args=None):
if not openvr.isRuntimeInstalled:
raise RuntimeError("OpenVR / SteamVr is not Installed Exit")
if not openvr.isHmdPresent():
raise RuntimeError(
"SteamVr is not running or Headmount is not plugged in")
rclpy.init(args=args)
system = openvr.init(openvr.VRApplication_Scene)
minimal_publisher = VrPublisher(system, buttons)
rclpy.spin(minimal_publisher)
minimal_publisher.destroy_node()
rclpy.shutdown()
openvr.shutdown()
if __name__ == '__main__':
main()
|
{"hexsha": "5e5d33bb0e972da6e7df6f5a8c7c7326f731d151", "size": 8972, "ext": "py", "lang": "Python", "max_stars_repo_path": "simple_arm/simple_arm/vr_publish.py", "max_stars_repo_name": "09ubberboy90/lvl4-ros2-sim-comp", "max_stars_repo_head_hexsha": "c197c76b29a9d864a800b81332bc3a549ecaa7c3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-03T09:16:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-03T09:16:11.000Z", "max_issues_repo_path": "simple_arm/simple_arm/vr_publish.py", "max_issues_repo_name": "09ubberboy90/lvl4-ros2-sim-comp", "max_issues_repo_head_hexsha": "c197c76b29a9d864a800b81332bc3a549ecaa7c3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-03T20:39:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T14:57:55.000Z", "max_forks_repo_path": "simple_arm/simple_arm/vr_publish.py", "max_forks_repo_name": "09ubberboy90/lvl4-ros2-sim-comp", "max_forks_repo_head_hexsha": "c197c76b29a9d864a800b81332bc3a549ecaa7c3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-03T09:16:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T09:16:21.000Z", "avg_line_length": 41.9252336449, "max_line_length": 113, "alphanum_fraction": 0.6201515827, "include": true, "reason": "from numpy", "num_tokens": 2098}
|
[STATEMENT]
lemma span_minimal: "S \<subseteq> T \<Longrightarrow> subspace T \<Longrightarrow> span S \<subseteq> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>S \<subseteq> T; subspace T\<rbrakk> \<Longrightarrow> span S \<subseteq> T
[PROOF STEP]
by (auto simp: span_explicit intro!: subspace_sum subspace_scale)
|
{"llama_tokens": 115, "file": null, "length": 1}
|
from shor.gates import Hadamard, PauliX, CCNOT, SWAP, CRZ, CH, S, Sdg, T, Tdg, PauliY, PauliZ, ID, Cx, U1, U3, U2, Rx, Cz, Ry, Rz
from shor.layers import Qubits
from shor.operations import Measure
from shor.quantum import Circuit
from shor.backends import QuantumSimulator, QSession
import numpy as np
import math
def test_ch_integration():
circuit = Circuit()
circuit.add(Qubits(2))
circuit.add(PauliX(0))
circuit.add(CH(0, 1))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['11'] > 450
assert result['00'] == 0
assert result['10'] > 450
assert result['01'] == 0
def test_crz_integration():
circuit = Circuit()
circuit.add(Qubits(2))
circuit.add(CRZ(0, 1, angle=math.pi/3))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['11'] == 0
assert result['00'] == 1024
assert result['10'] == 0
assert result['01'] == 0
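# Note: both qubits start in |0>, so the CRZ control never fires and the
# conditional phase is not applied; every shot therefore reads '00'.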
def test_dblpx_integration():
circuit = Circuit()
circuit.add(Qubits(2))
circuit.add(PauliX(0))
circuit.add(PauliX(1))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['11'] == 1024
assert result['00'] == 0
assert result['10'] == 0
assert result['01'] == 0
def test_swap_integration():
circuit = Circuit()
circuit.add(Qubits(2))
circuit.add(PauliX(0))
circuit.add(SWAP(0,1))
circuit.add(Measure(0,1))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['11'] == 0
assert result['00'] == 0
assert result['10'] == 0
assert result['01'] == 1024
def test_ccnot_integration():
circuit = Circuit()
circuit.add(Qubits(3))
circuit.add(PauliX(0))
circuit.add(PauliX(1))
circuit.add(CCNOT(0, 1, 2))
circuit.add(Measure(0, 1, 2))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['000'] == 0
assert result['001'] == 0
assert result['010'] == 0
assert result['100'] == 0
assert result['110'] == 0
assert result['101'] == 0
assert result['011'] == 0
assert result['111'] == 1024
def test_s_integration():
circuit = Circuit()
circuit.add(Qubits(1))
    circuit.add(S(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['0'] == 1024
assert result['1'] == 0
def test_sdg_integration():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(PauliX(0))
    circuit.add(Sdg(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['1'] == 1024
assert result['0'] == 0
def test_t_integration():
circuit = Circuit()
circuit.add(Qubits(1))
    circuit.add(T(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['0'] == 1024
assert result['1'] == 0
def test_tdg_integration():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(PauliX(0))
circuit.add(Tdg(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['1'] == 1024
assert result['0'] == 0
def test_paulix_integration():
circuit = Circuit()
circuit.add(Qubits(1))
    circuit.add(PauliX(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
    # PauliX flips |0> to |1> deterministically, so every shot reads '1'
assert result['0'] == 0
assert result['1'] == 1024
def test_pauliy_integration():
circuit = Circuit()
circuit.add(Qubits(1))
    circuit.add(PauliY(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
    # PauliY maps |0> to i|1>; the global phase is unobservable, so every shot reads '1'
assert result['0'] == 0
assert result['1'] == 1024
def test_pauliz_integration():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(Hadamard(0))
    circuit.add(PauliZ(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
# Accounting for random noise, results won't be exact
assert result['0'] > 450
assert result['1'] > 450
def test_ID_qubit():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(ID(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
# Identity leaves |0> unchanged, so the counts are exact
assert result['1'] == 0
assert result['0'] == 1024
def test_u1_integration():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(PauliX(0))
circuit.add(U1(0))
circuit.add(Measure())
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1024)
assert result['0'] == 0
assert result['1'] == 1024
def test_cx_int():
circuit_1 = Circuit()
circuit_1.add(Qubits(2))
circuit_1.add(Hadamard(0))
circuit_1.add(Cx(0, 1))
circuit_1.add(Measure(0, 1))
circuit_2 = Circuit()
circuit_2.add(Qubits(2))
circuit_2.add(Hadamard(1))
circuit_2.add(Cx(0, 1))
circuit_2.add(Measure(0, 1))
sess = QSession(backend=QuantumSimulator())
result_1 = sess.run(circuit_1, num_shots=1024)
result_2 = sess.run(circuit_2, num_shots=1024)
assert result_1['01'] == 0
assert result_1['10'] > 450
assert result_1['00'] > 450
assert result_1['11'] == 0
assert result_2['01'] == 0
assert result_2['10'] == 0
assert result_2['00'] > 450
assert result_2['11'] > 450
def test_Cz_int():
circuit = Circuit()
circuit.add(Qubits(2))
circuit.add(Hadamard(0))
circuit.add(Hadamard(1))
circuit.add(Cz(0, 1))
circuit.add(Measure(0, 1))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1000)
assert result['00'] > 210
assert result['01'] > 210
assert result['10'] > 210
assert result['11'] > 210
def test_ry_int():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(Ry(0, angle=np.pi / 2))
circuit.add(Measure(0))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1000)
assert result['0'] > 450
assert result['1'] > 450
def test_rz_int():
circuit = Circuit()
circuit.add(Qubits(1))
circuit.add(Rz(0, angle=np.pi / 2))
circuit.add(Measure(0))
sess = QSession(backend=QuantumSimulator())
result = sess.run(circuit, num_shots=1000)
assert result['0'] == 1000
assert result['1'] == 0
def test_U3_int():
circuit_1 = Circuit()
circuit_1.add(Qubits(1))
circuit_1.add(U3(0, theta=np.pi / 2, phi=-np.pi / 2, alpha=np.pi / 2))
circuit_1.add(Measure(0))
circuit_2 = Circuit()
circuit_2.add(Qubits(1))
circuit_2.add(Rx(theta=np.pi))
circuit_2.add(Measure(0))
sess = QSession(backend=QuantumSimulator())
result_1 = sess.run(circuit_1, num_shots=1024)
result_2 = sess.run(circuit_2, num_shots=1024)
assert result_1['0'] > 450
assert result_1['1'] > 450
assert result_2['0'] > 450
assert result_2['1'] > 450
def test_U2_int():
circuit_1 = Circuit()
circuit_1.add(Qubits(1))
circuit_1.add(U2(0, phi=-np.pi / 2, alpha=np.pi / 2))
circuit_1.add(Measure(0))
sess = QSession(backend=QuantumSimulator())
result_1 = sess.run(circuit_1, num_shots=1024)
assert result_1['0'] > 450
assert result_1['1'] > 450
|
{"hexsha": "abe9a24531ce86335ad7e74b3c3d8bc01ea3f9b2", "size": 7986, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/integration/test_gates_integration.py", "max_stars_repo_name": "jywyq/shor", "max_stars_repo_head_hexsha": "5e38c6875d68207a6d0e492d83f7b1f6ae0afb58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/integration/test_gates_integration.py", "max_issues_repo_name": "jywyq/shor", "max_issues_repo_head_hexsha": "5e38c6875d68207a6d0e492d83f7b1f6ae0afb58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/integration/test_gates_integration.py", "max_forks_repo_name": "jywyq/shor", "max_forks_repo_head_hexsha": "5e38c6875d68207a6d0e492d83f7b1f6ae0afb58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6481481481, "max_line_length": 129, "alphanum_fraction": 0.6352366642, "include": true, "reason": "import numpy", "num_tokens": 2495}
|
This organization is a successor to Davis Students Against War
Overview
Please Note:
In the past there was an organization called Davis Students Against War, led by Karl Duesterberg. That organization has since become inactive. THIS PAGE is not in reference to the old DSAW! The current DSAWR was created in March of 2007 and is organized by Users/KatieDavalos and Juliana Haber. The new organization refers to itself as the Davis Students Against War Resource, reflecting its status as a network. It does not identify as democratic or liberal.
Davis Students Against War Resource, or DSAWR, is a student effort to voice the youth's opposition to war, including current and future conflicts. It is a network for the exchange of information and for collaborative efforts by Davis youth. Davis Students Against War Resource is a tool for the contact and coordination of peace efforts. We hope to inspire action in students and seek to create change through awareness. No one person or body represents DSAWR, and the organization has no affiliations. It is in the process of becoming an official Student Organizations student organization.
Facebook: view the Facebook group at http://ucdavis.facebook.com/group.php?gid2246940574
Myspace: view the Myspace group at http://groups.myspace.com/dsawresource
Their protests against war include, but are not limited to:
compiling Davis Students Against War Resource/Iraq War facts about the war in Iraq
the impact on global environment
the misallocation of funds
the lack of citizen and foreign support
the economic disparities that are inflicted or manipulated by the wagers of war
the ignorance of global standards of human rights and peace
and specifically:
the impact of war on students
the legality of the Iraq War and
the misrepresentation of information by government and media sources
Their forms of action to create change include: petitions, rallies, protests, boycotts, walkouts, days of silence, and guest speakers.
DSAWR encourages members to speak about their thoughts and share opinions both within the network and among their peers. The goal of DSAWR is to proliferate information and discussion regarding war. All suggestions and help are encouraged and welcome to further the cause.
Upcoming Event(s)
SEPTEMBER 15 IRAQ PROGRESS REPORT
SEPTEMBER 21st EVENT!!
Come celebrate INTERNATIONAL PEACE DAY and the first Iraq Moratorium Day, observed on the third Friday of every month!!!!!
We will be hosting a moment of silence at 12:00pm at the local school as well as providing pinwheels for peace for everyone to decorate.
At 5:00pm there will be speakers (including Dave Dionsi of Teach Peace, the Davis HS Teach Peace Club, and others) at CENTRAL PARK to discuss the need for peace locally as well as around the world, along with entertainment (for kids and adults alike!) to celebrate our community of peace. These will be followed by a candlelight vigil in memory of all those lost to war and conflict, with particular emphasis on the civilian and military casualties of the Iraq War, the Gaza Strip, and the crisis in Darfur.
for more info:
internationaldayofpeace.org
iraqmoratorium.org
Past Event(s)/Achievements
Informational Meeting
Thursday March 8, 2007
March 18th ANSWER Protest
Student Walkout
UCD May 1, 2007: Day of Action
National Student Walkout
Wear White to Protest the Iraq War
Walkout of Class at 11:30
Panel on Iraq War
Wednesday, May 2nd
6:00-8:00pm
Rec Pool Lodge
2007-03-11 19:24:33: COME JOIN US AT TABLING THIS WEEK!!! Users/LaurenFrederic
2007-03-17 15:01:15: Interested in carpooling for the protest tomorrow? Email me! Users/KatieDavalos
|
{"hexsha": "003d261d00896288f42231d6eca5ec23385d2e36", "size": 3700, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Davis_Students_Against_War_Resource.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Davis_Students_Against_War_Resource.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Davis_Students_Against_War_Resource.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0, "max_line_length": 559, "alphanum_fraction": 0.8013513514, "num_tokens": 838}
|
import numpy as np
from typing import Dict, Any
from dataset.dataset import Dataset
from utils.constants import INPUT_SHAPE, INPUTS, OUTPUT, SAMPLE_ID, INPUT_NOISE, SMALL_NUMBER
from utils.constants import INPUT_SCALER, NUM_OUTPUT_FEATURES, NUM_CLASSES, LABEL_MAP
class SingleDataset(Dataset):
def tensorize(self, sample: Dict[str, Any], metadata: Dict[str, Any], is_train: bool) -> Dict[str, np.ndarray]:
# Normalize inputs
input_shape = metadata[INPUT_SHAPE]
input_sample = np.array(sample[INPUTS]).reshape((-1, input_shape))
input_scaler = metadata[INPUT_SCALER]
normalized_input = input_scaler.transform(input_sample) # [1, L * D]
# Apply input noise during training
if is_train and metadata.get(INPUT_NOISE, 0.0) > SMALL_NUMBER:
input_noise = np.random.normal(loc=0.0, scale=metadata[INPUT_NOISE], size=normalized_input.shape)
normalized_input += input_noise
# Re-map labels for classification problems
output = sample[OUTPUT]
if metadata[NUM_CLASSES] > 0:
label_map = metadata[LABEL_MAP]
output = label_map[output]
return {
INPUTS: normalized_input,
OUTPUT: output,
SAMPLE_ID: sample[SAMPLE_ID]
}
|
{"hexsha": "0eee1fac4ada0ab321a61e7d8beac621a6d14952", "size": 1296, "ext": "py", "lang": "Python", "max_stars_repo_path": "budget-rnn/src/dataset/single_dataset.py", "max_stars_repo_name": "tejaskannan/ml-models", "max_stars_repo_head_hexsha": "ad5acad2c0ce75773062ffcdff088a6fbe5ffc17", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-28T15:40:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-28T15:40:41.000Z", "max_issues_repo_path": "budget-rnn/src/dataset/single_dataset.py", "max_issues_repo_name": "tejaskannan/ml-models", "max_issues_repo_head_hexsha": "ad5acad2c0ce75773062ffcdff088a6fbe5ffc17", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-04T19:42:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T05:46:15.000Z", "max_forks_repo_path": "budget-rnn/src/dataset/single_dataset.py", "max_forks_repo_name": "tejaskannan/budget-rnn", "max_forks_repo_head_hexsha": "ad5acad2c0ce75773062ffcdff088a6fbe5ffc17", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0285714286, "max_line_length": 115, "alphanum_fraction": 0.6759259259, "include": true, "reason": "import numpy", "num_tokens": 291}
|
from layers import layer, ABC, abstractmethod
import numpy as np
from numpy.lib.stride_tricks import as_strided
eps = 1e-8
class trainable(layer, ABC):
"""
The base class for layers with trainable parameters
"""
def set_params(self, *params):
"""
Generates a unique identifier for each trainable parameter
:param params: Tuple of distinct parameter names
"""
hash_ = str(hash(self))
self.params = tuple(map(lambda p: hash_ + ':' + p, params))
@abstractmethod
def update(self, opt):
"""
Updates all trainable parameters
:param opt: The optimiser object
"""
pass
class affine(trainable):
"""
Layer with densely connected neurons. Alternatively called
Dense/Fully Connected layer
"""
def __init__(self, in_, out_, init=np.random.randn):
"""
:param in_: The input dimension
:param out_: The output dimension
:param init: The function to initialise the weight matrix
"""
self.w = init(in_, out_)
self.b = np.random.randn(out_)
self.set_params('w', 'b')
def __call__(self, x):
self.x = x
return x.dot(self.w) + self.b
def back(self, dE_dy):
self.dE_dw = self.x.T.dot(dE_dy) / self.x.shape[0]
self.dE_db = np.sum(dE_dy, axis=0) / self.x.shape[0]
return dE_dy.dot(self.w.T)
def update(self, opt):
opt(self.params, [self.w, self.b], [self.dE_dw, self.dE_db])
# This is what you came for
def conv4d_forward(x, f, s):
"""
Forward pass through a convolutional layer
:param x: Image tensor of dimensions (Batch size, Height, Width, Input Channels)
:param f: Convolutional filter of dimensions
(Filter height, Filter width, Input Channels, Output Channels)
:param s: Tuple of strides (Stride along height, Stride along width)
:return: The convolved image of dimensions
(Batch size, 1 + (H - Fh) / Sh, 1 + (W - Fw) / Sw, Output Channels)
"""
B, H, W, C = x.shape
Fh, Fw, C, D = f.shape
Sh, Sw = s
strided_shape = B, 1 + (H - Fh) // Sh, 1 + (W - Fw) // Sw, Fh, Fw, C
'''
This converts the image to a 6-dimensional tensor, where the two extra dimensions represent
strided, windowed 'snapshots' of each image (along the H and W dimensions) for every batch
and channel
E.g. if each image is 5x5 pixels and the filter is 3x3xCxD, let
x[0, :, :, 0] = [[ 1. 2. 3. 4. 5.]
[ 6. 7. 8. 9. 10.]
[11. 12. 13. 14. 15.]
[16. 17. 18. 19. 20.]
[21. 22. 23. 24. 25.]]
After the next line, this is converted to:
[[[[ 1. 2. 3.]
[ 6. 7. 8.]
[11. 12. 13.]]
[[ 2. 3. 4.]
[ 7. 8. 9.]
[12. 13. 14.]]
[[ 3. 4. 5.]
[ 8. 9. 10.]
[13. 14. 15.]]]
[[[ 6. 7. 8.]
[11. 12. 13.]
[16. 17. 18.]]
[[ 7. 8. 9.]
[12. 13. 14.]
[17. 18. 19.]]
[[ 8. 9. 10.]
[13. 14. 15.]
[18. 19. 20.]]]
[[[11. 12. 13.]
[16. 17. 18.]
[21. 22. 23.]]
[[12. 13. 14.]
[17. 18. 19.]
[22. 23. 24.]]
[[13. 14. 15.]
[18. 19. 20.]
[23. 24. 25.]]]]
i.e. mimicking a 3x3 filter sliding over the image. This is done for each batch and channel
'''
x = as_strided(x, strided_shape,
strides=(x.strides[0], Sh * x.strides[1], Sw * x.strides[2], x.strides[1], x.strides[2], x.strides[3]))
'''
Now, the filter elements in the Fh, Fw and C dimensions are multiplied component-wise with the
image 'snapshots'. This is done for all D filters separately, and then we obtain the convolved image.
The entire code is barely 6 lines long, with 3 lines merely for tuple unpacking.
'''
return np.einsum('wxyijk,ijkd->wxyd', x, f)
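# --- Illustrative sketch (not part of the layer API): the windowing trick above,
# reproduced on the 5x5 example from the comment, with a 3x3 window and stride 1.
def _demo_strided_windows():
    x = np.arange(1., 26.).reshape(1, 5, 5, 1)  # (Batch, H, W, C) with B = C = 1
    B, H, W, C = x.shape
    Fh, Fw, Sh, Sw = 3, 3, 1, 1
    shape = B, 1 + (H - Fh) // Sh, 1 + (W - Fw) // Sw, Fh, Fw, C
    windows = as_strided(x, shape,
                         strides=(x.strides[0], Sh * x.strides[1], Sw * x.strides[2],
                                  x.strides[1], x.strides[2], x.strides[3]))
    # the (0, 0) window is the top-left 3x3 snapshot: [[1 2 3], [6 7 8], [11 12 13]]
    print(windows[0, 0, 0, :, :, 0])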
def conv4d_filter_gradient(x, f, dE_dy, s):
"""
Compute the gradient of the loss with respect to the convolutional filter, given the
gradient with respect to the convolutional output
:param x: Original convolutional input tensor
:param f: Convolutional filter tensor
:param dE_dy: Gradient tensor of the loss with respect to the output
:param s: Tuple of strides
:return: Gradient tensor of the loss with respect to the filter
"""
B, H, W, C = x.shape
Fh, Fw, C, D = f.shape
Sh, Sw = s
strided_shape = B, 1 + (H - Fh) // Sh, 1 + (W - Fw) // Sw, Fh, Fw, C
x = as_strided(x, strided_shape,
strides=(x.strides[0], Sh * x.strides[1], Sw * x.strides[2], x.strides[1], x.strides[2], x.strides[3]))
'''
It's a little-known fact that if:
y = einsum('abc,cde->ace', x, f) The summation can be arbitrary, but let's use this as an example
then: dy/df = einsum('abc,ace->cde', x, ones_like(y)), where all we have to do is swap the output
indices with those of the variable whose gradient we want to calculate, and replace y with a tensor
the same size as y, but containing all ones.
If you think about it, differentiation involving matrix multiplications is just a special case
of this. Anyway, we're not trying to calculate dy/df here; we're trying to find dE/df, given dE/dy.
So by the chain rule, instead of ones_like(y), we put in dE/dy, and we're done!
'''
return np.einsum('wxyijk,wxyd->ijkd', x, dE_dy)
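# --- Illustrative sketch: a finite-difference check of the gradient trick described
# above. Sizes and the helper name are arbitrary choices for this sketch.
def _check_filter_gradient(h=1e-6):
    np.random.seed(0)
    x = np.random.randn(2, 5, 5, 3)
    f = np.random.randn(3, 3, 3, 4)
    dE_dy = np.ones_like(conv4d_forward(x, f, (1, 1)))  # take E = sum(y), so dE/dy = 1
    analytic = conv4d_filter_gradient(x, f, dE_dy, (1, 1))
    # central difference on a single filter element: (E(f+h) - E(f-h)) / 2h
    f_plus, f_minus = f.copy(), f.copy()
    f_plus[0, 0, 0, 0] += h
    f_minus[0, 0, 0, 0] -= h
    numeric = (conv4d_forward(x, f_plus, (1, 1)).sum()
               - conv4d_forward(x, f_minus, (1, 1)).sum()) / (2 * h)
    assert abs(analytic[0, 0, 0, 0] - numeric) < 1e-4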
def conv4d_input_gradient(x, f, dE_dy, s):
"""
Compute the gradient of the loss with respect to the convolutional input, given the
gradient with respect to the convolutional output
:param x: Original convolutional input tensor
:param f: Convolutional filter tensor
:param dE_dy: Gradient tensor of the loss with respect to the output
:param s: Tuple of strides
:return: Gradient tensor of the loss with respect to the input
"""
'''
Here we use the same trick as we did to calculate the filter gradient. However in this case, we don't
recover the input gradient immediately after the einsum; rather we obtain a strided version (since the
original input was strided before it went into the einsum during the forward pass convolution). So we
need to sum over all the sub-windows to recover the original shape.
'''
dE_dx_strided = np.einsum('wxyd,ijkd->wxyijk', dE_dy, f)
imax, jmax, di, dj = dE_dx_strided.shape[1:5]
Sh, Sw = s
dE_dx = np.zeros_like(x)
for i in range(0, imax):
for j in range(0, jmax):
dE_dx[:, Sh*i:Sh*i+di, Sw*j:Sw*j+dj, :] += dE_dx_strided[:, i, j, ...]
return dE_dx
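# --- Illustrative sketch: the overlap-and-sum recovery above always returns a
# gradient with the input's shape, even for strides > 1. Sizes are example values.
def _check_input_gradient_shape():
    np.random.seed(1)
    x = np.random.randn(2, 6, 6, 3)
    f = np.random.randn(3, 3, 3, 4)
    y = conv4d_forward(x, f, (2, 2))  # output shape (2, 2, 2, 4)
    dE_dx = conv4d_input_gradient(x, f, np.ones_like(y), (2, 2))
    assert dE_dx.shape == x.shape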
class conv4d(trainable):
"""
Convolutional layer operating on 4-dimensional tensors
"""
def __init__(self, f_shape, strides=(1, 1)):
"""
:param f_shape: Tuple of (Filter height, Filter width, Number of input channels, Number of output channels)
:param strides: Number of cells by which the filter shifts,
in the image height and width dimensions, defaults to (1, 1)
"""
self.filter = np.random.randn(*f_shape)
self.strides = strides
self.set_params('f')
def __call__(self, x):
self.x = x
return conv4d_forward(x, self.filter, self.strides)
def back(self, dE_dy):
self.dE_df = conv4d_filter_gradient(self.x, self.filter, dE_dy, self.strides) / self.x.shape[0]
return conv4d_input_gradient(self.x, self.filter, dE_dy, self.strides)
def update(self, opt):
opt(self.params, [self.filter], [self.dE_df])
class batch_norm(trainable):
def __init__(self, channels):
self.gamma = np.random.randn(channels)
self.beta = np.random.randn(channels)
self.set_params('gamma', 'beta')
def __call__(self, x):
self.x = x
self.mu, self.std = np.mean(x, axis=0), np.sqrt(np.var(x, axis=0) + eps)
self.xnorm = (x - self.mu) / self.std
return self.gamma * self.xnorm + self.beta
def back(self, dE_dy):
dE_dxnorm = self.gamma * dE_dy
# d(std^-1)/d(var) contributes a factor of -0.5 * std^-3
dE_dvar = np.sum(dE_dxnorm * (self.x - self.mu) * -0.5 * np.power(self.std, -3), axis=0)
dE_dmu = -np.sum(dE_dxnorm / self.std, axis=0) + dE_dvar * -2 * np.mean(self.x - self.mu, axis=0)
# gradients of the learnable per-channel scale and shift
self.dE_dg = np.sum(dE_dy * self.xnorm, axis=0)
self.dE_db = np.sum(dE_dy, axis=0)
dE_dx = dE_dxnorm / self.std + dE_dvar * 2 * (self.x - self.mu) / dE_dy.shape[0] + dE_dmu / dE_dy.shape[0]
return dE_dx
def update(self, opt):
opt(self.params, [self.gamma, self.beta], [self.dE_dg, self.dE_db])
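# --- Illustrative sketch: a quick shape check for batch_norm. Normalisation is
# per channel over the batch axis, so the parameter gradients are per channel too.
def _check_batch_norm_shapes():
    np.random.seed(2)
    bn = batch_norm(channels=4)
    y = bn(np.random.randn(8, 4))  # (batch, channels)
    dE_dx = bn.back(np.ones_like(y))
    assert y.shape == dE_dx.shape == (8, 4)
    assert bn.dE_dg.shape == bn.dE_db.shape == (4,)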
|
{"hexsha": "9e2ca31f600aaa1a9f4222b843a211922dd03125", "size": 8808, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/trainable.py", "max_stars_repo_name": "nkarve/twistml", "max_stars_repo_head_hexsha": "59e168c0776e9a234037a973b608450c05ffa198", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-02T16:16:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-04T12:10:08.000Z", "max_issues_repo_path": "src/trainable.py", "max_issues_repo_name": "nkarve/twistml", "max_issues_repo_head_hexsha": "59e168c0776e9a234037a973b608450c05ffa198", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/trainable.py", "max_forks_repo_name": "nkarve/twistml", "max_forks_repo_head_hexsha": "59e168c0776e9a234037a973b608450c05ffa198", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6222222222, "max_line_length": 122, "alphanum_fraction": 0.5891235241, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2670}
|
"""
Unit Tests for CorfuncCalculator class
"""
from __future__ import division, print_function
import os.path
import unittest
import time
import numpy as np
from sas.sascalc.corfunc.corfunc_calculator import CorfuncCalculator
from sas.sascalc.dataloader.data_info import Data1D
def find(filename):
return os.path.join(os.path.dirname(__file__), filename)
class TestCalculator(unittest.TestCase):
def setUp(self):
self.data = load_data()
# Note: to generate target values from the GUI:
# * load the data from test/corfunc/test/98929.txt
# * set qrange to (0, 0.013), (0.15, 0.24)
# * select fourier transform type
# * click Calculate Bg
# * click Extrapolate
# * click Compute Parameters
# * copy the Guinier and Porod values to the extrapolate function
# * for each graph, grab the data from DataInfo and store it in _out.txt
self.calculator = CorfuncCalculator(data=self.data, lowerq=0.013,
upperq=(0.15, 0.24))
self.calculator.background = 0.3
self.extrapolation = None
self.transformation = None
self.results = [np.loadtxt(find(filename+"_out.txt")).T[2]
for filename in ("gamma1", "gamma3", "idf")]
def extrapolate(self):
params, extrapolation, s2 = self.calculator.compute_extrapolation()
# Check the extrapolation parameters
self.assertAlmostEqual(params['A'], 4.18970, places=5)
self.assertAlmostEqual(params['B'], -25469.9, places=1)
self.assertAlmostEqual(params['K'], 4.44660e-5, places=10)
#self.assertAlmostEqual(params['sigma'], 1.70181e-10, places=15)
# Ensure the extrapolation tends to the background value
self.assertAlmostEqual(extrapolation.y[-1], self.calculator.background)
# Test extrapolation for q values between 0.02 and 0.24
mask = np.logical_and(self.data.x > 0.02, self.data.x < 0.24)
qs = self.data.x[mask]
iqs = self.data.y[mask]
for q, iq in zip(qs, iqs):
# Find the q value in the extrapolation nearest to the value in
# the data
q_extrap = min(extrapolation.x, key=lambda x:abs(x-q))
# Find the index of this value in the extrapolation
index = list(extrapolation.x).index(q_extrap)
# Find its corresponding intensity value
iq_extrap = extrapolation.y[index]
# Check the extrapolation agrees with the data at this point to 1 d.p.
self.assertAlmostEqual(iq_extrap, iq, 1)
self.extrapolation = extrapolation
def transform(self):
self.calculator.compute_transform(self.extrapolation, 'fourier',
completefn=self.transform_callback)
# Transform is performed asynchronously; give it time to run
while True:
time.sleep(0.001)
if (not self.calculator.transform_isrunning() and
self.transformation is not None):
break
transform1, transform3, idf = self.transformation
self.assertIsNotNone(transform1)
self.assertAlmostEqual(transform1.y[0], 1)
self.assertAlmostEqual(transform1.y[-1], 0, 5)
def transform_callback(self, transforms):
self.transformation = transforms
def extract_params(self):
params = self.calculator.extract_parameters(self.transformation[0])
self.assertIsNotNone(params)
self.assertEqual(len(params), 6)
self.assertLess(abs(params['max']-75), 2.5) # L_p ~= 75
def check_transforms(self):
gamma1, gamma3, idf = self.transformation
gamma1_out, gamma3_out, idf_out = self.results
def compare(a, b):
return max(abs((a-b)/b))
#print("gamma1 diff", compare(gamma1.y[gamma1.x<=200.], gamma1_out))
#print("gamma3 diff", compare(gamma3.y[gamma3.x<=200.], gamma3_out))
#print("idf diff", compare(idf.y[idf.x<=200.], idf_out))
#self.assertLess(compare(gamma1.y[gamma1.x<=200.], gamma1_out), 1e-10)
#self.assertLess(compare(gamma3.y[gamma3.x<=200.], gamma3_out), 1e-10)
#self.assertLess(compare(idf.y[idf.x<=200.], idf_out), 1e-10)
# Ensure tests are run in the correct order;
# Each test depends on the one before it
def test_calculator(self):
steps = [self.extrapolate, self.transform, self.extract_params, self.check_transforms]
for test in steps:
try:
test()
except Exception as e:
# report which step failed instead of silently re-raising
self.fail("{} failed ({}: {})".format(test, type(e), e))
def load_data(filename="98929.txt"):
data = np.loadtxt(find(filename), dtype=np.float64)
q = data[:,0]
iq = data[:,1]
return Data1D(x=q, y=iq)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "3bb3dd9890b61d20333bf35eef274c9767ce6221", "size": 4850, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/corfunc/test/utest_corfunc.py", "max_stars_repo_name": "opendatafit/sasview", "max_stars_repo_head_hexsha": "c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/corfunc/test/utest_corfunc.py", "max_issues_repo_name": "opendatafit/sasview", "max_issues_repo_head_hexsha": "c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-20T13:20:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T13:20:35.000Z", "max_forks_repo_path": "test/corfunc/test/utest_corfunc.py", "max_forks_repo_name": "opendatafit/sasview", "max_forks_repo_head_hexsha": "c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4920634921, "max_line_length": 94, "alphanum_fraction": 0.635257732, "include": true, "reason": "import numpy", "num_tokens": 1214}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class PRUNE(nn.Module):
def __init__(self, nodeCount, n_latent=128, n_emb=128, n_prox=64):
super(PRUNE, self).__init__()
'''
Parameters
----------
n_latent : hidden layer dimension
n_emb : node embedding dimension
n_prox : proximity representation dimension
'''
# Embedding
self.node_emb = nn.Embedding(nodeCount, n_emb)
# W_shared
w_init = np.identity(n_prox) + abs(np.random.randn(n_prox, n_prox) / 1000.0)
self.w_shared = torch.from_numpy(w_init).float()
if torch.cuda.is_available():
self.w_shared = self.w_shared.cuda()
# global node ranking score
self.rank = nn.Sequential(
self.node_emb,
nn.Linear(n_latent, n_latent),
nn.ELU(),
nn.Linear(n_latent, 1),
nn.Softplus()
)
# proximity representation
self.prox = nn.Sequential(
self.node_emb,
nn.Linear(n_latent, n_latent),
nn.ELU(),
nn.Linear(n_latent, n_prox),
nn.ReLU()
)
self.init_weight()
def init_weight(self):
torch.nn.init.xavier_normal_(self.node_emb.weight)
torch.nn.init.xavier_normal_(self.w_shared)  # note: this overwrites the near-identity init of w_shared above
for layer in self.rank:
if isinstance(layer, nn.Linear):
torch.nn.init.xavier_normal_(layer.weight)
for layer in self.prox:
if isinstance(layer, nn.Linear):
torch.nn.init.xavier_normal_(layer.weight)
def forward(self, head, tail, pmi, indeg, outdeg, lamb=0.01):
head_rank = self.rank(head)
head_prox = self.prox(head)
tail_rank = self.rank(tail)
tail_prox = self.prox(tail)
# preserving proximity
w = F.relu(self.w_shared)
zWz = (head_prox * torch.matmul(tail_prox, w)).sum(1)
prox_loss = ((zWz - pmi)**2).mean()
# preserving global ranking
rank_loss = indeg * (-tail_rank / indeg + head_rank / outdeg).pow(2)
rank_loss = rank_loss.mean()
total_loss = prox_loss + lamb * rank_loss
return total_loss
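# --- Illustrative sketch (assumed usage, not from the original repo): one loss
# evaluation on a toy edge list. Runs on CPU; note the constructor only moves
# w_shared to the GPU, so a CUDA machine would also need model.cuda().
if __name__ == '__main__':
    model = PRUNE(nodeCount=10)
    heads = torch.tensor([0, 1, 2])   # source node of each edge
    tails = torch.tensor([1, 2, 3])   # target node of each edge
    pmi = torch.rand(3)               # stand-in PMI value per edge
    indeg = torch.ones(3, 1)          # stand-in tail in-degrees
    outdeg = torch.ones(3, 1)         # stand-in head out-degrees
    loss = model(heads, tails, pmi, indeg, outdeg)
    loss.backward()
    print(loss.item())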
|
{"hexsha": "6ec27e001edbab5e005f2f24654bdb475c43e1b9", "size": 2386, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model.py", "max_stars_repo_name": "j40903272/PRUNE-pytorch", "max_stars_repo_head_hexsha": "2494adbda12a0bec8fbf69252d730d32d0f57996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-08-20T15:36:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-19T18:28:34.000Z", "max_issues_repo_path": "src/model.py", "max_issues_repo_name": "j40903272/PRUNE-pytorch", "max_issues_repo_head_hexsha": "2494adbda12a0bec8fbf69252d730d32d0f57996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model.py", "max_forks_repo_name": "j40903272/PRUNE-pytorch", "max_forks_repo_head_hexsha": "2494adbda12a0bec8fbf69252d730d32d0f57996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8133333333, "max_line_length": 84, "alphanum_fraction": 0.564124057, "include": true, "reason": "import numpy", "num_tokens": 566}
|
//
// get_local_deleter_test2.cpp
//
// Copyright 2002, 2017 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/config.hpp>
#if defined( BOOST_NO_CXX11_RVALUE_REFERENCES ) || defined( BOOST_NO_CXX11_VARIADIC_TEMPLATES )
int main()
{
}
#else
#include <boost/smart_ptr/local_shared_ptr.hpp>
#include <boost/smart_ptr/make_local_shared.hpp>
#include <boost/core/lightweight_test.hpp>
struct deleter
{
};
struct deleter2;
struct X
{
};
int main()
{
{
boost::local_shared_ptr<X[]> p = boost::make_local_shared<X[]>( 1 );
BOOST_TEST(boost::get_deleter<void>(p) == 0);
BOOST_TEST(boost::get_deleter<void const>(p) == 0);
BOOST_TEST(boost::get_deleter<int>(p) == 0);
BOOST_TEST(boost::get_deleter<int const>(p) == 0);
BOOST_TEST(boost::get_deleter<X>(p) == 0);
BOOST_TEST(boost::get_deleter<X const>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter const>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter2>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter2 const>(p) == 0);
}
{
boost::local_shared_ptr<X[1]> p = boost::make_local_shared<X[1]>();
BOOST_TEST(boost::get_deleter<void>(p) == 0);
BOOST_TEST(boost::get_deleter<void const>(p) == 0);
BOOST_TEST(boost::get_deleter<int>(p) == 0);
BOOST_TEST(boost::get_deleter<int const>(p) == 0);
BOOST_TEST(boost::get_deleter<X>(p) == 0);
BOOST_TEST(boost::get_deleter<X const>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter const>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter2>(p) == 0);
BOOST_TEST(boost::get_deleter<deleter2 const>(p) == 0);
}
return boost::report_errors();
}
struct deleter2
{
};
#endif
|
{"hexsha": "999fffe7465767e672985661f2e35dfeafef19bd", "size": 1993, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.71.0/libs/smart_ptr/test/get_local_deleter_array_test2.cpp", "max_stars_repo_name": "rajeev02101987/arangodb", "max_stars_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "3rdParty/boost/1.71.0/libs/smart_ptr/test/get_local_deleter_array_test2.cpp", "max_issues_repo_name": "rajeev02101987/arangodb", "max_issues_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "3rdParty/boost/1.71.0/libs/smart_ptr/test/get_local_deleter_array_test2.cpp", "max_forks_repo_name": "rajeev02101987/arangodb", "max_forks_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 26.5733333333, "max_line_length": 95, "alphanum_fraction": 0.6427496237, "num_tokens": 609}
|
import os, sys
import numpy as np
def norm(v):
'''Normalizes a given vector and returns the normalized vector.
==========
Parameters
v: [array of floats] vector to be normalized
==========
'''
# numerical instability
# if ray goes through exact center, offset by small amount
# to avoid division by zero error in vnorm
if(v[0]==0. and v[1]==0. and v[2]==0.):
v=v+1.e-10
vmag = np.sqrt(sum(v[i]*v[i] for i in range(len(v))))
vnorm = np.array([v[i]/vmag for i in range(len(v))])
return vnorm
def sphere_hit(p0, d0, c0, r):
'''Uses a point in 3D space, direction vector, sphere center, and sphere radius to determine if the ray vector will intersect with the sphere. Returns minimum value t for the vector-sphere intersection, indicating that the vector hits the sphere closest to the initial point p0. If the ray vector does not intersect with a sphere then value of t = -1 is returned.
==========
Parameters
p0: [array of floats] initial point in 3D space [x,y,z]
d0: [array of floats] initial direction vector in 3D space [x,y,z]
c0: [array of floats] center of sphere in 3D space [x,y,z]
r: [float] radius of sphere for determining vector-sphere intersection
==========
'''
# from page: http://www.cs.umbc.edu/~olano/435f02/ray-sphere.html
# routine finds intersection of sphere-ray using determinant
t0 = 0.
t1 = 0.
t2 = 0.
a = np.dot(d0,d0)
b = 2.*np.dot(d0,(p0-c0))
c = np.dot((p0-c0),(p0-c0)) - r**2.
det = b**2. - 4.*a*c
# if det is negative, no hit
# if det is positive, ray hits sphere
# if positive, return lowest positive value of t
# if negative, return t = -1 to show that ray did not intersect
if det < 0.:
return -1.
if det == 0.:
t0 = -b/(2.*a)
if t0 >= 0.:
return t0
else:
return -1.
if det > 0.:
t1 = (-b + np.sqrt(det))/(2.*a)
t2 = (-b - np.sqrt(det))/(2.*a)
# numerical instability - in case we are really close to
# a given sphere, but don't actually hit, we treat this as a non-hitting ray
# and set it to -1 to prevent false images coming through
if t1 < 1.e-10:
t1 = -1.
if t2 < 1.e-10:
t2 = -1.
if t1 < 0. and t2 < 0.:
return -1.
if (t1*t2) < 0.:
return max(t1,t2)
else:
return min(t1,t2)
def snell_vec(nv,lv,n1,n2):
'''Uses the normal vector, light ray direction vector, and two indices of refraction from either side of a given sphere radius to calculate the direction and amplitude of a reflected and refracted ray. Returns the direction vectors of the reflected and refracted rays, and the amplitude of the reflected ray.
CAUTION: When dot product of direction vector and normal vector goes negative, routine fails and calculates a negative cth1 which produces false values for reflection coefficient
==========
Parameters
nv: [array of floats] direction vector for the normal plane [x,y,z]
lv: [array of floats] direction vector for the light ray [x,y,z]
n1: [float] index of refraction on incoming ray side of interface
n2: [float] index of refraction on refracted ray side of interface
==========
'''
#first, calculate reflected ray
n = norm(nv) #normal vector normalized
l = norm(lv) #ray direction vector normalized
nr = n1/n2
cth1 = np.dot(n,(-l))
v_reflect = l + (2.*cth1*n)
cth2_sq = 1. - nr*nr*(1.-(cth1*cth1))
#this is cosine theta2 squared, a test for total internal reflection
if(cth2_sq > 0.):
cth2 = np.sqrt(cth2_sq)
#we can safely find the square root of cth2 since cth2_sq is positive
#calculate reflection coefficients for reflected ray (2 polarizations)
rs = (n1*cth1-n2*cth2)/(n1*cth1+n2*cth2)
rp = (n1*cth2-n2*cth1)/(n1*cth2+n2*cth1)
Rs = rs*rs
Rp = rp*rp
R = (Rs+Rp)/2. #mean reflected energy
#calculate the refracted ray
v_refract = nr*l + (nr*cth1-cth2)*n
return(v_reflect, v_refract, R, Rs, Rp)
else:
#if cth2_sq is less than zero, we have total internal reflection
print("Warning: total internal reflection of ray")
v_null = np.array([0.,0.,0.])
return(v_reflect,v_null,1.0,1.0,1.0)
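# --- Illustrative sketch (assumed usage, not part of the module): trace a ray at
# normal incidence onto a glass sphere and print the hit distance and reflectance.
if __name__ == '__main__':
    p0 = np.array([0., 0., 0.])  # ray origin
    d0 = np.array([0., 0., 1.])  # ray direction (+z)
    c0 = np.array([0., 0., 5.])  # unit sphere centered 5 units down the axis
    t = sphere_hit(p0, d0, c0, 1.)  # expect t = 4.0 (near surface)
    hit = p0 + t*d0
    nv = hit - c0  # outward surface normal at the hit point
    v_reflect, v_refract, R, Rs, Rp = snell_vec(nv, d0, 1.0, 1.5)
    print(t, R)  # 4.0 and ~0.04, the classic air-to-glass Fresnel loss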
|
{"hexsha": "22ed29ef062b7ae83ed50c472a5114acadacb6a9", "size": 4549, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/lens_ray_tracing.py", "max_stars_repo_name": "masoncarney/stepped_luneburg", "max_stars_repo_head_hexsha": "728323f6331cc90cf8d97e9702a958ef553a284e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-10-05T12:48:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-25T14:41:48.000Z", "max_issues_repo_path": "model/lens_ray_tracing.py", "max_issues_repo_name": "masoncarney/stepped_luneburg", "max_issues_repo_head_hexsha": "728323f6331cc90cf8d97e9702a958ef553a284e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/lens_ray_tracing.py", "max_forks_repo_name": "masoncarney/stepped_luneburg", "max_forks_repo_head_hexsha": "728323f6331cc90cf8d97e9702a958ef553a284e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.204379562, "max_line_length": 367, "alphanum_fraction": 0.5944163552, "include": true, "reason": "import numpy", "num_tokens": 1302}
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import SimpleRNN, Dense, Activation
# call load_data with allow_pickle implicitly set to true
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(path = "imdb.npz",
num_words= None,
skip_top = 0,
maxlen = None,
seed = 113,
start_char = 1,
oov_char = 2,
index_from = 3)
print("Type: ", type(X_train)) #1
print("Type: ", type(Y_train)) #1
print("X train shape: ",X_train.shape) #1
print("Y train shape: ",Y_train.shape) #1
# %% Eda
print("Y train values: ",np.unique(Y_train)) #2 determine the distinct class values
print("Y test values: ",np.unique(Y_test)) #2
unique, counts = np.unique(Y_train, return_counts = True) # Y train distribution: {0: 12500, 1: 12500} -- a perfectly balanced dataset
print("Y train distribution: ", dict(zip(unique,counts))) #3
unique, counts = np.unique(Y_test, return_counts = True)
print("Y testdistribution: ",dict(zip(unique,counts)))
#visualization
plt.figure() #4
sns.countplot(Y_train)
plt.xlabel("Classes")
plt.ylabel("Freg")
plt.title("Y train")
plt.figure() #4
sns.countplot(Y_test)
plt.xlabel("Classes")
plt.ylabel("Freg")
plt.title("Y test")
# check review word counts and their distributions
d = X_train[0] #5
print(d)
print(len(d))
review_len_train = []
review_len_test = []
for i,ii in zip(X_train,X_test):
review_len_train.append(len(i))
review_len_test.append(len(ii))
sns.distplot(review_len_train, hist_kws = {"alpha":0.3}) #6
sns.distplot(review_len_test, hist_kws = {"alpha":0.3}) #6
print("Train mean:", np.mean(review_len_train))
print("Train median:", np.median(review_len_train))
print("Train mode:", stats.mode(review_len_train))
#word numbers
word_index = imdb.get_word_index() #7
print(type(word_index))
print(len(word_index))
# find which word corresponds to a given index
for keys, values in word_index.items(): #8
if values == 11111:
print(keys)
# decode a review's word indices back into text (offset by 3: indices 0-2 are reserved for padding/start/OOV)
def whatItSay(index = 24): #9
reverse_index = dict([(value,key) for (key, value) in word_index.items()])
decode_review = " ".join([reverse_index.get(i - 3,"!") for i in X_train[index]])
print(decode_review)
print(Y_train[index])
return decode_review
decoded_review = whatItSay(31)
# %% Preprocess
#call load_data
num_words = 15000 #10
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words = num_words)
maxlen = 150 # cap review length at 150 tokens for now
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)
print(X_train[4])
for i in X_train[0:10]:
print(len(i))
decoded_review = whatItSay(2)
# %% RNN
rnn = Sequential()
rnn.add(Embedding(num_words,32,input_length = len(X_train[0])))
rnn.add(SimpleRNN(16, input_shape = (num_words, maxlen), return_sequences = False, activation = "relu" )) # add a SimpleRNN layer to the Sequential model
rnn.add(Dense(1))
rnn.add(Activation("sigmoid")) # sigmoid activation for binary classification
print(rnn.summary())
rnn.compile(loss="binary_crossentropy",optimizer="rmsprop",metrics=["accuracy"])
history = rnn.fit(X_train, Y_train, validation_data= (X_test, Y_test), epochs=5, batch_size= 128, verbose=1) # train the model
score = rnn.evaluate(X_test, Y_test) # evaluate accuracy on the test set
print("Accuracy: %",score[1]*100)
# visualize training history
plt.figure()
plt.plot(history.history["accuracy"], label = "Train")
plt.plot(history.history["val_accuracy"], label = "Test")
plt.title("Acc")
plt.ylabel("Acc")
plt.xlabel("Epochs")
plt.legend()
plt.show()
plt.figure()
plt.plot(history.history["loss"], label = "Train")
plt.plot(history.history["val_loss"], label = "Test")
plt.title("Loss")
plt.ylabel("Loss")
plt.xlabel("Epochs")
plt.legend()
plt.show()
|
{"hexsha": "025cccd9d356751a7a8c5d7d9af450a17402282f", "size": 4550, "ext": "py", "lang": "Python", "max_stars_repo_path": "temp.py", "max_stars_repo_name": "bedirhanbuyukoz/IMDB-Sentiment-Analysis---Machine-Learning-RNN", "max_stars_repo_head_hexsha": "f6d9d0e4e19c8cf69b3a422ae52e4aaca1ed85e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "temp.py", "max_issues_repo_name": "bedirhanbuyukoz/IMDB-Sentiment-Analysis---Machine-Learning-RNN", "max_issues_repo_head_hexsha": "f6d9d0e4e19c8cf69b3a422ae52e4aaca1ed85e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "temp.py", "max_forks_repo_name": "bedirhanbuyukoz/IMDB-Sentiment-Analysis---Machine-Learning-RNN", "max_forks_repo_head_hexsha": "f6d9d0e4e19c8cf69b3a422ae52e4aaca1ed85e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5945945946, "max_line_length": 145, "alphanum_fraction": 0.6254945055, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1142}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 02:57:47 2015
1.
This script plots the co-evolution of galaxy stellar mass growth and
lambda_r. Treating abrupt stellar mass growth as the signature of a
galaxy merger, the role of merger history in lambda_r can be studied.
2.
This script loads lambda_mp.py output catalogs (.pickle).
3.
For the moment, only galaxies with full halo merger history are stored in catalogs.
2015/08/18
Multiple yaxis
@author: hoseung
"""
import pickle
import numpy as np
wdir = '/home/hoseung/Work/data/05427/'
nout_fi = 187
nout_ini = 37
nouts = np.arange(nout_ini, nout_fi + 1)
nnouts = len(nouts)
def load_pickle(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
# Final galaxies
# Why set..?
#final_gals = set()
for inout, nout in enumerate(reversed(np.arange(nout_ini, nout_fi + 1))):
cat = load_pickle(wdir + 'catalog/' + 'catalog' + str(nout) + '.pickle')
#final_gals.update(cat['final'])
final_gals = cat['final_gal']
ngals = len(final_gals)
mstar = np.zeros((ngals, nnouts))
l_r = np.zeros((ngals, nnouts))
reff = np.zeros((ngals, nnouts))
fg = np.zeros((ngals, nnouts), dtype=int)
print(ngals)
final_gals = np.sort(list(final_gals))
#%%#######################################################################
# Read catalogs and extract mstar and lambda-r of galaxies at all nouts.
for inout, nout in enumerate(reversed(np.arange(nout_ini, nout_fi + 1))):
cat = load_pickle(wdir + 'catalog/' + 'catalog' + str(nout) + '.pickle')
for igal, idgal in enumerate(cat['final_gal']):
#ind = which galaxy..? -> need tree. (save the ID of final halo too)
ind = np.where(idgal == final_gals)[0]
# print(ind, final_gals[ind], inout, nout, idgal, cat['id'][igal])
if len(ind) > 0 :
fg[ind,inout] = final_gals[ind]
mstar[ind,inout] = cat['mstar'][igal]
l_r[ind,inout] = cat['lambda_r'][igal]
reff[ind,inout] = cat['rgal'][igal]
#%%
zreds=[]
aexps=[]
import load
for nout in nouts:
info = load.info.Info(nout=nout, base=wdir, load=True)
aexps.append(info.aexp)
zreds.append(info.zred)
aexps = np.array(aexps)
zreds = np.array(zreds)
#%%
def aexp2zred(aexp):
return [1.0/a - 1.0 for a in aexp]
def zred2aexp(zred):
return [1.0/(1.0 + z) for z in zred]
def lbt2aexp(lts):
import astropy.units as u
from astropy.cosmology import WMAP7, z_at_value
zreds = [z_at_value(WMAP7.lookback_time, ll * u.Gyr) for ll in lts]
return [1.0/(1+z) for z in zreds]
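# Sanity check for the conversions above (illustrative): since z = 1/a - 1,
# a = 0.5 corresponds to z = 1, and zred2aexp(aexp2zred(a)) returns a unchanged.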
# For a given list of nouts,
# calculate a nice-looking set of zreds.
# AND lookback times
z_targets=[0, 0.2, 0.5, 1, 2, 3]
z_target_str=["{:.2f}".format(z) for z in z_targets]
a_targets_z = zred2aexp(z_targets)
z_pos = [nout_ini + (1 - (max(aexps) - a)/aexps.ptp()) * nnouts for a in a_targets_z]
lbt_targets=[0.00001,1,3,5,8,12]
lbt_target_str=["{:.0f}".format(l) for l in lbt_targets]
a_targets_lbt = lbt2aexp(lbt_targets)
lbt_pos = [nout_ini + (1 - (max(aexps) - a)/aexps.ptp()) * nnouts for a in a_targets_lbt]
#from astropy.cosmology import WMAP7 as cosmo
#lookback_t=[cosmo.lookback_time(i).value for i in zreds]
#%%
import matplotlib.pyplot as plt
# plot each galaxy.
# stellar mass growth and lambda_r as a function of time.
# The exponent (also called an "offset") in the figure (1e11)
# overlaps with lookback time tick labels.
# And moving the offset around is not easy.
# So, manually divide the values.
mm = mstar/1e10
plt.close()
plt.ioff()
def make_patch_spines_invisible(ax):
"""
Useful for plotting multiple variables (more than two twinx())
"""
ax.set_frame_on(True)
ax.patch.set_visible(False)
#for sp in ax.spines.itervalues():
# Changed in Python3
for sp in ax.spines.values():
sp.set_visible(False)
for i, idgal in enumerate(cat['final_gal']):
#for i, idgal in enumerate([1618]):
if mm[i][0] < 0.2 :
print(idgal, mm[i][0], mm[i][-1])
continue
print(idgal, "!!!!")
plt.rcParams["figure.figsize"] = [12,10]
fig, axes = plt.subplots(3)
# plt.figure(num=1, figsize=[10,20])
fig.suptitle("ID: " + str(idgal).zfill(5), fontsize=18)#, y=1.01)
lns1 = axes[0].plot(nouts[::-1], l_r[i], label=r"$\lambda_{R}$")
axes[0].set_xticks(z_pos)
axes[0].set_xticklabels(z_target_str)
plt.subplots_adjust(left = 0.1, right = 0.9, \
wspace = 0.1, hspace = 0.0, \
bottom = 0.1, top = 0.85)
axes[0].set_xlim([37,187])
axes[0].set_ylim([0,1.0])
axes[0].set_ylabel(r"$\lambda_{R}$")
axes[0].set_xlabel("redshift")
# ax2 = axes[0].twinx()
lns2 = axes[1].plot(nouts[::-1], mm[i], 'r-', label="stellar mass")
axes[1].set_ylim([0, 1.3*max(mm[i])])
axes[1].set_xlim([37,187])
axes[1].set_ylabel(r"Stellar mass $[10^{10}M_{\odot}]$")
axes[1].get_yaxis().get_offset_text().set_y(1)
# ax3 = ax1.twinx() # Reff
# ax3.spines["right"].set_position(("axes", 1.2))
# make_patch_spines_invisible(ax3)
# Second, show the right spine.
# ax3.spines["right"].set_visible(True)
axes[2].set_ylabel("Reff [kpc]")
axes[2].set_xlim([37,187])
lns3 = axes[2].plot(nouts[::-1], reff[i], 'g-', label='Reff')
# hide x axes so that subplots stick together.
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
ax4 = axes[0].twiny()
ax4.set_xlabel("Lookback time", labelpad=10)
ax4.set_xticks(lbt_pos)
ax4.set_xticklabels(lbt_target_str)
lns = lns1+lns2+lns3
labs = [l.get_label() for l in lns]
axes[0].legend(lns, labs, loc=0)
# legend location codes:
# 0 ~ 10
# best, ur, ul, lr, ll, r, cl, cr, lower c, upper c, center
#
# plt.show()
plt.savefig(wdir + 'catalog/' + str(idgal).zfill(5) + '.png')
plt.close()
|
{"hexsha": "467c5b7b1c6cebd120ca74300a09f276dbb5b210", "size": 5900, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/Rotation/plot_stellarmassgrowth_lambda_growth.py", "max_stars_repo_name": "Hoseung/pyRamAn", "max_stars_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-25T16:11:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T16:11:56.000Z", "max_issues_repo_path": "scripts/Rotation/plot_stellarmassgrowth_lambda_growth.py", "max_issues_repo_name": "Hoseung/pyRamAn", "max_issues_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-02-17T13:44:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-25T15:35:05.000Z", "max_forks_repo_path": "scripts/Rotation/plot_stellarmassgrowth_lambda_growth.py", "max_forks_repo_name": "Hoseung/pyRamAn", "max_forks_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-25T16:11:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-25T16:11:56.000Z", "avg_line_length": 31.0526315789, "max_line_length": 89, "alphanum_fraction": 0.6294915254, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 1911}
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
os.chdir('../Datos//')
datos = pd.read_csv('Drug5.csv')
barras = pd.value_counts(datos['Drug'])
plt.figure()
N=len(barras)
plt.bar(np.arange(N), barras) # bar chart
plt.title('Drug') # set the title
plt.ylabel('Frecuencia')
# set the x-axis labels
plt.xticks(np.arange(N), barras.index)
|
{"hexsha": "5dba0058eb6328d82286d41ce291cb4fad71cf4d", "size": 402, "ext": "py", "lang": "Python", "max_stars_repo_path": "01_Diag_Barras.py", "max_stars_repo_name": "JosefinaMedina/Deep-Learning-2021-P1", "max_stars_repo_head_hexsha": "6abd77a55f065e0072a2923f0e395cb2cec11fcc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "01_Diag_Barras.py", "max_issues_repo_name": "JosefinaMedina/Deep-Learning-2021-P1", "max_issues_repo_head_hexsha": "6abd77a55f065e0072a2923f0e395cb2cec11fcc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "01_Diag_Barras.py", "max_forks_repo_name": "JosefinaMedina/Deep-Learning-2021-P1", "max_forks_repo_head_hexsha": "6abd77a55f065e0072a2923f0e395cb2cec11fcc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6470588235, "max_line_length": 50, "alphanum_fraction": 0.7213930348, "include": true, "reason": "import numpy", "num_tokens": 114}
|
import sys
import gym
import numpy as np
from collections import defaultdict, deque
import matplotlib.pyplot as plt
import check_test
from plot_utils import plot_values
env = gym.make('CliffWalking-v0')
print(env.action_space)
print(env.observation_space)
# define the optimal state-value function
V_opt = np.zeros((4,12))
V_opt[0] = -np.arange(3, 15)[::-1]
V_opt[1] = -np.arange(3, 15)[::-1] + 1
V_opt[2] = -np.arange(3, 15)[::-1] + 2
V_opt[3][0] = -13
plot_values(V_opt)
def eps_greedy_act(p_q_state, p_env, p_eps):
greed = np.random.choice(np.arange(2), p=[p_eps, 1-p_eps])
if greed:
action = np.argmax(p_q_state)
else:
action = np.random.randint(0, p_env.action_space.n) # upper bound is exclusive, so use n to allow every action
return action
def sarsa(env, num_episodes, alpha, gamma=1.0):
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# initialize performance monitor
# loop over episodes
for i_episode in range(1, num_episodes + 1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q[key]) if key in Q else 0 for key in np.arange(48)])
plot_values(V_sarsa)
s0 = env.reset()
"""
Q[s0, a0] = (1-alpha) * Q[s0, a0] + alpha * (r + gamma * Q[s1,a1])
"""
a0 = eps_greedy_act(Q[s0], env, 1.0/i_episode)
for i in range(1000):
[s1, r, done, info] = env.step(a0)
if not done:
a1 = eps_greedy_act(Q[s1], env, 1.0/i_episode)
Q[s0][a0] = (1 - alpha) * Q[s0][a0] + alpha * (r + gamma * Q[s1][a1])
else:
Q[s0][a0] = (1 - alpha) * Q[s0][a0] + alpha * r
break
a0 = a1
s0 = s1
return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 300, .1)
# print the estimated optimal policy
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
def q_learning(env, num_episodes, alpha, gamma=1.0):
# initialize empty dictionary of arrays
Q = defaultdict(lambda: np.zeros(env.nA))
# loop over episodes
for i_episode in range(1, num_episodes + 1):
# monitor progress
if i_episode % 20 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# plot the estimated optimal state-value function
#V_q = ([np.max(Q[key]) if key in Q else 0 for key in np.arange(48)])
#plot_values(V_q)
s0 = env.reset()
for i in range(1000):
a = eps_greedy_act(Q[s0], env, 1.0 / i_episode)
[s1, r, done, info]=env.step(a)
if not done:
a_max = eps_greedy_act(Q[s1],env, 0)
Q[s0][a] = (1 - alpha) * Q[s0][a] + alpha * (r + gamma * Q[s1][a_max])
else:
Q[s0][a] = (1 - alpha) * Q[s0][a] + alpha * r
break
s0 = s1
return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = q_learning(env, 100, .1)
# print the estimated optimal policy
policy_sarsamax = np.array(
[np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4, 12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
def eps_greedy_p(p_q_state, p_env, p_eps):
p= np.ones(p_env.action_space.n)*p_eps/p_env.action_space.n
p[np.argmax(p_q_state)] += 1-p_eps
return p
def expected_sarsa(env, num_episodes, alpha, gamma=1.0):
# initialize empty dictionary of arrays
Q = defaultdict(lambda: np.zeros(env.nA))
# loop over episodes
for i_episode in range(1, num_episodes + 1):
# monitor progress
if i_episode % 20 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# plot the estimated optimal state-value function
#V_q = ([np.max(Q[key]) if key in Q else 0 for key in np.arange(48)])
#plot_values(V_q)
s0 = env.reset()
for i in range(1000):
a = eps_greedy_act(Q[s0], env, 1.0 / i_episode)
[s1, r, done, info]=env.step(a)
if not done:
p = eps_greedy_p(Q[s1],env, 1.0 / i_episode)
Q[s0][a] = (1 - alpha) * Q[s0][a] + alpha * (r + gamma * np.sum(Q[s1]*p))
else:
Q[s0][a] = (1 - alpha) * Q[s0][a] + alpha * r
break
s0 = s1
return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_expsarsa = expected_sarsa(env, 300, 0.1)
# print the estimated optimal policy
policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_expsarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_expsarsa)
# plot the estimated optimal state-value function
plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])
|
{"hexsha": "85a46f2fa41d628290fc6c4afe190d82b035ffd2", "size": 5955, "ext": "py", "lang": "Python", "max_stars_repo_path": "temporal-difference/temporal_difference.py", "max_stars_repo_name": "csggnn/deep-reinforcement-learning", "max_stars_repo_head_hexsha": "73795e831832590d252dd57b95b877715e84e2fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-30T14:03:40.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-30T14:03:40.000Z", "max_issues_repo_path": "temporal-difference/temporal_difference.py", "max_issues_repo_name": "csggnn/deep-reinforcement-learning", "max_issues_repo_head_hexsha": "73795e831832590d252dd57b95b877715e84e2fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "temporal-difference/temporal_difference.py", "max_forks_repo_name": "csggnn/deep-reinforcement-learning", "max_forks_repo_head_hexsha": "73795e831832590d252dd57b95b877715e84e2fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0909090909, "max_line_length": 124, "alphanum_fraction": 0.6082283795, "include": true, "reason": "import numpy", "num_tokens": 1772}
|
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* Yul code and data object container.
*/
#include <libyul/Object.h>
#include <libyul/AsmPrinter.h>
#include <libyul/Exceptions.h>
#include <libsolutil/Visitor.h>
#include <libsolutil/CommonData.h>
#include <boost/algorithm/string/replace.hpp>
using namespace std;
using namespace solidity;
using namespace solidity::yul;
using namespace solidity::util;
namespace
{
string indent(std::string const& _input)
{
if (_input.empty())
return _input;
return boost::replace_all_copy(" " + _input, "\n", "\n ");
}
}
string Data::toString(Dialect const*) const
{
return "data \"" + name.str() + "\" hex\"" + util::toHex(data) + "\"";
}
string Object::toString(Dialect const* _dialect) const
{
yulAssert(code, "No code");
string inner = "code " + (_dialect ? AsmPrinter{*_dialect} : AsmPrinter{})(*code);
for (auto const& obj: subObjects)
inner += "\n" + obj->toString(_dialect);
return "object \"" + name.str() + "\" {\n" + indent(inner) + "\n}";
}
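// For illustration (hypothetical names): an object "A" whose code section is
// elided and which carries one data item "B" would print roughly as
// object "A" {
//     code { ... }
//     data "B" hex"..."
// }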
set<YulString> Object::dataNames() const
{
set<YulString> names;
names.insert(name);
for (auto const& subObject: subIndexByName)
names.insert(subObject.first);
// The empty name is not valid
names.erase(YulString{});
return names;
}
|
{"hexsha": "13ad9f8caf4862eef72d9e1e01c3f224712bada8", "size": 1864, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libyul/Object.cpp", "max_stars_repo_name": "MrChico/solidity", "max_stars_repo_head_hexsha": "5b4ea1eb895d5edc9a24ee5c6f96d8580eceec08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libyul/Object.cpp", "max_issues_repo_name": "MrChico/solidity", "max_issues_repo_head_hexsha": "5b4ea1eb895d5edc9a24ee5c6f96d8580eceec08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-06-17T14:24:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-17T14:24:49.000Z", "max_forks_repo_path": "libyul/Object.cpp", "max_forks_repo_name": "step21/solidity", "max_forks_repo_head_hexsha": "2a0d701f709673162e8417d2f388b8171a34e892", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1891891892, "max_line_length": 83, "alphanum_fraction": 0.7103004292, "num_tokens": 474}
|
"""Tests coreml.data.data_module.py"""
from os.path import join, exists
import multiprocessing as mp
import subprocess
import torch
import numpy as np
import unittest
from coreml.config import DATA_ROOT
from coreml.data.data_module import DataModule
class DataModuleTestCase(unittest.TestCase):
"""Class to check the creation of DataModule"""
@classmethod
def setUpClass(cls):
if not exists(join(DATA_ROOT, 'CIFAR10')):
subprocess.call(
'python /workspace/coreml/coreml/data/process/CIFAR10.py',
shell=True)
def test_classification_data_module(self):
"""Test get_dataloader for classification"""
cfg = {
'root': DATA_ROOT,
'data_type': 'image',
'dataset': {
'name': 'classification_dataset',
'params': {
'test': {
'fraction': 0.1
}
},
'config': [
{
'name': 'CIFAR10',
'version': 'default',
'mode': 'test'
}
]
},
'target_transform': {
'name': 'classification',
'params': {
'classes': [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9
]
}
},
'signal_transform': {
'train': [
{
'name': 'Permute',
'params': {
'order': [2, 0, 1]
}
},
{
'name': 'Resize',
'params': {
'size': [30, 30]
}
}
],
'val': [
{
'name': 'Permute',
'params': {
'order': [2, 0, 1]
}
},
{
'name': 'Resize',
'params': {
'size': [30, 30]
}
}
],
'test': [
{
'name': 'Permute',
'params': {
'order': [2, 0, 1]
}
},
{
'name': 'Resize',
'params': {
'size': [30, 30]
}
}
]
},
'sampler': {
'train': {
'name': 'default'
},
'val': {
'name': 'default'
},
'test': {
'name': 'default'
}
},
'collate_fn': {
'name': 'classification_collate'
}
}
batch_size = 8
data_module = DataModule(cfg, batch_size, mp.cpu_count())
train_dataloader = data_module.train_dataloader()
batch = next(iter(train_dataloader))
signals, labels = batch['signals'], batch['labels']
        self.assertEqual(tuple(signals.shape), (batch_size, 3, 30, 30))
val_dataloader = data_module.val_dataloader()
batch = next(iter(val_dataloader))
signals, labels = batch['signals'], batch['labels']
        self.assertEqual(tuple(signals.shape), (batch_size, 3, 30, 30))
test_dataloader = data_module.test_dataloader()
batch = next(iter(test_dataloader))
signals, labels = batch['signals'], batch['labels']
        self.assertEqual(tuple(signals.shape), (batch_size, 3, 30, 30))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "1ef73a5f879d2187962a8a9a5113c204972ca597", "size": 3985, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/data/test_data_module.py", "max_stars_repo_name": "core-ml/coreml", "max_stars_repo_head_hexsha": "c983b919ec9041a9a8d71c03829158ed41da7890", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-08-28T08:15:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T04:50:03.000Z", "max_issues_repo_path": "tests/data/test_data_module.py", "max_issues_repo_name": "core-ml/coreml", "max_issues_repo_head_hexsha": "c983b919ec9041a9a8d71c03829158ed41da7890", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-12-21T18:03:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T22:15:54.000Z", "max_forks_repo_path": "tests/data/test_data_module.py", "max_forks_repo_name": "core-ml/coreml", "max_forks_repo_head_hexsha": "c983b919ec9041a9a8d71c03829158ed41da7890", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-05T16:49:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-20T01:40:07.000Z", "avg_line_length": 31.1328125, "max_line_length": 74, "alphanum_fraction": 0.3593475533, "include": true, "reason": "import numpy", "num_tokens": 759}
|
#include <boost/algorithm/string.hpp>
#include "Conversion.hpp"
#include "ScoreFileEntry.hpp"
#include "Serialize.hpp"
#include "StringTable.hpp"
#include "TextKeys.hpp"
#include "TextMessages.hpp"
using namespace std;
// Default constructor
ScoreFileEntry::ScoreFileEntry()
: score(0), sex(CreatureSex::CREATURE_SEX_MALE), is_current_char(false), level(0)
{
}
// Construct a score file entry with all required values.
ScoreFileEntry::ScoreFileEntry(const long long new_score, const string& new_name, const string& new_username, const CreatureSex new_sex, const bool new_is_current_char, const int new_level, const vector<CreatureWin> new_wins, const string& new_race_class_abrv)
: score(new_score), name(new_name), username(new_username), sex(new_sex), is_current_char(new_is_current_char), level(new_level), all_wins(new_wins), race_class_abrv(new_race_class_abrv)
{
}
bool ScoreFileEntry::operator<(const ScoreFileEntry& sfe) const
{
return (score < sfe.get_score());
}
bool ScoreFileEntry::serialize(ostream& stream) const
{
Serialize::write_longlong(stream, score);
Serialize::write_string(stream, name);
Serialize::write_string(stream, username);
Serialize::write_enum(stream, sex);
// The constant below is "is_current_char". After writing the entry to
// disk, the entry will never again reference the current character, so
// a value of false is always correct.
Serialize::write_bool(stream, false);
Serialize::write_int(stream, level);
Serialize::write_size_t(stream, all_wins.size());
for (const auto& win : all_wins)
{
Serialize::write_enum(stream, win);
}
Serialize::write_string(stream, race_class_abrv);
return true;
}
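// On-disk layout written above, in order: score (long long), name (string),
// username (string), sex (enum), is_current_char (bool, always false),
// level (int), the number of wins (size_t) followed by that many CreatureWin
// enums, and finally the race/class abbreviation (string). deserialize below
// reads back the same sequence.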
bool ScoreFileEntry::deserialize(istream& stream)
{
Serialize::read_longlong(stream, score);
Serialize::read_string(stream, name);
Serialize::read_string(stream, username);
Serialize::read_enum(stream, sex);
Serialize::read_bool(stream, is_current_char);
Serialize::read_int(stream, level);
size_t wins_size = 0;
Serialize::read_size_t(stream, wins_size);
for (size_t i = 0; i < wins_size; i++)
{
CreatureWin cw = CreatureWin::CREATURE_WIN_NO_WIN;
Serialize::read_enum(stream, cw);
all_wins.push_back(cw);
}
Serialize::read_string(stream, race_class_abrv);
return true;
}
ClassIdentifier ScoreFileEntry::internal_class_identifier() const
{
return ClassIdentifier::CLASS_ID_SCORE_FILE_ENTRY;
}
long long ScoreFileEntry::get_score() const
{
return score;
}
string ScoreFileEntry::get_name() const
{
return name;
}
string ScoreFileEntry::get_username() const
{
return username;
}
CreatureSex ScoreFileEntry::get_sex() const
{
return sex;
}
bool ScoreFileEntry::get_is_current_char() const
{
return is_current_char;
}
int ScoreFileEntry::get_level() const
{
return level;
}
vector<CreatureWin> ScoreFileEntry::get_wins() const
{
return all_wins;
}
string ScoreFileEntry::get_race_class_abrv() const
{
return race_class_abrv;
}
string ScoreFileEntry::str(const int score_number) const
{
ostringstream ss;
string rc_abrv = get_race_class_abrv();
boost::algorithm::trim(rc_abrv);
CreatureSex sex = get_sex();
ss << score_number << ". " << get_score() << ". " << get_name() << " (" << get_username() << ") - " << StringTable::get(TextKeys::LEVEL_ABRV) << get_level() << " " << rc_abrv;
if (sex != CreatureSex::CREATURE_SEX_NOT_SPECIFIED)
{
ss << " (" << TextMessages::get_sex_abrv(sex) << ")";
}
ss << ".";
for (const CreatureWin winner : all_wins)
{
switch (winner)
{
case CreatureWin::CREATURE_WIN_REGULAR:
ss << " " << StringTable::get(TextKeys::WINNER) << "!";
break;
case CreatureWin::CREATURE_WIN_EVIL:
ss << " " << StringTable::get(TextKeys::WINNER_EVIL) << "!";
break;
case CreatureWin::CREATURE_WIN_GODSLAYER:
ss << " " << StringTable::get(TextKeys::WINNER_GODSLAYER) << "!";
break;
case CreatureWin::CREATURE_WIN_NO_WIN:
default:
break;
}
}
return ss.str();
}
#ifdef UNIT_TESTS
#include "unit_tests/ScoreFileEntry_test.cpp"
#endif
|
{"hexsha": "faac4ac6f2e7c7ebca9dea6eeae3af3d7d20ef93", "size": 4125, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "engine/source/ScoreFileEntry.cpp", "max_stars_repo_name": "sidav/shadow-of-the-wyrm", "max_stars_repo_head_hexsha": "747afdeebed885b1a4f7ab42f04f9f756afd3e52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 60.0, "max_stars_repo_stars_event_min_datetime": "2019-08-21T04:08:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T13:48:04.000Z", "max_issues_repo_path": "engine/source/ScoreFileEntry.cpp", "max_issues_repo_name": "cleancoindev/shadow-of-the-wyrm", "max_issues_repo_head_hexsha": "51b23e98285ecb8336324bfd41ebf00f67b30389", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2021-03-18T15:11:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-20T12:13:07.000Z", "max_forks_repo_path": "engine/source/ScoreFileEntry.cpp", "max_forks_repo_name": "cleancoindev/shadow-of-the-wyrm", "max_forks_repo_head_hexsha": "51b23e98285ecb8336324bfd41ebf00f67b30389", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2019-11-16T06:29:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T17:33:43.000Z", "avg_line_length": 25.0, "max_line_length": 260, "alphanum_fraction": 0.7134545455, "num_tokens": 1057}
|
c
c The small main program below is an example of how to compute field
c components with T89C.
c See GEOPACK.DOC for an example of field line tracing.
c
dimension parmod(10)
1 print *, ' enter x,y,z,ps,iopt'
read*, x,y,z,ps,iopt
call t89c(iopt,parmod,ps,x,y,z,bx,by,bz)
print *, bx,by,bz
goto 1
end
C
SUBROUTINE T89C(IOPT,PARMOD,PS,X,Y,Z,BX,BY,BZ)
C
C
C COMPUTES GSM COMPONENTS OF THE MAGNETIC FIELD PRODUCED BY EXTRA-
C TERRESTRIAL CURRENT SYSTEMS IN THE GEOMAGNETOSPHERE. THE MODEL IS
C VALID UP TO GEOCENTRIC DISTANCES OF 70 RE AND IS BASED ON THE MER-
C GED IMP-A,C,D,E,F,G,H,I,J (1966-1974), HEOS-1 AND -2 (1969-1974),
C AND ISEE-1 AND -2 SPACECRAFT DATA SET.
C
C THIS IS A MODIFIED VERSION (T89c), WHICH REPLACED THE ORIGINAL ONE
C IN 1992 AND DIFFERS FROM IT IN THE FOLLOWING:
C
C (1) ISEE-1,2 DATA WERE ADDED TO THE ORIGINAL IMP-HEOS DATASET
C (2) TWO TERMS WERE ADDED TO THE ORIGINAL TAIL FIELD MODES, ALLOWING
C A MODULATION OF THE CURRENT BY THE GEODIPOLE TILT ANGLE
C
C
C REFERENCE FOR THE ORIGINAL MODEL: N.A. TSYGANENKO, A MAGNETOSPHERIC MAGNETIC
C FIELD MODEL WITH A WARPED TAIL CURRENT SHEET: PLANET.SPACE SCI., V.37,
C PP.5-20, 1989.
C
C----INPUT PARAMETERS: IOPT - SPECIFIES THE GROUND DISTURBANCE LEVEL:
C
C IOPT= 1 2 3 4 5 6 7
C CORRESPOND TO:
C KP= 0,0+ 1-,1,1+ 2-,2,2+ 3-,3,3+ 4-,4,4+ 5-,5,5+ > =6-
C
C PS - GEODIPOLE TILT ANGLE IN RADIANS
C X, Y, Z - GSM COORDINATES OF THE POINT IN EARTH RADII
C
C----OUTPUT PARAMETERS: BX,BY,BZ - GSM COMPONENTS OF THE MODEL MAGNETIC
C FIELD IN NANOTESLAS
c
c THE PARAMETER PARMOD(10) IS A DUMMY ARRAY. IT IS NOT USED IN THIS
C SUBROUTINE AND IS PROVIDED JUST FOR MAKING IT COMPATIBLE WITH THE
C NEW VERSION (4/16/96) OF THE GEOPACK SOFTWARE.
C
C THIS RELEASE OF T89C IS DATED FEB 12, 1996;
C--------------------------------------------------------------------------
C
C
C AUTHOR: NIKOLAI A. TSYGANENKO
C HSTX CORP./NASA GSFC
C
DIMENSION XI(4),F(3),DER(3,30),PARAM(30,7),A(30),PARMOD(10)
DOUBLE PRECISION F,DER
DATA PARAM/-116.53,-10719.,42.375,59.753,-11363.,1.7844,30.268,
* -0.35372E-01,-0.66832E-01,0.16456E-01,-1.3024,0.16529E-02,
* 0.20293E-02,20.289,-0.25203E-01,224.91,-9234.8,22.788,7.8813,
* 1.8362,-0.27228,8.8184,2.8714,14.468,32.177,0.01,0.0,
* 7.0459,4.0,20.0,-55.553,-13198.,60.647,61.072,-16064.,
* 2.2534,34.407,-0.38887E-01,-0.94571E-01,0.27154E-01,-1.3901,
* 0.13460E-02,0.13238E-02,23.005,-0.30565E-01,55.047,-3875.7,
* 20.178,7.9693,1.4575,0.89471,9.4039,3.5215,14.474,36.555,
* 0.01,0.0,7.0787,4.0,20.0,-101.34,-13480.,111.35,12.386,-24699.,
* 2.6459,38.948,-0.34080E-01,-0.12404,0.29702E-01,-1.4052,
* 0.12103E-02,0.16381E-02,24.49,-0.37705E-01,-298.32,4400.9,18.692,
* 7.9064,1.3047,2.4541,9.7012,7.1624,14.288,33.822,0.01,0.0,6.7442,
* 4.0,20.0,-181.69,-12320.,173.79,-96.664,-39051.,3.2633,44.968,
* -0.46377E-01,-0.16686,0.048298,-1.5473,0.10277E-02,0.31632E-02,
* 27.341,-0.50655E-01,-514.10,12482.,16.257,8.5834,1.0194,3.6148,
* 8.6042,5.5057,13.778,32.373,0.01,0.0,7.3195,4.0,20.0,-436.54,
* -9001.0,323.66,-410.08,-50340.,3.9932,58.524,-0.38519E-01,
* -0.26822,0.74528E-01,-1.4268,-0.10985E-02,0.96613E-02,27.557,
* -0.56522E-01,-867.03,20652.,14.101,8.3501,0.72996,3.8149,9.2908,
* 6.4674,13.729,28.353,0.01,0.0,7.4237,4.0,20.0,-707.77,-4471.9,
* 432.81,-435.51,-60400.,4.6229,68.178,-0.88245E-01,-0.21002,
* 0.11846,-2.6711,0.22305E-02,0.10910E-01,27.547,-0.54080E-01,
* -424.23,1100.2,13.954,7.5337,0.89714,3.7813,8.2945,5.174,14.213,
* 25.237,0.01,0.0,7.0037,4.0,20.0,-1190.4,2749.9,742.56,-1110.3,
* -77193.,7.6727,102.05,-0.96015E-01,-0.74507,0.11214,-1.3614,
* 0.15157E-02,0.22283E-01,23.164,-0.74146E-01,-2219.1,48253.,
* 12.714,7.6777,0.57138,2.9633,9.3909,9.7263,11.123,21.558,0.01,
* 0.0,4.4518,4.0,20.0/
DATA IOP/10/
C
IF (IOP.NE.IOPT) THEN
C
ID=1
IOP=IOPT
DO 1 I=1,30
1 A(I)=PARAM(I,IOPT)
C
ENDIF
C
XI(1)=X
XI(2)=Y
XI(3)=Z
XI(4)=PS
CALL T89(ID,A,XI,F,DER)
IF (ID.EQ.1) ID=2
BX=F(1)
BY=F(2)
BZ=F(3)
RETURN
END
C-------------------------------------------------------------------
C
SUBROUTINE T89 (ID, A, XI, F, DER)
C
C *** N.A. Tsyganenko *** 8-10.12.1991 ***
C
C Calculates dependent model variables and their deriva-
C tives for given independent variables and model parame-
C ters. Specifies model functions with free parameters which
C must be determined by means of least squares fits (RMS
C minimization procedure).
C
C Description of parameters:
C
C ID - number of the data point in a set (initial assignments are performed
c only for ID=1, saving thus CPU time)
C A - input vector containing model parameters;
C XI - input vector containing independent variables;
C F - output double precision vector containing
C calculated values of dependent variables;
C DER - output double precision vector containing
C calculated values for derivatives of dependent
C variables with respect to model parameters;
C
C - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
C
C T89 represents external magnetospheric magnetic field
C in Cartesian SOLAR MAGNETOSPHERIC coordinates (Tsyganenko N.A.,
C Planet. Space Sci., 1989, v.37, p.5-20; the "T89 model" with the warped
c tail current sheet) + A MODIFICATION ADDED IN APRIL 1992 (SEE BELOW)
C
C Model formulas for the magnetic field components contain in total
c 30 free parameters (17 linear and 13 nonlinear parameters).
C First 2 independent linear parameters A(1)-A(2) correspond to contribu-
c tion from the tail current system, then follow A(3) and A(4) which are the
c amplitudes of symmetric and antisymmetric terms in the contribution from
c the closure currents; A(5) is the ring current amplitude. Then follow the
c coefficients A(6)-A(15) which define Chapman-Ferraro+Birkeland current field.
c The coefficients c16-c19 (see Formula 20 in the original paper),
c due to DivB=0 condition, are expressed through A(6)-A(15) and hence are not
c independent ones.
c A(16) AND A(17) CORRESPOND TO THE TERMS WHICH YIELD THE TILT ANGLE DEPEN-
C DENCE OF THE TAIL CURRENT INTENSITY (ADDED ON APRIL 9, 1992)
C
C Nonlinear parameters:
C
C A(18) : DX - Characteristic scale of the Chapman-Ferraro field along the
c X-axis
C A(19) : ADR (aRC) - Characteristic radius of the ring current
c A(20) : D0 - Basic half-thickness of the tail current sheet
C A(21) : DD (GamRC)- defines rate of thickening of the ring current, as
c we go from night- to dayside
C A(22) : Rc - an analog of "hinging distance" entering formula (11)
C A(23) : G - amplitude of tail current warping in the Y-direction
C A(24) : aT - Characteristic radius of the tail current
c A(25) : Dy - characteristic scale distance in the Y direction entering
c in W(x,y) in (13)
c A(26) : Delta - defines the rate of thickening of the tail current sheet
c in the Y-direction (in T89 it was fixed at 0.01)
c A(27) : Q - this parameter was fixed at 0 in the final version of T89;
c initially it was introduced for making Dy to depend on X
c A(28) : Sx (Xo) - enters in W(x,y) ; see (13)
c A(29) : Gam (GamT) - enters in DT in (13) and defines rate of tail sheet
c thickening on going from night to dayside; in T89 fixed at 4.0
c A(30) : Dyc - the Dy parameter for closure current system; in T89 fixed
c at 20.0
c - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
C
IMPLICIT REAL * 8 (A - H, O - Z)
C
REAL A(1), XI(1)
C
DIMENSION F(3), DER(3,30)
C
INTEGER ID, I, L
DATA A02,XLW2,YN,RPI,RT/25.D0,170.D0,30.D0,0.31830989D0,30.D0/
DATA XD,XLD2/0.D0,40.D0/
C
C The last four quantities define variation of tail sheet thickness along X
C
DATA SXC,XLWC2/4.D0,50.D0/
C
C The two quantities belong to the function WC which confines tail closure
c current in X- and Y- direction
C
DATA DXL/20.D0/
C
C
IF (ID.NE.1) GOTO 3
DO 2 I = 1, 30
DO 1 L = 1, 3
1 DER(L,I) = 0.0D0
2 CONTINUE
C
DYC=A(30)
DYC2=DYC**2
DX=A(18)
HA02=0.5D0*A02
RDX2M=-1.D0/DX**2
RDX2=-RDX2M
RDYC2=1.D0/DYC2
HLWC2M=-0.5D0*XLWC2
DRDYC2=-2.D0*RDYC2
DRDYC3=2.D0*RDYC2*DSQRT(RDYC2)
HXLW2M=-0.5D0*XLW2
ADR=A(19)
D0=A(20)
DD=A(21)
RC=A(22)
G=A(23)
AT=A(24)
DT=D0
DEL=A(26)
P=A(25)
Q=A(27)
SX=A(28)
GAM=A(29)
HXLD2M=-0.5D0*XLD2
ADSL=0.D0
XGHS=0.D0
H=0.D0
HS=0.D0
GAMH=0.D0
W1=-0.5D0/DX
DBLDEL=2.D0*DEL
W2=W1*2.D0
W4=-1.D0/3.D0
W3=W4/DX
W5=-0.5D0
W6=-3.D0
AK1=A(1)
AK2=A(2)
AK3=A(3)
AK4=A(4)
AK5=A(5)
AK6=A(6)
AK7=A(7)
AK8=A(8)
AK9=A(9)
AK10=A(10)
AK11=A(11)
AK12=A(12)
AK13=A(13)
AK14=A(14)
AK15=A(15)
AK16=A(16)
AK17=A(17)
SXA=0.D0
SYA=0.D0
SZA=0.D0
AK610=AK6*W1+AK10*W5
AK711=AK7*W2-AK11
AK812=AK8*W2+AK12*W6
AK913=AK9*W3+AK13*W4
RDXL=1.D0/DXL
HRDXL=0.5D0*RDXL
A6H=AK6*0.5D0
A9T=AK9/3.D0
YNP=RPI/YN*0.5D0
YND=2.D0*YN
C
3 CONTINUE
C
X = XI(1)
Y = XI(2)
Z = XI(3)
TILT=XI(4)
TLT2=TILT**2
SPS = DSIN(TILT)
CPS = DSQRT (1.0D0 - SPS ** 2)
C
X2=X*X
Y2=Y*Y
Z2=Z*Z
TPS=SPS/CPS
HTP=TPS*0.5D0
GSP=G*SPS
XSM=X*CPS-Z*SPS
ZSM=X*SPS+Z*CPS
C
C CALCULATE THE FUNCTION ZS DEFINING THE SHAPE OF THE TAIL CURRENT SHEET
C AND ITS SPATIAL DERIVATIVES:
C
XRC=XSM+RC
XRC16=XRC**2+16.D0
SXRC=DSQRT(XRC16)
Y4=Y2*Y2
Y410=Y4+1.D4
SY4=SPS/Y410
GSY4=G*SY4
ZS1=HTP*(XRC-SXRC)
DZSX=-ZS1/SXRC
ZS=ZS1-GSY4*Y4
D2ZSGY=-SY4/Y410*4.D4*Y2*Y
DZSY=G*D2ZSGY
C
C CALCULATE THE COMPONENTS OF THE RING CURRENT CONTRIBUTION:
C
XSM2=XSM**2
DSQT=DSQRT(XSM2+A02)
FA0=0.5D0*(1.D0+XSM/DSQT)
DDR=D0+DD*FA0
DFA0=HA02/DSQT**3
ZR=ZSM-ZS
TR=DSQRT(ZR**2+DDR**2)
RTR=1.D0/TR
RO2=XSM2+Y2
ADRT=ADR+TR
ADRT2=ADRT**2
FK=1.D0/(ADRT2+RO2)
DSFC=DSQRT(FK)
FC=FK**2*DSFC
FACXY=3.0D0*ADRT*FC*RTR
XZR=XSM*ZR
YZR=Y*ZR
DBXDP=FACXY*XZR
DER(2,5)=FACXY*YZR
XZYZ=XSM*DZSX+Y*DZSY
FAQ=ZR*XZYZ-DDR*DD*DFA0*XSM
DBZDP=FC*(2.D0*ADRT2-RO2)+FACXY*FAQ
DER(1,5)=DBXDP*CPS+DBZDP*SPS
DER(3,5)=DBZDP*CPS-DBXDP*SPS
C
C CALCULATE THE TAIL CURRENT SHEET CONTRIBUTION:
C
DELY2=DEL*Y2
D=DT+DELY2
IF (DABS(GAM).LT.1.D-6) GOTO 8
XXD=XSM-XD
RQD=1.D0/(XXD**2+XLD2)
RQDS=DSQRT(RQD)
H=0.5D0*(1.D0+XXD*RQDS)
HS=-HXLD2M*RQD*RQDS
GAMH=GAM*H
D=D+GAMH
XGHS=XSM*GAM*HS
ADSL=-D*XGHS
8 D2=D**2
T=DSQRT(ZR**2+D2)
XSMX=XSM-SX
RDSQ2=1.D0/(XSMX**2+XLW2)
RDSQ=DSQRT(RDSQ2)
V=0.5D0*(1.D0-XSMX*RDSQ)
DVX=HXLW2M*RDSQ*RDSQ2
OM=DSQRT(DSQRT(XSM2+16.D0)-XSM)
OMS=-OM/(OM*OM+XSM)*0.5D0
RDY=1.D0/(P+Q*OM)
OMSV=OMS*V
RDY2=RDY**2
FY=1.D0/(1.D0+Y2*RDY2)
W=V*FY
YFY1=2.D0*FY*Y2*RDY2
FYPR=YFY1*RDY
FYDY=FYPR*FY
DWX=DVX*FY+FYDY*Q*OMSV
YDWY=-V*YFY1*FY
DDY=DBLDEL*Y
ATT=AT+T
S1=DSQRT(ATT**2+RO2)
F5=1.D0/S1
F7=1.D0/(S1+ATT)
F1=F5*F7
F3=F5**3
F9=ATT*F3
FS=ZR*XZYZ-D*Y*DDY+ADSL
XDWX=XSM*DWX+YDWY
RTT=1.D0/T
WT=W*RTT
BRRZ1=WT*F1
BRRZ2=WT*F3
DBXC1=BRRZ1*XZR
DBXC2=BRRZ2*XZR
DER(2,1)=BRRZ1*YZR
DER(2,2)=BRRZ2*YZR
DER(2,16)=DER(2,1)*TLT2
DER(2,17)=DER(2,2)*TLT2
WTFS=WT*FS
DBZC1=W*F5+XDWX*F7+WTFS*F1
DBZC2=W*F9+XDWX*F1+WTFS*F3
DER(1,1)=DBXC1*CPS+DBZC1*SPS
DER(1,2)=DBXC2*CPS+DBZC2*SPS
DER(3,1)=DBZC1*CPS-DBXC1*SPS
DER(3,2)=DBZC2*CPS-DBXC2*SPS
DER(1,16)=DER(1,1)*TLT2
DER(1,17)=DER(1,2)*TLT2
DER(3,16)=DER(3,1)*TLT2
DER(3,17)=DER(3,2)*TLT2
C
C CALCULATE CONTRIBUTION FROM THE CLOSURE CURRENTS
C
ZPL=Z+RT
ZMN=Z-RT
ROGSM2=X2+Y2
SPL=DSQRT(ZPL**2+ROGSM2)
SMN=DSQRT(ZMN**2+ROGSM2)
XSXC=X-SXC
RQC2=1.D0/(XSXC**2+XLWC2)
RQC=DSQRT(RQC2)
FYC=1.D0/(1.D0+Y2*RDYC2)
WC=0.5D0*(1.D0-XSXC*RQC)*FYC
DWCX=HLWC2M*RQC2*RQC*FYC
DWCY=DRDYC2*WC*FYC*Y
SZRP=1.D0/(SPL+ZPL)
SZRM=1.D0/(SMN-ZMN)
XYWC=X*DWCX+Y*DWCY
WCSP=WC/SPL
WCSM=WC/SMN
FXYP=WCSP*SZRP
FXYM=WCSM*SZRM
FXPL=X*FXYP
FXMN=-X*FXYM
FYPL=Y*FXYP
FYMN=-Y*FXYM
FZPL=WCSP+XYWC*SZRP
FZMN=WCSM+XYWC*SZRM
DER(1,3)=FXPL+FXMN
DER(1,4)=(FXPL-FXMN)*SPS
DER(2,3)=FYPL+FYMN
DER(2,4)=(FYPL-FYMN)*SPS
DER(3,3)=FZPL+FZMN
DER(3,4)=(FZPL-FZMN)*SPS
C
C NOW CALCULATE CONTRIBUTION FROM CHAPMAN-FERRARO SOURCES + ALL OTHER
C
EX=DEXP(X/DX)
EC=EX*CPS
ES=EX*SPS
ECZ=EC*Z
ESZ=ES*Z
ESZY2=ESZ*Y2
ESZZ2=ESZ*Z2
ECZ2=ECZ*Z
ESY=ES*Y
C
DER(1,6)=ECZ
DER(1,7)=ES
DER(1,8)=ESY*Y
DER(1,9)=ESZ*Z
DER(2,10)=ECZ*Y
DER(2,11)=ESY
DER(2,12)=ESY*Y2
DER(2,13)=ESY*Z2
DER(3,14)=EC
DER(3,15)=EC*Y2
DER(3,6)=ECZ2*W1
DER(3,10)=ECZ2*W5
DER(3,7)=ESZ*W2
DER(3,11)=-ESZ
DER(3,8)=ESZY2*W2
DER(3,12)=ESZY2*W6
DER(3,9)=ESZZ2*W3
DER(3,13)=ESZZ2*W4
C
C FINALLY, CALCULATE NET EXTERNAL MAGNETIC FIELD COMPONENTS,
C BUT FIRST OF ALL THOSE FOR C.-F. FIELD:
C
SX1=AK6*DER(1,6)+AK7*DER(1,7)+AK8*DER(1,8)+AK9*DER(1,9)
SY1=AK10*DER(2,10)+AK11*DER(2,11)+AK12*DER(2,12)+AK13*DER(2,13)
SZ1=AK14*DER(3,14)+AK15*DER(3,15)+AK610*ECZ2+AK711*ESZ+AK812
* *ESZY2+AK913*ESZZ2
BXCL=AK3*DER(1,3)+AK4*DER(1,4)
BYCL=AK3*DER(2,3)+AK4*DER(2,4)
BZCL=AK3*DER(3,3)+AK4*DER(3,4)
BXT=AK1*DER(1,1)+AK2*DER(1,2)+BXCL +AK16*DER(1,16)+AK17*DER(1,17)
BYT=AK1*DER(2,1)+AK2*DER(2,2)+BYCL +AK16*DER(2,16)+AK17*DER(2,17)
BZT=AK1*DER(3,1)+AK2*DER(3,2)+BZCL +AK16*DER(3,16)+AK17*DER(3,17)
F(1)=BXT+AK5*DER(1,5)+SX1+SXA
F(2)=BYT+AK5*DER(2,5)+SY1+SYA
F(3)=BZT+AK5*DER(3,5)+SZ1+SZA
C
RETURN
END
c%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
{"hexsha": "d14aa3a9dfea685ad39706a1cb3065d7bfd06e5f", "size": 15257, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "fortran/T89.f", "max_stars_repo_name": "scivision/tsyganenko", "max_stars_repo_head_hexsha": "416f1366caf745ec28cf15a7263a07a249f5f587", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-07-14T06:56:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-09T18:09:37.000Z", "max_issues_repo_path": "fortran/T89.f", "max_issues_repo_name": "scivision/tsyganenko", "max_issues_repo_head_hexsha": "416f1366caf745ec28cf15a7263a07a249f5f587", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-05-26T13:13:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-25T16:27:42.000Z", "max_forks_repo_path": "tsyganenko/T89.f", "max_forks_repo_name": "johncoxon/tsyganenko", "max_forks_repo_head_hexsha": "8acfbdbb9efebb18b76b767a429e5235d2eec1d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-07-13T21:12:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T14:28:19.000Z", "avg_line_length": 31.0733197556, "max_line_length": 79, "alphanum_fraction": 0.5666251557, "num_tokens": 6665}
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("h5file")
parser.add_argument("--pattern")
args = parser.parse_args()
import h5py
import rapprentice.cv_plot_utils as cpu
import numpy as np
import cv2
import fnmatch
hdf = h5py.File(args.h5file,"r")
all_imgnames = [(np.asarray(seg["rgb"]),name) for (name,seg) in hdf.items() if (args.pattern is None) or fnmatch.fnmatch(name, args.pattern)]
nrows = 7
chunksize = nrows**2
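# page through the images, nrows x nrows tiles per window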
for i in range(0, len(all_imgnames), chunksize):
imgnames = all_imgnames[i:i+chunksize]
imgs = []
for (img, name) in imgnames:
cv2.putText(img, name,(30,30), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), thickness = 3)
imgs.append(img)
bigimg = cpu.tile_images(imgs, nrows, nrows)
cv2.imshow("bigimg", bigimg)
cv2.waitKey()
|
{"hexsha": "89b2a158f9095a8c38afa842e590197f4f3ec090", "size": 833, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/view_demos.py", "max_stars_repo_name": "wjchen84/rapprentice", "max_stars_repo_head_hexsha": "9232a6a21e2c80f00854912f07dcdc725b0be95a", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2015-08-25T19:40:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-27T09:23:06.000Z", "max_issues_repo_path": "scripts/view_demos.py", "max_issues_repo_name": "wjchen84/rapprentice", "max_issues_repo_head_hexsha": "9232a6a21e2c80f00854912f07dcdc725b0be95a", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/view_demos.py", "max_forks_repo_name": "wjchen84/rapprentice", "max_forks_repo_head_hexsha": "9232a6a21e2c80f00854912f07dcdc725b0be95a", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-05-18T20:13:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-03T16:09:50.000Z", "avg_line_length": 26.03125, "max_line_length": 141, "alphanum_fraction": 0.6986794718, "include": true, "reason": "import numpy", "num_tokens": 245}
|
import numpy as np
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import matplotlib.dates as mdates
import requests
import io
#hide
def load_timeseries(name,
base_url='https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series'):
# Thanks to kasparthommen for the suggestion to directly download
url = f'{base_url}/time_series_covid19_{name}_global.csv'
csv = requests.get(url).text
df = pd.read_csv(io.StringIO(csv),
index_col=['Country/Region', 'Province/State', 'Lat', 'Long'])
df['type'] = name.lower()
df.columns.name = 'date'
df = (df.set_index('type', append=True)
.reset_index(['Lat', 'Long'], drop=True)
.stack()
.reset_index()
.set_index('date')
)
df.index = pd.to_datetime(df.index)
df.columns = ['country', 'state', 'type', 'cases']
# Fix South Korea
df.loc[df.country =='Korea, South', 'country'] = 'South Korea'
# Move HK to country level
df.loc[df.state =='Hong Kong', 'country'] = 'Hong Kong'
df.loc[df.state =='Hong Kong', 'state'] = np.nan
# Aggregate large countries split by states
df = (df.groupby(["date", "country", "type"])
.sum()
.reset_index()
.sort_values(["country", "date"])
.set_index("date"))
return df
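# The frame returned above is indexed by date and holds one row per
# (date, country, type) with columns country, type ('confirmed' or 'deaths')
# and the cumulative cases count.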
#hide
def get_countries_with_min_cases_for_df(df_cases, by='cases', min_cases = 10):
MIN_CASES = min_cases
countries = df_cases[df_cases[by].ge(MIN_CASES)
].sort_values(by=by, ascending=False)
countries = countries['country'].values
return countries
def get_countries_with_min_cases(df_cases, by='cases', min_cases = 10):
MIN_CASES = min_cases
countries = df_cases[df_cases[by].ge(MIN_CASES)].sort_values(by=by, ascending=False)
countries = countries['country'].unique()
return countries
def filter_cases(df_cases, by='cases', min_cases = 10, since_cases=None):
countries = get_countries_with_min_cases_for_df(df_cases, by, min_cases)
if since_cases is None:
SINCE_CASES_NUM = 100
else:
SINCE_CASES_NUM = since_cases
COL_X = f'Days since case {SINCE_CASES_NUM}'
dff2 = df_cases[df_cases['country'].isin(countries)].copy()
dff2['date'] = dff2.index
days_since = (dff2.assign(F=dff2[by].ge(SINCE_CASES_NUM))
.set_index('date')
.groupby('country')['F'].transform('idxmax'))
dff2[COL_X] = (dff2['date'] - days_since.values).dt.days.values
dff2 = dff2[dff2[COL_X].ge(0)]
return dff2
def get_df(MIN_CASES = 1000, SINCE_CASES = 100):
#hide
df_deaths = load_timeseries("deaths")
df_confirmed = load_timeseries("confirmed")
# concatenate both timeseries
df_cases = pd.concat((df_confirmed, df_deaths), axis=1)
df_cases.columns = ['country', 'type', 'confirmed', 'country', 'type', 'deaths']
df_cases = df_cases.loc[:,~df_cases.columns.duplicated()]
#hide
# create diffs
df_cases['new_deaths'] = df_cases.deaths.diff()
# flatten artifacts from one country to the next
df_cases.loc[df_cases['new_deaths']<0, 'new_deaths'] = 0
df_cases['new_confirmed'] = df_cases.confirmed.diff()
df_cases.loc[df_cases['new_confirmed']<0, 'new_confirmed'] = 0
#hide
dff2 = filter_cases(df_cases, 'confirmed', MIN_CASES, SINCE_CASES)
return dff2
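# Minimal usage sketch (assumes network access to the JHU CSSE repository;
# the follow-up query is a hypothetical downstream step):
# dff = get_df(MIN_CASES=1000, SINCE_CASES=100)
# dff.groupby('country')['confirmed'].last().nlargest(10)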
|
{"hexsha": "82ced2b9e199cc6f46d1c293f966fe2034320c1f", "size": 3535, "ext": "py", "lang": "Python", "max_stars_repo_path": "_notebooks/lib/covid_data.py", "max_stars_repo_name": "caglorithm/notebooks", "max_stars_repo_head_hexsha": "6d9bb0f2e8483c39bc13dd3e435f7f05dd8f3bce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-14T05:29:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-24T01:48:36.000Z", "max_issues_repo_path": "_notebooks/lib/.ipynb_checkpoints/covid_data-checkpoint.py", "max_issues_repo_name": "caglorithm/notebooks", "max_issues_repo_head_hexsha": "6d9bb0f2e8483c39bc13dd3e435f7f05dd8f3bce", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-05-05T18:28:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T06:54:58.000Z", "max_forks_repo_path": "_notebooks/lib/covid_data.py", "max_forks_repo_name": "caglorithm/covid19-analysis", "max_forks_repo_head_hexsha": "e7fca7e8736fb88314ba87b031f5afd9982879d8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-11T09:36:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-10T16:58:34.000Z", "avg_line_length": 33.0373831776, "max_line_length": 131, "alphanum_fraction": 0.6449787836, "include": true, "reason": "import numpy", "num_tokens": 960}
|
import cv2
import numpy as np
import os
import glob
import math
def find(img, cap, DIM, K, D):
sift = cv2.xfeatures2d.SIFT_create()
kp_image, desc_image = sift.detectAndCompute(img,None)
img = cv2.drawKeypoints(img,kp_image,img)
#matching
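    # FLANN parameters below: algorithm=0 is FLANN_INDEX_KDTREE, the index
    # type suited to float descriptors such as SIFT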
index_params = dict(algorithm=0,trees = 5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
while True:
_,frame = cap.read()
h,w = frame.shape[:2]
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
undistorted_img = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
grayframe = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2GRAY)
kp_grayframe, desc_grayframe = sift.detectAndCompute(grayframe,None)
grayframe = cv2.drawKeypoints(grayframe, kp_grayframe, grayframe)
try:
matches = flann.knnMatch(desc_image, desc_grayframe, k=2)
except Exception as e:
print(e)
matches = []
good_points=[]
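        # Lowe's ratio test (applied below): keep a match only when its best
        # distance is clearly smaller than the second-best distance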
for m,n in matches:
if m.distance < 0.8*n.distance:
good_points.append(m)
#homography
if len(good_points) > 10:
query_points = np.float32([kp_image[m.queryIdx].pt for m in good_points]).reshape(-1,1,2)
train_pts = np.float32([kp_grayframe[m.trainIdx].pt for m in good_points]).reshape(-1,1,2)
            matrix, mask = cv2.findHomography(query_points, train_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
h, w = img.shape[:-1]
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
try:
                if matrix is not None:
dst = cv2.perspectiveTransform(pts, matrix)
#homography = cv2.polylines(frame, [np.int32(dst)], True, (255, 0, 0), 3)
dst_1 = tuple(np.int32(dst[0]))
a = dst_1[0]
pt_1 = (a[0],a[1])
dst_2 = tuple(np.int32(dst[1]))
b = dst_2[0]
pt_2 = (b[0],b[1])
dst_3 = tuple(np.int32(dst[2]))
c = dst_3[0]
pt_3 = (c[0],c[1])
dst_4 = tuple(np.int32(dst[3]))
d = dst_4[0]
pt_4 = (d[0],d[1])
crn_1 = (np.int32((a[0]+b[0])/2),np.int32((a[1]+b[1])/2))
crn_2 = (np.int32((c[0]+d[0])/2),np.int32((c[1]+d[1])/2))
cv2.rectangle(frame, crn_1, crn_2, (255, 0, 0), 3)
rect1center = ((168+2)/2, (95+20)/2)
rect2center = ((366+40)/2, (345+522)/2)
center = ((crn_1[0]+crn_2[0])/2,(crn_1[1]+crn_2[1])/2)
m = np.int32(center[0])
n = np.int32(center[1])
i = (m,n)
cv2.circle(frame, i, 3, (255, 0, 0), 1)
cv2.circle(frame, (320,240), 3, (255, 0, 0), 1)
cv2.line(frame, i, (320,240), (255, 0, 0), 1)
difference = (i[0]-320,i[1]-240)
if difference[0]<0:
cv2.putText(frame, 'go left', (10,450), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
elif difference[0]>0:
cv2.putText(frame, 'go right', (10,450), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
if difference[1]<0:
cv2.putText(frame, 'go up', (10,350), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)
elif difference[1]>0:
cv2.putText(frame, 'go down', (10,350), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)
cv2.imshow("Homography", frame)
if abs(difference[0])<20 and abs(difference[1])<20:
return True
except Exception as e:
print(e)
else:
cv2.circle(frame, (320,240), 3, (255, 0, 0), 1)
cv2.imshow("Homography", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "65ca92217a407afbe00f635744a583900794f194", "size": 4418, "ext": "py", "lang": "Python", "max_stars_repo_path": "ObjectFinder.py", "max_stars_repo_name": "thereturn932/Fly-n-Forget", "max_stars_repo_head_hexsha": "44e7585fd52679110a064824f278dab9319b725d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ObjectFinder.py", "max_issues_repo_name": "thereturn932/Fly-n-Forget", "max_issues_repo_head_hexsha": "44e7585fd52679110a064824f278dab9319b725d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ObjectFinder.py", "max_forks_repo_name": "thereturn932/Fly-n-Forget", "max_forks_repo_head_hexsha": "44e7585fd52679110a064824f278dab9319b725d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7606837607, "max_line_length": 119, "alphanum_fraction": 0.4938886374, "include": true, "reason": "import numpy", "num_tokens": 1315}
|
import os
import argparse
import pickle
import numpy as np
import random
import torch
import torch.optim
"""
Utility functions for handling parsed arguments
"""
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser('Train a ProtoTree')
parser.add_argument('--dataset',
type=str,
default='CUB-200-2011',
help='Data set on which the ProtoTree should be trained')
parser.add_argument('--net',
type=str,
default='resnet50_inat',
help='Base network used in the tree. Pretrained network on iNaturalist is only available for resnet50_inat (default). Others are pretrained on ImageNet. Options are: resnet18, resnet34, resnet50, resnet50_inat, resnet101, resnet152, densenet121, densenet169, densenet201, densenet161, vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn or vgg19_bn')
parser.add_argument('--batch_size',
type=int,
default=64,
help='Batch size when training the model using minibatch gradient descent')
parser.add_argument('--depth',
type=int,
default=9,
help='The tree is initialized as a complete tree of this depth')
parser.add_argument('--epochs',
type=int,
default=100,
help='The number of epochs the tree should be trained')
parser.add_argument('--optimizer',
type=str,
default='AdamW',
help='The optimizer that should be used when training the tree')
parser.add_argument('--lr',
type=float,
default=0.001,
help='The optimizer learning rate for training the prototypes')
parser.add_argument('--lr_block',
type=float,
default=0.001,
help='The optimizer learning rate for training the 1x1 conv layer and last conv layer of the underlying neural network (applicable to resnet50 and densenet121)')
parser.add_argument('--lr_net',
type=float,
default=1e-5,
help='The optimizer learning rate for the underlying neural network')
parser.add_argument('--lr_pi',
type=float,
default=0.001,
                        help='The optimizer learning rate for the leaf distributions (only used if the disable_derivative_free_leaf_optim flag is set)')
parser.add_argument('--momentum',
type=float,
default=0.9,
help='The optimizer momentum parameter (only applicable to SGD)')
parser.add_argument('--weight_decay',
type=float,
default=0.0,
help='Weight decay used in the optimizer')
parser.add_argument('--disable_cuda',
action='store_true',
help='Flag that disables GPU usage if set')
parser.add_argument('--log_dir',
type=str,
default='./runs/run_prototree',
help='The directory in which train progress should be logged')
parser.add_argument('--W1',
type=int,
default = 1,
help='Width of the prototype. Correct behaviour of the model with W1 != 1 is not guaranteed')
parser.add_argument('--H1',
type=int,
default = 1,
help='Height of the prototype. Correct behaviour of the model with H1 != 1 is not guaranteed')
parser.add_argument('--num_features',
type=int,
default = 256,
help='Depth of the prototype and therefore also depth of convolutional output')
parser.add_argument('--milestones',
type=str,
default='',
help='The milestones for the MultiStepLR learning rate scheduler')
parser.add_argument('--gamma',
type=float,
default=0.5,
help='The gamma for the MultiStepLR learning rate scheduler. Needs to be 0<=gamma<=1')
parser.add_argument('--state_dict_dir_net',
type=str,
default='',
help='The directory containing a state dict with a pretrained backbone network')
parser.add_argument('--state_dict_dir_tree',
type=str,
default='',
help='The directory containing a state dict (checkpoint) with a pretrained prototree. Note that training further from a checkpoint does not seem to work correctly. Evaluating a trained prototree does work.')
parser.add_argument('--freeze_epochs',
type=int,
default = 30,
help='Number of epochs where pretrained features_net will be frozen'
)
parser.add_argument('--dir_for_saving_images',
type=str,
default='upsampling_results',
                        help='Directory for saving the prototypes, patches and heatmaps')
parser.add_argument('--upsample_threshold',
type=float,
default=0.98,
help='Threshold (between 0 and 1) for visualizing the nearest patch of an image after upsampling. The higher this threshold, the larger the patches.')
parser.add_argument('--disable_pretrained',
action='store_true',
                        help='When set, the backbone network is initialized with random weights instead of being pretrained on another dataset. When not set, resnet50_inat is initialized with weights from iNaturalist2017. Other networks are initialized with weights from ImageNet'
)
parser.add_argument('--disable_derivative_free_leaf_optim',
action='store_true',
                        help='Flag that optimizes the leaves with gradient descent when set, instead of using the derivative-free algorithm'
)
parser.add_argument('--kontschieder_train',
action='store_true',
help='Flag that first trains the leaves for one epoch, and then trains the rest of ProtoTree (instead of interleaving leaf and other updates). Computationally more expensive.'
)
parser.add_argument('--kontschieder_normalization',
action='store_true',
                        help='Flag that disables softmax but uses a normalization factor to convert the leaf parameters to a probability distribution, as done by Kontschieder et al. (2015). Will iterate over the data 10 times to update the leaves. Computationally more expensive.'
)
parser.add_argument('--log_probabilities',
action='store_true',
help='Flag that uses log probabilities when set. Useful when getting NaN values.'
)
parser.add_argument('--pruning_threshold_leaves',
type=float,
default=0.01,
help='An internal node will be pruned when the maximum class probability in the distributions of all leaves below this node are lower than this threshold.')
parser.add_argument('--nr_trees_ensemble',
type=int,
default=5,
help='Number of ProtoTrees to train and (optionally) use in an ensemble. Used in main_ensemble.py')
args = parser.parse_args()
args.milestones = get_milestones(args)
return args
"""
Parse the milestones argument to get a list
:param args: The arguments given
"""
def get_milestones(args: argparse.Namespace):
if args.milestones != '':
milestones_list = args.milestones.split(',')
for m in range(len(milestones_list)):
milestones_list[m]=int(milestones_list[m])
else:
milestones_list = []
return milestones_list
def save_args(args: argparse.Namespace, directory_path: str) -> None:
"""
Save the arguments in the specified directory as
- a text file called 'args.txt'
- a pickle file called 'args.pickle'
:param args: The arguments to be saved
:param directory_path: The path to the directory where the arguments should be saved
"""
# If the specified directory does not exists, create it
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
# Save the args in a text file
with open(directory_path + '/args.txt', 'w') as f:
for arg in vars(args):
val = getattr(args, arg)
if isinstance(val, str): # Add quotation marks to indicate that the argument is of string type
val = f"'{val}'"
f.write('{}: {}\n'.format(arg, val))
# Pickle the args for possible reuse
with open(directory_path + '/args.pickle', 'wb') as f:
pickle.dump(args, f)
def load_args(directory_path: str) -> argparse.Namespace:
"""
Load the pickled arguments from the specified directory
:param directory_path: The path to the directory from which the arguments should be loaded
:return: the unpickled arguments
"""
with open(directory_path + '/args.pickle', 'rb') as f:
args = pickle.load(f)
return args
def get_optimizer(tree, args: argparse.Namespace) -> torch.optim.Optimizer:
"""
Construct the optimizer as dictated by the parsed arguments
:param tree: The tree that should be optimized
:param args: Parsed arguments containing hyperparameters. The '--optimizer' argument specifies which type of
optimizer will be used. Optimizer specific arguments (such as learning rate and momentum) can be passed
this way as well
:return: the optimizer corresponding to the parsed arguments, parameter set that can be frozen, and parameter set of the net that will be trained
"""
optim_type = args.optimizer
#create parameter groups
params_to_freeze = []
params_to_train = []
dist_params = []
for name,param in tree.named_parameters():
if 'dist_params' in name:
dist_params.append(param)
# set up optimizer
if 'resnet50' in args.net:
# freeze resnet50 except last convolutional layer
for name,param in tree._net.named_parameters():
if 'layer4.2' not in name:
params_to_freeze.append(param)
else:
params_to_train.append(param)
if optim_type == 'SGD':
paramlist = [
{"params": params_to_freeze, "lr": args.lr_net, "weight_decay_rate": args.weight_decay, "momentum": args.momentum},
{"params": params_to_train, "lr": args.lr_block, "weight_decay_rate": args.weight_decay,"momentum": args.momentum},
{"params": tree._add_on.parameters(), "lr": args.lr_block, "weight_decay_rate": args.weight_decay,"momentum": args.momentum},
{"params": tree.prototype_layer.parameters(), "lr": args.lr,"weight_decay_rate": 0,"momentum": 0}]
if args.disable_derivative_free_leaf_optim:
paramlist.append({"params": dist_params, "lr": args.lr_pi, "weight_decay_rate": 0})
else:
paramlist = [
{"params": params_to_freeze, "lr": args.lr_net, "weight_decay_rate": args.weight_decay},
{"params": params_to_train, "lr": args.lr_block, "weight_decay_rate": args.weight_decay},
{"params": tree._add_on.parameters(), "lr": args.lr_block, "weight_decay_rate": args.weight_decay},
{"params": tree.prototype_layer.parameters(), "lr": args.lr,"weight_decay_rate": 0}]
if args.disable_derivative_free_leaf_optim:
paramlist.append({"params": dist_params, "lr": args.lr_pi, "weight_decay_rate": 0})
elif args.net == 'densenet121':
# freeze densenet121 except last convolutional layer
for name,param in tree._net.named_parameters():
if 'denseblock4' not in name and 'norm5' not in name:
params_to_freeze.append(param)
else:
params_to_train.append(param)
paramlist = [
{"params": params_to_freeze, "lr": args.lr_net, "weight_decay_rate": args.weight_decay},
{"params": params_to_train, "lr": args.lr_block, "weight_decay_rate": args.weight_decay},
{"params": tree._add_on.parameters(), "lr": args.lr_block, "weight_decay_rate": args.weight_decay},
{"params": tree.prototype_layer.parameters(), "lr": args.lr,"weight_decay_rate": 0}]
if args.disable_derivative_free_leaf_optim:
paramlist.append({"params": dist_params, "lr": args.lr_pi, "weight_decay_rate": 0})
else:
paramlist = [
{"params": tree._net.parameters(), "lr": args.lr_net, "weight_decay_rate": args.weight_decay},
{"params": tree._add_on.parameters(), "lr": args.lr_block, "weight_decay_rate": args.weight_decay},
{"params": tree.prototype_layer.parameters(), "lr": args.lr,"weight_decay_rate": 0}]
if args.disable_derivative_free_leaf_optim:
paramlist.append({"params": dist_params, "lr": args.lr_pi, "weight_decay_rate": 0})
if optim_type == 'SGD':
return torch.optim.SGD(paramlist,
lr=args.lr,
momentum=args.momentum), params_to_freeze, params_to_train
if optim_type == 'Adam':
return torch.optim.Adam(paramlist,lr=args.lr,eps=1e-07), params_to_freeze, params_to_train
if optim_type == 'AdamW':
return torch.optim.AdamW(paramlist,lr=args.lr,eps=1e-07, weight_decay=args.weight_decay), params_to_freeze, params_to_train
raise Exception('Unknown optimizer argument given!')
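# Minimal usage sketch (assumes a `tree` object exposing _net, _add_on,
# prototype_layer and named_parameters, as get_optimizer expects; get_args
# parses sys.argv):
#
#   args = get_args()
#   optimizer, params_to_freeze, params_to_train = get_optimizer(tree, args)
#   save_args(args, args.log_dir)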
|
{"hexsha": "6c3c3fd39c79755f52db0b78815889cfb1952cf9", "size": 14801, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/args.py", "max_stars_repo_name": "TristanGomez44/ProtoTree", "max_stars_repo_head_hexsha": "d9e77a90b47cb1efe19f1736c6701872a3c4a62e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2020-12-03T14:38:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:41:26.000Z", "max_issues_repo_path": "util/args.py", "max_issues_repo_name": "TristanGomez44/ProtoTree", "max_issues_repo_head_hexsha": "d9e77a90b47cb1efe19f1736c6701872a3c4a62e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-01T10:55:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-27T10:40:14.000Z", "max_forks_repo_path": "util/args.py", "max_forks_repo_name": "TristanGomez44/ProtoTree", "max_forks_repo_head_hexsha": "d9e77a90b47cb1efe19f1736c6701872a3c4a62e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-03-13T12:17:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T15:12:13.000Z", "avg_line_length": 54.4154411765, "max_line_length": 380, "alphanum_fraction": 0.5785419904, "include": true, "reason": "import numpy", "num_tokens": 2887}
|
"""
Runs a one round screening simulation for experiment 4 - PstP.
Initial training data was sampled from PstP dataset using uniform random sampling or diversity (Tanimoto dissimilarity) sampling.
Experiment 4 - PstP: prospective screening of PstP target.
Usage:
python experiment_ors_pstp_runner.py \
--pipeline_params_json_file=../param_configs/experiment_pstp_hyperparams/one_round_screening/ors_pstp_pipeline_config.json \
--training_data_dir=../datasets/pstp/one_round_screening/random/size_400/sample_0/ \
--max_size=4000
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import json
import pathlib
import numpy as np
import pandas as pd
import csv
import time
import os
import shutil
from active_learning_dd.models.prepare_model import prepare_model
from active_learning_dd.database_loaders.prepare_loader import prepare_loader
if __name__ == '__main__':
# read args
parser = argparse.ArgumentParser()
parser.add_argument('--pipeline_params_json_file', action="store", dest="pipeline_params_json_file", required=True)
parser.add_argument('--training_data_dir', default=None, action="store", dest="training_data_dir", required=True)
parser.add_argument('--max_size', default=4000, type=int, action="store", dest="max_size", required=False)
given_args = parser.parse_args()
pipeline_params_json_file = given_args.pipeline_params_json_file
training_data_dir = given_args.training_data_dir
max_size = given_args.max_size
ors_start_time = time.time()
# load param json configs
with open(pipeline_params_json_file) as f:
pipeline_config = json.load(f)
model_params = pipeline_config['model']
task_names = pipeline_config['task_names']
training_data_file = training_data_dir + '/train.csv.gz'
print('Set {} as starting initial training set.'.format(training_data_file))
# load training and unlabeled data
unlabeled_loader_params = pipeline_config['data_params']
training_loader_params = copy.deepcopy(pipeline_config['data_params'])
training_loader_params['data_path_format'] = training_data_file
start_time = time.time()
training_loader = prepare_loader(data_loader_params=training_loader_params,
task_names=task_names)
unlabeled_loader = prepare_loader(data_loader_params=unlabeled_loader_params,
task_names=task_names)
# remove training data from unlabeled pool dataset
unlabeled_loader.drop_duplicates_via_smiles(training_loader.get_smiles())
X_train, y_train = training_loader.get_features_and_labels()
X_unlabeled = unlabeled_loader.get_features()
end_time = time.time()
print('Finished loading data. Took {} seconds.'.format(end_time - start_time))
print('Training data shape X_train: {}, y_train: {}'.format(X_train.shape, y_train.shape))
print('Unlabeled data shape X_unlabeled: {}'.format(X_unlabeled.shape))
# batch_size is max_size - X_train.shape[0]
batch_size = max_size - X_train.shape[0]
# load and train model
start_time = time.time()
model = prepare_model(model_params=model_params,
task_names=task_names)
model.fit(X_train, y_train)
end_time = time.time()
print('Finished training model. Took {} seconds.'.format(end_time - start_time))
# predict on unlabeled pool
preds_unlabeled = model.predict(unlabeled_loader.get_features())[:,0]
# select top batch_size predicted instances
selection_start_time = time.time()
top_predicted_idx = np.argsort(preds_unlabeled)[::-1][:batch_size]
unlabeled_df = unlabeled_loader.get_dataframe()
selected_df = unlabeled_df.iloc[top_predicted_idx,:]
selection_end_time = time.time()
total_selection_time = selection_end_time - selection_start_time
# save results
output_file = training_data_dir + '/selected.csv.gz'
selected_df.to_csv(output_file, compression='gzip', index=False)
ors_end_time = time.time()
print('Finished processing one round screen. Took {} seconds.'.format(ors_end_time-ors_start_time))
|
{"hexsha": "f47d117689b419bd5ab6ace58a798a366f439622", "size": 4307, "ext": "py", "lang": "Python", "max_stars_repo_path": "chtc_runners/experiment_ors_pstp_runner.py", "max_stars_repo_name": "gitter-lab/active-learning-drug-discovery", "max_stars_repo_head_hexsha": "b24004a359037b3a1175a61c181ec231b711c797", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chtc_runners/experiment_ors_pstp_runner.py", "max_issues_repo_name": "gitter-lab/active-learning-drug-discovery", "max_issues_repo_head_hexsha": "b24004a359037b3a1175a61c181ec231b711c797", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chtc_runners/experiment_ors_pstp_runner.py", "max_forks_repo_name": "gitter-lab/active-learning-drug-discovery", "max_forks_repo_head_hexsha": "b24004a359037b3a1175a61c181ec231b711c797", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2254901961, "max_line_length": 133, "alphanum_fraction": 0.7318319016, "include": true, "reason": "import numpy", "num_tokens": 941}
|
import math
import numpy as np
from knn_robustness.utils import top_k_min_indices
from knn_robustness.utils import KnnPredictor
from knn_robustness.utils import QpSolver
class ExactSolver:
def __init__(
self, X_train, y_train, qp_solver: QpSolver,
n_pos_for_screen, bounded, upper=1., lower=0.
):
self._X_train = X_train
self._y_train = y_train
self._qp_solver = qp_solver
self._n_pos_for_screen = n_pos_for_screen
self._bounded = bounded
self._upper = upper
self._lower = lower
self._predictor = KnnPredictor(X_train, y_train, n_neighbors=1)
def predict_batch(self, X_eval):
return self._predictor.predict_batch(X_eval)
def predict_individual(self, x_eval):
return self._predictor.predict_individual(x_eval)
def __call__(self, x_eval):
X_pos, X_neg = self._partition(x_eval)
X_screen = self._compute_pos_for_screen(x_eval, X_pos)
inner_product_pos = X_pos @ X_pos.T
best_perturbation = None
min_perturbation_norm = math.inf
for x_neg in self._neg_generator(x_eval, X_neg):
if self._screenable(
x_eval, x_neg, X_screen, min_perturbation_norm
):
continue
else:
perturbation = self._solve_subproblem(
x_eval, x_neg, X_pos, inner_product_pos
)
perturbation_norm = np.linalg.norm(perturbation)
if perturbation_norm < min_perturbation_norm:
min_perturbation_norm = perturbation_norm
best_perturbation = perturbation
return best_perturbation
def _partition(self, x_eval):
y_pred = self.predict_individual(x_eval)
mask = (self._y_train == y_pred)
X_pos = self._X_train[mask]
X_neg = self._X_train[~mask]
return X_pos, X_neg
def _compute_pos_for_screen(self, x_eval, X_pos):
indices = top_k_min_indices(
np.linalg.norm(x_eval - X_pos, axis=1),
self._n_pos_for_screen
)
return X_pos[indices]
def _neg_generator(self, x_eval, X_neg):
indices = np.argsort(
np.linalg.norm(
X_neg - x_eval, axis=1
)
)
for i in indices:
yield X_neg[i]
def _screenable(self, x_eval, x_neg, X_screen, threshold):
return threshold <= np.max(
np.maximum(
np.sum(
np.multiply(
2 * x_eval - X_screen - x_neg, X_screen - x_neg
),
axis=1
),
0
) / (2 * np.linalg.norm(X_screen - x_neg, axis=1))
)
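    # The test above is a lower-bound screen: any perturbation that makes x_neg
    # the nearest neighbor must move x_eval across the bisecting hyperplane
    # between x_neg and each positive point, so the largest distance from
    # x_eval to these bisectors lower-bounds the perturbation norm; once that
    # bound reaches the incumbent threshold, the QP for x_neg can be skipped.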
def _solve_subproblem(self, x_eval, x_neg, X_pos, inner_product_pos=None):
A, b, Q = self._compute_qp_params(
x_eval, x_neg, X_pos, inner_product_pos
)
lamda = self._qp_solver(Q, b)
return -A.T @ lamda
def _compute_qp_params(
self, x_eval, x_neg, X_pos, inner_product_pos
):
if inner_product_pos is None:
inner_product_pos = X_pos @ X_pos.T
# A @ u <= b
A = 2 * (X_pos - x_neg)
        # elementwise form: more efficient since it avoids redundant multiplications
b = np.sum(np.multiply(X_pos + x_neg - 2 * x_eval,
X_pos - x_neg), axis=1)
# X @ y
temp = X_pos @ x_neg
# A @ A.T = 4 * (X @ X.T - X @ y - (X @ y).T + y.T @ y)
Q = 4 * (inner_product_pos - temp[np.newaxis, :]
- temp[:, np.newaxis] + x_neg @ x_neg)
        # min  0.5 * v.T @ Q @ v + v.T @ b, v >= 0
        # max -0.5 * v.T @ Q @ v - v.T @ b, v >= 0
if not self._bounded:
return A, b, Q
else:
# upper bound
# A1 @ delta <= b1
# z + delta <= upper
A1 = np.identity(X_pos.shape[1], dtype=X_pos.dtype)
b1 = self._upper - x_eval
# lower bound
# A2 @ delta <= b2
# z + delta >= lower
A2 = -np.identity(X_pos.shape[1], dtype=X_pos.dtype)
b2 = x_eval - self._lower
# A_full @ A_full.T
Q_full = np.block([
[Q, A, -A],
[A.T, A1, A2],
[-A.T, A2, A1],
])
A_full = np.block([
[A],
[A1],
[A2]
])
b_full = np.concatenate([b, b1, b2])
return A_full, b_full, Q_full
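# Sanity-check sketch (an assumption mirroring the bounded branch above): for a
# random A (m x d) with A1 = I and A2 = -I, the block matrix Q_full equals
# A_full @ A_full.T, e.g.
#   A = np.random.randn(3, 2); I = np.eye(2)
#   Q = A @ A.T
#   Q_full = np.block([[Q, A, -A], [A.T, I, -I], [-A.T, -I, I]])
#   assert np.allclose(Q_full, np.vstack([A, I, -I]) @ np.vstack([A, I, -I]).T)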
|
{"hexsha": "9cf1da28ea3ccaeff5014962b9a949d6ae7f377f", "size": 4639, "ext": "py", "lang": "Python", "max_stars_repo_path": "knn_robustness/nn_only/exact.py", "max_stars_repo_name": "wangwllu/knn_robustness", "max_stars_repo_head_hexsha": "9a02f92bf00febf900b2817c1b7230284816b21b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-09-22T05:09:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-25T04:46:33.000Z", "max_issues_repo_path": "wang_et_al/knn_robustness/nn_only/exact.py", "max_issues_repo_name": "wagner-group/geoadex", "max_issues_repo_head_hexsha": "693856dc4537937fa09ec7a22e175f8243483b44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wang_et_al/knn_robustness/nn_only/exact.py", "max_forks_repo_name": "wagner-group/geoadex", "max_forks_repo_head_hexsha": "693856dc4537937fa09ec7a22e175f8243483b44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3202614379, "max_line_length": 78, "alphanum_fraction": 0.5253287346, "include": true, "reason": "import numpy", "num_tokens": 1193}
|
[STATEMENT]
lemma ClassI [intro, simp]:
"(a, b) \<in> E \<Longrightarrow> a \<in> Class b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a, b) \<in> E \<Longrightarrow> a \<in> Class b
[PROOF STEP]
unfolding Class_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a, b) \<in> E \<Longrightarrow> a \<in> (\<lambda>a\<in>S. {b \<in> S. (b, a) \<in> E}) b
[PROOF STEP]
by (simp add: left_closed right_closed)
|
{"llama_tokens": 179, "file": "Jacobson_Basic_Algebra_Set_Theory", "length": 2}
|
import numpy as np
import nltk
def wordcount_fn(file_uri):
    # read the whole file at once (fine for small text files) and close it afterwards
    with open(file_uri, 'r') as f:
        text_as_str = f.read()
    text_as_list = text_as_str.split()
    counts = nltk.FreqDist(text_as_list)
    for key, val in counts.items():
        print(str(val) + " " + str(key))
    return counts
print("TF (Frequency Distribution) for each word in your file:")
wordcount_fn('./textfile.txt')
|
{"hexsha": "17b3e687ac48a4380d8c09fd50b9472cf230cedd", "size": 443, "ext": "py", "lang": "Python", "max_stars_repo_path": "term-frequency/wordcounts.py", "max_stars_repo_name": "paulowe/python-data-programming", "max_stars_repo_head_hexsha": "96fdb3f888a554ac66e69e1f6958f3e0ef5b1075", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "term-frequency/wordcounts.py", "max_issues_repo_name": "paulowe/python-data-programming", "max_issues_repo_head_hexsha": "96fdb3f888a554ac66e69e1f6958f3e0ef5b1075", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "term-frequency/wordcounts.py", "max_forks_repo_name": "paulowe/python-data-programming", "max_forks_repo_head_hexsha": "96fdb3f888a554ac66e69e1f6958f3e0ef5b1075", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0588235294, "max_line_length": 98, "alphanum_fraction": 0.6568848758, "include": true, "reason": "import numpy", "num_tokens": 117}
|
import numpy as np
piOvr4 = np.pi/4
def get_2d_rot(angle=0):
return np.array([[np.cos(angle),-np.sin(angle)],
[np.sin(angle),np.cos(angle)]])
def get_2d_refl(angle=0):
return np.array([[-np.cos(angle),np.sin(angle)],
[np.sin(angle),np.cos(angle)]])
def make_ngon(nsides=6, angle_offset=0):
ns = np.arange(0,2*np.pi, 2*np.pi/nsides) + angle_offset
return np.column_stack((np.cos(ns),np.sin(ns)))
def make_haus():
"""Makes a weird house + its rotated, reflected, aligned versions.
The roof of the house is tilted to the left, which creates asymmetry.
"""
haus = np.row_stack((make_ngon(4, 3*piOvr4),
make_ngon(3, -piOvr4/2) + [0, 1]))
haus_c = np.array([0,3/7])
haus0 = haus - haus_c
haus0_b = np.sqrt(np.sum(np.square(haus0)))
two = 2
haus0_scld = two*haus0
haus0_Ro = get_2d_rot(piOvr4)
haus0_Rf = get_2d_refl(piOvr4)
haus0_rot = np.dot(haus0, haus0_Ro)
haus0_refl = np.dot(haus0, haus0_Rf)
haus0_refl_al = np.dot(haus0, [[-1,0],[0,1]]) # haus0 refl across y axis
return (haus,
haus_c, haus0_b, haus0, two, haus0_scld,
haus0_Ro, haus0_Rf, haus0_rot, haus0_refl,
haus0_refl_al)
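# Quick property checks (sketch): rotations are orthogonal with determinant +1,
# reflections with determinant -1, e.g.
#   R, F = get_2d_rot(piOvr4), get_2d_refl(piOvr4)
#   assert np.allclose(R @ R.T, np.eye(2))
#   assert np.isclose(np.linalg.det(R), 1.0) and np.isclose(np.linalg.det(F), -1.0)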
|
{"hexsha": "d5e8cd20e39de0c1fafa55fc5aed3d8b6ce0d719", "size": 1262, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/helpers.py", "max_stars_repo_name": "hakonanes/morphops", "max_stars_repo_head_hexsha": "10b77498e444d2e5a64268b418e5c686041868d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2019-02-25T13:30:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T00:15:57.000Z", "max_issues_repo_path": "tests/helpers.py", "max_issues_repo_name": "hakonanes/morphops", "max_issues_repo_head_hexsha": "10b77498e444d2e5a64268b418e5c686041868d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2018-12-16T11:02:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-12T11:25:15.000Z", "max_forks_repo_path": "tests/helpers.py", "max_forks_repo_name": "hakonanes/morphops", "max_forks_repo_head_hexsha": "10b77498e444d2e5a64268b418e5c686041868d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-27T20:50:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-30T17:16:32.000Z", "avg_line_length": 34.1081081081, "max_line_length": 76, "alphanum_fraction": 0.6085578447, "include": true, "reason": "import numpy", "num_tokens": 416}
|
import os
import numpy as np
from pgimp.GimpFile import GimpFile
from pgimp.util import file
from pgimp.util.TempFile import TempFile
if __name__ == '__main__':
img_path = file.relative_to(__file__, '../../../doc/source/_static/img')
png_file = os.path.join(img_path, 'mask_applied.png')
height = 100
width = 200
# layer content
bg = np.zeros(shape=(height, width), dtype=np.uint8)
fg = np.ones(shape=(height, width), dtype=np.uint8) * 255
mask = np.zeros(shape=(height, width), dtype=np.uint8)
mask[:, width//4:3*width//4+1] = 255
with TempFile('.xcf') as xcf, TempFile('.npz') as npz:
# create gimp file
gimp_file = GimpFile(xcf) \
.create('Background', bg) \
.add_layer_from_numpy('Foreground', fg) \
.add_layer_from_numpy('Mask', mask)
# save layer data to numpy arrays
arr_bg = gimp_file.layer_to_numpy('Background')
arr_fg = gimp_file.layer_to_numpy('Foreground')
arr_mask = gimp_file.layer_to_numpy('Mask')
# save data as npz
np.savez_compressed(npz, bg=arr_bg, fg=arr_fg, mask=arr_mask)
# load data from npz
loaded = np.load(npz)
loaded_bg = loaded['bg']
loaded_fg = loaded['fg']
loaded_mask = loaded['mask']
# merge background and foreground using mask
mask_idxs = loaded_mask == 255
img = loaded_bg.copy()
img[mask_idxs] = loaded_fg[mask_idxs]
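        # equivalent one-liner (sketch, assuming mask and layers share a shape):
        # img = np.where(loaded_mask == 255, loaded_fg, loaded_bg)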
with TempFile('.xcf') as xcf:
# create a temporary gimp file and export to png
gimp_file = GimpFile(xcf) \
.create('Background', img) \
.export(png_file)
|
{"hexsha": "77ffb883cad3f3c8dddd79ed42e9d6cd67079ed8", "size": 1659, "ext": "py", "lang": "Python", "max_stars_repo_path": "pgimp/doc/examples/multilayer_to_npz.py", "max_stars_repo_name": "netogallo/pgimp", "max_stars_repo_head_hexsha": "bb86254983e1673d702e1fa2ed207166fd15ec65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-10-29T10:09:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-28T04:47:32.000Z", "max_issues_repo_path": "pgimp/doc/examples/multilayer_to_npz.py", "max_issues_repo_name": "netogallo/pgimp", "max_issues_repo_head_hexsha": "bb86254983e1673d702e1fa2ed207166fd15ec65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-21T18:35:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-17T06:27:26.000Z", "max_forks_repo_path": "pgimp/doc/examples/multilayer_to_npz.py", "max_forks_repo_name": "netogallo/pgimp", "max_forks_repo_head_hexsha": "bb86254983e1673d702e1fa2ed207166fd15ec65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-09-20T05:14:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-05T01:55:47.000Z", "avg_line_length": 31.3018867925, "max_line_length": 76, "alphanum_fraction": 0.6250753466, "include": true, "reason": "import numpy", "num_tokens": 444}
|
import pyparsing as pp
#from pyparsing import (
#Suppress, Group, Optional, Word, ZeroOrMore, White, Combine,
#Dict, Literal, OneOrMore, Regex,
#alphas, alphanums, nums, oneOf, delimitedList, quotedString
#)
pword = pp.Word(pp.alphas).setName('word')
pword_underscore = pp.Word(pp.alphas + '_').setName('word_underscore')
pword_num_underscore = pp.Word(pp.alphas + pp.nums + '_').setName('word_num_underscore')
pint = pp.Word(pp.nums).setName('integer')
pint_sign = pp.Combine(pp.Optional(pp.oneOf("+ -")) + pp.Word(pp.nums)).setName('signed_integer')
pminus1 = pp.Word('-1')
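# note (a caveat kept from the original): pp.Word('-1') matches any run of the
# characters '-' and '1' (e.g. '1', '-11'), not just the literal token '-1'; it
# works below because pint is tried first and coordIndex only ever needs '-1'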
#pint = Regex('/^[-+]?\d+$/') # all integers
boolean = (pp.Literal('TRUE') | pp.Literal('FALSE')).setName('boolean')
# fourth pass, add parsing of dicts
cvt_int = lambda s, l, toks: int(toks[0])
cvt_float = lambda s, l, toks: float(toks[0])
#cvtDict = lambda s, l, toks: dict(toks[0])
# super fast at 31 sec
#float_regex = r"\d+\.\d+([Ee][+-]?\d+)?"
#float_regex = '[-+]?[0-9]*\.?[0-9]*'
float_regex = '[+-]?([0-9]*[.])?[0-9]+'
# 31.8 sec -> 28.2 sec if we drop casting...
pfloat = pp.Regex(float_regex).setName('real').setParseAction(cvt_float)
#pfloat_lazy = (pfloat1 | pfloat2 | pint_sign).setName('real').setParseAction(cvt_float)
pfloat.parseString('1.0')
pfloat.parseString('+1.0')
pfloat.parseString('-1.0')
pfloat.parseString('1.')
pfloat.parseString('+1.')
pfloat.parseString('-1.')
pfloat.parseString('-1')
pfloat.parseString('0')
pfloat.parseString('3')
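# note (assumption): float_regex has no exponent part, so scientific notation is
# only partially consumed; pfloat.parseString('1e-3') yields [1.0] because
# parseString defaults to parseAll=False. Extend the regex with ([Ee][+-]?\d+)?
# if exponents are needed.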
#------------------------------------------------------
name_str = pword + pp.quotedString
comma = pp.Word(',').setName('comma')
xyz = pp.Group(pfloat * 3 + pp.Optional(comma.suppress())).setName('xyz')
xy = pp.Group(pfloat * 2 + pp.Optional(comma.suppress())).setName('xy')
# 0xFFFFFF77
hexa = pp.Word('0123456789ABCDEFx', min=10, max=10).setName('hex')
hexa.parseString("0xFFFFFF77")
hexa.parseString("0xFF0000FF")
hexa.parseString("0xFFCC0077")
hexa.parseString("0xFFFF00FF")
hexa.parseString("0x77FF00FF")
hexa.parseString("0x00FF00FF")
hexa.parseString("0x00FFFFFF")
hexa.parseString("0x7700FF77")
hexa.parseString("0x444444FF")
#0xFF0000FF 0xFFCC0077 0xFFFF00FF
#0x77FF00FF 0x00FF00FF 0x00FFFFFF 0x0000FFFF
#0x7700FF77 0x444444FF
#-----------------------------------------
list_open = pp.Literal('[').suppress()
list_close = pp.Literal(']').suppress()
dict_open = pp.Literal('{').suppress()
dict_close = pp.Literal('}').suppress()
color_datai = pp.Group(pfloat * 3)
#-----------------------------------------------
ambient_intensity = pp.Literal('ambientIntensity') + pfloat
diffuse_color = pp.Literal('diffuseColor') + color_datai
specular_color = pp.Literal('specularColor') + color_datai
transparency = pp.Literal('transparency') + pfloat
shininess = pp.Literal('shininess') + pfloat
material_values = pp.Group(pp.OneOrMore(
ambient_intensity | diffuse_color | specular_color |
transparency | shininess))
material = (
pword + pp.Literal('Material') +
dict_open +
material_values +
dict_close)
material_values.parseString("""
ambientIntensity 0.210
diffuseColor 0.596 0.667 0.686
specularColor 0.500 0.500 0.500
transparency 0.000
shininess 0.600
""")
material.parseString("""
material Material {
ambientIntensity 0.210
diffuseColor 0.596 0.667 0.686
specularColor 0.500 0.500 0.500
transparency 0.000
shininess 0.600
}
""")
# --------------------------------------
direction = pp.Literal('direction') + xyz
color = pp.Literal('color') + color_datai
#specular_color = pp.Literal('specularColor') + color_datai
intensity = pp.Literal('intensity') + pfloat
#shininess = pp.Literal('shininess') + pfloat
directional_light_values = pp.Group(pp.OneOrMore(
direction | color | intensity | ambient_intensity))
directional_light = pp.Literal('DirectionalLight') + dict_open + directional_light_values + dict_close
directional_light.parseString("""
DirectionalLight {
direction 0.577 -0.577 -0.577
color 1.000 1.000 1.000
intensity 0.450
ambientIntensity 1.0
}
""")
# --------------------------------------
title = pp.Literal('title') + pp.quotedString
info = pp.Literal('info') + pp.quotedString
world_info_values = pp.OneOrMore(title | info)
world_info = (
pp.Literal('WorldInfo') +
dict_open +
pp.Group(world_info_values) +
dict_close)
world_info.parseString("""
WorldInfo {
title "Texture-mapped pyramid"
info "Gravity: on"
}
""")
# --------------------------------------
sky_color = (pp.Literal('skyColor') + color_datai).setName('sky_color')
background_values = pp.OneOrMore(sky_color)
background = (
pp.Literal('Background') +
dict_open +
pp.Group(background_values) +
dict_close).setName('background')
background.parseString("""
Background {
skyColor 0.1 0.3 1
}
""")
# --------------------------------------
typei = (pp.Literal('type') + pp.quotedString).setName('type')
headlight = (pp.Literal('headlight') + boolean).setName('headlight')
navigation_info_values = pp.OneOrMore(typei | headlight)
navigation_info = (
pp.Literal('NavigationInfo') +
dict_open +
pp.Group(navigation_info_values) +
dict_close).setName('navigation_info')
navigation_info.parseString("""
NavigationInfo {
type "EXAMINE"
headlight TRUE
}
""")
#-----------------------------------------------------
image = pp.Group(
pp.Literal('image') + pp.Group(pint * 3) +
pp.Group(pp.OneOrMore(hexa))
).setName('image')
image.parseString("""
image 1 10 4 0xFFFFFF77 0xFF0000FF 0xFFCC0077 0xFFFF00FF
0x77FF00FF 0x00FF00FF 0x00FFFFFF 0x0000FFFF
0x7700FF77 0x444444FF
""")
pixel_texturei = (
pp.Literal('PixelTexture') +
dict_open +
image +
dict_close).setName('pixel_texture')
pixel_texturei.parseString("""
PixelTexture {
image 1 10 4 0xFFFFFF77 0xFF0000FF 0xFFCC0077 0xFFFF00FF
0x77FF00FF 0x00FF00FF 0x00FFFFFF 0x0000FFFF
0x7700FF77 0x444444FF
}
""")
# url "http://www.rt.cs.boeing.com/people/davidk/wrl/geo/colors.jpg"
# repeatS FALSE
# repeatT FALSE
url = (pp.Literal('url') + pp.quotedString).setName('url')
repeat_s = (pp.Literal('repeatS') + boolean).setName('repeat_s')
repeat_t = (pp.Literal('repeatT') + boolean).setName('repeat_t')
image_texture_values = pp.Group(pp.OneOrMore(url | repeat_s | repeat_t))
image_texturei = (
pp.Literal('ImageTexture') +
dict_open +
image_texture_values +
dict_close).setName('image_texture')
texture_types = pixel_texturei | image_texturei
texture = (
pp.Literal('texture') + pp.Literal('DEF').suppress() + pword +
texture_types).setName('texture')
texture.parseString("""
texture DEF PICBAND ImageTexture {
url "http://www.rt.cs.boeing.com/people/davidk/wrl/geo/colors.jpg"
repeatS FALSE
repeatT FALSE
}
""")
texture.parseString("""
texture DEF PICBAND PixelTexture {
image 1 10 4 0xFFFFFF77 0xFF0000FF 0xFFCC0077 0xFFFF00FF
0x77FF00FF 0x00FF00FF 0x00FFFFFF 0x0000FFFF
0x7700FF77 0x444444FF
}
""")
#-----------------------------------------
point3d = (pp.Literal('point') + list_open + pp.Group(pp.OneOrMore(xyz)) + list_close).setName('point')
point2d = (pp.Literal('point') + list_open + pp.Group(pp.OneOrMore(xy)) + list_close).setName('point')
coord_values = point3d
coord = (pp.Literal('coord') + pp.Literal('Coordinate') + dict_open + pp.Group(coord_values) + dict_close).setName('coord')
coord.parseString("""
coord Coordinate {
point [
3.303 -6.738 -16.931, 3.275 -6.738 -16.932, 3.285 -6.821 -17.012,
3.641 -6.636 -16.832, 3.642 -6.624 -16.82, 3.509 -6.622 -16.819,
2.885 -7.116 -17.299, 3.019 -7.116 -17.299
]
}
""")
vector = (pp.Literal('vector') + list_open + pp.Group(pp.OneOrMore(xyz)) + list_close).setName('vector')
normal_values = vector
normal = (pp.Literal('normal') + pp.Literal('Normal') + dict_open + pp.Group(normal_values) + dict_close).setName('normal')
vector.parseString("""
vector [
0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
]
""")
normal.parseString("""
normal Normal {
vector [
0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
]
}
""")
# this should be (1, 2, 3, -1), (1, 2, 3, -1), etc.
def cast_to_ints(args):
ints = np.array(list(args), dtype='int32')
return ints
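# note: `np` is imported only in the final (disabled) branch below, which is the
# only branch that uses cast_to_ints; the enabled `if 1:` path never calls it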
if 1:
# 54.56 sec, 52.45 sec, 49, 35 by moving numpy import
coord_indicies = pp.OneOrMore(comma.suppress() | pint.setParseAction(cvt_int) | pminus1.setParseAction(cvt_int)) # works and parses
coord_index = (pp.Literal('coordIndex') + list_open + pp.Group(coord_indicies) + list_close).setName('coord_index') # works with A
elif 0: # pragma: no cover
# back to 51.2 sec
coord_indicies = pp.OneOrMore(comma.suppress() | pp.Word(pp.nums + '-').setParseAction(cvt_int)) # works and parses
coord_index = (pp.Literal('coordIndex') + list_open + pp.Group(coord_indicies) + list_close).setName('coord_index') # works with A
elif 0: # pragma: no cover
# has issues with the big problem
coord_indicies = pp.delimitedList(pint.setParseAction(cvt_int) | pminus1.setParseAction(cvt_int)) # good
coord_index = (pp.Literal('coordIndex') + list_open + coord_indicies + list_close).setName('coord_index')
else: # pragma: no cover
# has issues with the big problem
# probably will be beneficial in other cases
import numpy as np
coord_indicies = pp.delimitedList(pp.Word(pp.nums + '-')).setParseAction(cast_to_ints) # single numpy array cast
#coord_indicies = pp.pyparsing_common.comma_separated_list # bad...
#coord_indicies = OneOrMore(comma.suppress() | pint.setParseAction(cvt_int)) + pminus1.setParseAction(cvt_int)))
coord_indicies.parseString("0, 1, 2, -1, 3, 4, 5, -1, 1, 6, 2, -1")
coord_index.parseString("coordIndex [0, 1, 2, -1, 3, 4, 5, -1, 1, 6, 2, -1]")
#aaab
crease_angle = (pp.Literal('creaseAngle') + pfloat).setName('crease_angle')
tex_coord = (pp.Literal('texCoord') + pp.Literal('TextureCoordinate') + dict_open + point2d + dict_close).setName('tex_coord')
index_face_set_values = pp.OneOrMore(crease_angle | coord | normal | coord_index | tex_coord)
index_face_set = (
pp.Literal('IndexedFaceSet') + dict_open +
pp.Group(index_face_set_values) + dict_close).setName('indexed_face_set')
#-----------------------------------------
appearance_values = texture | material
appearance = (
pp.Literal('appearance') + pp.Literal('Appearance')
+ dict_open + pp.Group(appearance_values) + dict_close).setName('appearance')
sphere = (pp.Literal('Sphere') + dict_open + dict_close).setName('sphere')
geometry_values = sphere | index_face_set
geometry = (pp.Literal('geometry') + geometry_values).setName('geometry')
shape_values = pp.Group(pp.OneOrMore(appearance | geometry))
shape = (pp.Literal('Shape') + dict_open + shape_values + dict_close).setName('shape')
#print(geometry.parseString("""
#geometry IndexedFaceSet {
#creaseAngle 0.1
#coord Coordinate {
#[
#3.303 -6.738 -16.931, 3.275 -6.738 -16.932, 3.285 -6.821 -17.012,
#3.641 -6.636 -16.832, 3.642 -6.624 -16.82, 3.509 -6.622 -16.819,
#2.885 -7.116 -17.299, 3.019 -7.116 -17.299
#]
#}
#normal Normal {
#vector [
#0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
#0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
#]
#}
#coordIndex [
#0, 1, 2, -1, 3, 4, 5, -1, 1, 6, 2, -1,
#]
#}
#"""))
#print('geometry...')
# crease_angle + coord + normal + coord_index
geometry.parseString("""
geometry IndexedFaceSet {
coord Coordinate{
point[
-2 -2 0,
2 -2 0,
2 2 0,
-2 2 0,
0 0 5,
]
}
coordIndex [
0, 1, 4, -1,
1, 2, 4, -1,
2, 3, 4, -1,
3, 0, 4, -1,
3, 2, 1, 0, -1,
]
texCoord TextureCoordinate {
point [
0 0,
0 .3,
0 .5,
0 .7,
0 1,
]
}
}
""")
shape.parseString("""
Shape {
appearance Appearance{
texture DEF PICBAND PixelTexture {
image 1 10 4 0xFFFFFF77 0xFF0000FF 0xFFCC0077 0xFFFF00FF
0x77FF00FF 0x00FF00FF 0x00FFFFFF 0x0000FFFF
0x7700FF77 0x444444FF
}
}
geometry Sphere{}
}
""")
# wow...super dead link; google has info from him back in 1994...
shape.parseString("""
Shape{
appearance Appearance{
texture DEF PICBAND ImageTexture {
url "http://www.rt.cs.boeing.com/people/davidk/wrl/geo/colors.jpg"
repeatS FALSE
repeatT FALSE
}
}
}
""")
appearance.parseString("""
appearance Appearance {
material Material {
ambientIntensity 0.210
diffuseColor 1.000 0.000 0.000
specularColor 0.500 0.500 0.500
transparency 0.000
shininess 0.600
}
}
""")
shape.parseString("""
Shape {
appearance Appearance {
material Material {
ambientIntensity 0.210
diffuseColor 1.000 0.000 0.000
specularColor 0.500 0.500 0.500
transparency 0.000
shininess 0.600
}
}
}
""")
#---------------------------------------
shape.parseString("""
Shape {
appearance Appearance {
material Material {
ambientIntensity 0.210
diffuseColor 1.000 0.000 0.000
specularColor 0.500 0.500 0.500
transparency 0.000
shininess 0.600
}
}
geometry IndexedFaceSet {
creaseAngle 0.1
coord Coordinate {
point [
3.303 -6.738 -16.931, 3.275 -6.738 -16.932, 3.285 -6.821 -17.012,
3.641 -6.636 -16.832, 3.642 -6.624 -16.82, 3.509 -6.622 -16.819,
2.885 -7.116 -17.299, 3.019 -7.116 -17.299
]
}
normal Normal {
vector [
0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
0 -0.697 0.717, 0 -0.697 0.717, 0 -0.697 0.717,
]
}
coordIndex [
0, 1, 2, -1, 3, 4, 5, -1, 1, 6, 2, -1,
]
}
}
}
""")
#print('done with pre-parsing!')
|
{"hexsha": "1429f13167720315e90f3ffa01b9761ae36e7c09", "size": 14371, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyNastran/converters/dev/vrml/parsing_help.py", "max_stars_repo_name": "luzpaz/pyNastran", "max_stars_repo_head_hexsha": "939e9eefdc87a3bf67939a23dc09f155b93969a0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 293, "max_stars_repo_stars_event_min_datetime": "2015-03-22T20:22:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T20:28:24.000Z", "max_issues_repo_path": "pyNastran/converters/dev/vrml/parsing_help.py", "max_issues_repo_name": "luzpaz/pyNastran", "max_issues_repo_head_hexsha": "939e9eefdc87a3bf67939a23dc09f155b93969a0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 512, "max_issues_repo_issues_event_min_datetime": "2015-03-14T18:39:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:15:43.000Z", "max_forks_repo_path": "pyNastran/converters/dev/vrml/parsing_help.py", "max_forks_repo_name": "luzpaz/pyNastran", "max_forks_repo_head_hexsha": "939e9eefdc87a3bf67939a23dc09f155b93969a0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 136, "max_forks_repo_forks_event_min_datetime": "2015-03-19T03:26:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T22:14:54.000Z", "avg_line_length": 31.654185022, "max_line_length": 135, "alphanum_fraction": 0.617563148, "include": true, "reason": "import numpy", "num_tokens": 4478}
|
"""
author: Antoine Spahr
date : 04.11.2020
----------
TO DO :
"""
import sys
sys.path.append('../../')
import click
import os
import pandas as pd
import numpy as np
import nibabel as nib
import skimage
import skimage.exposure
import skimage.io
from src.utils.ct_utils import window_ct
from src.utils.print_utils import print_progessbar
@click.command()
@click.argument('input_data_path', type=click.Path(exists=True))
@click.option('--output_data_path', type=click.Path(exists=False), default=None, help='Where to save the 2D data.')
@click.option('--window', type=(int, int), default=None, help='The windowing center and width (two values) for CT intensity rescaling. Default: None (windowing not applied).')
def main(input_data_path, output_data_path, window):
"""
Convert the Volumetric CT data and mask (in NIfTI format) to a dataset of 2D images in tif and masks in bitmap for the brain extraction.
"""
# open data info dataframe
info_df = pd.read_csv(os.path.join(input_data_path, 'info.csv'), index_col=0)
# make patient directory
if not os.path.exists(output_data_path): os.mkdir(output_data_path)
# iterate over volume to extract data
output_info = []
for n, id in enumerate(info_df.id.values):
# read nii volume
ct_nii = nib.load(os.path.join(input_data_path, f'ct_scans/{id}.nii'))
mask_nii = nib.load(os.path.join(input_data_path, f'masks/{id}.nii.gz'))
# get np.array
ct_vol = ct_nii.get_fdata()
mask_vol = skimage.img_as_bool(mask_nii.get_fdata())
# rotate 90° counter clockwise for head pointing upward
ct_vol = np.rot90(ct_vol, axes=(0,1))
mask_vol = np.rot90(mask_vol, axes=(0,1))
# window the ct volume to get better contrast of soft tissues
if window is not None:
ct_vol = window_ct(ct_vol, win_center=window[0], win_width=window[1], out_range=(0,1))
if mask_vol.shape != ct_vol.shape:
print(f'>>> Warning! The ct volume of patient {id} does not have '
f'the same dimension as the ground truth. CT ({ct_vol.shape}) vs Mask ({mask_vol.shape})')
# make patient directory
if not os.path.exists(os.path.join(output_data_path, f'{id:03}/ct/')): os.makedirs(os.path.join(output_data_path, f'{id:03}/ct/'))
if not os.path.exists(os.path.join(output_data_path, f'{id:03}/mask/')): os.makedirs(os.path.join(output_data_path, f'{id:03}/mask/'))
        # iterate over slices to save slices
        for slc in range(ct_vol.shape[2]):
            ct_slice_fn = f'{id:03}/ct/{slc+1}.tif'
            # save CT slice
            skimage.io.imsave(os.path.join(output_data_path, ct_slice_fn), ct_vol[:,:,slc], check_contrast=False)
            is_low = skimage.exposure.is_low_contrast(ct_vol[:,:,slc])
            # save mask if some brain on slice
            if np.any(mask_vol[:,:,slc]):
                mask_slice_fn = f'{id:03}/mask/{slc+1}_Seg.bmp'
                skimage.io.imsave(os.path.join(output_data_path, mask_slice_fn), skimage.img_as_ubyte(mask_vol[:,:,slc]), check_contrast=False)
            else:
                mask_slice_fn = 'None'
            # add info to output list
            output_info.append({'volume':id, 'slice':slc+1, 'ct_fn':ct_slice_fn, 'mask_fn':mask_slice_fn, 'low_contrast_ct':is_low})
            print_progessbar(slc, ct_vol.shape[2], Name=f'Volume {id:03} {n+1:03}/{len(info_df.id):03}',
                             Size=20, erase=False)
# Make dataframe of outputs
output_info_df = pd.DataFrame(output_info)
# save df
output_info_df.to_csv(os.path.join(output_data_path, 'slice_info.csv'))
    print('>>> Slice information saved at ' + os.path.join(output_data_path, 'slice_info.csv'))
# save patient df
info_df.to_csv(os.path.join(output_data_path, 'volume_info.csv'))
    print('>>> Volume information saved at ' + os.path.join(output_data_path, 'volume_info.csv'))
if __name__ == '__main__':
main()
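# Example invocation (sketch; paths are hypothetical):
#   python generate_2DBrainDataset.py ./data --output_data_path ./out --window 40 120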
|
{"hexsha": "59dd9292a8b71a7583d19bdc94080615ce7dd392", "size": 4010, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/scripts/data_preparation/generate_2DBrainDataset.py", "max_stars_repo_name": "antoine-spahr/Label-Efficient-Volumetric-Deep-Semantic-Segmentation-of-ICH", "max_stars_repo_head_hexsha": "61e74a6188fe82843085e87da7d9c4ec7bdbf85e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-16T09:52:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T00:59:18.000Z", "max_issues_repo_path": "code/scripts/data_preparation/generate_2DBrainDataset.py", "max_issues_repo_name": "antoine-spahr/Label-Efficient-Volumetric-Deep-Semantic-Segmentation-of-ICH", "max_issues_repo_head_hexsha": "61e74a6188fe82843085e87da7d9c4ec7bdbf85e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/scripts/data_preparation/generate_2DBrainDataset.py", "max_forks_repo_name": "antoine-spahr/Label-Efficient-Volumetric-Deep-Semantic-Segmentation-of-ICH", "max_forks_repo_head_hexsha": "61e74a6188fe82843085e87da7d9c4ec7bdbf85e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.6279069767, "max_line_length": 168, "alphanum_fraction": 0.66159601, "include": true, "reason": "import numpy", "num_tokens": 1011}
|
# import standard modules
# import third party modules
import numpy as np
# import project related modules
class CostFunction(object):
"""
Parent class for all cost Functions within NumpyNet. It is used to reduce the amount of code for initialization
since all cost functions share the same attributes.
"""
def __init__(self):
self.loss = list() # list of losses produced
self.error = None # error between x and y values
self.cache = None # copy of input data x
self.target = None # copy of input data y
class CrossEntropy(CostFunction):
"""
cross entropy used as loss function for a classification problem with n possible classes with a one hot encoded
vector for y and predicted values. CrossEntropy inherits from CostFunction class.
"""
def __init__(self):
super().__init__()
    def forward(self, x: np.ndarray, y: np.ndarray):
"""
to calculate the cross entropy the H(p, q) formula is applied. with p being y and q being x.
and x is a result coming from an applied Softmax(x) function.
:param: x: numpy.array: predicted data x with applied Softmax function and in shape of a one hot encoded vector
:param: y: numpy.array: target variables (label) in a shape of a one hot encoded vector
        :return: float: cross entropy cost averaged over the samples
"""
# create a copy of x and y inputs in order to make them available for the error definition step
self.cache = x.copy()
self.target = y.copy()
# get the amount of samples to calculate
# calculate the cost the with the given H(p, q formula)
N = self.cache.shape[0]
cost = -np.sum(self.target*np.log(self.cache+1e-9))/N
return cost
def backward(self):
"""
calculates the error between predicted values and target values. It will basically return all values
as they are but subtracts 1 from the index which should be the target class.
"""
# get amount of samples
m = self.target.shape[0]
# subtract one from the index where the highest probability should be divide the error values by the amount
# of samples and return the error array
self.cache[:m, np.argmax(self.target)] -= 1
self.cache = self.cache/m
return self.cache
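# Minimal usage sketch (assumed shapes: one sample, three classes):
#   ce = CrossEntropy()
#   x = np.array([[0.7, 0.2, 0.1]])   # softmax output
#   y = np.array([[1.0, 0.0, 0.0]])   # one-hot target
#   cost = ce.forward(x, y)           # -log(0.7 + 1e-9) / 1 ~= 0.357
#   grad = ce.backward()              # x with 1 subtracted at the target index, / m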
class Metrics:
    """
    Minimal base class for metrics. Note: `Metrics` was referenced here but never
    defined or imported in this file; this stub is an assumption added to keep
    the module importable.
    """
    def __init__(self):
        pass
class Accuracy(Metrics):
def __init__(self):
super().__init__()
self.values = list()
def add(self, x, y):
pred = np.argmax(x)
target = np.argmax(y)
if int(pred)-int(target) == 0:
self.values.append(1)
else:
self.values.append(0)
def clean(self):
self.values = list()
def get(self):
return f"accuracy: {sum(self.values)/len(self.values) * 100}%"
|
{"hexsha": "bf3bfaceb31706229d5a6a96b8213121e002951c", "size": 2822, "ext": "py", "lang": "Python", "max_stars_repo_path": "simple_numpy_net/loss.py", "max_stars_repo_name": "Perledition/GeneticNumpyNet", "max_stars_repo_head_hexsha": "f175fc71f63e9e4f79c8c63f080c853bb5b96f4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simple_numpy_net/loss.py", "max_issues_repo_name": "Perledition/GeneticNumpyNet", "max_issues_repo_head_hexsha": "f175fc71f63e9e4f79c8c63f080c853bb5b96f4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simple_numpy_net/loss.py", "max_forks_repo_name": "Perledition/GeneticNumpyNet", "max_forks_repo_head_hexsha": "f175fc71f63e9e4f79c8c63f080c853bb5b96f4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4367816092, "max_line_length": 119, "alphanum_fraction": 0.6353649894, "include": true, "reason": "import numpy", "num_tokens": 633}
|
import os
import albumentations as A
import cv2
import numpy as np
import pandas as pd
import torch
from pandas import DataFrame
from torch.utils.data import Dataset, DataLoader
from constants import DFDC
from training.datasets.transform import create_train_transform, create_val_test_transform
class DFDCDataset(Dataset):
def __init__(self, data_root, df: DataFrame, mode, transform: A.Compose):
self.data_root = data_root
self.df = df
self.mode = mode
self.transform = transform
def __getitem__(self, index):
video, img_file, label, ori_video, frame = self.df.iloc[index].values
# image
img_path = os.path.join(self.data_root, 'crops', video, img_file)
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# mask
if label == 1:
mask_path = os.path.join(self.data_root, 'diffs', video, '{}_diff.png'.format(img_file[:-4]))
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
else:
mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
# data augmentation
transformed = self.transform(image=image, mask=mask)
image = transformed["image"]
mask = transformed["mask"]
mask = mask.unsqueeze(0) / 255.
return {'images': image, 'labels': label, 'masks': mask}
def __len__(self):
r = self.df.shape[0]
return r
def get_dfdc_dataloader(model_cfg, args):
"""
:param model_cfg:
:param args:
:return:
"""
train_df = pd.read_csv(f'data/{DFDC}/data_{DFDC}_train.csv')
# train_real_df = pd.read_csv(f'data/{DFDC}/data_{DFDC}_train_real.csv')
# train_fake_df = pd.read_csv(f'data/{DFDC}/data_{DFDC}_train_fake.csv')
# train_fake_df = train_fake_df.sample(len(train_real_df.index))
# train_df = pd.concat([train_real_df, train_fake_df])
train_transform = create_train_transform(model_cfg)
train_data = DFDCDataset(data_root=args.data_dir, df=train_df, mode='train', transform=train_transform)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
else:
train_sampler = None
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=(train_sampler is None),
sampler=train_sampler, num_workers=args.workers, pin_memory=True, drop_last=True)
val_df = pd.read_csv(f'data/{DFDC}/data_{DFDC}_val.csv')
val_transform = create_val_test_transform(model_cfg)
val_data = DFDCDataset(data_root=args.data_dir, df=val_df, mode='validation', transform=val_transform)
val_loader = DataLoader(val_data, batch_size=30, shuffle=False, num_workers=args.workers,
pin_memory=True, drop_last=False)
return train_sampler, train_loader, val_loader
def get_dfdc_test_dataloader(model_cfg, args):
"""
:param model_cfg:
:param args:
:return:
"""
test_df = pd.read_csv(f'data/{DFDC}/data_{DFDC}_test.csv')
test_transform = create_val_test_transform(model_cfg)
test_data = DFDCDataset(data_root=args.data_dir, df=test_df, mode='test', transform=test_transform)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
pin_memory=True, drop_last=True)
return test_loader
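# Usage sketch (assumed args fields, mirroring those read above):
#   from argparse import Namespace
#   args = Namespace(data_dir='./dfdc', batch_size=32, workers=4, distributed=False)
#   train_sampler, train_loader, val_loader = get_dfdc_dataloader(model_cfg, args)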
|
{"hexsha": "3617f982c083055d0e9bd0d4ad633a2356f8365a", "size": 3429, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/datasets/dfdc_dataset.py", "max_stars_repo_name": "joizhang/sifdnet", "max_stars_repo_head_hexsha": "9b3efa8c709bcea502a3989f6c62389f74099bae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "training/datasets/dfdc_dataset.py", "max_issues_repo_name": "joizhang/sifdnet", "max_issues_repo_head_hexsha": "9b3efa8c709bcea502a3989f6c62389f74099bae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/datasets/dfdc_dataset.py", "max_forks_repo_name": "joizhang/sifdnet", "max_forks_repo_head_hexsha": "9b3efa8c709bcea502a3989f6c62389f74099bae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6813186813, "max_line_length": 111, "alphanum_fraction": 0.6838728492, "include": true, "reason": "import numpy", "num_tokens": 823}
|
subroutine qqb_wbjet(p,msq)
implicit none
c--- R.K. Ellis, 8/3/04
c--- matrix element squared and averaged over initial colours and spins
c q(-p1) + b(-p2) --> W^+ + b(p5) + f(p6)
c |
c --> nu(p3) + e^+(p4)
c---or
c q(-p1) + b(-p2) --> W^- + b(p5) + f(p6)
c |
c --> e^-(p3) + nu~(p4)
c--- all momenta are incoming
c--- Extended to include charm quark production via the variable
c--- "flav". Note that NLO corrections are not yet extended this way.
include 'constants.f'
include 'masses.f'
include 'ewcouple.f'
include 'qcdcouple.f'
include 'heavyflav.f'
include 'ckm.f'
include 'sprods_com.f'
include 'zprods_com.f'
include 'msq_cs.f'
double precision msq(-nf:nf,-nf:nf),p(mxpart,4),
. facqq,prop,Vsm(-nf:nf),qQ_jkki,Qq_jkji,qbQb_jkki,Qbqb_jkji
double complex aLLL
integer j,k,i1,i2,i3,i4,i5,i6
C Statement function defines the sum of the following two diagrams
c q5(L)----<----------q2 q5(L)------<--------q2
c 0 0
c 0 0
c 0 0
c q6(L)------<--------q1 q6(L)------<--------q1
c ) )
c ( (
c ) )
c l3(L)-------<-------l4 l3(L)-------<-------l4
aLLL(i1,i2,i3,i4,i5,i6)=-cone/(s(i3,i4)*s(i2,i5))*
. (+za(i6,i3)*zb(i2,i1)/(s(i3,i4)+s(i4,i6)+s(i3,i6))
. *(+zb(i4,i3)*za(i3,i5)+zb(i4,i6)*za(i6,i5))
. +za(i6,i5)*zb(i4,i1)/(s(i2,i6)+s(i5,i6)+s(i2,i5))
. *(+zb(i2,i6)*za(i6,i3)+zb(i2,i5)*za(i5,i3)))
c--- initialize matrix elements
do j=-nf,nf
do k=-nf,nf
msq(j,k)=0d0
enddo
enddo
c--- set up spinors (and dotproducts)
call spinoru(6,p,za,zb)
prop=s(3,4)**2/((s(3,4)-wmass**2)**2+wmass**2*wwidth**2)
facqq=4d0*V*gsq**2*(gwsq/2d0)**2*aveqq*prop
c--- now square these amplitudes separating into color structures
c 1) Amplitude
c 2) Amplitude with (5<-->6)
c 0) Interference between above
c
c--- q(i) q(j) --> q(i) ( --> W q(k) ) q(j)
c--- eg u(1)b(2)->nu(3)e^+(4)b(5)d(6)
qQ_jkki=abs(aLLL(1,2,3,4,5,6))**2+abs(aLLL(1,5,3,4,2,6))**2
c--- q(i) q(j) --> q(i) q(j) ( --> W q(k) ), eg
c--- eg b(1)u(2)->nu(3)e^+(4)b(5)d(6)
c--- exchange (1<-->2) in above
Qq_jkji=abs(aLLL(2,1,3,4,5,6))**2+abs(aLLL(2,5,3,4,1,6))**2
c--- qb(i) qb(j) --> qb(i) qb(j) ( --> W qb(k) )
c--- eg b~(1)d~(2)->nu(3)e^+(4)b~(5)u~(6)
Qbqb_jkji=abs(aLLL(6,1,3,4,5,2))**2+abs(aLLL(6,5,3,4,1,2))**2
c--- qb(i) qb(j) --> qb(i) ( --> W qb(k) ) qb(j)
C--- eg d~(1)b~(2)->nu(3)e^+(4)b~(5)u~(6)
C--- exchange (1<-->2) in above
qbQb_jkki=abs(aLLL(6,2,3,4,5,1))**2+abs(aLLL(6,5,3,4,2,1))**2
c--- set up auxiliary array
do j=-nf,nf
Vsm(j)=Vsum(j)
if (abs(j) .ge. flav) Vsm(j)=0d0
c--- make sure that elements are either one or zero
if (Vsm(j) .gt. 0d0) Vsm(j)=1d0
enddo
do j=-nf,nf
do k=-nf,nf
msq_cs(0,j,k)=0d0
msq_cs(1,j,k)=0d0
msq_cs(2,j,k)=0d0
if ((abs(j) .ne. flav) .and. (abs(k) .ne. flav)) goto 99
if ((abs(j) .eq. flav) .and. (abs(k) .eq. flav)) goto 99
c--- so that either abs(j) or abs(k) = flav (but not both).
c--- note that, for (q,qb) and (qb,q) contribution (2) refers to the
c--- straightforward case and (1) to the case with 5<->6
c--- This is reversed for (q,q) and (qb,qb)
C--- Note added RKE 8/17/04
C--- Actually this comment doesn't seem to be true for (qb,q)
C--- in order to get the poles to cancel
C--- There is presumably some interplay with the definition
C--- of the various color terms in qqb_wbjet_z.f,
if ((j .gt. 0) .and. (k .lt. 0)) then
c--- e.g. b d~ -> b u~
msq_cs(2,j,k)=facqq*Vsm(k)*Qbqb_jkji
c--- e.g. u b~ -> b~ d
msq_cs(1,j,k)=facqq*Vsm(j)*qQ_jkki
elseif ((j .lt. 0) .and. (k .gt. 0)) then
c--- e.g. b~ u -> b~ d
msq_cs(2,j,k)=facqq*Vsm(k)*Qq_jkji
c--- e.g. d~ b -> b u~
msq_cs(1,j,k)=facqq*Vsm(j)*qbQb_jkki
elseif ((j .gt. 0) .and. (k .gt. 0)) then
c--- e.g. b u -> b d
msq_cs(1,j,k)=facqq*Vsm(k)*Qq_jkji
c--- e.g. u b -> b d
msq_cs(2,j,k)=facqq*Vsm(j)*qQ_jkki
elseif ((j .lt. 0) .and. (k .lt. 0)) then
c--- e.g. b~ d~ -> b~ u~
msq_cs(1,j,k)=facqq*Vsm(k)*Qbqb_jkji
c--- e.g. d~ b~ -> b~ u~
msq_cs(2,j,k)=facqq*Vsm(j)*qbQb_jkki
endif
msq(j,k)=msq_cs(0,j,k)+msq_cs(1,j,k)+msq_cs(2,j,k)
99 continue
enddo
enddo
return
end
|
{"hexsha": "6a1b3677eaaa139ce28c21f3a9711acd0c6b8fe3", "size": 4838, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/src/Wbjet/qqb_wbjet.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/src/Wbjet/qqb_wbjet.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/src/Wbjet/qqb_wbjet.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 32.6891891892, "max_line_length": 71, "alphanum_fraction": 0.4824307565, "num_tokens": 1952}
|
import abc
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from .models import model
from .parsers.common_parser import CommonParser
from . import checks
class Tester:
def __init__(self, metric_name="MeanF1Score", border=0.5,
invert_list=None):
"""
Initializing object of main class with testing algorithm.
:param metric_name: str, optional (default="MeanF1Score").
Name of the metric to check quality.
:param border: float, optional (default=0.5).
The accuracy boundary at which the algorithm is considered to be
exact.
:param invert_list: list, optional (default=None).
List of the metrics name which need to invert comparison with
border.
"""
self._metric_name = metric_name
checks.check_types(self._metric_name, str, var_name="metric_name")
class_ = globals()[self._metric_name]
self._metric = class_(border)
checks.check_inheritance(self._metric, IMetric)
self._invert_list = invert_list
checks.check_types(self._invert_list, list, var_name="invert_list")
def test(self, validation_labels, predictions, **kwargs):
"""
Main testing function.
        :param validation_labels: array-like, sparse matrix.
            Known data.
        :param predictions: array-like, sparse matrix.
            Predicted data.
:param kwargs: dict, optional(default={}).
Additional arguments for metric test method.
:return: float.
A numerical estimate of the accuracy of the algorithm.
"""
return self._metric.test(validation_labels, predictions, **kwargs)
def quality_control(self, validation_labels, predictions):
"""
Function to get threshold estimation of the accuracy of the algorithm.
        :param validation_labels: array-like, sparse matrix.
            Known data.
        :param predictions: array-like, sparse matrix.
            Predicted data.
        :return: bool.
            Bool value defining the quality of the algorithm.
"""
invert_comparison = self._metric_name in self._invert_list
return self._metric.quality_control(validation_labels, predictions,
invert_comparison)
class IMetric(abc.ABC):
def __init__(self, border):
"""
Initializing object of testing algorithm's class.
:param border: float.
The accuracy boundary at which the algorithm is considered to be
exact.
"""
self._border = border
checks.check_types(self._border, float, var_name="border")
checks.check_value(self._border, 0.0, 1.0, var_name="border")
self._cache = None
@abc.abstractmethod
def test(self, validation_labels, predictions, **kwargs):
"""
Main testing function.
        :param validation_labels: array-like, sparse matrix.
            Known data.
        :param predictions: array-like, sparse matrix.
            Predicted data.
:param kwargs: dict.
Additional arguments for test method.
:return: float.
A numerical estimate of the accuracy of the algorithm.
"""
raise NotImplementedError("Called abstract class method!")
def quality_control(self, validation_labels, predictions,
invert_comparison=False):
"""
Function to get threshold estimation of the accuracy of the algorithm.
        :param validation_labels: array-like, sparse matrix.
            Known data.
        :param predictions: array-like, sparse matrix.
            Predicted data.
        :param invert_comparison: bool, optional (default=False).
            Bool value that changes the direction of comparison.
        :return: bool.
            Bool value defining the quality of the algorithm.
"""
if self._cache is None:
self._cache = self.test(validation_labels, predictions)
if invert_comparison:
return self._cache > self._border
return self._cache < self._border
class MeanSquaredError(IMetric):
def test(self, validation_labels, predictions, r2=False):
"""
Main testing function.
:param validation_labels: list.
List of lists with known data.
:param predictions: list.
List of lists with predicted data.
:param r2: bool, optional (default=False).
Flag for additional metric.
:return: float, tuple (float, float).
A numerical estimate of the accuracy of the algorithm. 0.0 is
perfect prediction. For r2 score 1.0 is perfect prediction.
"""
self._cache = mean_squared_error(validation_labels, predictions)
if r2:
return self._cache, r2_score(validation_labels, predictions)
return self._cache
class MeanF1Score(IMetric):
@staticmethod
def _format_data(validation_label, prediction):
"""
Formatted input data.
:param validation_label: list.
Known data.
:param prediction: list.
Predicted data.
:return: tuple (list, list).
Return tuple with formatted lists.
"""
int_prediction = [int(round(x)) for x in prediction]
int_prediction = CommonParser.to_final_label(int_prediction)
validation_label = CommonParser.to_final_label(validation_label)
return int_prediction, validation_label
@staticmethod
def zero_check(conj, arr_len):
"""
Check if goods list is empty.
:param conj: int.
Cardinality of conjunction of two sets of goods.
:param arr_len: int.
Cardinality of goods list.
:return: int, float.
Return 0 if goods list is empty, otherwise division conj and
arr_len.
"""
if arr_len == 0:
return 0
else:
return conj / arr_len
@staticmethod
def conjunction(lst1, lst2):
"""
Calculate conjunction of two arrays. Arrays must be sorted!
:param lst1: list.
First sorted array.
:param lst2: list.
Second sorted array.
:return: int.
Cardinality of conjunction.
"""
it1 = iter(lst1)
it2 = iter(lst2)
try:
value1 = next(it1)
value2 = next(it2)
except StopIteration:
return 0
result = 0
while True:
try:
if value1 == value2:
result += 1
value1 = next(it1)
value2 = next(it2)
elif value1 > value2:
value2 = next(it2)
else:
value1 = next(it1)
except StopIteration:
break
return result
def test_check(self, validation_label, prediction, need_format=False):
"""
Main testing function for one list of data.
:param validation_label: list.
Known data.
:param prediction: list.
Predicted data.
:param need_format: bool, optional (default=False).
Used to define that data is not formatted.
:return: float.
A numerical estimate of the accuracy of the algorithm. 1.0 is
perfect prediction.
"""
if need_format:
validation_label, prediction = self._format_data(validation_label,
prediction)
conj = self.conjunction(prediction, validation_label)
p = self.zero_check(conj, len(prediction))
r = self.zero_check(conj, len(validation_label))
if p == 0 and r == 0:
return 0
return 2 * p * r / (p + r)
def test(self, validation_labels, predictions, need_format=False):
"""
Main testing function.
:param validation_labels: list.
List of lists with known data.
:param predictions: list.
List of lists with predicted data.
:param need_format: bool, optional (default=False).
Used to define that data is not formatted.
:return: float.
A numerical estimate of the accuracy of the algorithm. 1.0 is
perfect prediction.
"""
checks.check_equality(self.conjunction([1, 1, 2, 3, 5],
[1, 2, 4, 5]), 3,
message="There are error in conjunction method")
num_checks = len(validation_labels)
result = [self.test_check(validation_labels[i],
predictions[i],
need_format) for i in range(num_checks)]
self._cache = sum(result) / num_checks
return self._cache
class TestModel(model.IModel):
def fit(self, train_samples, train_labels, **kwargs):
checks.check_equality(len(train_samples), len(train_labels),
message="Samples and labels have different "
"sizes")
def predict(self, samples, **kwargs):
checks.check_equality(len(samples), len(kwargs["labels"]),
message="Samples and labels have different "
"sizes")
predictions = []
for _, label in zip(samples, kwargs["labels"]):
prediction = np.array(label)
predictions.append(prediction)
return predictions
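# Usage sketch: MeanF1Score on already-formatted (sorted) label lists, e.g.
#   metric = MeanF1Score(border=0.5)
#   metric.test([[1, 2, 3]], [[1, 2, 4]])  # p = r = 2/3, so mean F1 ~= 0.667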
|
{"hexsha": "b1a0f3ef16f768e82f7a0d5c44c0fb5f7ce32f43", "size": 9757, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlalgorithms/tester.py", "max_stars_repo_name": "robot-lab/tinkoff-optimization-of-procurement", "max_stars_repo_head_hexsha": "6f34ede84f8b51520a6a269d4a4cefae4d8f54c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlalgorithms/tester.py", "max_issues_repo_name": "robot-lab/tinkoff-optimization-of-procurement", "max_issues_repo_head_hexsha": "6f34ede84f8b51520a6a269d4a4cefae4d8f54c1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2018-08-16T12:03:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-29T09:33:16.000Z", "max_forks_repo_path": "mlalgorithms/tester.py", "max_forks_repo_name": "robot-lab/tinkoff-optimization-of-procurement", "max_forks_repo_head_hexsha": "6f34ede84f8b51520a6a269d4a4cefae4d8f54c1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8765822785, "max_line_length": 78, "alphanum_fraction": 0.582248642, "include": true, "reason": "import numpy", "num_tokens": 1914}
|
#
# Copyright John Reid 2006
#
import numpy, numpy.random
from _maths import *
def reverse_complement( s ):
result = numpy.zeros_like( s )
for i in xrange( len( s ) ):
result[ len(s) - i - 1 ] = 3 - s[i]
return result
class GappedPssm( object ):
def __init__(
self,
phi,
varphi,
K,
alpha = [ 1.0, 1.0 ]
):
"""Generate a pssm from a base distribution
K: PSSM length
phi: dirichlet prior for theta
varphi: dirichlet prior for pi
alpha: prior on gamma parameter
"""
self.theta = numpy.empty( (K+1,4), numpy.float64 )
for j in xrange( K + 1 ):
if 0 == j: self.theta[j,:] = sample_from_dirichlet( varphi )
else: self.theta[j,:] = sample_from_dirichlet( phi )
self.h = numpy.random.randint(0, K - 1)
self.gamma = numpy.random.beta( alpha[0], alpha[1] )
self.K = K
def __str__( self ):
return 'Theta:\n%s\nh:%d\ngamma:%f' % (
str( self.theta ),
self.h,
self.gamma
)
def sample_from( self ):
has_gap = bool( numpy.random.random() > self.gamma ) # does it have a gap?
result = numpy.zeros( self.K + 1, dtype = numpy.int32 )
# fill in the pssm bases
for i in xrange( self.K ):
if has_gap and i > self.h: idx = i + 1
else: idx = i
result[ idx ] = sample_from_discrete( self.theta[i+1] )
# fill in the gap base
if has_gap: gap_base = self.h + 1
else: gap_base = self.K
result[ gap_base ] = sample_from_discrete( self.theta[0] )
return result, has_gap
def generate_synthetic_sequence( pi, L ):
"""Generate one sequence
pi: sequence distribution
L: length
"""
return numpy.array(
[
sample_from_discrete( pi )
for i in xrange( L )
],
dtype = numpy.int32
)
def base_to_str( base ):
"""Converts 0,1,2,3 to A,C,G,T"""
if 0 == base: return 'A'
if 1 == base: return 'C'
if 2 == base: return 'G'
if 3 == base: return 'T'
raise RuntimeError( 'Bad base: %d' % base )
def seq_to_str( seq ):
"""Our sequences are held as arrays, this converts to A,C,G,T strings"""
return ''.join( [ base_to_str(s) for s in seq ] )
def place_binding_site_in_sequence( seq, pssm ):
"""Replaces part of a sequence with a binding site from the pssm
seq: sequence
pssm: pssm
returns (s,g) where s is the position the site starts at and g is whether
there is a gap
"""
sample_seq, has_gap = pssm.sample_from()
rev_comp = bool( numpy.random.random() > 0.5 ) # is it reverse complemented?
if rev_comp: sample_seq = reverse_complement( sample_seq )
s = numpy.random.randint( 0, len( seq ) - pssm.K ) # where in sequence?
# replace sequence
seq[s:s+pssm.K+1] = sample_seq
return s, has_gap, rev_comp
class ModelSample( object ):
def __init__(
self,
phi,
varphi,
K,
N,
av_length,
alpha = [ 1.0, 1.0 ],
verbose = False
):
"""Generate some synthetic sequences
N: number of sequences
K: length of PSSM
av_length: expected length
phi: prior for pssm
varphi: prior for background
alpha: prior for gamma
"""
if verbose:
print 'Creating PSSM of length %d' % K
self.pssm = GappedPssm( K = K, phi = phi, varphi = varphi, alpha = alpha )
if verbose:
print 'Creating %d sequences of average length %d' % (N, av_length)
length = 0
while length < K + 1:
length = numpy.random.poisson( av_length )
self.seqs = [
generate_synthetic_sequence(
self.pssm.theta[0,:],
length
)
for n in xrange( N )
]
self.locations = []
self.has_gap = []
self.rev_comp = []
self.ungapped_sites = []
for n in xrange( N ):
s, g, rev_comp = place_binding_site_in_sequence( self.seqs[n], self.pssm )
self.locations.append( s )
self.has_gap.append( g )
self.rev_comp.append( rev_comp )
# Calculate the pssm that would be generated if the sites were known
gapped_site = self.seqs[n][s:s+K+1]
if rev_comp: gapped_site = reverse_complement( gapped_site )
if g:
ungapped_site = numpy.concatenate(
(
gapped_site[:1+self.pssm.h],
gapped_site[2+self.pssm.h:]
)
)
else:
ungapped_site = gapped_site[:-1]
# print g, self.pssm.h, gapped_site, ungapped_site
assert len( ungapped_site ) == self.pssm.K
self.ungapped_sites.append( ungapped_site )
def dist_of_sites( self ):
"""The distribution of bases at the actual sites in the sequences
This will not be the same as the actual pssm
"""
return dist_from_seqs( self.ungapped_sites )
def __str__( self ):
return (
'Gapped Pssm Model Sample:\n'
'Pssm: %s\n'
'# sequences: %d\n'
'Lengths: %s\n'
'Starts: %s\n'
'Has gap: %s'
) % (
str( self.pssm ),
len( self.seqs ),
','.join( [ str(len(s)) for s in self.seqs ] ),
','.join( [ str(l) for l in self.locations ] ),
','.join( [ str(g) for g in self.has_gap ] ),
)
def dist_from_seqs( seqs ):
if not len( seqs ): return numpy.array((), dtype = numpy.float64)
K = len( seqs[0] )
result = numpy.zeros( (K,4), dtype = numpy.float64 )
for i in xrange( K ):
for s in seqs:
assert s[i] < 4
result[i,s[i]] += 1.0
return result / len( seqs )
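# Usage sketch (Python 2 era code; the flat priors below are hypothetical):
#   sample = ModelSample(phi=[1.0]*4, varphi=[1.0]*4, K=8, N=20, av_length=100)
#   print sample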
|
{"hexsha": "4850e2a79c23b43fe54e13e7ed514345ec6f30a4", "size": 6206, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/biopsy/gapped_pssms/_generate.py", "max_stars_repo_name": "JohnReid/biopsy", "max_stars_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/biopsy/gapped_pssms/_generate.py", "max_issues_repo_name": "JohnReid/biopsy", "max_issues_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/biopsy/gapped_pssms/_generate.py", "max_forks_repo_name": "JohnReid/biopsy", "max_forks_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4215686275, "max_line_length": 86, "alphanum_fraction": 0.5156300354, "include": true, "reason": "import numpy", "num_tokens": 1606}
|
import unittest

import numpy as np

import image_stitching as stit


class TestImageStitching(unittest.TestCase):
    def test_stitching(self):
        # prepare input files
        paths_to_input_file = [
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_1.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_2.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_3.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_4.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_5.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_6.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_7.mp4',
            '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_8.mp4'
        ]
        path_vector = stit.StringVector()
        for path in paths_to_input_file:
            path_vector.push_back(path)
        # initialize the multi-stream provider and the stitcher
        provider = stit.MultipleStreamProvider(
            path_vector, 'gst-libav', 'h264', '720p', 25)
        stitcher = stit.Stitcher(len(paths_to_input_file), stit.Size(1280, 720))
        # read one frame from every stream and run the calibration
        frame = stit.MatVector()
        result, result_mask = stit.Mat(), stit.Mat()
        if provider.isOpened():
            provider.read(frame)
            stitcher.calibrate(frame, result, result_mask)
            self.assertFalse(result.empty())

    def test_stream_provider(self):
        # path_to_input_file = 'rtsp://184.72.239.149/vod/mp4:BigBuckBunny_115k.mov'
        path_to_input_file = '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_1.mp4'
        provider = stit.StreamProvider(
            path_to_input_file, 'gst-libav', 'h264', '720p', 25)
        self.assertTrue(provider.isOpened())
        frame = stit.Mat()
        if provider.isOpened():
            provider.read(frame)
            self.assertFalse(frame.empty())

    def test_stream_writer(self):
        writer = stit.StreamWriter(
            "rtmp://192.168.6.3/live/test", "gst-nvidia", "h264",
            "720p", 25, stit.Size(1280, 720))
        self.assertTrue(writer.isOpened())
        # the written frame should match the size declared to the writer
        # (rows x cols x channels, i.e. 720 x 1280 x 3)
        frame = stit.Mat.from_array(
            np.zeros([720, 1280, 3], dtype=np.uint8))
        if writer.isOpened():
            writer.write(frame)
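
# A hedged convenience sketch (not part of the original tests): it builds the
# StringVector from a glob pattern instead of the hardcoded path list, using
# only the StringVector/push_back API exercised above; the directory layout is
# an assumption taken from test_stitching.
import glob

def make_path_vector(pattern):
    vector = stit.StringVector()
    for path in sorted(glob.glob(pattern)):
        vector.push_back(path)
    return vector

# example (hypothetical pattern):
#   paths = make_path_vector(
#       '/home/yuthon/Workspace/image-stitching/assets/data/test2/test_cam_*.mp4')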


if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "2d80b318e9a9cac09174082aba3087c4a13a2f6e", "size": 2456, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_python_bindings.py", "max_stars_repo_name": "corenel/image-stitching", "max_stars_repo_head_hexsha": "844af322999f47868d71d250b77d9ca0bc8b143f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_python_bindings.py", "max_issues_repo_name": "corenel/image-stitching", "max_issues_repo_head_hexsha": "844af322999f47868d71d250b77d9ca0bc8b143f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_python_bindings.py", "max_forks_repo_name": "corenel/image-stitching", "max_forks_repo_head_hexsha": "844af322999f47868d71d250b77d9ca0bc8b143f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6129032258, "max_line_length": 102, "alphanum_fraction": 0.6404723127, "include": true, "reason": "import numpy", "num_tokens": 619}
|
% ******************************* Thesis Appendix A ****************************
\chapter{Source Code}
Some of our implementations are written from scratch, while others are adapted from existing open-source projects. We have uploaded the related code to GitHub under the personal account \url{https://github.com/SeleneLI}, where it can be conveniently downloaded and explored. A brief introduction to each repository follows.
\section{LISP Stability and Consistency analyzer}
\url{https://github.com/SeleneLI/LispMdsAnalyzer}
This project was written entirely from scratch to analyze the stability and consistency of the LISP mapping system.
\section{LISP-Views}
\url{https://github.com/SeleneLI/LISP-Views}
This project contains the original code for the LISP-Views architecture.
\section{LISP measurements on Atlas}
\url{https://github.com/SeleneLI/Atlas}
This project is used to conduct the experiments on RIPE Atlas and to analyze the ping/traceroute results for LISP interworking performance.
\section{LISP implementation on ns-3}
\url{https://github.com/SeleneLI/LISP_ns-3}
This project implements LISP mobility extensions on top of an existing LISP simulator for ns-3. It also provides the scripts used to evaluate LISP mobility performance.
|
{"hexsha": "ef9e77fd5f28feed2752bdb939eec63b8a704c96", "size": 1256, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Appendix1/appendix1.tex", "max_stars_repo_name": "SeleneLI/YueLI_thesis", "max_stars_repo_head_hexsha": "f2ae3525afe1e4f5be42daca2e932addbc66e00d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Appendix1/appendix1.tex", "max_issues_repo_name": "SeleneLI/YueLI_thesis", "max_issues_repo_head_hexsha": "f2ae3525afe1e4f5be42daca2e932addbc66e00d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Appendix1/appendix1.tex", "max_forks_repo_name": "SeleneLI/YueLI_thesis", "max_forks_repo_head_hexsha": "f2ae3525afe1e4f5be42daca2e932addbc66e00d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0606060606, "max_line_length": 327, "alphanum_fraction": 0.7611464968, "num_tokens": 272}
|
module test_julia_int
using Test
@test UInt8(0b11111111) == 0xff
@test UInt8(0b01111111) == 0x7f
@test UInt8(0b00111111) == 0x3f
@test UInt8(0b00011111) == 0x1f
@test UInt8(0b00001111) == 0x0f
@test UInt8(0b00000111) == 0x07
@test UInt8(0b00000011) == 0x03
@test UInt8(0b00000001) == 0x01
@test UInt8(0b00000000) == 0x00
@test trailing_ones(UInt8(0b11111111)) == 8
@test trailing_ones(UInt8(0b01111111)) == 7
@test trailing_ones(UInt8(0b00000001)) == 1
@test trailing_zeros(UInt8(0b00000000)) == 8
@test trailing_zeros(UInt8(0b10000000)) == 7
@test trailing_zeros(UInt8(0b11111110)) == 1
@test leading_ones(UInt8(0b11111111)) == 8
@test leading_ones(UInt8(0b11111110)) == 7
@test leading_ones(UInt8(0b10000000)) == 1
@test leading_zeros(UInt8(0b00000000)) == 8
@test leading_zeros(UInt8(0b00000001)) == 7
@test leading_zeros(UInt8(0b01111111)) == 1
@test widen(Int64) === Int128
@test 1 isa Int64
@test typeof(widen(1)) === Int128
@test signed(UInt64) === Int64
@test unsigned(Int64) === UInt64
@test signbit(1) === false
@test signbit(-1)
bits(::Int64) = 64
bits(::Int32) = 32
@test bits(1) == 64
@test bits(Int32(1)) == 32
end # module test_julia_int
module test_julia_integer
using Test
f(n::Integer) = (:integer, n)
@test f(1) == (:integer, 1)
f(n::Int) = (:int, n)
@test f(1) == (:int, 1)
@test f(true) == (:integer, true)
end # module test_julia_integer
module test_julia_bswap
using Test
@test bswap(0x3039) == 0x3930
@test (bswap ∘ bswap)(0x3039) == 0x3039
end # module test_julia_bswap
|
{"hexsha": "259c8212828f11ad0c41838ad74c7d9b596198a6", "size": 1515, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/julia/int.jl", "max_stars_repo_name": "wookay/TestJulia07.jl", "max_stars_repo_head_hexsha": "17f139763d96e456fdb4b59fbb7964273523cb00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/julia/int.jl", "max_issues_repo_name": "wookay/TestJulia07.jl", "max_issues_repo_head_hexsha": "17f139763d96e456fdb4b59fbb7964273523cb00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/julia/int.jl", "max_forks_repo_name": "wookay/TestJulia07.jl", "max_forks_repo_head_hexsha": "17f139763d96e456fdb4b59fbb7964273523cb00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.9565217391, "max_line_length": 44, "alphanum_fraction": 0.702970297, "num_tokens": 589}
|